From: Michel Jaouen
Date: Thu, 29 Sep 2011 15:17:27 +0000 (+0200)
Subject: armv7a, cortex_a: add L1, L2 cache support, VA to PA support
X-Git-Tag: v0.6.0-rc1~567
X-Git-Url: https://git.sur5r.net/?a=commitdiff_plain;h=00ded4eb012006da1f56c0ba39af09cc4a66db07;p=openocd

armv7a, cortex_a: add L1, L2 cache support, VA to PA support
---

diff --git a/src/target/arm_dpm.c b/src/target/arm_dpm.c
index 012316bf..dfd3f275 100644
--- a/src/target/arm_dpm.c
+++ b/src/target/arm_dpm.c
@@ -109,7 +109,7 @@ static int dpm_mcr(struct target *target, int cpnum,
 /* Toggles between recorded core mode (USR, SVC, etc) and a temporary one.
  * Routines *must* restore the original mode before returning!!
  */
-static int dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode)
+int dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode)
 {
     int retval;
     uint32_t cpsr;
diff --git a/src/target/arm_dpm.h b/src/target/arm_dpm.h
index b20184c8..ba2e155c 100644
--- a/src/target/arm_dpm.h
+++ b/src/target/arm_dpm.h
@@ -133,6 +133,9 @@ int arm_dpm_setup(struct arm_dpm *dpm);
 int arm_dpm_initialize(struct arm_dpm *dpm);
 
 int arm_dpm_read_current_registers(struct arm_dpm *);
+int dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode);
+
+
 int arm_dpm_write_dirty_registers(struct arm_dpm *, bool bpwp);
 
 void arm_dpm_report_wfar(struct arm_dpm *, uint32_t wfar);
diff --git a/src/target/armv7a.c b/src/target/armv7a.c
index 151deb41..e0d08827 100644
--- a/src/target/armv7a.c
+++ b/src/target/armv7a.c
@@ -1,6 +1,8 @@
 /***************************************************************************
  *   Copyright (C) 2009 by David Brownell                                  *
  *                                                                         *
+ *   Copyright (C) ST-Ericsson SA 2011 michel.jaouen@stericsson.com       *
+ *                                                                         *
  *   This program is free software; you can redistribute it and/or modify *
  *   it under the terms of the GNU General Public License as published by *
  *   the Free Software Foundation; either version 2 of the License, or    *
@@ -34,7 +36,8 @@
 #include
 #include "arm_opcodes.h"
-
+#include "target.h"
+#include "target_type.h"
 
 static void armv7a_show_fault_registers(struct target *target)
 {
@@ -84,6 +87,661 @@ done:
     /* (void) */ dpm->finish(dpm);
 }
 
+int armv7a_read_ttbcr(struct target *target)
+{
+    struct armv7a_common *armv7a = target_to_armv7a(target);
+    struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
+    uint32_t ttbcr;
+    int retval = dpm->prepare(dpm);
+    if (retval != ERROR_OK)
+        goto done;
+    /* MRC p15,0,<Rt>,c2,c0,2 ; read CP15 Translation Table Base Control Register */
+    retval = dpm->instr_read_data_r0(dpm,
+            ARMV4_5_MRC(15, 0, 0, 2, 0, 2),
+            &ttbcr);
+    if (retval != ERROR_OK)
+        goto done;
+    armv7a->armv7a_mmu.ttbr1_used = ((ttbcr & 0x7) != 0) ? 1 : 0;
+    armv7a->armv7a_mmu.ttbr0_mask = 7 << (32 - (ttbcr & 0x7));
+#if 0
+    LOG_INFO("ttbr1 %s, ttbr0_mask %x",
+            armv7a->armv7a_mmu.ttbr1_used ? "used" : "not used",
+            armv7a->armv7a_mmu.ttbr0_mask);
+#endif
+    if (armv7a->armv7a_mmu.ttbr1_used == 1)
+    {
+        LOG_INFO("SVC access above %x",
+                (0xffffffff & armv7a->armv7a_mmu.ttbr0_mask));
+        armv7a->armv7a_mmu.os_border = 0xffffffff & armv7a->armv7a_mmu.ttbr0_mask;
+    }
+    else
+    {
+        /* FIXME: default is the hard-coded Linux kernel/user border */
+        armv7a->armv7a_mmu.os_border = 0xc0000000;
+    }
+done:
+    dpm->finish(dpm);
+    return retval;
+}
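
The TTBCR.N arithmetic above decides which translation table base register
applies to a given virtual address. A standalone sketch of the split, not
OpenOCD code, with a made-up example value:

    #include <stdint.h>
    #include <stdio.h>

    /* With N = TTBCR[2:0] (short-descriptor format), TTBR0 translates
     * virtual addresses below 2^(32-N); TTBR1 covers everything above. */
    static uint32_t ttbr0_limit(uint32_t ttbcr)
    {
        uint32_t n = ttbcr & 0x7;
        return n ? (1u << (32 - n)) : 0;  /* 0: TTBR0 covers the whole space */
    }

    int main(void)
    {
        uint32_t ttbcr = 0x2;   /* made-up example: N = 2 */
        printf("TTBR0 below 0x%08x, TTBR1 at or above\n", ttbr0_limit(ttbcr));
        return 0;
    }
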
"used":"not used", + armv7a->armv7a_mmu.ttbr0_mask); +#endif + if (armv7a->armv7a_mmu.ttbr1_used == 1) + { + LOG_INFO("SVC access above %x", + (0xffffffff & armv7a->armv7a_mmu.ttbr0_mask)); + armv7a->armv7a_mmu.os_border = 0xffffffff & armv7a->armv7a_mmu.ttbr0_mask; + } + else + { + /* fix me , default is hard coded LINUX border */ + armv7a->armv7a_mmu.os_border = 0xc0000000; + } +done: + dpm->finish(dpm); + return retval; +} + + +/* method adapted to cortex A : reused arm v4 v5 method*/ +int armv7a_mmu_translate_va(struct target *target, uint32_t va, uint32_t *val) +{ + uint32_t first_lvl_descriptor = 0x0; + uint32_t second_lvl_descriptor = 0x0; + int retval; + uint32_t cb; + struct armv7a_common *armv7a = target_to_armv7a(target); + struct arm_dpm *dpm = armv7a->armv4_5_common.dpm; + uint32_t ttb = 0; /* default ttb0 */ + if (armv7a->armv7a_mmu.ttbr1_used == -1) armv7a_read_ttbcr(target); + if ((armv7a->armv7a_mmu.ttbr1_used) && + (va > (0xffffffff & armv7a->armv7a_mmu.ttbr0_mask))) + { + /* select ttb 1 */ + ttb = 1; + } + retval = dpm->prepare(dpm); + if (retval != ERROR_OK) + goto done; + + /* MRC p15,0,,c2,c0,ttb */ + retval = dpm->instr_read_data_r0(dpm, + ARMV4_5_MRC(15, 0, 0, 2, 0, ttb), + &ttb); + retval = armv7a->armv7a_mmu.read_physical_memory(target, + (ttb & 0xffffc000) | ((va & 0xfff00000) >> 18), + 4, 1, (uint8_t*)&first_lvl_descriptor); + if (retval != ERROR_OK) + return retval; + first_lvl_descriptor = target_buffer_get_u32(target, (uint8_t*) + &first_lvl_descriptor); + /* reuse armv4_5 piece of code, specific armv7a changes may come later */ + LOG_DEBUG("1st lvl desc: %8.8" PRIx32 "", first_lvl_descriptor); + + if ((first_lvl_descriptor & 0x3) == 0) + { + LOG_ERROR("Address translation failure"); + return ERROR_TARGET_TRANSLATION_FAULT; + } + + + if ((first_lvl_descriptor & 0x3) == 2) + { + /* section descriptor */ + cb = (first_lvl_descriptor & 0xc) >> 2; + *val = (first_lvl_descriptor & 0xfff00000) | (va & 0x000fffff); + return ERROR_OK; + } + + if ((first_lvl_descriptor & 0x3) == 1) + { + /* coarse page table */ + retval = armv7a->armv7a_mmu.read_physical_memory(target, + (first_lvl_descriptor & 0xfffffc00) | ((va & 0x000ff000) >> 10), + 4, 1, (uint8_t*)&second_lvl_descriptor); + if (retval != ERROR_OK) + return retval; + } + else if ((first_lvl_descriptor & 0x3) == 3) + { + /* fine page table */ + retval = armv7a->armv7a_mmu.read_physical_memory(target, + (first_lvl_descriptor & 0xfffff000) | ((va & 0x000ffc00) >> 8), + 4, 1, (uint8_t*)&second_lvl_descriptor); + if (retval != ERROR_OK) + return retval; + } + + second_lvl_descriptor = target_buffer_get_u32(target, (uint8_t*) + &second_lvl_descriptor); + + LOG_DEBUG("2nd lvl desc: %8.8" PRIx32 "", second_lvl_descriptor); + + if ((second_lvl_descriptor & 0x3) == 0) + { + LOG_ERROR("Address translation failure"); + return ERROR_TARGET_TRANSLATION_FAULT; + } + + /* cacheable/bufferable is always specified in bits 3-2 */ + cb = (second_lvl_descriptor & 0xc) >> 2; + + if ((second_lvl_descriptor & 0x3) == 1) + { + /* large page descriptor */ + *val = (second_lvl_descriptor & 0xffff0000) | (va & 0x0000ffff); + return ERROR_OK; + } + + if ((second_lvl_descriptor & 0x3) == 2) + { + /* small page descriptor */ + *val = (second_lvl_descriptor & 0xfffff000) | (va & 0x00000fff); + return ERROR_OK; + } + + if ((second_lvl_descriptor & 0x3) == 3) + { + *val = (second_lvl_descriptor & 0xfffffc00) | (va & 0x000003ff); + return ERROR_OK; + } + + /* should not happen */ + LOG_ERROR("Address translation failure"); + return 
+
+
+/* ARMv7 method: VA to PA conversion through the CP15 address translation ops */
+int armv7a_mmu_translate_va_pa(struct target *target, uint32_t va,
+        uint32_t *val, int meminfo)
+{
+    int retval = ERROR_FAIL;
+    struct armv7a_common *armv7a = target_to_armv7a(target);
+    struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
+    uint32_t virt = va & ~0xfff;
+    uint32_t NOS, NS, SH, INNER, OUTER;
+    *val = 0xdeadbeef;
+    retval = dpm->prepare(dpm);
+    if (retval != ERROR_OK)
+        goto done;
+    /* the MMU must be enabled in order to get a correct translation;
+     * use the VA to PA CP15 registers for the conversion */
+    retval = dpm->instr_write_data_r0(dpm,
+            ARMV4_5_MCR(15, 0, 0, 7, 8, 0),
+            virt);
+    if (retval != ERROR_OK)
+        goto done;
+    retval = dpm->instr_read_data_r0(dpm,
+            ARMV4_5_MRC(15, 0, 0, 7, 4, 0),
+            val);
+    /* decode the memory attributes */
+    NOS = (*val >> 10) & 1;    /* Not Outer Shareable */
+    NS = (*val >> 9) & 1;      /* Non-Secure */
+    SH = (*val >> 7) & 1;      /* Shareable */
+    INNER = (*val >> 4) & 0x7;
+    OUTER = (*val >> 2) & 0x3;
+
+    if (retval != ERROR_OK)
+        goto done;
+    *val = (*val & ~0xfff) + (va & 0xfff);
+    if (*val == va)
+        LOG_WARNING("virt = phys: MMU disabled?");
+    if (meminfo)
+    {
+        LOG_INFO("%x : %x %s outer shareable, %s secured",
+                va, *val,
+                NOS == 1 ? "not" : "",
+                NS == 1 ? "not" : "");
+        switch (OUTER) {
+            case 0:
+                LOG_INFO("outer: Non-Cacheable");
+                break;
+            case 1:
+                LOG_INFO("outer: Write-Back, Write-Allocate");
+                break;
+            case 2:
+                LOG_INFO("outer: Write-Through, No Write-Allocate");
+                break;
+            case 3:
+                LOG_INFO("outer: Write-Back, no Write-Allocate");
+                break;
+        }
+        switch (INNER) {
+            case 0:
+                LOG_INFO("inner: Non-Cacheable");
+                break;
+            case 1:
+                LOG_INFO("inner: Strongly-ordered");
+                break;
+            case 3:
+                LOG_INFO("inner: Device");
+                break;
+            case 5:
+                LOG_INFO("inner: Write-Back, Write-Allocate");
+                break;
+            case 6:
+                LOG_INFO("inner: Write-Through");
+                break;
+            case 7:
+                LOG_INFO("inner: Write-Back, no Write-Allocate");
+                break;
+            default:
+                LOG_INFO("inner: %x ???", INNER);
+        }
+    }
+
+done:
+    dpm->finish(dpm);
+
+    return retval;
+}
+
+static int armv7a_handle_inner_cache_info_command(struct command_context *cmd_ctx,
+        struct armv7a_cache_common *armv7a_cache)
+{
+    if (armv7a_cache->ctype == -1)
+    {
+        command_print(cmd_ctx, "cache not yet identified");
+        return ERROR_OK;
+    }
+
+    command_print(cmd_ctx,
+            "D-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
+            armv7a_cache->d_u_size.linelen,
+            armv7a_cache->d_u_size.associativity,
+            armv7a_cache->d_u_size.nsets,
+            armv7a_cache->d_u_size.cachesize);
+
+    command_print(cmd_ctx,
+            "I-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
+            armv7a_cache->i_size.linelen,
+            armv7a_cache->i_size.associativity,
+            armv7a_cache->i_size.nsets,
+            armv7a_cache->i_size.cachesize);
+
+    return ERROR_OK;
+}
+
+static int _armv7a_flush_all_data(struct target *target)
+{
+    struct armv7a_common *armv7a = target_to_armv7a(target);
+    struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
+    struct armv7a_cachesize *d_u_size =
+        &(armv7a->armv7a_mmu.armv7a_cache.d_u_size);
+    int32_t c_way, c_index = d_u_size->index;
+    int retval;
+    /* check that the data cache was enabled when the target halted */
+    if (!armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled)
+    {
+        LOG_INFO("flush not performed: cache not enabled at target halt");
+        return ERROR_OK;
+    }
+    retval = dpm->prepare(dpm);
+    if (retval != ERROR_OK)
+        goto done;
+    do {
+        c_way = d_u_size->way;
+        do {
+            uint32_t value = (c_index << d_u_size->index_shift)
+                | (c_way << d_u_size->way_shift);
+            /* DCCISW: data cache clean and invalidate by set/way */
+            /* LOG_INFO("%d %d %x", c_way, c_index, value); */
+            retval = dpm->instr_write_data_r0(dpm,
+                    ARMV4_5_MCR(15, 0, 0, 7, 14, 2),
+                    value);
+            if (retval != ERROR_OK)
+                goto done;
+            c_way -= 1;
+        } while (c_way >= 0);
+        c_index -= 1;
+    } while (c_index >= 0);
+    return retval;
+done:
+    LOG_ERROR("cache flush failed");
+    dpm->finish(dpm);
+    return retval;
+}
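
The DCCISW operand built in the inner loop packs the set and way indexes at
shifts derived from the cache geometry. A standalone sketch with illustrative
Cortex-A9-like numbers (32 KB, 4-way, 32-byte lines; values are assumptions,
not read from hardware):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 32-byte line -> set index starts at bit 5;
         * 4 ways -> way field occupies bits [31:30], so way_shift = 30 */
        uint32_t index_shift = 5, way_shift = 30;
        uint32_t nsets = 256, nways = 4;

        for (uint32_t way = 0; way < nways; way++)
            for (uint32_t set = 0; set < nsets; set++) {
                uint32_t value = (set << index_shift) | (way << way_shift);
                /* would be issued as: MCR p15, 0, <value>, c7, c14, 2 */
                (void)value;
            }
        printf("last operand: 0x%08x\n",
                ((nsets - 1) << index_shift) | ((nways - 1) << way_shift));
        return 0;
    }
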
%x",c_way,c_index,value); + retval = dpm->instr_write_data_r0(dpm, + ARMV4_5_MCR(15, 0, 0, 7, 14, 2), + value); + if (retval!= ERROR_OK) goto done; + c_way -= 1; + } while (c_way >=0); + c_index -= 1; + } while (c_index >=0); + return retval; +done: + LOG_ERROR("flushed failed"); + dpm->finish(dpm); + return retval; +} + +static int armv7a_flush_all_data( struct target * target) +{ + int retval = ERROR_FAIL; + /* check that armv7a_cache is correctly identify */ + struct armv7a_common *armv7a = target_to_armv7a(target); + if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1) + { + LOG_ERROR("trying to flush un-identified cache"); + return retval; + } + + if (target->smp) + { + /* look if all the other target have been flushed in order to flush level + * 2 */ + struct target_list *head; + struct target *curr; + head = target->head; + while(head != (struct target_list*)NULL) + { + curr = head->target; + if ((curr->state == TARGET_HALTED)) + { LOG_INFO("Wait flushing data l1 on core %d",curr->coreid); + retval = _armv7a_flush_all_data(curr); + } + head = head->next; + } + } + else retval = _armv7a_flush_all_data(target); + return retval; +} + + +/* L2 is not specific to armv7a a specific file is needed */ +static int armv7a_l2x_flush_all_data(struct target * target) +{ + +#define L2X0_CLEAN_INV_WAY 0x7FC + int retval = ERROR_FAIL; + struct armv7a_common *armv7a = target_to_armv7a(target); + struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache*) + (armv7a->armv7a_mmu.armv7a_cache.l2_cache); + uint32_t base = l2x_cache->base; + uint32_t l2_way = l2x_cache->way; + uint32_t l2_way_val = (1<type->write_phys_memory(target, + (uint32_t)(base+(uint32_t)L2X0_CLEAN_INV_WAY), + (uint32_t)4, + (uint32_t)1, + (uint8_t*)&l2_way_val); + return retval; +} + +static int armv7a_handle_l2x_cache_info_command(struct command_context *cmd_ctx, + struct armv7a_cache_common *armv7a_cache) +{ + + struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache*) + (armv7a_cache->l2_cache); + + if (armv7a_cache->ctype == -1) + { + command_print(cmd_ctx, "cache not yet identified"); + return ERROR_OK; + } + + command_print(cmd_ctx, + "L1 D-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes", + armv7a_cache->d_u_size.linelen, + armv7a_cache->d_u_size.associativity, + armv7a_cache->d_u_size.nsets, + armv7a_cache->d_u_size.cachesize); + + command_print(cmd_ctx, + "L1 I-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes", + armv7a_cache->i_size.linelen, + armv7a_cache->i_size.associativity, + armv7a_cache->i_size.nsets, + armv7a_cache->i_size.cachesize); + command_print(cmd_ctx, "L2 unified cache Base Address 0x%x, %d ways", + l2x_cache->base, l2x_cache->way); + + + return ERROR_OK; +} + + +int armv7a_l2x_cache_init(struct target *target, uint32_t base, uint32_t way) +{ + struct armv7a_l2x_cache *l2x_cache; + struct target_list *head = target->head; + struct target *curr; + + struct armv7a_common *armv7a = target_to_armv7a(target); + if (armv7a == NULL) + LOG_ERROR("not an armv7a target"); + l2x_cache = calloc(1, sizeof(struct armv7a_l2x_cache)); + l2x_cache->base = base; + l2x_cache->way = way; + /*LOG_INFO("cache l2 initialized base %x way %d", + l2x_cache->base,l2x_cache->way);*/ + if (armv7a->armv7a_mmu.armv7a_cache.l2_cache) + { + LOG_INFO("cache l2 already initialized\n"); + } + armv7a->armv7a_mmu.armv7a_cache.l2_cache = (void*) l2x_cache; + /* initialize l1 / l2x cache function */ + armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache + = armv7a_l2x_flush_all_data; + 
+
+COMMAND_HANDLER(handle_cache_l2x)
+{
+    struct target *target = get_current_target(CMD_CTX);
+    uint32_t base, way;
+    switch (CMD_ARGC) {
+        case 0:
+            return ERROR_COMMAND_SYNTAX_ERROR;
+        case 2:
+            COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], base);
+            COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], way);
+            armv7a_l2x_cache_init(target, base, way);
+            break;
+        default:
+            return ERROR_COMMAND_SYNTAX_ERROR;
+    }
+    return ERROR_OK;
+}
+
+
+int armv7a_handle_cache_info_command(struct command_context *cmd_ctx,
+        struct armv7a_cache_common *armv7a_cache)
+{
+    if (armv7a_cache->ctype == -1)
+    {
+        command_print(cmd_ctx, "cache not yet identified");
+        return ERROR_OK;
+    }
+
+    if (armv7a_cache->display_cache_info)
+        armv7a_cache->display_cache_info(cmd_ctx, armv7a_cache);
+    return ERROR_OK;
+}
+
+
+/* retrieve the core id and cluster id */
+int armv7a_read_mpidr(struct target *target)
+{
+    int retval = ERROR_FAIL;
+    struct armv7a_common *armv7a = target_to_armv7a(target);
+    struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
+    uint32_t mpidr;
+    retval = dpm->prepare(dpm);
+    if (retval != ERROR_OK)
+        goto done;
+    /* MRC p15,0,<Rt>,c0,c0,5 ; read the Multiprocessor Affinity Register */
+    retval = dpm->instr_read_data_r0(dpm,
+            ARMV4_5_MRC(15, 0, 0, 0, 0, 5),
+            &mpidr);
+    if (retval != ERROR_OK)
+        goto done;
+    if (mpidr & 1 << 31)
+    {
+        armv7a->multi_processor_system = (mpidr >> 30) & 1;
+        armv7a->cluster_id = (mpidr >> 8) & 0xf;
+        armv7a->cpu_id = mpidr & 0x3;
+        LOG_INFO("%s cluster %x core %x %s", target->cmd_name,
+                armv7a->cluster_id,
+                armv7a->cpu_id,
+                armv7a->multi_processor_system == 0 ? "multi core" : "mono core");
+    }
+    else
+        LOG_ERROR("MPIDR not in multiprocessor format");
+
+done:
+    dpm->finish(dpm);
+    return retval;
+}
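
The MPIDR fields decoded above, restated as a standalone example (the register
value is made up): bit 31 flags the multiprocessor format, bit 30 is the U
(uniprocessor) bit, bits [11:8] carry the cluster id and the low bits the core
id.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t mpidr = 0x80000101;    /* made-up: cluster 1, core 1 */
        if (mpidr & (1u << 31)) {
            printf("uniprocessor: %u\n", (mpidr >> 30) & 1);
            printf("cluster %u, core %u\n", (mpidr >> 8) & 0xf, mpidr & 0x3);
        }
        return 0;
    }
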
"multi core": "mono core"); + + } + else + LOG_ERROR("mpdir not in multiprocessor format"); + +done: + dpm->finish(dpm); + return retval; + + +} + + +int armv7a_identify_cache(struct target *target) +{ + /* read cache descriptor */ + int retval = ERROR_FAIL; + struct armv7a_common *armv7a = target_to_armv7a(target); + struct arm_dpm *dpm = armv7a->armv4_5_common.dpm; + uint32_t cache_selected,clidr; + uint32_t cache_i_reg, cache_d_reg; + struct armv7a_cache_common *cache = &(armv7a->armv7a_mmu.armv7a_cache); + armv7a_read_ttbcr(target); + retval = dpm->prepare(dpm); + + if (retval!=ERROR_OK) goto done; + /* retrieve CLIDR */ + /* mrc p15, 1, r0, c0, c0, 1 @ read clidr */ + retval = dpm->instr_read_data_r0(dpm, + ARMV4_5_MRC(15, 1, 0, 0, 0, 1), + &clidr); + if (retval!=ERROR_OK) goto done; + clidr = (clidr & 0x7000000) >> 23; + LOG_INFO("number of cache level %d",clidr /2 ); + if ((clidr /2) > 1) + { + // FIXME not supported present in cortex A8 and later + // in cortex A7, A15 + LOG_ERROR("cache l2 present :not supported"); + } + /* retrieve selected cache */ + /* MRC p15, 2,, c0, c0, 0; Read CSSELR */ + retval = dpm->instr_read_data_r0(dpm, + ARMV4_5_MRC(15, 2, 0, 0, 0, 0), + &cache_selected); + if (retval!=ERROR_OK) goto done; + + retval = armv7a->armv4_5_common.mrc(target, 15, + 2, 0, /* op1, op2 */ + 0, 0, /* CRn, CRm */ + &cache_selected); + /* select instruction cache*/ + /* MCR p15, 2,, c0, c0, 0; Write CSSELR */ + /* [0] : 1 instruction cache selection , 0 data cache selection */ + retval = dpm->instr_write_data_r0(dpm, + ARMV4_5_MRC(15, 2, 0, 0, 0, 0), + 1); + if (retval!=ERROR_OK) goto done; + + /* read CCSIDR*/ + /* MRC P15,1,,C0, C0,0 ;on cortex A9 read CCSIDR */ + /* [2:0] line size 001 eight word per line */ + /* [27:13] NumSet 0x7f 16KB, 0xff 32Kbytes, 0x1ff 64Kbytes */ + retval = dpm->instr_read_data_r0(dpm, + ARMV4_5_MRC(15, 1, 0, 0, 0, 0), + &cache_i_reg); + if (retval!=ERROR_OK) goto done; + + /* select data cache*/ + retval = dpm->instr_write_data_r0(dpm, + ARMV4_5_MRC(15, 2, 0, 0, 0, 0), + 0); + if (retval!=ERROR_OK) goto done; + + retval = dpm->instr_read_data_r0(dpm, + ARMV4_5_MRC(15, 1, 0, 0, 0, 0), + &cache_d_reg); + if (retval!=ERROR_OK) goto done; + + /* restore selected cache */ + dpm->instr_write_data_r0(dpm, + ARMV4_5_MRC(15, 2, 0, 0, 0, 0), + cache_selected); + + if (retval != ERROR_OK) goto done; + dpm->finish(dpm); + + // put fake type + cache->d_u_size.linelen = 16 << (cache_d_reg & 0x7); + cache->d_u_size.cachesize = (((cache_d_reg >> 13) & 0x7fff)+1)/8; + cache->d_u_size.nsets = (cache_d_reg >> 13) & 0x7fff; + cache->d_u_size.associativity = ((cache_d_reg >> 3) & 0x3ff) +1; + /* compute info for set way operation on cache */ + cache->d_u_size.index_shift = (cache_d_reg & 0x7) + 4; + cache->d_u_size.index = (cache_d_reg >> 13) & 0x7fff; + cache->d_u_size.way = ((cache_d_reg >> 3) & 0x3ff); + cache->d_u_size.way_shift = cache->d_u_size.way+1; + { + int i=0; + while(((cache->d_u_size.way_shift >> i) & 1)!=1) i++; + cache->d_u_size.way_shift = 32-i; + } + /*LOG_INFO("data cache index %d << %d, way %d << %d", + cache->d_u_size.index, cache->d_u_size.index_shift, + cache->d_u_size.way, cache->d_u_size.way_shift); + + LOG_INFO("data cache %d bytes %d KBytes asso %d ways", + cache->d_u_size.linelen, + cache->d_u_size.cachesize, + cache->d_u_size.associativity + );*/ + cache->i_size.linelen = 16 << (cache_i_reg & 0x7); + cache->i_size.associativity = ((cache_i_reg >> 3) & 0x3ff) +1; + cache->i_size.nsets = (cache_i_reg >> 13) & 0x7fff; + cache->i_size.cachesize = 
+
+
+int armv7a_init_arch_info(struct target *target, struct armv7a_common *armv7a)
+{
+    struct arm *armv4_5 = &armv7a->armv4_5_common;
+    armv4_5->arch_info = armv7a;
+    target->arch_info = &armv7a->armv4_5_common;
+    /* target is useful in all ARMv4/v5-compatible functions */
+    armv7a->armv4_5_common.target = target;
+    armv7a->armv4_5_common.common_magic = ARM_COMMON_MAGIC;
+    armv7a->common_magic = ARMV7_COMMON_MAGIC;
+    armv7a->armv7a_mmu.armv7a_cache.l2_cache = NULL;
+    armv7a->armv7a_mmu.armv7a_cache.ctype = -1;
+    armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache = NULL;
+    armv7a->armv7a_mmu.armv7a_cache.display_cache_info = NULL;
+    return ERROR_OK;
+}
+
 int armv7a_arch_state(struct target *target)
 {
     static const char *state[] =
@@ -103,9 +761,9 @@ int armv7a_arch_state(struct target *target)
     arm_arch_state(target);
 
     LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s",
-        state[armv7a->armv4_5_mmu.mmu_enabled],
-        state[armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
-        state[armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled]);
+        state[armv7a->armv7a_mmu.mmu_enabled],
+        state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
+        state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
 
     if (armv4_5->core_mode == ARM_MODE_ABT)
         armv7a_show_fault_registers(target);
@@ -116,11 +774,37 @@ int armv7a_arch_state(struct target *target)
     return ERROR_OK;
 }
 
+static const struct command_registration l2_cache_commands[] = {
+    {
+        .name = "l2x",
+        .handler = handle_cache_l2x,
+        .mode = COMMAND_EXEC,
+        .help = "configure l2x cache",
+        .usage = "[base_addr] [number_of_way]",
+    },
+    COMMAND_REGISTRATION_DONE
+};
+
+const struct command_registration l2x_cache_command_handlers[] = {
+    {
+        .name = "cache_config",
+        .mode = COMMAND_EXEC,
+        .help = "cache configuration for a target",
+        .chain = l2_cache_commands,
+    },
+    COMMAND_REGISTRATION_DONE
+};
+
 const struct command_registration armv7a_command_handlers[] = {
     {
         .chain = dap_command_handlers,
     },
+    {
+        .chain = l2x_cache_command_handlers,
+    },
     COMMAND_REGISTRATION_DONE
 };
 
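
With the registration above, the new command hangs off each target as
`cache_config l2x <base_addr> <number_of_way>`. A hypothetical config-script
invocation; the PL310 base address and way count below are placeholders, and
board-specific values must be used:

    # after target creation and, for SMP clusters, after the smp declaration
    mycpu.cpu cache_config l2x 0xfff12000 8
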
diff --git a/src/target/armv7a.h b/src/target/armv7a.h
index f9324563..dde1f23e 100644
--- a/src/target/armv7a.h
+++ b/src/target/armv7a.h
@@ -43,6 +43,56 @@ enum
 #define V2POWPW 5
 #define V2POWUR 6
 #define V2POWUW 7
+
+/* L210/L220 cache controller support */
+struct armv7a_l2x_cache {
+    uint32_t base;
+    uint32_t way;
+};
+
+struct armv7a_cachesize
+{
+    uint32_t level_num;
+    /* cache dimensioning */
+    uint32_t linelen;
+    uint32_t associativity;
+    uint32_t nsets;
+    uint32_t cachesize;
+    /* info for set/way operations on the cache */
+    uint32_t index;
+    uint32_t index_shift;
+    uint32_t way;
+    uint32_t way_shift;
+};
+
+
+struct armv7a_cache_common
+{
+    int ctype;
+    struct armv7a_cachesize d_u_size;    /* data cache */
+    struct armv7a_cachesize i_size;      /* instruction cache */
+    int i_cache_enabled;
+    int d_u_cache_enabled;
+    /* external unified L2 cache, if any */
+    void *l2_cache;
+    int (*flush_all_data_cache)(struct target *target);
+    int (*display_cache_info)(struct command_context *cmd_ctx,
+            struct armv7a_cache_common *armv7a_cache);
+};
+
+
+struct armv7a_mmu_common
+{
+    /* fields describing how the MMU is set up on this target */
+    int32_t ttbr1_used;    /* -1: not initialized, 0: no TTBR1, 1: TTBR1 used */
+    uint32_t ttbr0_mask;   /* mask applied to select TTBR0 */
+    uint32_t os_border;
+
+    int (*read_physical_memory)(struct target *target, uint32_t address,
+            uint32_t size, uint32_t count, uint8_t *buffer);
+    struct armv7a_cache_common armv7a_cache;
+    uint32_t mmu_enabled;
+};
+
 
 struct armv7a_common
 {
@@ -57,9 +107,13 @@ struct armv7a_common
     uint32_t debug_base;
     uint8_t debug_ap;
     uint8_t memory_ap;
+    /* MPIDR-derived information */
+    uint8_t multi_processor_system;
+    uint8_t cluster_id;
+    uint8_t cpu_id;
 
-    /* Cache and Memory Management Unit */
-    struct armv4_5_mmu_common armv4_5_mmu;
+    /* ARMv7 cache and Memory Management Unit, compatible with v4/v5 */
+    struct armv7a_mmu_common armv7a_mmu;
 
     int (*examine_debug_reason)(struct target *target);
     int (*post_debug_entry)(struct target *target);
@@ -112,9 +166,16 @@ target_to_armv7a(struct target *target)
 #define CPUDBG_AUTHSTATUS 0xFB8
 
 int armv7a_arch_state(struct target *target);
+int armv7a_identify_cache(struct target *target);
 struct reg_cache *armv7a_build_reg_cache(struct target *target,
         struct armv7a_common *armv7a_common);
 int armv7a_init_arch_info(struct target *target, struct armv7a_common *armv7a);
+int armv7a_mmu_translate_va_pa(struct target *target, uint32_t va,
+        uint32_t *val, int meminfo);
+int armv7a_mmu_translate_va(struct target *target, uint32_t va, uint32_t *val);
+
+int armv7a_handle_cache_info_command(struct command_context *cmd_ctx,
+        struct armv7a_cache_common *armv7a_cache);
 
 extern const struct command_registration armv7a_command_handlers[];
 
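
The new hooks in armv7a_mmu_common/armv7a_cache_common are meant to be filled
by the core driver; the cortex_a.c changes below wire read_physical_memory and
leave the cache callbacks to armv7a_identify_cache() or armv7a_l2x_cache_init().
A minimal sketch of that contract, assuming a hypothetical my_read_phys()
with the expected signature:

    #include "armv7a.h"

    /* hypothetical physical-memory reader */
    static int my_read_phys(struct target *target, uint32_t address,
            uint32_t size, uint32_t count, uint8_t *buffer);

    static void wire_mmu_hooks(struct armv7a_common *armv7a)
    {
        armv7a->armv7a_mmu.read_physical_memory = my_read_phys;
        /* leave these NULL: armv7a_identify_cache() installs the L1
         * versions, armv7a_l2x_cache_init() overrides them for an L2X0 */
        armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache = NULL;
        armv7a->armv7a_mmu.armv7a_cache.display_cache_info = NULL;
        armv7a->armv7a_mmu.armv7a_cache.ctype = -1;  /* not yet identified */
    }
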
diff --git a/src/target/cortex_a.c b/src/target/cortex_a.c
index 39f1b9e1..7547f17f 100755
--- a/src/target/cortex_a.c
+++ b/src/target/cortex_a.c
@@ -66,12 +66,6 @@ static int cortex_a8_dap_write_coreregister_u32(struct target *target,
 static int cortex_a8_mmu(struct target *target, int *enabled);
 static int cortex_a8_virt2phys(struct target *target,
         uint32_t virt, uint32_t *phys);
-static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
-        int d_u_cache, int i_cache);
-static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
-        int d_u_cache, int i_cache);
-static int cortex_a8_get_ttb(struct target *target, uint32_t *result);
-
 
 /*
  * FIXME do topology discovery using the ROM; don't
@@ -82,6 +76,99 @@ static int cortex_a8_get_ttb(struct target *target, uint32_t *result);
 #define swjdp_memoryap 0
 #define swjdp_debugap 1
 
+/* restore cp15_control_reg at resume */
+static int cortex_a8_restore_cp15_control_reg(struct target *target)
+{
+    int retval = ERROR_OK;
+    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
+    struct armv7a_common *armv7a = target_to_armv7a(target);
+
+    if (cortex_a8->cp15_control_reg != cortex_a8->cp15_control_reg_curr)
+    {
+        cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
+        /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg); */
+        retval = armv7a->armv4_5_common.mcr(target, 15,
+                0, 0,    /* op1, op2 */
+                1, 0,    /* CRn, CRm */
+                cortex_a8->cp15_control_reg);
+    }
+    return retval;
+}
+
+/* check the address before a cortex_a8 APB read/write access with the MMU on:
+ * avoids predictable APB data aborts */
+static int cortex_a8_check_address(struct target *target, uint32_t address)
+{
+    struct armv7a_common *armv7a = target_to_armv7a(target);
+    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
+    uint32_t os_border = armv7a->armv7a_mmu.os_border;
+    if ((address < os_border) &&
+            (armv7a->armv4_5_common.core_mode == ARM_MODE_SVC))
+    {
+        LOG_ERROR("%x: access in user space while the target is in supervisor mode",
+                address);
+        return ERROR_FAIL;
+    }
+    if ((address >= os_border) &&
+            (cortex_a8->curr_mode != ARM_MODE_SVC))
+    {
+        dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
+        cortex_a8->curr_mode = ARM_MODE_SVC;
+        LOG_INFO("%x: access in kernel space while the target is not in supervisor mode",
+                address);
+        return ERROR_OK;
+    }
+    if ((address < os_border) &&
+            (cortex_a8->curr_mode == ARM_MODE_SVC))
+    {
+        dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
+        cortex_a8->curr_mode = ARM_MODE_ANY;
+    }
+    return ERROR_OK;
+}
+
+/* modify cp15_control_reg in order to enable or disable the MMU for:
+ * - virt2phys address conversion
+ * - reading or writing memory at a physical or a virtual address */
+static int cortex_a8_mmu_modify(struct target *target, int enable)
+{
+    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
+    struct armv7a_common *armv7a = target_to_armv7a(target);
+    int retval = ERROR_OK;
+    if (enable)
+    {
+        /* the MMU must have been enabled at target halt to enable it here */
+        if (!(cortex_a8->cp15_control_reg & 0x1U))
+        {
+            LOG_ERROR("trying to enable the MMU on a target halted with the MMU disabled");
+            return ERROR_FAIL;
+        }
+        if (!(cortex_a8->cp15_control_reg_curr & 0x1U))
+        {
+            cortex_a8->cp15_control_reg_curr |= 0x1U;
+            retval = armv7a->armv4_5_common.mcr(target, 15,
+                    0, 0,    /* op1, op2 */
+                    1, 0,    /* CRn, CRm */
+                    cortex_a8->cp15_control_reg_curr);
+        }
+    }
+    else
+    {
+        if (cortex_a8->cp15_control_reg_curr & 0x4U)
+        {
+            /* the data cache is active: flush it before turning the MMU off */
+            cortex_a8->cp15_control_reg_curr &= ~0x4U;
+            if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
+                armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
+        }
+        if (cortex_a8->cp15_control_reg_curr & 0x1U)
+        {
+            cortex_a8->cp15_control_reg_curr &= ~0x1U;
+            retval = armv7a->armv4_5_common.mcr(target, 15,
+                    0, 0,    /* op1, op2 */
+                    1, 0,    /* CRn, CRm */
+                    cortex_a8->cp15_control_reg_curr);
+        }
+    }
+    return retval;
+}
+
 /*
  * Cortex-A8 Basic debug access, very low level assumes state is saved
  */
@@ -929,7 +1016,11 @@ static int cortex_a8_internal_restore(struct target *target, int current,
     buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
     armv4_5->pc->dirty = 1;
     armv4_5->pc->valid = 1;
-
+    /* restore the DPM mode recorded at system halt */
+    dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
+    /* call it now, before restoring the context, because it uses CPU
+     * register r0 for restoring the cp15 control register */
+    retval = cortex_a8_restore_cp15_control_reg(target);
     retval = cortex_a8_restore_context(target, handle_breakpoints);
     if (retval != ERROR_OK)
         return retval;
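
The two helpers above define the access policy that the memory paths further
below follow. A condensed sketch of the virtual-address read path; apb_read()
is a hypothetical stand-in for cortex_a8_read_apb_ab_memory():

    #include <stdint.h>

    struct target;    /* opaque here */
    extern int cortex_a8_check_address(struct target *t, uint32_t addr);
    extern int cortex_a8_mmu_modify(struct target *t, int enable);
    extern int apb_read(struct target *t, uint32_t addr,
            uint8_t *buf, uint32_t len);    /* hypothetical */

    int read_virt(struct target *t, uint32_t addr, uint8_t *buf, uint32_t len)
    {
        int retval = cortex_a8_check_address(t, addr); /* may switch to SVC */
        if (retval != 0)
            return retval;
        retval = cortex_a8_mmu_modify(t, 1);           /* ensure MMU is on */
        if (retval != 0)
            return retval;
        return apb_read(t, addr, buf, len);
    }
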
@@ -1147,6 +1238,7 @@ static int cortex_a8_debug_entry(struct target *target)
     /* read Current PSR */
     retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
+    /* store the current cpsr */
     if (retval != ERROR_OK)
         return retval;
 
@@ -1220,32 +1312,21 @@ static int cortex_a8_post_debug_entry(struct target *target)
     if (retval != ERROR_OK)
         return retval;
     LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
+    cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
 
-    if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
+    if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
     {
-        uint32_t cache_type_reg;
-
-        /* MRC p15,0,<Rt>,c0,c0,1 ; read CP15 Cache Type Register */
-        retval = armv7a->armv4_5_common.mrc(target, 15,
-                0, 1,    /* op1, op2 */
-                0, 0,    /* CRn, CRm */
-                &cache_type_reg);
-        if (retval != ERROR_OK)
-            return retval;
-        LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
-
-        /* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A8 */
-        armv4_5_identify_cache(cache_type_reg,
-                &armv7a->armv4_5_mmu.armv4_5_cache);
+        armv7a_identify_cache(target);
     }
 
-    armv7a->armv4_5_mmu.mmu_enabled =
+    armv7a->armv7a_mmu.mmu_enabled =
         (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
-    armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
+    armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
         (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
-    armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
+    armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
         (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
-
+    cortex_a8->curr_mode = armv7a->armv4_5_common.core_mode;
+
     return ERROR_OK;
 }
 
@@ -1990,18 +2071,9 @@ static int cortex_a8_read_phys_memory(struct target *target,
     }
     else
     {
         /* read memory through APB-AP */
-        int enabled = 0;
-
-        retval = cortex_a8_mmu(target, &enabled);
-        if (retval != ERROR_OK)
-            return retval;
-
-        if (enabled)
-        {
-            LOG_WARNING("Reading physical memory through \
-                APB with MMU enabled is not yet implemented");
-            return ERROR_TARGET_FAILURE;
-        }
+        /* disable the MMU */
+        retval = cortex_a8_mmu_modify(target, 0);
+        if (retval != ERROR_OK)
+            return retval;
         retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
     }
 }
@@ -2040,6 +2112,11 @@ static int cortex_a8_read_memory(struct target *target, uint32_t address,
         }
         retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
     } else {
+        retval = cortex_a8_check_address(target, address);
+        if (retval != ERROR_OK)
+            return retval;
+        /* enable the MMU */
+        retval = cortex_a8_mmu_modify(target, 1);
+        if (retval != ERROR_OK)
+            return retval;
         retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
     }
     return retval;
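
The SCTLR bits sampled in cortex_a8_post_debug_entry() above, as a standalone
decode (the register value is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define SCTLR_M (1u << 0)     /* MMU enable */
    #define SCTLR_C (1u << 2)     /* data/unified cache enable */
    #define SCTLR_I (1u << 12)    /* instruction cache enable */

    int main(void)
    {
        uint32_t sctlr = 0x00c5187d;    /* made-up example value */
        printf("MMU %d, D-cache %d, I-cache %d\n",
                !!(sctlr & SCTLR_M), !!(sctlr & SCTLR_C), !!(sctlr & SCTLR_I));
        return 0;
    }
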
@@ -2081,19 +2158,10 @@ static int cortex_a8_write_phys_memory(struct target *target,
     }
     else
     {
         /* write memory through APB-AP */
-        int enabled = 0;
-
-        retval = cortex_a8_mmu(target, &enabled);
+        /* disable the MMU */
+        retval = cortex_a8_mmu_modify(target, 0);
         if (retval != ERROR_OK)
             return retval;
-
-        if (enabled)
-        {
-            LOG_WARNING("Writing physical memory through APB with MMU "
-                "enabled is not yet implemented");
-            return ERROR_TARGET_FAILURE;
-        }
-        return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
+        return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
     }
 }
 
@@ -2117,7 +2185,7 @@ static int cortex_a8_write_phys_memory(struct target *target,
      */
 
     /* invalidate I-Cache */
-    if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
+    if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled)
     {
         /* ICIMVAU - Invalidate Cache single entry
          * with MVA to PoU
@@ -2135,7 +2203,7 @@ static int cortex_a8_write_phys_memory(struct target *target,
     }
 
     /* invalidate D-Cache */
-    if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
+    if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled)
     {
         /* DCIMVAC - Invalidate data Cache line
          * with MVA to PoC
@@ -2191,6 +2259,11 @@ static int cortex_a8_write_memory(struct target *target, uint32_t address,
             count, buffer);
     } else {
+        retval = cortex_a8_check_address(target, address);
+        if (retval != ERROR_OK)
+            return retval;
+        /* enable the MMU */
+        retval = cortex_a8_mmu_modify(target, 1);
+        if (retval != ERROR_OK)
+            return retval;
         retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
     }
     return retval;
@@ -2375,7 +2448,6 @@ static int cortex_a8_init_arch_info(struct target *target,
         struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
 {
     struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
-    struct arm *armv4_5 = &armv7a->armv4_5_common;
     struct adiv5_dap *dap = &armv7a->dap;
 
     armv7a->armv4_5_common.dap = dap;
@@ -2387,7 +2459,6 @@ static int cortex_a8_init_arch_info(struct target *target,
 {
     armv7a->armv4_5_common.dap = dap;
     /* Setup struct cortex_a8_common */
-    armv4_5->arch_info = armv7a;
 
     /* prepare JTAG information for the new target */
     cortex_a8->jtag_info.tap = tap;
@@ -2406,31 +2477,20 @@ static int cortex_a8_init_arch_info(struct target *target,
 
     cortex_a8->fast_reg_read = 0;
 
-    /* Set default value */
-    cortex_a8->current_address_mode = ARM_MODE_ANY;
-
     /* register arch-specific functions */
     armv7a->examine_debug_reason = NULL;
 
     armv7a->post_debug_entry = cortex_a8_post_debug_entry;
 
     armv7a->pre_restore_context = NULL;
 
-    armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
-    armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
-    armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
-    armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
-    armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
-    armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
-    armv7a->armv4_5_mmu.has_tiny_pages = 1;
-    armv7a->armv4_5_mmu.mmu_enabled = 0;
+    armv7a->armv7a_mmu.read_physical_memory = cortex_a8_read_phys_memory;
+
     // arm7_9->handle_target_request = cortex_a8_handle_target_request;
 
     /* REVISIT v7a setup should be in a v7a-specific routine */
-    arm_init_arch_info(target, armv4_5);
-    armv7a->common_magic = ARMV7_COMMON_MAGIC;
-
+    armv7a_init_arch_info(target, armv7a);
     target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
 
     return ERROR_OK;
@@ -2443,133 +2503,6 @@ static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
 
     return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
 }
 
-static int cortex_a8_get_ttb(struct target *target, uint32_t *result)
-{
-    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
-    struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
-    uint32_t ttb = 0, retval = ERROR_OK;
-
-    /* current_address_mode is set inside cortex_a8_virt2phys()
-       where we can determine if the address belongs to user or kernel */
-    if (cortex_a8->current_address_mode == ARM_MODE_SVC)
-    {
-        /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
-        retval = armv7a->armv4_5_common.mrc(target, 15,
-                0, 1,    /* op1, op2 */
-                2, 0,    /* CRn, CRm */
-                &ttb);
-        if (retval != ERROR_OK)
-            return retval;
-    }
-    else if (cortex_a8->current_address_mode == ARM_MODE_USR)
-    {
-        /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
-        retval = armv7a->armv4_5_common.mrc(target, 15,
-                0, 0,    /* op1, op2 */
-                2, 0,    /* CRn, CRm */
-                &ttb);
-        if (retval != ERROR_OK)
-            return retval;
-    }
-    /* we don't know whose address it is: user or kernel.
-       We assume that if we are in kernel mode then the address
-       belongs to the kernel, else, in user mode, to the user */
-    else if (armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
-    {
-        /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
-        retval = armv7a->armv4_5_common.mrc(target, 15,
-                0, 1,    /* op1, op2 */
-                2, 0,    /* CRn, CRm */
-                &ttb);
-        if (retval != ERROR_OK)
-            return retval;
-    }
-    else if (armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
-    {
-        /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
-        retval = armv7a->armv4_5_common.mrc(target, 15,
-                0, 0,    /* op1, op2 */
-                2, 0,    /* CRn, CRm */
-                &ttb);
-        if (retval != ERROR_OK)
-            return retval;
-    }
-    /* finally we don't know whose ttb to use: user or kernel */
-    else
-        LOG_ERROR("Don't know how to get ttb for current mode!!!");
-
-    ttb &= 0xffffc000;
-
-    *result = ttb;
-
-    return ERROR_OK;
-}
-
-static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
-        int d_u_cache, int i_cache)
-{
-    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
-    struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
-    uint32_t cp15_control;
-    int retval;
-
-    /* read cp15 control register */
-    retval = armv7a->armv4_5_common.mrc(target, 15,
-            0, 0,    /* op1, op2 */
-            1, 0,    /* CRn, CRm */
-            &cp15_control);
-    if (retval != ERROR_OK)
-        return retval;
-
-    if (mmu)
-        cp15_control &= ~0x1U;
-
-    if (d_u_cache)
-        cp15_control &= ~0x4U;
-
-    if (i_cache)
-        cp15_control &= ~0x1000U;
-
-    retval = armv7a->armv4_5_common.mcr(target, 15,
-            0, 0,    /* op1, op2 */
-            1, 0,    /* CRn, CRm */
-            cp15_control);
-    return retval;
-}
-
-static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
-        int d_u_cache, int i_cache)
-{
-    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
-    struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
-    uint32_t cp15_control;
-    int retval;
-
-    /* read cp15 control register */
-    retval = armv7a->armv4_5_common.mrc(target, 15,
-            0, 0,    /* op1, op2 */
-            1, 0,    /* CRn, CRm */
-            &cp15_control);
-    if (retval != ERROR_OK)
-        return retval;
-
-    if (mmu)
-        cp15_control |= 0x1U;
-
-    if (d_u_cache)
-        cp15_control |= 0x4U;
-
-    if (i_cache)
-        cp15_control |= 0x1000U;
-
-    retval = armv7a->armv4_5_common.mcr(target, 15,
-            0, 0,    /* op1, op2 */
-            1, 0,    /* CRn, CRm */
-            cp15_control);
-    return retval;
-}
 
 
 static int cortex_a8_mmu(struct target *target, int *enabled)
@@ -2579,36 +2512,35 @@ static int cortex_a8_mmu(struct target *target, int *enabled)
         return ERROR_TARGET_INVALID;
     }
 
-    *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
+    *enabled = target_to_cortex_a8(target)->armv7a_common.armv7a_mmu.mmu_enabled;
     return ERROR_OK;
 }
 
 static int cortex_a8_virt2phys(struct target *target,
         uint32_t virt, uint32_t *phys)
 {
-    uint32_t cb;
-    struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
-    // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
+    int retval = ERROR_FAIL;
     struct armv7a_common *armv7a = target_to_armv7a(target);
-
-    /* We assume that the virtual address is separated
-       between user and kernel in Linux style:
-       0x00000000-0xbfffffff - User space
-       0xc0000000-0xffffffff - Kernel space */
-    if (virt < 0xc0000000)    /* Linux user space */
-        cortex_a8->current_address_mode = ARM_MODE_USR;
-    else    /* Linux kernel */
-        cortex_a8->current_address_mode = ARM_MODE_SVC;
-    uint32_t ret;
-    int retval = armv4_5_mmu_translate_va(target,
-            &armv7a->armv4_5_mmu, virt, &cb, &ret);
-    if (retval != ERROR_OK)
-        return retval;
-    /* Reset the flag. We don't want someone else to use it by mistake */
-    cortex_a8->current_address_mode = ARM_MODE_ANY;
-
-    *phys = ret;
-    return ERROR_OK;
+    struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
+    uint8_t apsel = swjdp->apsel;
+    if (apsel == swjdp_memoryap)
+    {
+        uint32_t ret;
+        retval = armv7a_mmu_translate_va(target,
+                virt, &ret);
+        if (retval != ERROR_OK)
+            goto done;
+        *phys = ret;
+    }
+    else
+    {
+        /* use this method if swjdp_memoryap is not selected;
+         * the MMU must be enabled in order to get a correct translation */
+        retval = cortex_a8_mmu_modify(target, 1);
+        if (retval != ERROR_OK)
+            goto done;
+        retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
+    }
+done:
+    return retval;
 }
 
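
The PAR layout consumed by armv7a_mmu_translate_va_pa() (the non-memoryap
branch above issues an ATS1CPR translation, MCR p15,0,<Rt>,c7,c8,0, then reads
PAR via MRC p15,0,<Rt>,c7,c4,0), as a standalone decode; the VA and PAR values
below are made up:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t va  = 0xc0008123;
        uint32_t par = 0x8000065e;    /* hypothetical PAR after ATS1CPR */
        uint32_t pa  = (par & ~0xfffu) | (va & 0xfffu);
        printf("PA 0x%08x, NOS=%u NS=%u SH=%u inner=%u outer=%u\n", pa,
                (par >> 10) & 1, (par >> 9) & 1, (par >> 7) & 1,
                (par >> 4) & 0x7, (par >> 2) & 0x3);
        return 0;
    }
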
 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
 {
@@ -2616,8 +2548,8 @@ COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
     struct target *target = get_current_target(CMD_CTX);
     struct armv7a_common *armv7a = target_to_armv7a(target);
 
-    return armv4_5_handle_cache_info_command(CMD_CTX,
-            &armv7a->armv4_5_mmu.armv4_5_cache);
+    return armv7a_handle_cache_info_command(CMD_CTX,
+            &armv7a->armv7a_mmu.armv7a_cache);
 }
 
@@ -2789,5 +2721,4 @@ struct target_type cortexa8_target = {
     .write_phys_memory = cortex_a8_write_phys_memory,
     .mmu = cortex_a8_mmu,
     .virt2phys = cortex_a8_virt2phys,
-
 };
diff --git a/src/target/cortex_a.h b/src/target/cortex_a.h
index b49e670f..17e44e21 100644
--- a/src/target/cortex_a.h
+++ b/src/target/cortex_a.h
@@ -63,6 +63,10 @@ struct cortex_a8_common
 
     /* Saved cp15 registers */
     uint32_t cp15_control_reg;
+    /* latest cp15 control register value written, and the cpsr processor mode */
+    uint32_t cp15_control_reg_curr;
+    enum arm_mode curr_mode;
+
 
     /* Breakpoint register pairs */
     int brp_num_context;
@@ -73,10 +77,8 @@ struct cortex_a8_common
     /* Use cortex_a8_read_regs_through_mem for fast register reads */
     int fast_reg_read;
 
-    /* Flag that helps to resolve what ttb to use: user or kernel */
-    int current_address_mode;
-
     struct armv7a_common armv7a_common;
+
 };
 
 static inline struct cortex_a8_common *
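
In a debug session the new paths surface through familiar commands; for
example (command names per the registrations above — the cortex_a8
`cache_info` registration itself is outside this diff — and the address is
illustrative):

    > halt
    > virt2phys 0xc0008000
    > cortex_a8 cache_info
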