static int cortex_a_mmu_modify(struct target *target, int enable);
static int cortex_a_virt2phys(struct target *target,
uint32_t virt, uint32_t *phys);
-static int cortex_a_read_apb_ab_memory(struct target *target,
+static int cortex_a_read_cpu_memory(struct target *target,
uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
}
}
-static int cortex_a_write_apb_ab_memory_slow(struct target *target,
+static int cortex_a_write_cpu_memory_slow(struct target *target,
uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
/* Writes count objects of size size from *buffer. Old value of DSCR must
* be in *dscr; updated to new value. This is slow because it works for
* non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
- * the address is aligned, cortex_a_write_apb_ab_memory_fast should be
+ * the address is aligned, cortex_a_write_cpu_memory_fast should be
* preferred.
* Preconditions:
* - Address is in R0.
return ERROR_OK;
}
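/* Mechanism note, a sketch based on the preconditions above and the usual
 * ARMv7 DCC handshake (not a claim about the exact function body): the slow
 * path moves one element per iteration - the host deposits data in DTRRX,
 * the core copies it into a register and performs a byte/halfword/word store
 * through the pointer in R0, post-incrementing it. One host round trip per
 * element is what makes this path slow. */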
-static int cortex_a_write_apb_ab_memory_fast(struct target *target,
+static int cortex_a_write_cpu_memory_fast(struct target *target,
uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
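/* As the fragment above suggests, the fast path streams the whole buffer as
 * 32-bit words to debug_base + CPUDBG_DTRRX in a single buffered AP
 * transfer, with the core consuming each word through a post-incrementing
 * store, so the per-element host round trip of the slow path is avoided. */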
-static int cortex_a_write_apb_ab_memory(struct target *target,
+static int cortex_a_write_cpu_memory(struct target *target,
uint32_t address, uint32_t size,
uint32_t count, const uint8_t *buffer)
{
- /* Write memory through APB-AP. */
+ /* Write memory through the CPU. */
int retval, final_retval;
struct armv7a_common *armv7a = target_to_armv7a(target);
struct arm *arm = &armv7a->arm;
uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
- LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
+ LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
address, size, count);
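/* Note on the orig_/fault_ DFAR/DFSR locals above: DFAR and DFSR appear to
 * be saved before the transfer and restored afterwards, so a data abort
 * provoked by this debugger-initiated access can be detected (via the
 * sticky abort flags in DSCR) and reported without corrupting the fault
 * state seen by target software. */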
if (target->state != TARGET_HALTED) {
LOG_WARNING("target not halted");
if (size == 4 && (address % 4) == 0) {
/* We are doing a word-aligned transfer, so use fast mode. */
- retval = cortex_a_write_apb_ab_memory_fast(target, count, buffer, &dscr);
+ retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
} else {
/* Use slow path. */
- retval = cortex_a_write_apb_ab_memory_slow(target, size, count, buffer, &dscr);
+ retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
}
out:
return final_retval;
}
-static int cortex_a_read_apb_ab_memory_slow(struct target *target,
+static int cortex_a_read_cpu_memory_slow(struct target *target,
uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
/* Reads count objects of size size into *buffer. Old value of DSCR must be
* in *dscr; updated to new value. This is slow because it works for
* non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
- * the address is aligned, cortex_a_read_apb_ab_memory_fast should be
+ * the address is aligned, cortex_a_read_cpu_memory_fast should be
* preferred.
* Preconditions:
* - Address is in R0.
return ERROR_OK;
}
-static int cortex_a_read_apb_ab_memory_fast(struct target *target,
+static int cortex_a_read_cpu_memory_fast(struct target *target,
uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
return ERROR_OK;
}
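/* Mirror of the write case: for aligned word reads the core is expected to
 * push successive words into DTRTX, which the host drains with one buffered
 * read from debug_base + CPUDBG_DTRTX rather than paying a host round trip
 * per word. */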
-static int cortex_a_read_apb_ab_memory(struct target *target,
+static int cortex_a_read_cpu_memory(struct target *target,
uint32_t address, uint32_t size,
uint32_t count, uint8_t *buffer)
{
- /* Read memory through APB-AP. */
+ /* Read memory through the CPU. */
int retval, final_retval;
struct armv7a_common *armv7a = target_to_armv7a(target);
struct arm *arm = &armv7a->arm;
uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
- LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
+ LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
address, size, count);
if (target->state != TARGET_HALTED) {
LOG_WARNING("target not halted");
if (size == 4 && (address % 4) == 0) {
/* We are doing a word-aligned transfer, so use fast mode. */
- retval = cortex_a_read_apb_ab_memory_fast(target, count, buffer, &dscr);
+ retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
} else {
/* Use slow path. */
- retval = cortex_a_read_apb_ab_memory_slow(target, size, count, buffer, &dscr);
+ retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
}
out:
if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
return mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
- /* read memory through APB-AP */
+ /* read memory through the CPU */
cortex_a_prep_memaccess(target, 1);
- retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
+ retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
cortex_a_post_memaccess(target, 1);
return retval;
size, count);
cortex_a_prep_memaccess(target, 0);
- retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
+ retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
cortex_a_post_memaccess(target, 0);
return retval;
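/* A condensed sketch of the dispatch pattern shared by the wrappers in this
 * hunk. The helper name and the bare "phys" flag are illustrative only; the
 * real wrappers also check the selected AP number, handle cache maintenance
 * and emit debug logging. */
static int example_read_dispatch(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, uint8_t *buffer, bool phys)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* Prefer the direct AHB-AP (memory_ap) when it is usable. */
	if (phys && armv7a->memory_ap_available)
		return mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);

	/* Otherwise run the access through the halted CPU; the second argument
	 * of the prep/post hooks selects physical (1) vs. virtual (0) handling,
	 * matching the wrappers above and below. */
	cortex_a_prep_memaccess(target, phys ? 1 : 0);
	retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
	cortex_a_post_memaccess(target, phys ? 1 : 0);
	return retval;
}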
if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
return mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
- /* write memory through APB-AP */
+ /* write memory through the CPU */
cortex_a_prep_memaccess(target, 1);
- retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
+ retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
cortex_a_post_memaccess(target, 1);
return retval;
armv7a_cache_auto_flush_on_write(target, address, size * count);
cortex_a_prep_memaccess(target, 0);
- retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
+ retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
cortex_a_post_memaccess(target, 0);
return retval;
}
return retval;
}
- /* Search for the APB-AB - it is needed for access to debug registers */
+ /* Search for the APB-AP - it is needed for access to debug registers */
retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
if (retval != ERROR_OK) {
LOG_ERROR("Could not find APB-AP for debug access");
retval = mem_ap_init(armv7a->memory_ap);
if (retval == ERROR_OK)
armv7a->memory_ap_available = true;
- else
- LOG_WARNING("Could not initialize AHB-AP for memory access - using APB-AP");
- } else {
- /* AHB-AP not found - use APB-AP */
- LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
+ }
+ if (retval != ERROR_OK) {
+ /* AHB-AP not found or unavailable - use the CPU */
+ LOG_DEBUG("No AHB-AP available for memory access");
}
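/* Net effect of the restructured fallback: whether dap_find_ap() or
 * mem_ap_init() failed, retval is non-ERROR_OK here, so this single branch
 * covers both the "AHB-AP not found" and "AHB-AP init failed" cases;
 * memory_ap_available stays false and memory accesses fall back to the
 * CPU-assisted cortex_a_read/write_cpu_memory() paths above. */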
if (!target->dbgbase_set) {