1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
5 * Copyright (C) 2006 by Magnus Lundin *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
20 * This program is free software; you can redistribute it and/or modify *
21 * it under the terms of the GNU General Public License as published by *
22 * the Free Software Foundation; either version 2 of the License, or *
23 * (at your option) any later version. *
25 * This program is distributed in the hope that it will be useful, *
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
28 * GNU General Public License for more details. *
30 * You should have received a copy of the GNU General Public License *
31 * along with this program; if not, write to the *
32 * Free Software Foundation, Inc., *
33 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
35 * Cortex-A8(tm) TRM, ARM DDI 0344H *
36 * Cortex-A9(tm) TRM, ARM DDI 0407F *
38 ***************************************************************************/
43 #include "breakpoints.h"
46 #include "target_request.h"
47 #include "target_type.h"
48 #include "arm_opcodes.h"
49 #include <helper/time_support.h>
/* Forward declarations for the internal helpers of this Cortex-A8 target
 * driver (breakpoint management, DCC core-register access, MMU/cache
 * control).  All are file-local (static). */
51 static int cortex_a8_poll(struct target *target);
52 static int cortex_a8_debug_entry(struct target *target);
53 static int cortex_a8_restore_context(struct target *target, bool bpwp);
54 static int cortex_a8_set_breakpoint(struct target *target,
55 struct breakpoint *breakpoint, uint8_t matchmode);
56 static int cortex_a8_set_context_breakpoint(struct target *target,
57 struct breakpoint *breakpoint, uint8_t matchmode);
58 static int cortex_a8_set_hybrid_breakpoint(struct target *target,
59 struct breakpoint *breakpoint);
60 static int cortex_a8_unset_breakpoint(struct target *target,
61 struct breakpoint *breakpoint);
62 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
63 uint32_t *value, int regnum);
64 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
65 uint32_t value, int regnum);
66 static int cortex_a8_mmu(struct target *target, int *enabled);
67 static int cortex_a8_virt2phys(struct target *target,
68 uint32_t virt, uint32_t *phys);
69 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
70 int d_u_cache, int i_cache);
71 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
72 int d_u_cache, int i_cache);
73 static int cortex_a8_get_ttb(struct target *target, uint32_t *result);
77 * FIXME do topology discovery using the ROM; don't
78 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
79 * cores, with different AP numbering ... don't use a #define
80 * for these numbers, use per-core armv7a state.
/* ADIv5 access-port numbers: AP 0 reaches system memory, AP 1 the core's
 * memory-mapped debug registers (hard-coded layout; see FIXME above). */
82 #define swjdp_memoryap 0
83 #define swjdp_debugap 1
86 * Cortex-A8 Basic debug access, very low level assumes state is saved
/* Unlock the core's debug registers (OS Lock Access key 0xC5ACCE55) and
 * clear the sticky power-down bit so the core power domain registers become
 * accessible; ends by polling so target state is refreshed. */
88 static int cortex_a8_init_debug_access(struct target *target)
90 struct armv7a_common *armv7a = target_to_armv7a(target);
91 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
97 /* Unlocking the debug registers for modification */
98 /* The debugport might be uninitialised so try twice */
99 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
100 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
101 if (retval != ERROR_OK)
/* First attempt failed: retry once, since the debug port may not have
 * been powered/initialised yet on the first access. */
104 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
105 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
106 if (retval == ERROR_OK)
108 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
111 if (retval != ERROR_OK)
113 /* Clear Sticky Power Down status Bit in PRSR to enable access to
114 the registers in the Core Power Domain */
/* Reading PRSR clears the sticky bit; the value itself is discarded. */
115 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
116 armv7a->debug_base + CPUDBG_PRSR, &dummy);
117 if (retval != ERROR_OK)
120 /* Enabling of instruction execution in debug mode is done in debug_entry code */
122 /* Resync breakpoint registers */
124 /* Since this is likely called from init or reset, update target state information*/
125 return cortex_a8_poll(target);
128 /* To reduce needless round-trips, pass in a pointer to the current
129 * DSCR value. Initialize it to zero if you just need to know the
130 * value on return from this function; or DSCR_INSTR_COMP if you
131 * happen to know that no instruction is pending.
/* Execute one ARM opcode on the halted core via the ITR register:
 * wait for DSCR.InstrCompl, write the opcode to ITR, then wait for
 * completion again.  Both waits time out after 1000 ms. */
133 static int cortex_a8_exec_opcode(struct target *target,
134 uint32_t opcode, uint32_t *dscr_p)
138 struct armv7a_common *armv7a = target_to_armv7a(target);
139 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
/* Caller-provided DSCR (if any) lets us skip the first DSCR read. */
141 dscr = dscr_p ? *dscr_p : 0;
143 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
145 /* Wait for InstrCompl bit to be set */
146 long long then = timeval_ms();
147 while ((dscr & DSCR_INSTR_COMP) == 0)
149 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
150 armv7a->debug_base + CPUDBG_DSCR, &dscr);
151 if (retval != ERROR_OK)
153 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
156 if (timeval_ms() > then + 1000)
158 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
/* Writing ITR makes the core execute the opcode. */
163 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
164 armv7a->debug_base + CPUDBG_ITR, opcode);
165 if (retval != ERROR_OK)
171 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
172 armv7a->debug_base + CPUDBG_DSCR, &dscr);
173 if (retval != ERROR_OK)
175 LOG_ERROR("Could not read DSCR register");
178 if (timeval_ms() > then + 1000)
180 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
184 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
192 /**************************************************************************
193 Read core register with very few exec_opcode, fast but needs work_area.
194 This can cause problems with MMU active.
195 **************************************************************************/
/* Fast bulk register read: fetch R0 over DCC into regfile[0], load the
 * work-area address into R0, execute one STMIA to dump R1-R15 to memory,
 * then bulk-read that memory over the memory AP into regfile[1..15].
 * R0 is dumped separately because it is clobbered by the address load. */
196 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
199 int retval = ERROR_OK;
200 struct armv7a_common *armv7a = target_to_armv7a(target);
201 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
203 retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
204 if (retval != ERROR_OK)
206 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
207 if (retval != ERROR_OK)
/* STMIA r0, {r1-r15} (register mask 0xFFFE skips R0). */
209 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
210 if (retval != ERROR_OK)
/* 15 words (R1..R15); "&regfile" was mis-encoded as a (R) sign upstream. */
213 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
214 (uint8_t *)(&regfile[1]), 4*15, address);
/* Read one core register of the halted core through the DCC channel:
 * move the register into the DTRTX transmit register with an MCR p14
 * (special-casing PC and PSR, which need a staging move through R0),
 * then wait for DSCR.DTRTXfull and read DTRTX over the debug AP. */
219 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
220 uint32_t *value, int regnum)
222 int retval = ERROR_OK;
223 uint8_t reg = regnum&0xFF;
225 struct armv7a_common *armv7a = target_to_armv7a(target);
226 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
233 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
234 retval = cortex_a8_exec_opcode(target,
235 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
237 if (retval != ERROR_OK)
242 /* "MOV r0, r15"; then move r0 to DCCTX */
243 retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
244 if (retval != ERROR_OK)
246 retval = cortex_a8_exec_opcode(target,
247 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
249 if (retval != ERROR_OK)
254 /* "MRS r0, CPSR" or "MRS r0, SPSR"
255 * then move r0 to DCCTX
/* reg & 1 selects SPSR vs CPSR in the MRS encoding. */
257 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
258 if (retval != ERROR_OK)
260 retval = cortex_a8_exec_opcode(target,
261 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
263 if (retval != ERROR_OK)
267 /* Wait for DTRRXfull then read DTRRTX */
268 long long then = timeval_ms();
269 while ((dscr & DSCR_DTR_TX_FULL) == 0)
271 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
272 armv7a->debug_base + CPUDBG_DSCR, &dscr);
273 if (retval != ERROR_OK)
275 if (timeval_ms() > then + 1000)
277 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
282 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
283 armv7a->debug_base + CPUDBG_DTRTX, value);
284 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
/* Write one core register of the halted core through the DCC channel:
 * ensure DTRRX is empty (draining it if necessary), write the value to
 * DTRRX over the debug AP, then move it into the target register with an
 * MRC p14 — staging through R0 for PC and PSR, and issuing a prefetch
 * flush after a PSR write since execution state may have changed. */
289 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
290 uint32_t value, int regnum)
292 int retval = ERROR_OK;
293 uint8_t Rd = regnum&0xFF;
295 struct armv7a_common *armv7a = target_to_armv7a(target);
296 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
298 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
300 /* Check that DCCRX is not full */
301 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
302 armv7a->debug_base + CPUDBG_DSCR, &dscr);
303 if (retval != ERROR_OK)
305 if (dscr & DSCR_DTR_RX_FULL)
307 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
308 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
309 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
311 if (retval != ERROR_OK)
318 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
319 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
320 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
321 armv7a->debug_base + CPUDBG_DTRRX, value);
322 if (retval != ERROR_OK)
327 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
328 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
331 if (retval != ERROR_OK)
336 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
/* ...then "MOV pc, r0" (0xE1A0F000) to land the value in the PC. */
339 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
341 if (retval != ERROR_OK)
343 retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
344 if (retval != ERROR_OK)
349 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
350 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
352 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
354 if (retval != ERROR_OK)
/* Rd & 1 selects SPSR vs CPSR in the MSR encoding; 0xF = all fields. */
356 retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
358 if (retval != ERROR_OK)
361 /* "Prefetch flush" after modifying execution status in CPSR */
364 retval = cortex_a8_exec_opcode(target,
365 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
367 if (retval != ERROR_OK)
375 /* Write to memory mapped registers directly with no cache or mmu handling */
/* Thin wrapper: single atomic 32-bit write over the debug AP to the given
 * memory-mapped debug-register address. */
376 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
379 struct armv7a_common *armv7a = target_to_armv7a(target);
380 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
382 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap, address, value);
388 * Cortex-A8 implementation of Debug Programmer's Model
390 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
391 * so there's no need to poll for it before executing an instruction.
393 * NOTE that in several of these cases the "stall" mode might be useful.
394 * It'd let us queue a few operations together... prepare/finish might
395 * be the places to enable/disable that mode.
/* Recover the enclosing cortex_a8_common from its embedded DPM struct. */
398 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
400 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
/* Push one word into the core's DCC receive register (DTRRX) over the
 * debug AP; the core consumes it with an MRC p14. */
403 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
405 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
406 return mem_ap_sel_write_u32(a8->armv7a_common.armv4_5_common.dap,
407 swjdp_debugap,a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
/* Pull one word from the core's DCC transmit register: wait (up to 1 s)
 * for DSCR.DTRTXfull, then read DTRTX over the debug AP.  *dscr_p, when
 * given, carries the cached DSCR in and the last-read DSCR out. */
410 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
413 struct adiv5_dap *swjdp = a8->armv7a_common.armv4_5_common.dap;
414 uint32_t dscr = DSCR_INSTR_COMP;
420 /* Wait for DTRRXfull */
421 long long then = timeval_ms();
422 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
423 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
424 a8->armv7a_common.debug_base + CPUDBG_DSCR,
426 if (retval != ERROR_OK)
428 if (timeval_ms() > then + 1000)
430 LOG_ERROR("Timeout waiting for read dcc");
435 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
436 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
437 if (retval != ERROR_OK)
439 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
/* DPM prepare hook: establish the module invariant that DSCR.InstrCompl
 * is set before any DPM instruction runs, and drain a stale DTRRX word
 * if one is unexpectedly pending. */
447 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
449 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
450 struct adiv5_dap *swjdp = a8->armv7a_common.armv4_5_common.dap;
454 /* set up invariant: INSTR_COMP is set after ever DPM operation */
455 long long then = timeval_ms();
458 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
459 a8->armv7a_common.debug_base + CPUDBG_DSCR,
461 if (retval != ERROR_OK)
463 if ((dscr & DSCR_INSTR_COMP) != 0)
465 if (timeval_ms() > then + 1000)
467 LOG_ERROR("Timeout waiting for dpm prepare");
472 /* this "should never happen" ... */
473 if (dscr & DSCR_DTR_RX_FULL) {
474 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
/* Drain the stale word: MRC p14 moves DTRRX into R0 and clears the flag. */
476 retval = cortex_a8_exec_opcode(
477 a8->armv7a_common.armv4_5_common.target,
478 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
480 if (retval != ERROR_OK)
/* DPM finish hook: currently a no-op counterpart to dpm_prepare(). */
487 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
489 /* REVISIT what could be done here? */
/* DPM hook: place `data` in DTRRX, then execute `opcode`, which is
 * expected to consume the word from the DCC itself. */
493 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
494 uint32_t opcode, uint32_t data)
496 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
498 uint32_t dscr = DSCR_INSTR_COMP;
500 retval = cortex_a8_write_dcc(a8, data);
501 if (retval != ERROR_OK)
504 return cortex_a8_exec_opcode(
505 a8->armv7a_common.armv4_5_common.target,
/* DPM hook: place `data` in DTRRX, move it into R0 with an MRC p14,
 * then execute `opcode`, which reads its operand from R0. */
510 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
511 uint32_t opcode, uint32_t data)
513 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
514 uint32_t dscr = DSCR_INSTR_COMP;
517 retval = cortex_a8_write_dcc(a8, data);
518 if (retval != ERROR_OK)
521 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
522 retval = cortex_a8_exec_opcode(
523 a8->armv7a_common.armv4_5_common.target,
524 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
526 if (retval != ERROR_OK)
529 /* then the opcode, taking data from R0 */
530 retval = cortex_a8_exec_opcode(
531 a8->armv7a_common.armv4_5_common.target,
/* DPM hook: issue an ISB-equivalent (CP15 prefetch flush, MCR p15 c7,c5,4)
 * so a just-written CPSR takes effect before further instructions run. */
538 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
540 struct target *target = dpm->arm->target;
541 uint32_t dscr = DSCR_INSTR_COMP;
543 /* "Prefetch flush" after modifying execution status in CPSR */
544 return cortex_a8_exec_opcode(target,
545 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
/* DPM hook: execute `opcode` (which writes its result to the DCC), then
 * collect the word from DTRTX into *data. */
549 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
550 uint32_t opcode, uint32_t *data)
552 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
554 uint32_t dscr = DSCR_INSTR_COMP;
556 /* the opcode, writing data to DCC */
557 retval = cortex_a8_exec_opcode(
558 a8->armv7a_common.armv4_5_common.target,
561 if (retval != ERROR_OK)
564 return cortex_a8_read_dcc(a8, data, &dscr);
/* DPM hook: execute `opcode` (result lands in R0), push R0 to the DCC
 * with an MCR p14, then collect the word from DTRTX into *data. */
568 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
569 uint32_t opcode, uint32_t *data)
571 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
572 uint32_t dscr = DSCR_INSTR_COMP;
575 /* the opcode, writing data to R0 */
576 retval = cortex_a8_exec_opcode(
577 a8->armv7a_common.armv4_5_common.target,
580 if (retval != ERROR_OK)
583 /* write R0 to DCC */
584 retval = cortex_a8_exec_opcode(
585 a8->armv7a_common.armv4_5_common.target,
586 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
588 if (retval != ERROR_OK)
591 return cortex_a8_read_dcc(a8, data, &dscr);
/* DPM hook: program one breakpoint/watchpoint register pair.  Slots 0-15
 * map onto the BVR/BCR banks (breakpoints), slots 16-31 onto WVR/WCR
 * (watchpoints); the value register is written before the control
 * register so the unit arms with a valid address. */
594 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
595 uint32_t addr, uint32_t control)
597 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
598 uint32_t vr = a8->armv7a_common.debug_base;
599 uint32_t cr = a8->armv7a_common.debug_base;
603 case 0 ... 15: /* breakpoints */
604 vr += CPUDBG_BVR_BASE;
605 cr += CPUDBG_BCR_BASE;
607 case 16 ... 31: /* watchpoints */
608 vr += CPUDBG_WVR_BASE;
609 cr += CPUDBG_WCR_BASE;
618 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
619 (unsigned) vr, (unsigned) cr);
621 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
623 if (retval != ERROR_OK)
625 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
/* DPM hook: disarm a breakpoint/watchpoint slot by zeroing its control
 * register (BCR for slots 0-15, WCR otherwise). */
630 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
632 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
637 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
640 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
648 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
650 /* clear control register */
651 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
/* Wire the generic arm_dpm callback table to this file's Cortex-A8
 * implementations, then run the common DPM setup/initialize sequence. */
654 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
656 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
659 dpm->arm = &a8->armv7a_common.armv4_5_common;
662 dpm->prepare = cortex_a8_dpm_prepare;
663 dpm->finish = cortex_a8_dpm_finish;
665 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
666 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
667 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
669 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
670 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
672 dpm->bpwp_enable = cortex_a8_bpwp_enable;
673 dpm->bpwp_disable = cortex_a8_bpwp_disable;
675 retval = arm_dpm_setup(dpm);
676 if (retval == ERROR_OK)
677 retval = arm_dpm_initialize(dpm);
/* SMP helper: walk the target list and return the halted target whose
 * coreid matches; used for gdb core switching. */
681 static struct target *get_cortex_a8(struct target *target, int32_t coreid)
683 struct target_list *head;
687 while(head != (struct target_list*)NULL)
690 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
698 static int cortex_a8_halt(struct target *target);
/* Halt every core in the SMP group except `target` (and any already
 * halted); error codes from individual halts are accumulated in retval. */
700 static int cortex_a8_halt_smp(struct target *target)
703 struct target_list *head;
706 while(head != (struct target_list*)NULL)
709 if ((curr != target) && (curr->state!= TARGET_HALTED))
711 retval += cortex_a8_halt(curr);
/* On first halt of an SMP group, make this target the one gdb talks to,
 * record its coreid, and bring the sibling cores to a halt as well. */
718 static int update_halt_gdb(struct target *target)
721 if (target->gdb_service->core[0]==-1)
723 target->gdb_service->target = target;
724 target->gdb_service->core[0] = target->coreid;
725 retval += cortex_a8_halt_smp(target);
731 * Cortex-A8 Run control
/* Poll DSCR and translate the core's run mode into OpenOCD target state,
 * firing HALTED / DEBUG_HALTED events and the debug-entry path on a
 * running->halted transition.  Also services the gdb SMP core-switch
 * handshake (a fake halt event after a "J core_id" packet). */
734 static int cortex_a8_poll(struct target *target)
736 int retval = ERROR_OK;
738 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
739 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
740 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
741 enum target_state prev_target_state = target->state;
742 // toggle to another core is done by gdb as follow
743 // maint packet J core_id
745 // the next polling trigger an halt event sent to gdb
746 if ((target->state == TARGET_HALTED) && (target->smp) &&
747 (target->gdb_service) &&
748 (target->gdb_service->target==NULL) )
750 target->gdb_service->target =
751 get_cortex_a8(target, target->gdb_service->core[1]);
752 target_call_event_callbacks(target,
753 TARGET_EVENT_HALTED);
756 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
757 armv7a->debug_base + CPUDBG_DSCR, &dscr);
758 if (retval != ERROR_OK)
/* Cache the raw DSCR for debug_entry() and debug-reason reporting. */
762 cortex_a8->cpudbg_dscr = dscr;
764 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED))
766 if (prev_target_state != TARGET_HALTED)
768 /* We have a halting debug event */
769 LOG_DEBUG("Target halted");
770 target->state = TARGET_HALTED;
771 if ((prev_target_state == TARGET_RUNNING)
772 || (prev_target_state == TARGET_RESET))
774 retval = cortex_a8_debug_entry(target);
775 if (retval != ERROR_OK)
779 retval = update_halt_gdb(target);
780 if (retval != ERROR_OK)
783 target_call_event_callbacks(target,
784 TARGET_EVENT_HALTED);
786 if (prev_target_state == TARGET_DEBUG_RUNNING)
790 retval = cortex_a8_debug_entry(target);
791 if (retval != ERROR_OK)
795 retval = update_halt_gdb(target);
796 if (retval != ERROR_OK)
800 target_call_event_callbacks(target,
801 TARGET_EVENT_DEBUG_HALTED);
805 else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
807 target->state = TARGET_RUNNING;
811 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
812 target->state = TARGET_UNKNOWN;
/* Request a halt: write DRCR.HALT, enable halting debug mode in DSCR,
 * then spin (1 s timeout) until DSCR reports the core halted. */
818 static int cortex_a8_halt(struct target *target)
820 int retval = ERROR_OK;
822 struct armv7a_common *armv7a = target_to_armv7a(target);
823 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
826 * Tell the core to be halted by writing DRCR with 0x1
827 * and then wait for the core to be halted.
829 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
830 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
831 if (retval != ERROR_OK)
835 * enter halting debug mode
/* Read-modify-write DSCR to set the halting-debug-mode enable bit. */
837 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
838 armv7a->debug_base + CPUDBG_DSCR, &dscr);
839 if (retval != ERROR_OK)
842 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
843 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
844 if (retval != ERROR_OK)
847 long long then = timeval_ms();
850 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
851 armv7a->debug_base + CPUDBG_DSCR, &dscr);
852 if (retval != ERROR_OK)
854 if ((dscr & DSCR_CORE_HALTED) != 0)
858 if (timeval_ms() > then + 1000)
860 LOG_ERROR("Timeout waiting for halt");
865 target->debug_reason = DBG_REASON_DBGRQ;
/* Prepare the core for resume: pick the resume PC (current PC or the
 * caller-supplied *address), apply per-state PC fixups (word-align for
 * ARM, keep bit 0 for Thumb), write back dirty registers, and mark the
 * register cache invalid.  Does not actually restart the core —
 * cortex_a8_internal_restart() does that. */
870 static int cortex_a8_internal_restore(struct target *target, int current,
871 uint32_t *address, int handle_breakpoints, int debug_execution)
873 struct armv7a_common *armv7a = target_to_armv7a(target);
874 struct arm *armv4_5 = &armv7a->armv4_5_common;
878 if (!debug_execution)
879 target_free_all_working_areas(target);
/* NOTE(review): the armv7m/ARMV7M code below looks like a leftover from
 * the cortex_m3 driver this file was copied from (see header) and is
 * presumably compiled out (#if 0) in the full source, where the guard
 * lines were elided from this listing — confirm against upstream. */
884 /* Disable interrupts */
885 /* We disable interrupts in the PRIMASK register instead of
886 * masking with C_MASKINTS,
887 * This is probably the same issue as Cortex-M3 Errata 377493:
888 * C_MASKINTS in parallel with disabled interrupts can cause
889 * local faults to not be taken. */
890 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
891 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
892 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
894 /* Make sure we are in Thumb mode */
895 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
896 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
897 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
898 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
902 /* current = 1: continue on current pc, otherwise continue at <address> */
903 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
905 resume_pc = *address;
907 *address = resume_pc;
909 /* Make sure that the Armv7 gdb thumb fixups does not
910 * kill the return address
912 switch (armv4_5->core_state)
915 resume_pc &= 0xFFFFFFFC;
917 case ARM_STATE_THUMB:
918 case ARM_STATE_THUMB_EE:
919 /* When the return address is loaded into PC
920 * bit 0 must be 1 to stay in Thumb state
924 case ARM_STATE_JAZELLE:
925 LOG_ERROR("How do I resume into Jazelle state??");
928 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
929 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
930 armv4_5->pc->dirty = 1;
931 armv4_5->pc->valid = 1;
/* Flush all dirty registers to the core (breakpoint/watchpoint setup
 * included when handle_breakpoints is set). */
933 retval = cortex_a8_restore_context(target, handle_breakpoints);
934 if (retval != ERROR_OK)
936 target->debug_reason = DBG_REASON_NOTHALTED;
937 target->state = TARGET_RUNNING;
939 /* registers are now invalid */
940 register_cache_invalidate(armv4_5->core_cache);
/* NOTE(review): the cortex_m3_* calls below are likewise presumed to be
 * inside a compiled-out (#if 0) block in the full source — confirm. */
943 /* the front-end may request us not to handle breakpoints */
944 if (handle_breakpoints)
946 /* Single step past breakpoint at current address */
947 if ((breakpoint = breakpoint_find(target, resume_pc)))
949 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
950 cortex_m3_unset_breakpoint(target, breakpoint);
951 cortex_m3_single_step_core(target);
952 cortex_m3_set_breakpoint(target, breakpoint);
/* Restart the halted core: clear DSCR.ITRen, write DRCR RESTART plus
 * CLEAR_EXCEPTIONS, and wait (1 s timeout) for DSCR to report the core
 * restarted; then mark the register cache invalid. */
960 static int cortex_a8_internal_restart(struct target *target)
962 struct armv7a_common *armv7a = target_to_armv7a(target);
963 struct arm *armv4_5 = &armv7a->armv4_5_common;
964 struct adiv5_dap *swjdp = armv4_5->dap;
968 * Restart core and wait for it to be started. Clear ITRen and sticky
969 * exception flags: see ARMv7 ARM, C5.9.
971 * REVISIT: for single stepping, we probably want to
972 * disable IRQs by default, with optional override...
975 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
976 armv7a->debug_base + CPUDBG_DSCR, &dscr);
977 if (retval != ERROR_OK)
/* Leaving debug state with an instruction still pending is an error. */
980 if ((dscr & DSCR_INSTR_COMP) == 0)
981 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
983 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
984 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
985 if (retval != ERROR_OK)
988 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
989 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
990 DRCR_CLEAR_EXCEPTIONS);
991 if (retval != ERROR_OK)
994 long long then = timeval_ms();
997 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
998 armv7a->debug_base + CPUDBG_DSCR, &dscr);
999 if (retval != ERROR_OK)
1001 if ((dscr & DSCR_CORE_RESTARTED) != 0)
1003 if (timeval_ms() > then + 1000)
1005 LOG_ERROR("Timeout waiting for resume");
1010 target->debug_reason = DBG_REASON_NOTHALTED;
1011 target->state = TARGET_RUNNING;
1013 /* registers are now invalid */
1014 register_cache_invalidate(armv4_5->core_cache);
/* Resume every core of the SMP group except `target` at its current PC
 * (never in step mode); per-core errors accumulate in retval. */
1019 static int cortex_a8_restore_smp(struct target *target,int handle_breakpoints)
1022 struct target_list *head;
1023 struct target *curr;
1025 head = target->head;
1026 while(head != (struct target_list*)NULL)
1028 curr = head->target;
1029 if ((curr != target) && (curr->state != TARGET_RUNNING))
1031 /* resume current address , not in step mode */
1032 retval += cortex_a8_internal_restore(curr, 1, &address,
1033 handle_breakpoints, 0);
1034 retval += cortex_a8_internal_restart(curr);
/* Public resume entry point: handles the gdb SMP core-switch fake resume,
 * restores and restarts this core (and, in SMP mode, its siblings), and
 * fires the RESUMED / DEBUG_RESUMED event matching debug_execution. */
1042 static int cortex_a8_resume(struct target *target, int current,
1043 uint32_t address, int handle_breakpoints, int debug_execution)
1046 /* dummy resume for smp toggle in order to reduce gdb impact */
1047 if ((target->smp) && (target->gdb_service->core[1]!=-1))
1049 /* simulate a start and halt of target */
1050 target->gdb_service->target = NULL;
1051 target->gdb_service->core[0] = target->gdb_service->core[1];
1052 /* fake resume at next poll we play the target core[1], see poll*/
1053 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1056 cortex_a8_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1058 { target->gdb_service->core[0] = -1;
1059 retval += cortex_a8_restore_smp(target, handle_breakpoints);
1061 cortex_a8_internal_restart(target);
1063 if (!debug_execution)
1065 target->state = TARGET_RUNNING;
1066 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1067 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1071 target->state = TARGET_DEBUG_RUNNING;
1072 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1073 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
/* On entry to debug state: enable ITR execution, decode the debug reason
 * from the cached DSCR, record the watchpoint fault address if relevant,
 * read the register file (via DPM, or via the fast work-area path when
 * fast_reg_read is set), fix up the PC for the pipeline offset, and run
 * the architecture post_debug_entry hook. */
1079 static int cortex_a8_debug_entry(struct target *target)
1082 uint32_t regfile[16], cpsr, dscr;
1083 int retval = ERROR_OK;
1084 struct working_area *regfile_working_area = NULL;
1085 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1086 struct armv7a_common *armv7a = target_to_armv7a(target);
1087 struct arm *armv4_5 = &armv7a->armv4_5_common;
1088 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1091 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
1093 /* REVISIT surely we should not re-read DSCR !! */
1094 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1095 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1096 if (retval != ERROR_OK)
1099 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1100 * imprecise data aborts get discarded by issuing a Data
1101 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1104 /* Enable the ITR execution once we are in debug mode */
1105 dscr |= DSCR_ITR_EN;
1106 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
1107 armv7a->debug_base + CPUDBG_DSCR, dscr);
1108 if (retval != ERROR_OK)
1111 /* Examine debug reason */
1112 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
1114 /* save address of instruction that triggered the watchpoint? */
1115 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1118 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1119 armv7a->debug_base + CPUDBG_WFAR,
1121 if (retval != ERROR_OK)
1123 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1126 /* REVISIT fast_reg_read is never set ... */
1128 /* Examine target state and mode */
1129 if (cortex_a8->fast_reg_read)
1130 target_alloc_working_area(target, 64, &regfile_working_area);
1132 /* First load register acessible through core debug port*/
1133 if (!regfile_working_area)
/* Slow path: DPM reads each register individually over the DCC. */
1135 retval = arm_dpm_read_current_registers(&armv7a->dpm);
1139 retval = cortex_a8_read_regs_through_mem(target,
1140 regfile_working_area->address, regfile);
1142 target_free_working_area(target, regfile_working_area);
1143 if (retval != ERROR_OK)
1148 /* read Current PSR */
1149 retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
1150 if (retval != ERROR_OK)
1153 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1155 arm_set_cpsr(armv4_5, cpsr);
1158 for (i = 0; i <= ARM_PC; i++)
1160 reg = arm_reg_current(armv4_5, i);
1162 buf_set_u32(reg->value, 0, 32, regfile[i]);
1167 /* Fixup PC Resume Address */
1168 if (cpsr & (1 << 5))
1170 // T bit set for Thumb or ThumbEE state
/* Pipeline offset: saved PC is 4 (Thumb) / 8 (ARM) beyond the fault PC. */
1171 regfile[ARM_PC] -= 4;
1176 regfile[ARM_PC] -= 8;
1180 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1181 reg->dirty = reg->valid;
/* NOTE(review): the cortex_a8_read_cp dump below is presumably inside a
 * compiled-out (#if 0) block in the full source, with the guard lines
 * elided from this listing — confirm against upstream. */
1185 /* TODO, Move this */
1186 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1187 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1188 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1190 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1191 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1193 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1194 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1197 /* Are we in an exception handler */
1198 // armv4_5->exception_number = 0;
1199 if (armv7a->post_debug_entry)
1201 retval = armv7a->post_debug_entry(target);
1202 if (retval != ERROR_OK)
/* After debug entry: read the CP15 System Control Register (SCTLR), use
 * it to refresh the cached MMU / D-cache / I-cache enable flags, and
 * identify the cache geometry once from the Cache Type Register. */
1209 static int cortex_a8_post_debug_entry(struct target *target)
1211 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1212 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1215 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1216 retval = armv7a->armv4_5_common.mrc(target, 15,
1217 0, 0, /* op1, op2 */
1218 1, 0, /* CRn, CRm */
1219 &cortex_a8->cp15_control_reg);
1220 if (retval != ERROR_OK)
1222 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
/* ctype == -1 means the cache geometry has not been identified yet. */
1224 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
1226 uint32_t cache_type_reg;
1228 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1229 retval = armv7a->armv4_5_common.mrc(target, 15,
1230 0, 1, /* op1, op2 */
1231 0, 0, /* CRn, CRm */
1233 if (retval != ERROR_OK)
1235 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
1237 /* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A8 */
1238 armv4_5_identify_cache(cache_type_reg,
1239 &armv7a->armv4_5_mmu.armv4_5_cache);
/* SCTLR bit 0 = MMU enable, bit 2 = D-cache, bit 12 = I-cache. */
1242 armv7a->armv4_5_mmu.mmu_enabled =
1243 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1244 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1245 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1246 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1247 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
/* Single-step by planting a temporary hardware breakpoint in IVA-mismatch
 * mode (matchmode 0x04) at the step address, resuming, and polling (1 s
 * timeout) until the core halts on it; any real breakpoint at the address
 * is removed first and re-installed afterwards. */
1252 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1253 int handle_breakpoints)
1255 struct armv7a_common *armv7a = target_to_armv7a(target);
1256 struct arm *armv4_5 = &armv7a->armv4_5_common;
1257 struct breakpoint *breakpoint = NULL;
1258 struct breakpoint stepbreakpoint;
1262 if (target->state != TARGET_HALTED)
1264 LOG_WARNING("target not halted");
1265 return ERROR_TARGET_NOT_HALTED;
1268 /* current = 1: continue on current pc, otherwise continue at <address> */
1272 buf_set_u32(r->value, 0, 32, address);
1276 address = buf_get_u32(r->value, 0, 32);
1279 /* The front-end may request us not to handle breakpoints.
1280 * But since Cortex-A8 uses breakpoint for single step,
1281 * we MUST handle breakpoints.
1283 handle_breakpoints = 1;
1284 if (handle_breakpoints) {
1285 breakpoint = breakpoint_find(target, address);
1287 cortex_a8_unset_breakpoint(target, breakpoint);
1290 /* Setup single step breakpoint */
1291 stepbreakpoint.address = address;
1292 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1294 stepbreakpoint.type = BKPT_HARD;
1295 stepbreakpoint.set = 0;
1297 /* Break on IVA mismatch */
1298 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1300 target->debug_reason = DBG_REASON_SINGLESTEP;
1302 retval = cortex_a8_resume(target, 1, address, 0, 0);
1303 if (retval != ERROR_OK)
1306 long long then = timeval_ms();
1307 while (target->state != TARGET_HALTED)
1309 retval = cortex_a8_poll(target);
1310 if (retval != ERROR_OK)
1312 if (timeval_ms() > then + 1000)
1314 LOG_ERROR("timeout waiting for target halt");
1319 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1321 target->debug_reason = DBG_REASON_BREAKPOINT;
/* Re-install the user's breakpoint that was removed for the step. */
1324 cortex_a8_set_breakpoint(target, breakpoint, 0);
/* NOTE(review): this condition looks inverted — it logs "target stepped"
 * only when the target is NOT halted. Harmless (debug log only), but
 * verify intent against upstream before changing. */
1326 if (target->state != TARGET_HALTED)
1327 LOG_DEBUG("target stepped");
1332 static int cortex_a8_restore_context(struct target *target, bool bpwp)
/* Write back all dirty cached registers to the core before resuming;
 * bpwp asks the DPM layer to also (re)program breakpoints/watchpoints.
 * Gives the armv7a layer a pre-restore hook first. */
1334 struct armv7a_common *armv7a = target_to_armv7a(target);
1338 if (armv7a->pre_restore_context)
1339 armv7a->pre_restore_context(target);
1341 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1346 * Cortex-A8 Breakpoint and watchpoint functions
1349 /* Setup hardware Breakpoint Register Pair */
1350 static int cortex_a8_set_breakpoint(struct target *target,
1351 struct breakpoint *breakpoint, uint8_t matchmode)
/* Install a breakpoint. BKPT_HARD: claim a free Breakpoint Register
 * Pair and program DBGBVR/DBGBCR through the debug MEM-AP; matchmode
 * goes into BCR bits [22:20] (0x0 = IVA match, 0x4 = IVA mismatch,
 * used by cortex_a8_step). BKPT_SOFT: save the original instruction
 * and overwrite it with a BKPT opcode. */
1356 uint8_t byte_addr_select = 0x0F;
1357 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1358 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1359 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1361 if (breakpoint->set)
1363 LOG_WARNING("breakpoint already set");
1367 if (breakpoint->type == BKPT_HARD)
/* NOTE(review): `used` is evaluated before the bounds check, so when
 * every BRP is in use this reads brp_list[brp_num] (one past the end)
 * before the loop exits — the two conditions should be swapped. */
1369 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1371 if (brp_i >= cortex_a8->brp_num)
1373 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1374 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
/* set = BRP index + 1 so that 0 still means "not set". */
1376 breakpoint->set = brp_i + 1;
/* 2-byte (Thumb) breakpoint: enable only the byte lanes covered by
 * the halfword at address bit 1. */
1377 if (breakpoint->length == 2)
1379 byte_addr_select = (3 << (breakpoint->address & 0x02));
1381 control = ((matchmode & 0x7) << 20)
1382 | (byte_addr_select << 5)
1384 brp_list[brp_i].used = 1;
1385 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1386 brp_list[brp_i].control = control;
1387 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1388 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1389 brp_list[brp_i].value);
1390 if (retval != ERROR_OK)
1392 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1393 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1394 brp_list[brp_i].control);
1395 if (retval != ERROR_OK)
1397 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1398 brp_list[brp_i].control,
1399 brp_list[brp_i].value);
1401 else if (breakpoint->type == BKPT_SOFT)
/* Software breakpoint: pick the Thumb or ARM BKPT encoding by length. */
1404 if (breakpoint->length == 2)
1406 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1410 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
/* Save the original instruction so unset can restore it. */
1412 retval = target->type->read_memory(target,
1413 breakpoint->address & 0xFFFFFFFE,
1414 breakpoint->length, 1,
1415 breakpoint->orig_instr);
1416 if (retval != ERROR_OK)
1418 retval = target->type->write_memory(target,
1419 breakpoint->address & 0xFFFFFFFE,
1420 breakpoint->length, 1, code);
1421 if (retval != ERROR_OK)
1423 breakpoint->set = 0x11; /* Any nice value but 0 */
1429 static int cortex_a8_set_context_breakpoint(struct target *target,
1430 struct breakpoint *breakpoint, uint8_t matchmode)
/* Install a context-ID breakpoint: claim a free context-capable BRP
 * and program its value register with the ASID instead of an address.
 * matchmode (0x2 = context ID match) goes into BCR bits [22:20]. */
1432 int retval = ERROR_FAIL;
1435 uint8_t byte_addr_select = 0x0F;
1436 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1437 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1438 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1440 if (breakpoint->set)
1442 LOG_WARNING("breakpoint already set");
1445 /*check available context BRPs*/
/* NOTE(review): as in cortex_a8_set_breakpoint, the array access is
 * performed before the bounds check — conditions should be swapped. */
1446 while ((brp_list[brp_i].used || (brp_list[brp_i].type!=BRP_CONTEXT)) && (brp_i < cortex_a8->brp_num))
1449 if (brp_i >= cortex_a8->brp_num)
1451 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
/* set = BRP index + 1 so that 0 still means "not set". */
1455 breakpoint->set = brp_i + 1;
1456 control = ((matchmode & 0x7) << 20)
1457 | (byte_addr_select << 5)
1459 brp_list[brp_i].used = 1;
/* BVR holds the ASID for a context breakpoint, not an address. */
1460 brp_list[brp_i].value = (breakpoint->asid);
1461 brp_list[brp_i].control = control;
1462 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1463 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1464 brp_list[brp_i].value);
1465 if(retval != ERROR_OK)
1467 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1468 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1469 brp_list[brp_i].control);
1470 if(retval != ERROR_OK)
1472 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1473 brp_list[brp_i].control,
1474 brp_list[brp_i].value);
1479 static int cortex_a8_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
/* Install a hybrid (linked) breakpoint: one context-capable BRP holds
 * the ASID (linked-context match, 0x3) and a second normal BRP holds
 * the IVA (linked-address match, 0x1); both fire only together. */
1481 int retval = ERROR_FAIL;
1482 int brp_1=0; //holds the contextID pair
1483 int brp_2=0; // holds the IVA pair
1484 uint32_t control_CTX, control_IVA;
1485 uint8_t CTX_byte_addr_select = 0x0F;
1486 uint8_t IVA_byte_addr_select = 0x0F;
1487 uint8_t CTX_machmode = 0x03;
1488 uint8_t IVA_machmode = 0x01;
1489 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1490 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1491 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1495 if (breakpoint->set)
1497 LOG_WARNING("breakpoint already set");
1500 /*check available context BRPs*/
/* NOTE(review): array access before bounds check, as in the other BRP
 * scan loops — conditions should be swapped. */
1501 while ((brp_list[brp_1].used || (brp_list[brp_1].type!=BRP_CONTEXT)) && (brp_1 < cortex_a8->brp_num))
/* NOTE(review): printf() should be LOG_DEBUG() — direct stdout output
 * bypasses OpenOCD's logging infrastructure. */
1504 printf("brp(CTX) found num: %d \n",brp_1);
1505 if (brp_1 >= cortex_a8->brp_num)
1507 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1511 while ((brp_list[brp_2].used || (brp_list[brp_2].type!=BRP_NORMAL)) && (brp_2 < cortex_a8->brp_num))
1514 printf("brp(IVA) found num: %d \n",brp_2);
1515 if (brp_2 >= cortex_a8->brp_num)
1517 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
/* Record both halves: set = context BRP index + 1, linked_BRP = IVA BRP. */
1521 breakpoint->set = brp_1 + 1;
1522 breakpoint->linked_BRP= brp_2;
1523 control_CTX = ((CTX_machmode & 0x7) << 20)
1526 | (CTX_byte_addr_select << 5)
1528 brp_list[brp_1].used = 1;
1529 brp_list[brp_1].value = (breakpoint->asid);
1530 brp_list[brp_1].control = control_CTX;
1531 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1532 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1533 brp_list[brp_1].value);
1534 if (retval != ERROR_OK)
1536 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1537 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1538 brp_list[brp_1].control);
1539 if( retval != ERROR_OK )
1542 control_IVA = ((IVA_machmode & 0x7) << 20)
1544 | (IVA_byte_addr_select << 5)
1546 brp_list[brp_2].used = 1;
1547 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1548 brp_list[brp_2].control = control_IVA;
1549 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1550 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1551 brp_list[brp_2].value);
1552 if (retval != ERROR_OK)
1554 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1555 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1556 brp_list[brp_2].control);
1557 if (retval != ERROR_OK )
1564 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
/* Remove an installed breakpoint. Hard breakpoints release their BRP
 * (both BRPs for a hybrid one, detected by address && asid both set)
 * and zero BVR/BCR in hardware; soft breakpoints restore the saved
 * original instruction. */
1567 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1568 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1569 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1571 if (!breakpoint->set)
1573 LOG_WARNING("breakpoint not set");
1577 if (breakpoint->type == BKPT_HARD)
/* Both address and asid non-zero marks a hybrid breakpoint that owns
 * two linked BRPs. */
1579 if ((breakpoint->address != 0) && (breakpoint->asid != 0))
/* set stores BRP index + 1, so subtract 1 to recover the index. */
1581 int brp_i = breakpoint->set - 1;
1582 int brp_j = breakpoint->linked_BRP;
1583 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1585 LOG_DEBUG("Invalid BRP number in breakpoint");
1588 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1589 brp_list[brp_i].control, brp_list[brp_i].value);
1590 brp_list[brp_i].used = 0;
1591 brp_list[brp_i].value = 0;
1592 brp_list[brp_i].control = 0;
1593 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1594 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1595 brp_list[brp_i].control);
1596 if (retval != ERROR_OK)
1598 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1599 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1600 brp_list[brp_i].value);
1601 if (retval != ERROR_OK)
/* Release the linked IVA pair as well. */
1603 if ((brp_j < 0) || (brp_j >= cortex_a8->brp_num))
1605 LOG_DEBUG("Invalid BRP number in breakpoint");
1608 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1609 brp_list[brp_j].control, brp_list[brp_j].value);
1610 brp_list[brp_j].used = 0;
1611 brp_list[brp_j].value = 0;
1612 brp_list[brp_j].control = 0;
1613 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1614 + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1615 brp_list[brp_j].control);
1616 if (retval != ERROR_OK)
1618 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1619 + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1620 brp_list[brp_j].value);
1621 if (retval != ERROR_OK)
1623 breakpoint->linked_BRP = 0;
1624 breakpoint->set = 0;
/* Plain hardware breakpoint: single BRP to release. */
1630 int brp_i = breakpoint->set - 1;
1631 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1633 LOG_DEBUG("Invalid BRP number in breakpoint");
1636 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1637 brp_list[brp_i].control, brp_list[brp_i].value);
1638 brp_list[brp_i].used = 0;
1639 brp_list[brp_i].value = 0;
1640 brp_list[brp_i].control = 0;
1641 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1642 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1643 brp_list[brp_i].control);
1644 if (retval != ERROR_OK)
1646 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1647 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1648 brp_list[brp_i].value);
1649 if (retval != ERROR_OK)
1651 breakpoint->set = 0;
1657 /* restore original instruction (kept in target endianness) */
1658 if (breakpoint->length == 4)
1660 retval = target->type->write_memory(target,
1661 breakpoint->address & 0xFFFFFFFE,
1662 4, 1, breakpoint->orig_instr);
1663 if (retval != ERROR_OK)
/* 2-byte (Thumb) software breakpoint. */
1668 retval = target->type->write_memory(target,
1669 breakpoint->address & 0xFFFFFFFE,
1670 2, 1, breakpoint->orig_instr);
1671 if (retval != ERROR_OK)
1675 breakpoint->set = 0;
1680 static int cortex_a8_add_breakpoint(struct target *target,
1681 struct breakpoint *breakpoint)
/* target_type add_breakpoint hook: reserve a BRP slot from the
 * availability counter for hard breakpoints, then install with
 * exact-address matching (matchmode 0x00). */
1683 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1685 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1687 LOG_INFO("no hardware breakpoint available");
1688 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1691 if (breakpoint->type == BKPT_HARD)
1692 cortex_a8->brp_num_available--;
1694 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1697 static int cortex_a8_add_context_breakpoint(struct target *target,
1698 struct breakpoint *breakpoint)
/* target_type hook for context-ID breakpoints: reserve a BRP slot,
 * then install with ASID matching (matchmode 0x02). */
1700 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1702 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1704 LOG_INFO("no hardware breakpoint available");
1705 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1708 if (breakpoint->type == BKPT_HARD)
1709 cortex_a8->brp_num_available--;
1711 return cortex_a8_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1714 static int cortex_a8_add_hybrid_breakpoint(struct target *target,
1715 struct breakpoint *breakpoint)
/* target_type hook for hybrid (address + context) breakpoints.
 * NOTE(review): a hybrid breakpoint consumes TWO BRPs, but only one
 * is accounted for here — presumably brp_num_available should drop by
 * 2; confirm against the set/unset paths. */
1717 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1719 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1721 LOG_INFO("no hardware breakpoint available");
1722 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1725 if (breakpoint->type == BKPT_HARD)
1726 cortex_a8->brp_num_available--;
1728 return cortex_a8_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1732 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
/* target_type remove_breakpoint hook: uninstall the breakpoint and
 * return its BRP to the availability counter. */
1734 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
/* NOTE(review): this comment contradicts the check right below — the
 * code rejects removal unless the target is halted. */
1737 /* It is perfectly possible to remove breakpoints while the target is running */
1738 if (target->state != TARGET_HALTED)
1740 LOG_WARNING("target not halted");
1741 return ERROR_TARGET_NOT_HALTED;
1745 if (breakpoint->set)
1747 cortex_a8_unset_breakpoint(target, breakpoint);
1748 if (breakpoint->type == BKPT_HARD)
1749 cortex_a8->brp_num_available++ ;
1759 * Cortex-A8 Reset functions
1762 static int cortex_a8_assert_reset(struct target *target)
/* Assert reset: prefer a board-level TARGET_EVENT_RESET_ASSERT script,
 * fall back to SRST if the adapter has one, otherwise report that no
 * reset method is available. Invalidates cached registers and marks
 * the target as in reset. */
1764 struct armv7a_common *armv7a = target_to_armv7a(target);
1768 /* FIXME when halt is requested, make it work somehow... */
1770 /* Issue some kind of warm reset. */
1771 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1772 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1773 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1774 /* REVISIT handle "pulls" cases, if there's
1775 * hardware that needs them to work.
/* jtag_add_reset(trst, srst): keep TRST deasserted, assert SRST. */
1777 jtag_add_reset(0, 1);
1779 LOG_ERROR("%s: how to reset?", target_name(target));
1783 /* registers are now invalid */
1784 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1786 target->state = TARGET_RESET;
1791 static int cortex_a8_deassert_reset(struct target *target)
/* Deassert reset: release SRST, poll the current core state, and if a
 * reset-halt was requested but the core is already running, halt it. */
1797 /* be certain SRST is off */
1798 jtag_add_reset(0, 0);
1800 retval = cortex_a8_poll(target);
1801 if (retval != ERROR_OK)
1804 if (target->reset_halt) {
1805 if (target->state != TARGET_HALTED) {
1806 LOG_WARNING("%s: ran after reset and before halt ...",
1807 target_name(target));
1808 if ((retval = target_halt(target)) != ERROR_OK)
1817 static int cortex_a8_write_apb_ab_memory(struct target *target,
1818 uint32_t address, uint32_t size,
1819 uint32_t count, const uint8_t *buffer)
1822 /* write memory through APB-AP */
/* Writes by executing instructions on the halted core via the DCC:
 * r0 holds the (word-aligned) address, r1 the data word. Partial
 * words are handled by read-modify-write of the containing word. */
1824 int retval = ERROR_INVALID_ARGUMENTS;
1825 struct armv7a_common *armv7a = target_to_armv7a(target);
1826 struct arm *armv4_5 = &armv7a->armv4_5_common;
1827 int total_bytes = count * size;
1828 int start_byte, nbytes_to_write, i;
1835 if (target->state != TARGET_HALTED)
1837 LOG_WARNING("target not halted");
1838 return ERROR_TARGET_NOT_HALTED;
/* r0 and r1 are clobbered; grab their cache entries so the debugger's
 * copies can be marked dirty / restored later. */
1841 reg = arm_reg_current(armv4_5, 0);
1843 reg = arm_reg_current(armv4_5, 1);
1846 retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
1847 if (retval != ERROR_OK)
1850 start_byte = address & 0x3;
1852 while (total_bytes > 0) {
1854 nbytes_to_write = 4 - start_byte;
1855 if (total_bytes < nbytes_to_write)
1856 nbytes_to_write = total_bytes;
/* Partial word: fetch the existing word first, then merge bytes. */
1858 if ( nbytes_to_write != 4 ) {
1860 /* execute instruction LDR r1, [r0] */
1861 retval = cortex_a8_exec_opcode(target, ARMV4_5_LDR(1, 0), NULL);
1862 if (retval != ERROR_OK)
1865 retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
1866 if (retval != ERROR_OK)
1870 for (i = 0; i < nbytes_to_write; ++i)
1871 data.uc_a[i + start_byte] = *buffer++;
1873 retval = cortex_a8_dap_write_coreregister_u32(target, data.ui, 1);
1874 if (retval != ERROR_OK)
1877 /* execute instruction STRW r1, [r0], 1 (0xe4801004) */
1878 retval = cortex_a8_exec_opcode(target, ARMV4_5_STRW_IP(1, 0) , NULL);
1879 if (retval != ERROR_OK)
1882 total_bytes -= nbytes_to_write;
1890 static int cortex_a8_read_apb_ab_memory(struct target *target,
1891 uint32_t address, uint32_t size,
1892 uint32_t count, uint8_t *buffer)
1895 /* read memory through APB-AP */
/* Mirror of cortex_a8_write_apb_ab_memory: execute post-indexed word
 * loads on the halted core (r0 = address, r1 = data) and copy out
 * only the requested byte lanes of each word. */
1897 int retval = ERROR_INVALID_ARGUMENTS;
1898 struct armv7a_common *armv7a = target_to_armv7a(target);
1899 struct arm *armv4_5 = &armv7a->armv4_5_common;
1900 int total_bytes = count * size;
1901 int start_byte, nbytes_to_read, i;
1908 if (target->state != TARGET_HALTED)
1910 LOG_WARNING("target not halted");
1911 return ERROR_TARGET_NOT_HALTED;
/* r0/r1 are clobbered by the downloaded load sequence. */
1914 reg = arm_reg_current(armv4_5, 0);
1916 reg = arm_reg_current(armv4_5, 1);
1919 retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
1920 if (retval != ERROR_OK)
1923 start_byte = address & 0x3;
1925 while (total_bytes > 0) {
1927 /* execute instruction LDRW r1, [r0], 4 (0xe4901004) */
1928 retval = cortex_a8_exec_opcode(target, ARMV4_5_LDRW_IP(1, 0), NULL);
1929 if (retval != ERROR_OK)
1932 retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
1933 if (retval != ERROR_OK)
1936 nbytes_to_read = 4 - start_byte;
1937 if (total_bytes < nbytes_to_read)
1938 nbytes_to_read = total_bytes;
1940 for (i = 0; i < nbytes_to_read; ++i)
1941 *buffer++ = data.uc_a[i + start_byte];
1943 total_bytes -= nbytes_to_read;
1953 * Cortex-A8 Memory access
1955 * This is the same as for Cortex-M3, but we must also use the correct
1956 * ap number for every access.
1959 static int cortex_a8_read_phys_memory(struct target *target,
1960 uint32_t address, uint32_t size,
1961 uint32_t count, uint8_t *buffer)
/* Read physical memory. If the selected AP is the memory (AHB) AP the
 * read bypasses the core entirely; otherwise it goes through the
 * APB-AP / core-execution path, which only works with the MMU off. */
1963 struct armv7a_common *armv7a = target_to_armv7a(target);
1964 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1965 int retval = ERROR_INVALID_ARGUMENTS;
1966 uint8_t apsel = swjdp->apsel;
1967 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d",
1968 address, size, count);
1970 if (count && buffer) {
1972 if ( apsel == swjdp_memoryap ) {
1974 /* read memory through AHB-AP */
/* Dispatch on access size (4/2/1 bytes). */
1978 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
1979 buffer, 4 * count, address);
1982 retval = mem_ap_sel_read_buf_u16(swjdp, swjdp_memoryap,
1983 buffer, 2 * count, address);
1986 retval = mem_ap_sel_read_buf_u8(swjdp, swjdp_memoryap,
1987 buffer, count, address);
1992 /* read memory through APB-AP */
/* The core executes loads with virtual addresses, so a physical read
 * through APB is only valid while address translation is disabled. */
1995 retval = cortex_a8_mmu(target, &enabled);
1996 if (retval != ERROR_OK)
2001 LOG_WARNING("Reading physical memory through \
2002 APB with MMU enabled is not yet implemented");
2003 return ERROR_TARGET_FAILURE;
2005 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2011 static int cortex_a8_read_memory(struct target *target, uint32_t address,
2012 uint32_t size, uint32_t count, uint8_t *buffer)
/* Read virtual memory. On the AHB path, translate virt->phys first
 * (when the MMU is on) and read physical; on the APB path the core
 * performs the loads itself so its own translation applies. */
2015 uint32_t virt, phys;
2017 struct armv7a_common *armv7a = target_to_armv7a(target);
2018 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2019 uint8_t apsel = swjdp->apsel;
2021 /* cortex_a8 handles unaligned memory access */
2022 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2024 if (apsel == swjdp_memoryap) {
2025 retval = cortex_a8_mmu(target, &enabled);
2026 if (retval != ERROR_OK)
2033 retval = cortex_a8_virt2phys(target, virt, &phys);
2034 if (retval != ERROR_OK)
2037 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x",
/* With translation resolved (or MMU off) the AHB read uses the
 * physical address directly. */
2041 retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
2043 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2048 static int cortex_a8_write_phys_memory(struct target *target,
2049 uint32_t address, uint32_t size,
2050 uint32_t count, const uint8_t *buffer)
/* Write physical memory (AHB-AP direct, or APB-AP with MMU off, as in
 * the read path), then invalidate I- and D-cache lines covering the
 * written range so the core does not execute/read stale data. */
2052 struct armv7a_common *armv7a = target_to_armv7a(target);
2053 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2054 int retval = ERROR_INVALID_ARGUMENTS;
2055 uint8_t apsel = swjdp->apsel;
2057 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address,
2060 if (count && buffer) {
2062 if ( apsel == swjdp_memoryap ) {
2064 /* write memory through AHB-AP */
2068 retval = mem_ap_sel_write_buf_u32(swjdp, swjdp_memoryap,
2069 buffer, 4 * count, address);
2072 retval = mem_ap_sel_write_buf_u16(swjdp, swjdp_memoryap,
2073 buffer, 2 * count, address);
2076 retval = mem_ap_sel_write_buf_u8(swjdp, swjdp_memoryap,
2077 buffer, count, address);
2083 /* write memory through APB-AP */
2086 retval = cortex_a8_mmu(target, &enabled);
2087 if (retval != ERROR_OK)
2092 LOG_WARNING("Writing physical memory through APB with MMU" \
2093 "enabled is not yet implemented");
2094 return ERROR_TARGET_FAILURE;
/* APB path returns early: the core's own stores keep caches coherent
 * enough that the invalidation below is skipped. */
2096 return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2101 /* REVISIT this op is generic ARMv7-A/R stuff */
2102 if (retval == ERROR_OK && target->state == TARGET_HALTED)
2104 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
2106 retval = dpm->prepare(dpm);
2107 if (retval != ERROR_OK)
2110 /* The Cache handling will NOT work with MMU active, the
2111 * wrong addresses will be invalidated!
2113 * For both ICache and DCache, walk all cache lines in the
2114 * address range. Cortex-A8 has fixed 64 byte line length.
2116 * REVISIT per ARMv7, these may trigger watchpoints ...
2119 /* invalidate I-Cache */
2120 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
2122 /* ICIMVAU - Invalidate Cache single entry
2124 * MCR p15, 0, r0, c7, c5, 1
2126 for (uint32_t cacheline = address;
2127 cacheline < address + size * count;
2129 retval = dpm->instr_write_data_r0(dpm,
2130 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2132 if (retval != ERROR_OK)
2137 /* invalidate D-Cache */
2138 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
2140 /* DCIMVAC - Invalidate data Cache line
2142 * MCR p15, 0, r0, c7, c6, 1
2144 for (uint32_t cacheline = address;
2145 cacheline < address + size * count;
2147 retval = dpm->instr_write_data_r0(dpm,
2148 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2150 if (retval != ERROR_OK)
/* Cleanup result deliberately ignored; the write itself succeeded. */
2155 /* (void) */ dpm->finish(dpm);
2161 static int cortex_a8_write_memory(struct target *target, uint32_t address,
2162 uint32_t size, uint32_t count, const uint8_t *buffer)
/* Write virtual memory: on the AHB path, translate virt->phys when
 * the MMU is on, then delegate to the physical writer; the APB path
 * lets the halted core perform the stores with its own translation. */
2165 uint32_t virt, phys;
2167 struct armv7a_common *armv7a = target_to_armv7a(target);
2168 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2169 uint8_t apsel = swjdp->apsel;
2170 /* cortex_a8 handles unaligned memory access */
/* NOTE(review): this log message says "Reading" in the write path —
 * copy/paste from cortex_a8_read_memory; should say "Writing". */
2171 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2173 if (apsel == swjdp_memoryap) {
2175 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
2176 retval = cortex_a8_mmu(target, &enabled);
2177 if (retval != ERROR_OK)
2183 retval = cortex_a8_virt2phys(target, virt, &phys);
2184 if (retval != ERROR_OK)
2186 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
2190 retval = cortex_a8_write_phys_memory(target, address, size,
2194 retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2199 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
2200 uint32_t count, const uint8_t *buffer)
/* Bulk-write hook: no fast download path here, just word-sized writes
 * through the normal write_memory route. */
2202 return cortex_a8_write_memory(target, address, 4, count, buffer);
2206 static int cortex_a8_handle_target_request(void *priv)
/* Periodic timer callback (registered in cortex_a8_init_arch_info):
 * while the target runs, drain DCC target->host messages by polling
 * DSCR's DTR-TX-full flag and reading DTRTX, handing each word to
 * target_request(). */
2208 struct target *target = priv;
2209 struct armv7a_common *armv7a = target_to_armv7a(target);
2210 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
/* Skip silently until the target is examined, debug messages are
 * enabled, and the core is actually running. */
2213 if (!target_was_examined(target))
2215 if (!target->dbg_msg_enabled)
2218 if (target->state == TARGET_RUNNING)
2222 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2223 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2225 /* check if we have data */
2226 while ((dscr & DSCR_DTR_TX_FULL) && (retval==ERROR_OK))
2228 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2229 armv7a->debug_base+ CPUDBG_DTRTX, &request);
2230 if (retval == ERROR_OK)
2232 target_request(target, request);
2233 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2234 armv7a->debug_base+ CPUDBG_DSCR, &dscr);
2243 * Cortex-A8 target information and configuration
2246 static int cortex_a8_examine_first(struct target *target)
/* First-time examination: initialise the debug port, locate the core's
 * debug register base (via the ROM table unless the config supplied
 * one), read and log the ID registers, set up the DPM, and build the
 * breakpoint-register-pair bookkeeping from DIDR. */
2248 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2249 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2250 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
2252 int retval = ERROR_OK;
2253 uint32_t didr, ctypr, ttypr, cpuid;
2255 /* We do one extra read to ensure DAP is configured,
2256 * we call ahbap_debugport_init(swjdp) instead
2258 retval = ahbap_debugport_init(swjdp);
2259 if (retval != ERROR_OK)
2262 if (!target->dbgbase_set)
2265 /* Get ROM Table base */
2267 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2268 if (retval != ERROR_OK)
2270 /* Lookup 0x15 -- Processor DAP */
2271 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2272 &armv7a->debug_base);
2273 if (retval != ERROR_OK)
/* Config file supplied the debug base explicitly. */
2278 armv7a->debug_base = target->dbgbase;
2281 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2282 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2283 if (retval != ERROR_OK)
2286 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2287 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
2289 LOG_DEBUG("Examine %s failed", "CPUID");
2293 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2294 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
2296 LOG_DEBUG("Examine %s failed", "CTYPR");
2300 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2301 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
2303 LOG_DEBUG("Examine %s failed", "TTYPR");
2307 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2308 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
2310 LOG_DEBUG("Examine %s failed", "DIDR");
2314 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2315 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2316 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2317 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2319 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
2320 retval = cortex_a8_dpm_setup(cortex_a8, didr);
2321 if (retval != ERROR_OK)
2324 /* Setup Breakpoint Register Pairs */
/* DIDR[27:24] = number of BRPs - 1; DIDR[23:20] = number of
 * context-matching BRPs - 1. Context-capable pairs occupy the highest
 * indices, so the first (brp_num - brp_num_context) are BRP_NORMAL. */
2325 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
2326 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2327 cortex_a8->brp_num_available = cortex_a8->brp_num;
2328 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
2329 // cortex_a8->brb_enabled = ????;
2330 for (i = 0; i < cortex_a8->brp_num; i++)
2332 cortex_a8->brp_list[i].used = 0;
2333 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
2334 cortex_a8->brp_list[i].type = BRP_NORMAL;
2336 cortex_a8->brp_list[i].type = BRP_CONTEXT;
2337 cortex_a8->brp_list[i].value = 0;
2338 cortex_a8->brp_list[i].control = 0;
2339 cortex_a8->brp_list[i].BRPn = i;
2342 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
2344 target_set_examined(target);
2348 static int cortex_a8_examine(struct target *target)
/* target_type examine hook: run the one-time probe only on the first
 * call, then (re)initialise debug access on every examine. */
2350 int retval = ERROR_OK;
2352 /* don't re-probe hardware after each reset */
2353 if (!target_was_examined(target))
2354 retval = cortex_a8_examine_first(target);
2356 /* Configure core debug access */
2357 if (retval == ERROR_OK)
2358 retval = cortex_a8_init_debug_access(target);
2364 * Cortex-A8 target creation and initialization
2367 static int cortex_a8_init_target(struct command_context *cmd_ctx,
2368 struct target *target)
/* Intentionally minimal: real setup happens in examine_first(). */
2370 /* examine_first() does a bunch of this */
2374 static int cortex_a8_init_arch_info(struct target *target,
2375 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
/* One-time construction of the cortex_a8/armv7a/arm state: wire the
 * DAP and JTAG info, install the armv7a MMU/cache function pointers,
 * and register the periodic DCC-drain timer callback. */
2377 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2378 struct arm *armv4_5 = &armv7a->armv4_5_common;
2379 struct adiv5_dap *dap = &armv7a->dap;
2381 armv7a->armv4_5_common.dap = dap;
2383 /* Setup struct cortex_a8_common */
2384 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
2385 /* tap has no dap initialized */
2388 armv7a->armv4_5_common.dap = dap;
2389 /* Setup struct cortex_a8_common */
2390 armv4_5->arch_info = armv7a;
2392 /* prepare JTAG information for the new target */
2393 cortex_a8->jtag_info.tap = tap;
2394 cortex_a8->jtag_info.scann_size = 4;
2396 /* Leave (only) generic DAP stuff for debugport_init() */
2397 dap->jtag_info = &cortex_a8->jtag_info;
2399 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2400 dap->tar_autoincr_block = (1 << 10);
2401 dap->memaccess_tck = 80;
/* Tap already carries a DAP (shared with another target): reuse it. */
2405 armv7a->armv4_5_common.dap = tap->dap;
2407 cortex_a8->fast_reg_read = 0;
2409 /* Set default value */
2410 cortex_a8->current_address_mode = ARM_MODE_ANY;
2412 /* register arch-specific functions */
2413 armv7a->examine_debug_reason = NULL;
2415 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
2417 armv7a->pre_restore_context = NULL;
/* ctype = -1 ==> cache info not probed yet; see post_debug_entry. */
2418 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
2419 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
2420 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
2421 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
2422 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
2423 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
2424 armv7a->armv4_5_mmu.has_tiny_pages = 1;
2425 armv7a->armv4_5_mmu.mmu_enabled = 0;
2428 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
2430 /* REVISIT v7a setup should be in a v7a-specific routine */
2431 arm_init_arch_info(target, armv4_5);
2432 armv7a->common_magic = ARMV7_COMMON_MAGIC;
/* Poll for DCC messages every 1 ms while the target runs. */
2434 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
2439 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
/* target_type create hook: allocate zeroed per-target state and
 * delegate to init_arch_info.
 * NOTE(review): calloc() result is not checked before use. */
2441 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2443 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2446 static int cortex_a8_get_ttb(struct target *target, uint32_t *result)
/* Return the translation table base to use for page walks: TTBR1 for
 * kernel-space addresses, TTBR0 for user-space, chosen either by the
 * hint left in current_address_mode by cortex_a8_virt2phys() or, as a
 * fallback, by the core's current mode. */
2448 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2449 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2450 uint32_t ttb = 0, retval = ERROR_OK;
2452 /* current_address_mode is set inside cortex_a8_virt2phys()
2453 where we can determine if address belongs to user or kernel */
2454 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
2456 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 TTBR1 (kernel tables) */
2457 retval = armv7a->armv4_5_common.mrc(target, 15,
2458 0, 1, /* op1, op2 */
2459 2, 0, /* CRn, CRm */
2461 if (retval != ERROR_OK)
2464 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
2466 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 TTBR0 (user tables) */
2467 retval = armv7a->armv4_5_common.mrc(target, 15,
2468 0, 0, /* op1, op2 */
2469 2, 0, /* CRn, CRm */
2471 if (retval != ERROR_OK)
2474 /* we don't know whose address is: user or kernel
2475 we assume that if we are in kernel mode then
2476 address belongs to kernel else if in user mode
2478 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
2480 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 TTBR1 (kernel tables) */
2481 retval = armv7a->armv4_5_common.mrc(target, 15,
2482 0, 1, /* op1, op2 */
2483 2, 0, /* CRn, CRm */
2485 if (retval != ERROR_OK)
2488 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
2490 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 TTBR0 (user tables) */
2491 retval = armv7a->armv4_5_common.mrc(target, 15,
2492 0, 0, /* op1, op2 */
2493 2, 0, /* CRn, CRm */
2495 if (retval != ERROR_OK)
2498 /* finally we don't know whose ttb to use: user or kernel */
2500 LOG_ERROR("Don't know how to get ttb for current mode!!!");
2509 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
2510 int d_u_cache, int i_cache)
/* Read-modify-write SCTLR to clear the requested enable bits:
 * mmu -> bit 0 (M), d_u_cache -> bit 2 (C), i_cache -> bit 12 (I). */
2512 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2513 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2514 uint32_t cp15_control;
2517 /* read cp15 control register */
2518 retval = armv7a->armv4_5_common.mrc(target, 15,
2519 0, 0, /* op1, op2 */
2520 1, 0, /* CRn, CRm */
2522 if (retval != ERROR_OK)
2527 cp15_control &= ~0x1U;
2530 cp15_control &= ~0x4U;
2533 cp15_control &= ~0x1000U;
/* write the modified value back */
2535 retval = armv7a->armv4_5_common.mcr(target, 15,
2536 0, 0, /* op1, op2 */
2537 1, 0, /* CRn, CRm */
2542 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
2543 int d_u_cache, int i_cache)
/* Mirror of cortex_a8_disable_mmu_caches: read-modify-write SCTLR to
 * SET the requested enable bits (M = bit 0, C = bit 2, I = bit 12). */
2545 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2546 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2547 uint32_t cp15_control;
2550 /* read cp15 control register */
2551 retval = armv7a->armv4_5_common.mrc(target, 15,
2552 0, 0, /* op1, op2 */
2553 1, 0, /* CRn, CRm */
2555 if (retval != ERROR_OK)
2559 cp15_control |= 0x1U;
2562 cp15_control |= 0x4U;
2565 cp15_control |= 0x1000U;
/* write the modified value back */
2567 retval = armv7a->armv4_5_common.mcr(target, 15,
2568 0, 0, /* op1, op2 */
2569 1, 0, /* CRn, CRm */
2575 static int cortex_a8_mmu(struct target *target, int *enabled)
2577 if (target->state != TARGET_HALTED) {
2578 LOG_ERROR("%s: target not halted", __func__);
2579 return ERROR_TARGET_INVALID;
2582 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
2586 static int cortex_a8_virt2phys(struct target *target,
2587 uint32_t virt, uint32_t *phys)
2590 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2591 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2592 struct armv7a_common *armv7a = target_to_armv7a(target);
2594 /* We assume that virtual address is separated
2595 between user and kernel in Linux style:
2596 0x00000000-0xbfffffff - User space
2597 0xc0000000-0xffffffff - Kernel space */
2598 if( virt < 0xc0000000 ) /* Linux user space */
2599 cortex_a8->current_address_mode = ARM_MODE_USR;
2600 else /* Linux kernel */
2601 cortex_a8->current_address_mode = ARM_MODE_SVC;
2603 int retval = armv4_5_mmu_translate_va(target,
2604 &armv7a->armv4_5_mmu, virt, &cb, &ret);
2605 if (retval != ERROR_OK)
2607 /* Reset the flag. We don't want someone else to use it by error */
2608 cortex_a8->current_address_mode = ARM_MODE_ANY;
2614 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2616 struct target *target = get_current_target(CMD_CTX);
2617 struct armv7a_common *armv7a = target_to_armv7a(target);
2619 return armv4_5_handle_cache_info_command(CMD_CTX,
2620 &armv7a->armv4_5_mmu.armv4_5_cache);
2624 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2626 struct target *target = get_current_target(CMD_CTX);
2627 if (!target_was_examined(target))
2629 LOG_ERROR("target not examined yet");
2633 return cortex_a8_init_debug_access(target);
2635 COMMAND_HANDLER(cortex_a8_handle_smp_off_command)
2637 struct target *target = get_current_target(CMD_CTX);
2638 /* check target is an smp target */
2639 struct target_list *head;
2640 struct target *curr;
2641 head = target->head;
2643 if (head != (struct target_list*)NULL)
2645 while (head != (struct target_list*)NULL)
2647 curr = head->target;
2651 /* fixes the target display to the debugger */
2652 target->gdb_service->target = target;
2657 COMMAND_HANDLER(cortex_a8_handle_smp_on_command)
2659 struct target *target = get_current_target(CMD_CTX);
2660 struct target_list *head;
2661 struct target *curr;
2662 head = target->head;
2663 if (head != (struct target_list*)NULL)
2665 while (head != (struct target_list*)NULL)
2667 curr = head->target;
2675 COMMAND_HANDLER(cortex_a8_handle_smp_gdb_command)
2677 struct target *target = get_current_target(CMD_CTX);
2678 int retval = ERROR_OK;
2679 struct target_list *head;
2680 head = target->head;
2681 if (head != (struct target_list*)NULL)
2686 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2687 if (ERROR_OK != retval)
2689 target->gdb_service->core[1]=coreid;
2692 command_print(CMD_CTX, "gdb coreid %d -> %d", target->gdb_service->core[0]
2693 , target->gdb_service->core[1]);
2698 static const struct command_registration cortex_a8_exec_command_handlers[] = {
2700 .name = "cache_info",
2701 .handler = cortex_a8_handle_cache_info_command,
2702 .mode = COMMAND_EXEC,
2703 .help = "display information about target caches",
2707 .handler = cortex_a8_handle_dbginit_command,
2708 .mode = COMMAND_EXEC,
2709 .help = "Initialize core debug",
2712 .handler = cortex_a8_handle_smp_off_command,
2713 .mode = COMMAND_EXEC,
2714 .help = "Stop smp handling",
2718 .handler = cortex_a8_handle_smp_on_command,
2719 .mode = COMMAND_EXEC,
2720 .help = "Restart smp handling",
2724 .handler = cortex_a8_handle_smp_gdb_command,
2725 .mode = COMMAND_EXEC,
2726 .help = "display/fix current core played to gdb",
2730 COMMAND_REGISTRATION_DONE
2732 static const struct command_registration cortex_a8_command_handlers[] = {
2734 .chain = arm_command_handlers,
2737 .chain = armv7a_command_handlers,
2740 .name = "cortex_a8",
2741 .mode = COMMAND_ANY,
2742 .help = "Cortex-A8 command group",
2743 .chain = cortex_a8_exec_command_handlers,
2745 COMMAND_REGISTRATION_DONE
2748 struct target_type cortexa8_target = {
2749 .name = "cortex_a8",
2751 .poll = cortex_a8_poll,
2752 .arch_state = armv7a_arch_state,
2754 .target_request_data = NULL,
2756 .halt = cortex_a8_halt,
2757 .resume = cortex_a8_resume,
2758 .step = cortex_a8_step,
2760 .assert_reset = cortex_a8_assert_reset,
2761 .deassert_reset = cortex_a8_deassert_reset,
2762 .soft_reset_halt = NULL,
2764 /* REVISIT allow exporting VFP3 registers ... */
2765 .get_gdb_reg_list = arm_get_gdb_reg_list,
2767 .read_memory = cortex_a8_read_memory,
2768 .write_memory = cortex_a8_write_memory,
2769 .bulk_write_memory = cortex_a8_bulk_write_memory,
2771 .checksum_memory = arm_checksum_memory,
2772 .blank_check_memory = arm_blank_check_memory,
2774 .run_algorithm = armv4_5_run_algorithm,
2776 .add_breakpoint = cortex_a8_add_breakpoint,
2777 .add_context_breakpoint = cortex_a8_add_context_breakpoint,
2778 .add_hybrid_breakpoint = cortex_a8_add_hybrid_breakpoint,
2779 .remove_breakpoint = cortex_a8_remove_breakpoint,
2780 .add_watchpoint = NULL,
2781 .remove_watchpoint = NULL,
2783 .commands = cortex_a8_command_handlers,
2784 .target_create = cortex_a8_target_create,
2785 .init_target = cortex_a8_init_target,
2786 .examine = cortex_a8_examine,
2788 .read_phys_memory = cortex_a8_read_phys_memory,
2789 .write_phys_memory = cortex_a8_write_phys_memory,
2790 .mmu = cortex_a8_mmu,
2791 .virt2phys = cortex_a8_virt2phys,