2 * Copyright (C) 2009 by David Brownell
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
22 #include "armv8_dpm.h"
23 #include <jtag/jtag.h>
25 #include "breakpoints.h"
26 #include "target_type.h"
27 #include "armv8_opcodes.h"
29 #include "helper/time_support.h"
/*
 * Format a T32 (Thumb-2) instruction for the EDITR: the two 16-bit
 * halfwords are swapped.  Parenthesize the macro argument so that
 * expression arguments (e.g. "a | b") bind correctly.
 */
#define T32_FMTITR(instr) ((((instr) & 0x0000FFFF) << 16) | (((instr) & 0xFFFF0000) >> 16))
36 * Implements various ARM DPM operations using architectural debug registers.
37 * These routines layer over core-specific communication methods to cope with
38 * implementation differences between cores like ARM1136 and Cortex-A8.
40 * The "Debug Programmers' Model" (DPM) for ARMv6 and ARMv7 is defined by
41 * Part C (Debug Architecture) of the ARM Architecture Reference Manual,
42 * ARMv7-A and ARMv7-R edition (ARM DDI 0406B). In OpenOCD, DPM operations
43 * are abstracted through internal programming interfaces to share code and
44 * to minimize needless differences in debug behavior between cores.
48 * Get core state from EDSCR, without necessity to retrieve CPSR
50 enum arm_state armv8_dpm_get_core_state(struct arm_dpm *dpm)
52 int el = (dpm->dscr >> 8) & 0x3;
53 int rw = (dpm->dscr >> 10) & 0xF;
58 /* find the first '0' in DSCR.RW */
59 for (pos = 3; pos >= 0; pos--) {
60 if ((rw & (1 << pos)) == 0)
65 return ARM_STATE_AARCH64;
70 /*----------------------------------------------------------------------*/
72 static int dpmv8_write_dcc(struct armv8_common *armv8, uint32_t data)
74 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
75 return mem_ap_write_u32(armv8->debug_ap,
76 armv8->debug_base + CPUV8_DBG_DTRRX, data);
79 static int dpmv8_write_dcc_64(struct armv8_common *armv8, uint64_t data)
82 LOG_DEBUG("write DCC 0x%016" PRIx64, data);
83 ret = mem_ap_write_u32(armv8->debug_ap,
84 armv8->debug_base + CPUV8_DBG_DTRRX, data);
86 ret = mem_ap_write_u32(armv8->debug_ap,
87 armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
91 static int dpmv8_read_dcc(struct armv8_common *armv8, uint32_t *data,
94 uint32_t dscr = DSCR_ITE;
100 /* Wait for DTRRXfull */
101 long long then = timeval_ms();
102 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
103 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
104 armv8->debug_base + CPUV8_DBG_DSCR,
106 if (retval != ERROR_OK)
108 if (timeval_ms() > then + 1000) {
109 LOG_ERROR("Timeout waiting for read dcc");
114 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
115 armv8->debug_base + CPUV8_DBG_DTRTX,
117 if (retval != ERROR_OK)
119 LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
127 static int dpmv8_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
130 uint32_t dscr = DSCR_ITE;
137 /* Wait for DTRRXfull */
138 long long then = timeval_ms();
139 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
140 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
141 armv8->debug_base + CPUV8_DBG_DSCR,
143 if (retval != ERROR_OK)
145 if (timeval_ms() > then + 1000) {
146 LOG_ERROR("Timeout waiting for DTR_TX_FULL, dscr = 0x%08" PRIx32, dscr);
151 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
152 armv8->debug_base + CPUV8_DBG_DTRTX,
154 if (retval != ERROR_OK)
157 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
158 armv8->debug_base + CPUV8_DBG_DTRRX,
160 if (retval != ERROR_OK)
163 *data = *(uint32_t *)data | (uint64_t)higher << 32;
164 LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
172 static int dpmv8_dpm_prepare(struct arm_dpm *dpm)
174 struct armv8_common *armv8 = dpm->arm->arch_info;
178 /* set up invariant: ITE is set after ever DPM operation */
179 long long then = timeval_ms();
181 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
182 armv8->debug_base + CPUV8_DBG_DSCR,
184 if (retval != ERROR_OK)
186 if ((dscr & DSCR_ITE) != 0)
188 if (timeval_ms() > then + 1000) {
189 LOG_ERROR("Timeout waiting for dpm prepare");
194 /* update the stored copy of dscr */
197 /* this "should never happen" ... */
198 if (dscr & DSCR_DTR_RX_FULL) {
199 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
201 retval = mem_ap_read_u32(armv8->debug_ap,
202 armv8->debug_base + CPUV8_DBG_DTRRX, &dscr);
203 if (retval != ERROR_OK)
210 static int dpmv8_dpm_finish(struct arm_dpm *dpm)
212 /* REVISIT what could be done here? */
216 static int dpmv8_exec_opcode(struct arm_dpm *dpm,
217 uint32_t opcode, uint32_t *p_dscr)
219 struct armv8_common *armv8 = dpm->arm->arch_info;
220 uint32_t dscr = dpm->dscr;
223 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
228 /* Wait for InstrCompl bit to be set */
229 long long then = timeval_ms();
230 while ((dscr & DSCR_ITE) == 0) {
231 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
232 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
233 if (retval != ERROR_OK) {
234 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
237 if (timeval_ms() > then + 1000) {
238 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
243 if (armv8_dpm_get_core_state(dpm) != ARM_STATE_AARCH64)
244 opcode = T32_FMTITR(opcode);
246 retval = mem_ap_write_u32(armv8->debug_ap,
247 armv8->debug_base + CPUV8_DBG_ITR, opcode);
248 if (retval != ERROR_OK)
253 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
254 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
255 if (retval != ERROR_OK) {
256 LOG_ERROR("Could not read DSCR register");
259 if (timeval_ms() > then + 1000) {
260 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
263 } while ((dscr & DSCR_ITE) == 0); /* Wait for InstrCompl bit to be set */
265 /* update dscr and el after each command execution */
267 if (dpm->last_el != ((dscr >> 8) & 3))
268 LOG_DEBUG("EL %i -> %i", dpm->last_el, (dscr >> 8) & 3);
269 dpm->last_el = (dscr >> 8) & 3;
271 if (dscr & DSCR_ERR) {
272 LOG_ERROR("Opcode 0x%08"PRIx32", DSCR.ERR=1, DSCR.EL=%i", opcode, dpm->last_el);
273 armv8_dpm_handle_exception(dpm);
283 static int dpmv8_instr_execute(struct arm_dpm *dpm, uint32_t opcode)
285 return dpmv8_exec_opcode(dpm, opcode, NULL);
288 static int dpmv8_instr_write_data_dcc(struct arm_dpm *dpm,
289 uint32_t opcode, uint32_t data)
291 struct armv8_common *armv8 = dpm->arm->arch_info;
294 retval = dpmv8_write_dcc(armv8, data);
295 if (retval != ERROR_OK)
298 return dpmv8_exec_opcode(dpm, opcode, 0);
301 static int dpmv8_instr_write_data_dcc_64(struct arm_dpm *dpm,
302 uint32_t opcode, uint64_t data)
304 struct armv8_common *armv8 = dpm->arm->arch_info;
307 retval = dpmv8_write_dcc_64(armv8, data);
308 if (retval != ERROR_OK)
311 return dpmv8_exec_opcode(dpm, opcode, 0);
314 static int dpmv8_instr_write_data_r0(struct arm_dpm *dpm,
315 uint32_t opcode, uint32_t data)
317 struct armv8_common *armv8 = dpm->arm->arch_info;
318 uint32_t dscr = DSCR_ITE;
321 retval = dpmv8_write_dcc(armv8, data);
322 if (retval != ERROR_OK)
325 retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, READ_REG_DTRRX), &dscr);
326 if (retval != ERROR_OK)
329 /* then the opcode, taking data from R0 */
330 return dpmv8_exec_opcode(dpm, opcode, &dscr);
333 static int dpmv8_instr_write_data_r0_64(struct arm_dpm *dpm,
334 uint32_t opcode, uint64_t data)
336 struct armv8_common *armv8 = dpm->arm->arch_info;
339 if (dpm->arm->core_state != ARM_STATE_AARCH64)
340 return dpmv8_instr_write_data_r0(dpm, opcode, data);
342 /* transfer data from DCC to R0 */
343 retval = dpmv8_write_dcc_64(armv8, data);
344 if (retval == ERROR_OK)
345 retval = dpmv8_exec_opcode(dpm, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dpm->dscr);
347 /* then the opcode, taking data from R0 */
348 if (retval == ERROR_OK)
349 retval = dpmv8_exec_opcode(dpm, opcode, &dpm->dscr);
354 static int dpmv8_instr_cpsr_sync(struct arm_dpm *dpm)
357 struct armv8_common *armv8 = dpm->arm->arch_info;
359 /* "Prefetch flush" after modifying execution status in CPSR */
360 retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, ARMV8_OPC_DSB_SY), &dpm->dscr);
361 if (retval == ERROR_OK)
362 dpmv8_exec_opcode(dpm, armv8_opcode(armv8, ARMV8_OPC_ISB_SY), &dpm->dscr);
366 static int dpmv8_instr_read_data_dcc(struct arm_dpm *dpm,
367 uint32_t opcode, uint32_t *data)
369 struct armv8_common *armv8 = dpm->arm->arch_info;
372 /* the opcode, writing data to DCC */
373 retval = dpmv8_exec_opcode(dpm, opcode, &dpm->dscr);
374 if (retval != ERROR_OK)
377 return dpmv8_read_dcc(armv8, data, &dpm->dscr);
380 static int dpmv8_instr_read_data_dcc_64(struct arm_dpm *dpm,
381 uint32_t opcode, uint64_t *data)
383 struct armv8_common *armv8 = dpm->arm->arch_info;
386 /* the opcode, writing data to DCC */
387 retval = dpmv8_exec_opcode(dpm, opcode, &dpm->dscr);
388 if (retval != ERROR_OK)
391 return dpmv8_read_dcc_64(armv8, data, &dpm->dscr);
394 static int dpmv8_instr_read_data_r0(struct arm_dpm *dpm,
395 uint32_t opcode, uint32_t *data)
397 struct armv8_common *armv8 = dpm->arm->arch_info;
400 /* the opcode, writing data to R0 */
401 retval = dpmv8_exec_opcode(dpm, opcode, &dpm->dscr);
402 if (retval != ERROR_OK)
405 /* write R0 to DCC */
406 retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, WRITE_REG_DTRTX), &dpm->dscr);
407 if (retval != ERROR_OK)
410 return dpmv8_read_dcc(armv8, data, &dpm->dscr);
413 static int dpmv8_instr_read_data_r0_64(struct arm_dpm *dpm,
414 uint32_t opcode, uint64_t *data)
416 struct armv8_common *armv8 = dpm->arm->arch_info;
419 if (dpm->arm->core_state != ARM_STATE_AARCH64) {
421 retval = dpmv8_instr_read_data_r0(dpm, opcode, &tmp);
422 if (retval == ERROR_OK)
427 /* the opcode, writing data to R0 */
428 retval = dpmv8_exec_opcode(dpm, opcode, &dpm->dscr);
429 if (retval != ERROR_OK)
432 /* write R0 to DCC */
433 retval = dpmv8_exec_opcode(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dpm->dscr);
434 if (retval != ERROR_OK)
437 return dpmv8_read_dcc_64(armv8, data, &dpm->dscr);
441 static int dpmv8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
442 target_addr_t addr, uint32_t control)
444 struct armv8_common *armv8 = dpm->arm->arch_info;
445 uint32_t vr = armv8->debug_base;
446 uint32_t cr = armv8->debug_base;
450 case 0 ... 15: /* breakpoints */
451 vr += CPUV8_DBG_BVR_BASE;
452 cr += CPUV8_DBG_BCR_BASE;
454 case 16 ... 31: /* watchpoints */
455 vr += CPUV8_DBG_WVR_BASE;
456 cr += CPUV8_DBG_WCR_BASE;
465 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
466 (unsigned) vr, (unsigned) cr);
468 retval = mem_ap_write_atomic_u32(armv8->debug_ap, vr, addr);
469 if (retval != ERROR_OK)
471 return mem_ap_write_atomic_u32(armv8->debug_ap, cr, control);
475 static int dpmv8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
477 struct armv8_common *armv8 = dpm->arm->arch_info;
482 cr = armv8->debug_base + CPUV8_DBG_BCR_BASE;
485 cr = armv8->debug_base + CPUV8_DBG_WCR_BASE;
493 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
495 /* clear control register */
496 return mem_ap_write_atomic_u32(armv8->debug_ap, cr, 0);
500 * Coprocessor support
503 /* Read coprocessor */
504 static int dpmv8_mrc(struct target *target, int cpnum,
505 uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
508 struct arm *arm = target_to_arm(target);
509 struct arm_dpm *dpm = arm->dpm;
512 retval = dpm->prepare(dpm);
513 if (retval != ERROR_OK)
516 LOG_DEBUG("MRC p%d, %d, r0, c%d, c%d, %d", cpnum,
517 (int) op1, (int) CRn,
518 (int) CRm, (int) op2);
520 /* read coprocessor register into R0; return via DCC */
521 retval = dpm->instr_read_data_r0(dpm,
522 ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2),
525 /* (void) */ dpm->finish(dpm);
529 static int dpmv8_mcr(struct target *target, int cpnum,
530 uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
533 struct arm *arm = target_to_arm(target);
534 struct arm_dpm *dpm = arm->dpm;
537 retval = dpm->prepare(dpm);
538 if (retval != ERROR_OK)
541 LOG_DEBUG("MCR p%d, %d, r0, c%d, c%d, %d", cpnum,
542 (int) op1, (int) CRn,
543 (int) CRm, (int) op2);
545 /* read DCC into r0; then write coprocessor register from R0 */
546 retval = dpm->instr_write_data_r0(dpm,
547 ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2),
550 /* (void) */ dpm->finish(dpm);
554 /*----------------------------------------------------------------------*/
557 * Register access utilities
560 int armv8_dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode)
562 struct armv8_common *armv8 = (struct armv8_common *)dpm->arm->arch_info;
563 int retval = ERROR_OK;
564 unsigned int target_el;
565 enum arm_state core_state;
568 /* restore previous mode */
569 if (mode == ARM_MODE_ANY) {
570 cpsr = buf_get_u32(dpm->arm->cpsr->value, 0, 32);
572 LOG_DEBUG("restoring mode, cpsr = 0x%08"PRIx32, cpsr);
575 LOG_DEBUG("setting mode 0x%"PRIx32, mode);
577 /* else force to the specified mode */
578 if (is_arm_mode(mode))
584 switch (cpsr & 0x1f) {
596 * TODO: handle ARM_MODE_HYP
606 target_el = (cpsr >> 2) & 3;
609 if (target_el > SYSTEM_CUREL_EL3) {
610 LOG_ERROR("%s: Invalid target exception level %i", __func__, target_el);
614 LOG_DEBUG("target_el = %i, last_el = %i", target_el, dpm->last_el);
615 if (target_el > dpm->last_el) {
616 retval = dpm->instr_execute(dpm,
617 armv8_opcode(armv8, ARMV8_OPC_DCPS) | target_el);
619 /* DCPS clobbers registers just like an exception taken */
620 armv8_dpm_handle_exception(dpm);
622 core_state = armv8_dpm_get_core_state(dpm);
623 if (core_state != ARM_STATE_AARCH64) {
624 /* cannot do DRPS/ERET when already in EL0 */
625 if (dpm->last_el != 0) {
626 /* load SPSR with the desired mode and execute DRPS */
627 LOG_DEBUG("SPSR = 0x%08"PRIx32, cpsr);
628 retval = dpm->instr_write_data_r0(dpm,
629 ARMV8_MSR_GP_xPSR_T1(1, 0, 15), cpsr);
630 if (retval == ERROR_OK)
631 retval = dpm->instr_execute(dpm, armv8_opcode(armv8, ARMV8_OPC_DRPS));
635 * need to execute multiple DRPS instructions until target_el
638 while (retval == ERROR_OK && dpm->last_el != target_el) {
639 unsigned int cur_el = dpm->last_el;
640 retval = dpm->instr_execute(dpm, armv8_opcode(armv8, ARMV8_OPC_DRPS));
641 if (cur_el == dpm->last_el) {
642 LOG_INFO("Cannot reach EL %i, SPSR corrupted?", target_el);
648 /* On executing DRPS, DSPSR and DLR become UNKNOWN, mark them as dirty */
649 dpm->arm->cpsr->dirty = true;
650 dpm->arm->pc->dirty = true;
653 * re-evaluate the core state, we might be in Aarch32 state now
654 * we rely on dpm->dscr being up-to-date
656 core_state = armv8_dpm_get_core_state(dpm);
657 armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
658 armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
665 * Common register read, relies on armv8_select_reg_access() having been called.
667 static int dpmv8_read_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
669 struct armv8_common *armv8 = dpm->arm->arch_info;
673 retval = armv8->read_reg_u64(armv8, regnum, &value_64);
675 if (retval == ERROR_OK) {
678 buf_set_u64(r->value, 0, r->size, value_64);
680 LOG_DEBUG("READ: %s, %16.8llx", r->name, (unsigned long long) value_64);
682 LOG_DEBUG("READ: %s, %8.8x", r->name, (unsigned int) value_64);
688 * Common register write, relies on armv8_select_reg_access() having been called.
690 static int dpmv8_write_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
692 struct armv8_common *armv8 = dpm->arm->arch_info;
693 int retval = ERROR_FAIL;
696 value_64 = buf_get_u64(r->value, 0, r->size);
698 retval = armv8->write_reg_u64(armv8, regnum, value_64);
699 if (retval == ERROR_OK) {
702 LOG_DEBUG("WRITE: %s, %16.8llx", r->name, (unsigned long long)value_64);
704 LOG_DEBUG("WRITE: %s, %8.8x", r->name, (unsigned int)value_64);
711 * Read basic registers of the the current context: R0 to R15, and CPSR;
712 * sets the core mode (such as USR or IRQ) and state (such as ARM or Thumb).
713 * In normal operation this is called on entry to halting debug state,
714 * possibly after some other operations supporting restore of debug state
715 * or making sure the CPU is fully idle (drain write buffer, etc).
/*
 * NOTE(review): this function is truncated in the visible text --
 * declarations of 'r', 'cpsr' and 'retval', several error-path
 * 'return'/'goto' statements, the per-register skip conditions inside
 * the loop, and the closing braces are missing.  Restore from upstream
 * before building; the comments below describe the visible flow only.
 */
717 int armv8_dpm_read_current_registers(struct arm_dpm *dpm)
719 struct arm *arm = dpm->arm;
720 struct armv8_common *armv8 = (struct armv8_common *)arm->arch_info;
721 struct reg_cache *cache;
/* open the DPM transaction; bail out if the debug unit is not ready */
726 retval = dpm->prepare(dpm);
727 if (retval != ERROR_OK)
730 cache = arm->core_cache;
732 /* read R0 first (it's used for scratch), then CPSR */
733 r = cache->reg_list + 0;
735 retval = dpmv8_read_reg(dpm, r, 0);
736 if (retval != ERROR_OK)
741 /* read cpsr to r0 and get it back */
742 retval = dpm->instr_read_data_r0(dpm,
743 armv8_opcode(armv8, READ_REG_DSPSR), &cpsr);
744 if (retval != ERROR_OK)
747 /* update core mode and state */
748 armv8_set_cpsr(arm, cpsr);
/* walk the remaining cache entries; R0 (index 0) was handled above */
750 for (unsigned int i = 1; i < cache->num_regs ; i++) {
751 struct arm_reg *arm_reg;
753 r = armv8_reg_current(arm, i);
758 * Only read registers that are available from the
759 * current EL (or core mode).
761 arm_reg = r->arch_info;
762 if (arm_reg->mode != ARM_MODE_ANY &&
763 dpm->last_el != armv8_curel_from_core_mode(arm_reg->mode))
/* read this register into the cache; abort the loop on error */
766 retval = dpmv8_read_reg(dpm, r, i);
767 if (retval != ERROR_OK)
777 /* Avoid needless I/O ... leave breakpoints and watchpoints alone
778 * unless they're removed, or need updating because of single-stepping
779 * or running debugger code.
781 static int dpmv8_maybe_update_bpwp(struct arm_dpm *dpm, bool bpwp,
782 struct dpm_bpwp *xp, int *set_p)
784 int retval = ERROR_OK;
791 /* removed or startup; we must disable it */
796 /* disabled, but we must set it */
797 xp->dirty = disable = false;
802 /* set, but we must temporarily disable it */
803 xp->dirty = disable = true;
808 retval = dpm->bpwp_disable(dpm, xp->number);
810 retval = dpm->bpwp_enable(dpm, xp->number,
811 xp->address, xp->control);
813 if (retval != ERROR_OK)
814 LOG_ERROR("%s: can't %s HW %spoint %d",
815 disable ? "disable" : "enable",
816 target_name(dpm->arm->target),
817 (xp->number < 16) ? "break" : "watch",
823 static int dpmv8_add_breakpoint(struct target *target, struct breakpoint *bp);
826 * Writes all modified core registers for all processor modes. In normal
827 * operation this is called on exit from halting debug state.
829 * @param dpm: represents the processor
830 * @param bpwp: true ensures breakpoints and watchpoints are set,
831 * false ensures they are cleared
833 int armv8_dpm_write_dirty_registers(struct arm_dpm *dpm, bool bpwp)
835 struct arm *arm = dpm->arm;
836 struct reg_cache *cache = arm->core_cache;
839 retval = dpm->prepare(dpm);
840 if (retval != ERROR_OK)
843 /* If we're managing hardware breakpoints for this core, enable
844 * or disable them as requested.
846 * REVISIT We don't yet manage them for ANY cores. Eventually
847 * we should be able to assume we handle them; but until then,
848 * cope with the hand-crafted breakpoint code.
850 if (arm->target->type->add_breakpoint == dpmv8_add_breakpoint) {
851 for (unsigned i = 0; i < dpm->nbp; i++) {
852 struct dpm_bp *dbp = dpm->dbp + i;
853 struct breakpoint *bp = dbp->bp;
855 retval = dpmv8_maybe_update_bpwp(dpm, bpwp, &dbp->bpwp,
856 bp ? &bp->set : NULL);
857 if (retval != ERROR_OK)
862 /* enable/disable watchpoints */
863 for (unsigned i = 0; i < dpm->nwp; i++) {
864 struct dpm_wp *dwp = dpm->dwp + i;
865 struct watchpoint *wp = dwp->wp;
867 retval = dpmv8_maybe_update_bpwp(dpm, bpwp, &dwp->bpwp,
868 wp ? &wp->set : NULL);
869 if (retval != ERROR_OK)
873 /* NOTE: writes to breakpoint and watchpoint registers might
874 * be queued, and need (efficient/batched) flushing later.
877 /* Restore original core mode and state */
878 retval = armv8_dpm_modeswitch(dpm, ARM_MODE_ANY);
879 if (retval != ERROR_OK)
882 /* check everything except our scratch register R0 */
883 for (unsigned i = 1; i < cache->num_regs; i++) {
886 /* skip PC and CPSR */
887 if (i == ARMV8_PC || i == ARMV8_xPSR)
890 if (!cache->reg_list[i].valid)
893 if (!cache->reg_list[i].dirty)
896 /* skip all registers not on the current EL */
897 r = cache->reg_list[i].arch_info;
898 if (r->mode != ARM_MODE_ANY &&
899 dpm->last_el != armv8_curel_from_core_mode(r->mode))
902 retval = dpmv8_write_reg(dpm, &cache->reg_list[i], i);
903 if (retval != ERROR_OK)
907 /* flush CPSR and PC */
908 if (retval == ERROR_OK)
909 retval = dpmv8_write_reg(dpm, &cache->reg_list[ARMV8_xPSR], ARMV8_xPSR);
910 if (retval == ERROR_OK)
911 retval = dpmv8_write_reg(dpm, &cache->reg_list[ARMV8_PC], ARMV8_PC);
912 /* flush R0 -- it's *very* dirty by now */
913 if (retval == ERROR_OK)
914 retval = dpmv8_write_reg(dpm, &cache->reg_list[0], 0);
915 if (retval == ERROR_OK)
916 dpm->instr_cpsr_sync(dpm);
923 * Standard ARM register accessors ... there are three methods
924 * in "struct arm", to support individual read/write and bulk read
928 static int armv8_dpm_read_core_reg(struct target *target, struct reg *r,
929 int regnum, enum arm_mode mode)
931 struct arm *arm = target_to_arm(target);
932 struct arm_dpm *dpm = target_to_arm(target)->dpm;
934 int max = arm->core_cache->num_regs;
936 if (regnum < 0 || regnum >= max)
937 return ERROR_COMMAND_SYNTAX_ERROR;
940 * REVISIT what happens if we try to read SPSR in a core mode
941 * which has no such register?
943 retval = dpm->prepare(dpm);
944 if (retval != ERROR_OK)
947 retval = dpmv8_read_reg(dpm, r, regnum);
948 if (retval != ERROR_OK)
952 /* (void) */ dpm->finish(dpm);
956 static int armv8_dpm_write_core_reg(struct target *target, struct reg *r,
957 int regnum, enum arm_mode mode, uint8_t *value)
959 struct arm *arm = target_to_arm(target);
960 struct arm_dpm *dpm = target_to_arm(target)->dpm;
962 int max = arm->core_cache->num_regs;
964 if (regnum < 0 || regnum > max)
965 return ERROR_COMMAND_SYNTAX_ERROR;
967 /* REVISIT what happens if we try to write SPSR in a core mode
968 * which has no such register?
971 retval = dpm->prepare(dpm);
972 if (retval != ERROR_OK)
975 retval = dpmv8_write_reg(dpm, r, regnum);
977 /* always clean up, regardless of error */
/*
 * NOTE(review): this function is truncated in the visible text -- the
 * outer "repeat until all registers read" loop structure, declarations
 * of 'retval', 'r' and the did-read flag, 'continue'/'goto' statements
 * and the closing braces are missing.  Restore from upstream before
 * building; comments below annotate the visible flow only.
 */
983 static int armv8_dpm_full_context(struct target *target)
985 struct arm *arm = target_to_arm(target);
986 struct arm_dpm *dpm = arm->dpm;
987 struct reg_cache *cache = arm->core_cache;
/* open the DPM transaction before touching any registers */
991 retval = dpm->prepare(dpm);
992 if (retval != ERROR_OK)
996 enum arm_mode mode = ARM_MODE_ANY;
1000 /* We "know" arm_dpm_read_current_registers() was called so
1001 * the unmapped registers (R0..R7, PC, AND CPSR) and some
1002 * view of R8..R14 are current. We also "know" oddities of
1003 * register mapping: special cases for R8..R12 and SPSR.
1005 * Pick some mode with unread registers and read them all.
1006 * Repeat until done.
1008 for (unsigned i = 0; i < cache->num_regs; i++) {
/* already-valid cache entries need no further I/O */
1011 if (cache->reg_list[i].valid)
1013 r = cache->reg_list[i].arch_info;
1015 /* may need to pick a mode and set CPSR */
1020 /* For regular (ARM_MODE_ANY) R8..R12
1021 * in case we've entered debug state
1022 * in FIQ mode we need to patch mode.
1024 if (mode != ARM_MODE_ANY)
1025 retval = armv8_dpm_modeswitch(dpm, mode);
/* fallback: banked ARM_MODE_ANY registers are read via USR mode */
1027 retval = armv8_dpm_modeswitch(dpm, ARM_MODE_USR);
1029 if (retval != ERROR_OK)
/* only read registers belonging to the mode currently selected */
1032 if (r->mode != mode)
1035 /* CPSR was read, so "R16" must mean SPSR */
1036 retval = dpmv8_read_reg(dpm,
1037 &cache->reg_list[i],
1038 (r->num == 16) ? 17 : r->num);
1039 if (retval != ERROR_OK)
/* restore the original mode and close the transaction */
1045 retval = armv8_dpm_modeswitch(dpm, ARM_MODE_ANY);
1046 /* (void) */ dpm->finish(dpm);
1052 /*----------------------------------------------------------------------*/
1055 * Breakpoint and Watchpoint support.
1057 * Hardware {break,watch}points are usually left active, to minimize
1058 * debug entry/exit costs. When they are set or cleared, it's done in
1059 * batches. Also, DPM-conformant hardware can update debug registers
1060 * regardless of whether the CPU is running or halted ... though that
1061 * fact isn't currently leveraged.
1064 static int dpmv8_bpwp_setup(struct arm_dpm *dpm, struct dpm_bpwp *xp,
1065 uint32_t addr, uint32_t length)
1069 control = (1 << 0) /* enable */
1070 | (3 << 1); /* both user and privileged access */
1072 /* Match 1, 2, or all 4 byte addresses in this word.
1074 * FIXME: v7 hardware allows lengths up to 2 GB for BP and WP.
1075 * Support larger length, when addr is suitably aligned. In
1076 * particular, allow watchpoints on 8 byte "double" values.
1078 * REVISIT allow watchpoints on unaligned 2-bit values; and on
1079 * v7 hardware, unaligned 4-byte ones too.
1083 control |= (1 << (addr & 3)) << 5;
1086 /* require 2-byte alignment */
1088 control |= (3 << (addr & 2)) << 5;
1093 /* require 4-byte alignment */
1095 control |= 0xf << 5;
1100 LOG_ERROR("unsupported {break,watch}point length/alignment");
1101 return ERROR_COMMAND_SYNTAX_ERROR;
1104 /* other shared control bits:
1105 * bits 15:14 == 0 ... both secure and nonsecure states (v6.1+ only)
1106 * bit 20 == 0 ... not linked to a context ID
1107 * bit 28:24 == 0 ... not ignoring N LSBs (v7 only)
1110 xp->address = addr & ~3;
1111 xp->control = control;
1114 LOG_DEBUG("BPWP: addr %8.8" PRIx32 ", control %" PRIx32 ", number %d",
1115 xp->address, control, xp->number);
1117 /* hardware is updated in write_dirty_registers() */
1121 static int dpmv8_add_breakpoint(struct target *target, struct breakpoint *bp)
1123 struct arm *arm = target_to_arm(target);
1124 struct arm_dpm *dpm = arm->dpm;
1125 int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1128 return ERROR_COMMAND_SYNTAX_ERROR;
1129 if (!dpm->bpwp_enable)
1132 /* FIXME we need a generic solution for software breakpoints. */
1133 if (bp->type == BKPT_SOFT)
1134 LOG_DEBUG("using HW bkpt, not SW...");
1136 for (unsigned i = 0; i < dpm->nbp; i++) {
1137 if (!dpm->dbp[i].bp) {
1138 retval = dpmv8_bpwp_setup(dpm, &dpm->dbp[i].bpwp,
1139 bp->address, bp->length);
1140 if (retval == ERROR_OK)
1141 dpm->dbp[i].bp = bp;
1149 static int dpmv8_remove_breakpoint(struct target *target, struct breakpoint *bp)
1151 struct arm *arm = target_to_arm(target);
1152 struct arm_dpm *dpm = arm->dpm;
1153 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1155 for (unsigned i = 0; i < dpm->nbp; i++) {
1156 if (dpm->dbp[i].bp == bp) {
1157 dpm->dbp[i].bp = NULL;
1158 dpm->dbp[i].bpwp.dirty = true;
1160 /* hardware is updated in write_dirty_registers() */
1169 static int dpmv8_watchpoint_setup(struct arm_dpm *dpm, unsigned index_t,
1170 struct watchpoint *wp)
1173 struct dpm_wp *dwp = dpm->dwp + index_t;
1176 /* this hardware doesn't support data value matching or masking */
1177 if (wp->value || wp->mask != ~(uint32_t)0) {
1178 LOG_DEBUG("watchpoint values and masking not supported");
1179 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1182 retval = dpmv8_bpwp_setup(dpm, &dwp->bpwp, wp->address, wp->length);
1183 if (retval != ERROR_OK)
1186 control = dwp->bpwp.control;
1198 dwp->bpwp.control = control;
1200 dpm->dwp[index_t].wp = wp;
1205 static int dpmv8_add_watchpoint(struct target *target, struct watchpoint *wp)
1207 struct arm *arm = target_to_arm(target);
1208 struct arm_dpm *dpm = arm->dpm;
1209 int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1211 if (dpm->bpwp_enable) {
1212 for (unsigned i = 0; i < dpm->nwp; i++) {
1213 if (!dpm->dwp[i].wp) {
1214 retval = dpmv8_watchpoint_setup(dpm, i, wp);
1223 static int dpmv8_remove_watchpoint(struct target *target, struct watchpoint *wp)
1225 struct arm *arm = target_to_arm(target);
1226 struct arm_dpm *dpm = arm->dpm;
1227 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1229 for (unsigned i = 0; i < dpm->nwp; i++) {
1230 if (dpm->dwp[i].wp == wp) {
1231 dpm->dwp[i].wp = NULL;
1232 dpm->dwp[i].bpwp.dirty = true;
1234 /* hardware is updated in write_dirty_registers() */
1243 void armv8_dpm_report_wfar(struct arm_dpm *dpm, uint64_t addr)
1245 switch (dpm->arm->core_state) {
1247 case ARM_STATE_AARCH64:
1250 case ARM_STATE_THUMB:
1251 case ARM_STATE_THUMB_EE:
1254 case ARM_STATE_JAZELLE:
1258 LOG_DEBUG("Unknown core_state");
1265 * Handle exceptions taken in debug state. This happens mostly for memory
1266 * accesses that violated a MMU policy. Taking an exception while in debug
1267 * state clobbers certain state registers on the target exception level.
1268 * Just mark those registers dirty so that they get restored on resume.
1269 * This works both for Aarch32 and Aarch64 states.
1271 * This function must not perform any actions that trigger another exception
1272 * or a recursion will happen.
1274 void armv8_dpm_handle_exception(struct arm_dpm *dpm)
1276 struct armv8_common *armv8 = dpm->arm->arch_info;
1277 struct reg_cache *cache = dpm->arm->core_cache;
1278 enum arm_state core_state;
1283 static const int clobbered_regs_by_el[3][5] = {
1284 { ARMV8_PC, ARMV8_xPSR, ARMV8_ELR_EL1, ARMV8_ESR_EL1, ARMV8_SPSR_EL1 },
1285 { ARMV8_PC, ARMV8_xPSR, ARMV8_ELR_EL2, ARMV8_ESR_EL2, ARMV8_SPSR_EL2 },
1286 { ARMV8_PC, ARMV8_xPSR, ARMV8_ELR_EL3, ARMV8_ESR_EL3, ARMV8_SPSR_EL3 },
1289 el = (dpm->dscr >> 8) & 3;
1291 /* safety check, must not happen since EL0 cannot be a target for an exception */
1292 if (el < SYSTEM_CUREL_EL1 || el > SYSTEM_CUREL_EL3) {
1293 LOG_ERROR("%s: EL %i is invalid, DSCR corrupted?", __func__, el);
1297 /* Clear sticky error */
1298 mem_ap_write_u32(armv8->debug_ap,
1299 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1301 armv8->read_reg_u64(armv8, ARMV8_xPSR, &dlr);
1303 armv8->read_reg_u64(armv8, ARMV8_PC, &dlr);
1305 LOG_DEBUG("Exception taken to EL %i, DLR=0x%016"PRIx64" DSPSR=0x%08"PRIx32,
1308 /* mark all clobbered registers as dirty */
1309 for (int i = 0; i < 5; i++)
1310 cache->reg_list[clobbered_regs_by_el[el-1][i]].dirty = true;
1313 * re-evaluate the core state, we might be in Aarch64 state now
1314 * we rely on dpm->dscr being up-to-date
1316 core_state = armv8_dpm_get_core_state(dpm);
1317 armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
1318 armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
1321 /*----------------------------------------------------------------------*/
1324 * Other debug and support utilities
1327 void armv8_dpm_report_dscr(struct arm_dpm *dpm, uint32_t dscr)
1329 struct target *target = dpm->arm->target;
1332 dpm->last_el = (dscr >> 8) & 3;
1334 /* Examine debug reason */
1335 switch (DSCR_ENTRY(dscr)) {
1336 /* FALL THROUGH -- assume a v6 core in abort mode */
1337 case DSCRV8_ENTRY_EXT_DEBUG: /* EDBGRQ */
1338 target->debug_reason = DBG_REASON_DBGRQ;
1340 case DSCRV8_ENTRY_HALT_STEP_EXECLU: /* HALT step */
1341 case DSCRV8_ENTRY_HALT_STEP_NORMAL: /* Halt step*/
1342 case DSCRV8_ENTRY_HALT_STEP:
1343 target->debug_reason = DBG_REASON_SINGLESTEP;
1345 case DSCRV8_ENTRY_HLT: /* HLT instruction (software breakpoint) */
1346 case DSCRV8_ENTRY_BKPT: /* SW BKPT (?) */
1347 case DSCRV8_ENTRY_RESET_CATCH: /* Reset catch */
1348 case DSCRV8_ENTRY_OS_UNLOCK: /*OS unlock catch*/
1349 case DSCRV8_ENTRY_EXCEPTION_CATCH: /*exception catch*/
1350 case DSCRV8_ENTRY_SW_ACCESS_DBG: /*SW access dbg register*/
1351 target->debug_reason = DBG_REASON_BREAKPOINT;
1353 case DSCRV8_ENTRY_WATCHPOINT: /* asynch watchpoint */
1354 target->debug_reason = DBG_REASON_WATCHPOINT;
1357 target->debug_reason = DBG_REASON_UNDEFINED;
1363 /*----------------------------------------------------------------------*/
1366 * Setup and management support.
1370 * Hooks up this DPM to its associated target; call only once.
1371 * Initially this only covers the register cache.
1373 * Oh, and watchpoints. Yeah.
1375 int armv8_dpm_setup(struct arm_dpm *dpm)
1377 struct arm *arm = dpm->arm;
1378 struct target *target = arm->target;
1379 struct reg_cache *cache;
1382 /* register access setup */
1383 arm->full_context = armv8_dpm_full_context;
1384 arm->read_core_reg = armv8_dpm_read_core_reg;
1385 arm->write_core_reg = armv8_dpm_write_core_reg;
1387 if (arm->core_cache == NULL) {
1388 cache = armv8_build_reg_cache(target);
1393 /* coprocessor access setup */
1394 arm->mrc = dpmv8_mrc;
1395 arm->mcr = dpmv8_mcr;
1397 dpm->prepare = dpmv8_dpm_prepare;
1398 dpm->finish = dpmv8_dpm_finish;
1400 dpm->instr_execute = dpmv8_instr_execute;
1401 dpm->instr_write_data_dcc = dpmv8_instr_write_data_dcc;
1402 dpm->instr_write_data_dcc_64 = dpmv8_instr_write_data_dcc_64;
1403 dpm->instr_write_data_r0 = dpmv8_instr_write_data_r0;
1404 dpm->instr_write_data_r0_64 = dpmv8_instr_write_data_r0_64;
1405 dpm->instr_cpsr_sync = dpmv8_instr_cpsr_sync;
1407 dpm->instr_read_data_dcc = dpmv8_instr_read_data_dcc;
1408 dpm->instr_read_data_dcc_64 = dpmv8_instr_read_data_dcc_64;
1409 dpm->instr_read_data_r0 = dpmv8_instr_read_data_r0;
1410 dpm->instr_read_data_r0_64 = dpmv8_instr_read_data_r0_64;
1412 dpm->arm_reg_current = armv8_reg_current;
1414 /* dpm->bpwp_enable = dpmv8_bpwp_enable; */
1415 dpm->bpwp_disable = dpmv8_bpwp_disable;
1417 /* breakpoint setup -- optional until it works everywhere */
1418 if (!target->type->add_breakpoint) {
1419 target->type->add_breakpoint = dpmv8_add_breakpoint;
1420 target->type->remove_breakpoint = dpmv8_remove_breakpoint;
1423 /* watchpoint setup */
1424 target->type->add_watchpoint = dpmv8_add_watchpoint;
1425 target->type->remove_watchpoint = dpmv8_remove_watchpoint;
1427 /* FIXME add vector catch support */
1429 dpm->nbp = 1 + ((dpm->didr >> 12) & 0xf);
1430 dpm->dbp = calloc(dpm->nbp, sizeof *dpm->dbp);
1432 dpm->nwp = 1 + ((dpm->didr >> 20) & 0xf);
1433 dpm->dwp = calloc(dpm->nwp, sizeof *dpm->dwp);
1435 if (!dpm->dbp || !dpm->dwp) {
1441 LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
1442 target_name(target), dpm->nbp, dpm->nwp);
1444 /* REVISIT ... and some of those breakpoints could match
1445 * execution context IDs...
1452 * Reinitializes DPM state at the beginning of a new debug session
1453 * or after a reset which may have affected the debug module.
1455 int armv8_dpm_initialize(struct arm_dpm *dpm)
1457 /* Disable all breakpoints and watchpoints at startup. */
1458 if (dpm->bpwp_disable) {
1461 for (i = 0; i < dpm->nbp; i++) {
1462 dpm->dbp[i].bpwp.number = i;
1463 (void) dpm->bpwp_disable(dpm, i);
1465 for (i = 0; i < dpm->nwp; i++) {
1466 dpm->dwp[i].bpwp.number = 16 + i;
1467 (void) dpm->bpwp_disable(dpm, 16 + i);
1470 LOG_WARNING("%s: can't disable breakpoints and watchpoints",
1471 target_name(dpm->arm->target));