2 * Copyright (C) 2009 by David Brownell
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
22 #include "armv8_dpm.h"
23 #include <jtag/jtag.h>
25 #include "breakpoints.h"
26 #include "target_type.h"
27 #include "armv8_opcodes.h"
29 #include "helper/time_support.h"
/* Swap the two half-words of a 32-bit T32 instruction into the order the
 * EDITR register expects when the core is in an AArch32 state.
 * The argument is fully parenthesized so the macro expands correctly when
 * invoked with a compound expression (e.g. T32_FMTITR(a | b)). */
#define T32_FMTITR(instr) ((((instr) & 0x0000FFFF) << 16) | (((instr) & 0xFFFF0000) >> 16))
36 * Implements various ARM DPM operations using architectural debug registers.
37 * These routines layer over core-specific communication methods to cope with
38 * implementation differences between cores like ARM1136 and Cortex-A8.
40 * The "Debug Programmers' Model" (DPM) for ARMv6 and ARMv7 is defined by
41 * Part C (Debug Architecture) of the ARM Architecture Reference Manual,
42 * ARMv7-A and ARMv7-R edition (ARM DDI 0406B). In OpenOCD, DPM operations
43 * are abstracted through internal programming interfaces to share code and
44 * to minimize needless differences in debug behavior between cores.
48 * Get core state from EDSCR, without necessity to retrieve CPSR
/* Derive the core's execution state from the cached EDSCR copy in dpm->dscr,
 * without issuing any target accesses.
 * NOTE(review): body is abbreviated in this view; the RW-bit scan presumably
 * distinguishes AArch32 states from AArch64 — confirm against full source. */
enum arm_state armv8_dpm_get_core_state(struct arm_dpm *dpm)
	int el = (dpm->dscr >> 8) & 0x3;	/* EDSCR.EL: current exception level */
	int rw = (dpm->dscr >> 10) & 0xF;	/* EDSCR.RW: register width, one bit per EL */
	/* find the first '0' in DSCR.RW */
	for (pos = 3; pos >= 0; pos--) {
		if ((rw & (1 << pos)) == 0)
	/* all RW bits set: every EL executes AArch64 */
	return ARM_STATE_AARCH64;
70 /*----------------------------------------------------------------------*/
/* Push one 32-bit word to the target through the DCC (host writes DTRRX,
 * which the core reads). Returns an OpenOCD error code. */
static int dpmv8_write_dcc(struct armv8_common *armv8, uint32_t data)
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRRX, data);
/* Push a 64-bit value to the target through the DCC: low word via DTRRX,
 * high word via DTRTX. */
static int dpmv8_write_dcc_64(struct armv8_common *armv8, uint64_t data)
	LOG_DEBUG("write DCC 0x%016" PRIx64, data);
	ret = mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRRX, data);
	/* NOTE(review): error codes are combined with '+='; OpenOCD error
	 * codes do not add meaningfully — verify intent against full source. */
	ret += mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
/* Read one 32-bit word from the target through the DCC: poll DSCR until the
 * core has filled DTRTX, then read it into *data. 1 second timeout. */
static int dpmv8_read_dcc(struct armv8_common *armv8, uint32_t *data,
	uint32_t dscr = DSCR_ITE;
	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR,
		if (retval != ERROR_OK)
		/* give up after one second of polling */
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
	/* data is ready: fetch the word the core wrote */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX,
	if (retval != ERROR_OK)
	LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
/* Read a 64-bit value from the target through the DCC: poll DSCR for
 * DTR_TX_FULL, then read the low word from DTRTX and the high word from
 * DTRRX, merging them into *data. 1 second timeout. */
static int dpmv8_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
	uint32_t dscr = DSCR_ITE;
	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR,
		if (retval != ERROR_OK)
		/* give up after one second of polling */
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for DTR_TX_FULL, dscr = 0x%08" PRIx32, dscr);
	/* low 32 bits come from DTRTX */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX,
	if (retval != ERROR_OK)
	/* high 32 bits come from DTRRX */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRRX,
	if (retval != ERROR_OK)
	/* NOTE(review): the *(uint32_t *)data pun assumes the low word was
	 * stored in the first 4 bytes (little-endian host) — verify. */
	*data = *(uint32_t *)data | (uint64_t)higher << 32;
	LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
/* Prepare the DPM for a sequence of instruction executions: wait (up to 1 s)
 * for the ITR to be empty (DSCR.ITE set) and drain any stale DTRRX word. */
static int dpmv8_dpm_prepare(struct arm_dpm *dpm)
	struct armv8_common *armv8 = dpm->arm->arch_info;
	/* set up invariant: INSTR_COMP is set after every DPM operation */
	long long then = timeval_ms();
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR,
	if (retval != ERROR_OK)
	if ((dscr & DSCR_ITE) != 0)
	if (timeval_ms() > then + 1000) {
		LOG_ERROR("Timeout waiting for dpm prepare");
	/* update the stored copy of dscr */
	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* flush the stale word by reading DTRRX once */
		retval = mem_ap_read_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, &dscr);
		if (retval != ERROR_OK)
/* Counterpart of dpmv8_dpm_prepare(); currently has no cleanup to perform. */
static int dpmv8_dpm_finish(struct arm_dpm *dpm)
	/* REVISIT what could be done here? */
/* Execute a single instruction via the ITR: wait for DSCR.ITE, write the
 * opcode (half-word swapped to T32 format when not in AArch64), then wait
 * for completion. Updates dpm->last_el from the resulting DSCR; on DSCR.ERR
 * routes through the debug-state exception handler.
 * If p_dscr is non-NULL it receives the final DSCR value. */
static int dpmv8_exec_opcode(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *p_dscr)
	struct armv8_common *armv8 = dpm->arm->arch_info;
	uint32_t dscr = DSCR_ITE;
	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_ITE) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
	/* AArch32 states use the T32 ITR layout: swap instruction half-words */
	if (armv8_dpm_get_core_state(dpm) != ARM_STATE_AARCH64)
		opcode = T32_FMTITR(opcode);
	retval = mem_ap_write_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_ITR, opcode);
	if (retval != ERROR_OK)
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not read DSCR register");
	if (timeval_ms() > then + 1000) {
		LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
	} while ((dscr & DSCR_ITE) == 0); /* Wait for InstrCompl bit to be set */
	/* update dscr and el after each command execution */
	if (dpm->last_el != ((dscr >> 8) & 3))
		LOG_DEBUG("EL %i -> %i", dpm->last_el, (dscr >> 8) & 3);
	dpm->last_el = (dscr >> 8) & 3;
	/* DSCR.ERR set usually means the instruction took an exception while
	 * in debug state; mark clobbered registers dirty */
	if (dscr & DSCR_ERR) {
		LOG_ERROR("Opcode 0x%08"PRIx32", DSCR.ERR=1, DSCR.EL=%i", opcode, dpm->last_el);
		armv8_dpm_handle_exception(dpm);
/* Execute an opcode, discarding the resulting DSCR value. */
static int dpmv8_instr_execute(struct arm_dpm *dpm, uint32_t opcode)
	return dpmv8_exec_opcode(dpm, opcode, NULL);
287 static int dpmv8_instr_write_data_dcc(struct arm_dpm *dpm,
288 uint32_t opcode, uint32_t data)
290 struct armv8_common *armv8 = dpm->arm->arch_info;
293 retval = dpmv8_write_dcc(armv8, data);
294 if (retval != ERROR_OK)
297 return dpmv8_exec_opcode(dpm, opcode, 0);
300 static int dpmv8_instr_write_data_dcc_64(struct arm_dpm *dpm,
301 uint32_t opcode, uint64_t data)
303 struct armv8_common *armv8 = dpm->arm->arch_info;
306 retval = dpmv8_write_dcc_64(armv8, data);
307 if (retval != ERROR_OK)
310 return dpmv8_exec_opcode(dpm, opcode, 0);
/* Stage a 32-bit value into R0 via the DCC, then execute an opcode that
 * takes its operand from R0. */
static int dpmv8_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
	struct armv8_common *armv8 = dpm->arm->arch_info;
	uint32_t dscr = DSCR_ITE;
	retval = dpmv8_write_dcc(armv8, data);
	if (retval != ERROR_OK)
	/* move DTRRX into R0 using the state-appropriate opcode */
	retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, READ_REG_DTRRX), &dscr);
	if (retval != ERROR_OK)
	/* then the opcode, taking data from R0 */
	return dpmv8_exec_opcode(dpm, opcode, &dscr);
/* Stage a 64-bit value into X0 via the DCC (DBGDTR_EL0), then execute an
 * opcode that takes its operand from X0. AArch64 only (uses MRS encoding). */
static int dpmv8_instr_write_data_r0_64(struct arm_dpm *dpm,
	uint32_t opcode, uint64_t data)
	struct armv8_common *armv8 = dpm->arm->arch_info;
	uint32_t dscr = DSCR_ITE;
	retval = dpmv8_write_dcc_64(armv8, data);
	if (retval != ERROR_OK)
	/* MRS X0, DBGDTR_EL0 pulls the 64-bit DTR value into X0 */
	retval = dpmv8_exec_opcode(dpm, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
	if (retval != ERROR_OK)
	/* then the opcode, taking data from R0 */
	return dpmv8_exec_opcode(dpm, opcode, &dscr);
/* Synchronize the pipeline after CPSR-affecting changes: DSB then ISB. */
static int dpmv8_instr_cpsr_sync(struct arm_dpm *dpm)
	struct armv8_common *armv8 = dpm->arm->arch_info;
	/* "Prefetch flush" after modifying execution status in CPSR */
	retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, ARMV8_OPC_DSB_SY), &dpm->dscr);
	if (retval == ERROR_OK)
		/* NOTE(review): ISB result is not captured here; retval from the
		 * DSB is what gets returned — confirm against full source */
		dpmv8_exec_opcode(dpm, armv8_opcode(armv8, ARMV8_OPC_ISB_SY), &dpm->dscr);
/* Execute an opcode that writes its result to the DCC, then fetch that
 * 32-bit result into *data. */
static int dpmv8_instr_read_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
	struct armv8_common *armv8 = dpm->arm->arch_info;
	uint32_t dscr = DSCR_ITE;
	/* the opcode, writing data to DCC */
	retval = dpmv8_exec_opcode(dpm, opcode, &dscr);
	if (retval != ERROR_OK)
	return dpmv8_read_dcc(armv8, data, &dscr);
/* Execute an opcode that writes its 64-bit result to the DCC, then fetch
 * that result into *data. */
static int dpmv8_instr_read_data_dcc_64(struct arm_dpm *dpm,
	uint32_t opcode, uint64_t *data)
	struct armv8_common *armv8 = dpm->arm->arch_info;
	uint32_t dscr = DSCR_ITE;
	/* the opcode, writing data to DCC */
	retval = dpmv8_exec_opcode(dpm, opcode, &dscr);
	if (retval != ERROR_OK)
	return dpmv8_read_dcc_64(armv8, data, &dscr);
/* Execute an opcode that leaves its result in R0, move R0 to the DCC, then
 * fetch the 32-bit result into *data. */
static int dpmv8_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
	struct armv8_common *armv8 = dpm->arm->arch_info;
	uint32_t dscr = DSCR_ITE;
	/* the opcode, writing data to R0 */
	retval = dpmv8_exec_opcode(dpm, opcode, &dscr);
	if (retval != ERROR_OK)
	/* write R0 to DCC */
	retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, WRITE_REG_DTRTX), &dscr);
	if (retval != ERROR_OK)
	return dpmv8_read_dcc(armv8, data, &dscr);
/* Execute an opcode that leaves its result in X0, move X0 to DBGDTR_EL0,
 * then fetch the 64-bit result into *data. AArch64 only (MSR encoding). */
static int dpmv8_instr_read_data_r0_64(struct arm_dpm *dpm,
	uint32_t opcode, uint64_t *data)
	struct armv8_common *armv8 = dpm->arm->arch_info;
	uint32_t dscr = DSCR_ITE;
	/* the opcode, writing data to R0 */
	retval = dpmv8_exec_opcode(dpm, opcode, &dscr);
	if (retval != ERROR_OK)
	/* write R0 to DCC */
	retval = dpmv8_exec_opcode(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
	if (retval != ERROR_OK)
	return dpmv8_read_dcc_64(armv8, data, &dscr);
/* Program and enable one hardware breakpoint (index 0..15) or watchpoint
 * (index 16..31): write its value register then its control register. */
static int dpmv8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	target_addr_t addr, uint32_t control)
	struct armv8_common *armv8 = dpm->arm->arch_info;
	uint32_t vr = armv8->debug_base;	/* value-register address */
	uint32_t cr = armv8->debug_base;	/* control-register address */
	case 0 ... 15:	/* breakpoints */
		vr += CPUV8_DBG_BVR_BASE;
		cr += CPUV8_DBG_BCR_BASE;
	case 16 ... 31:	/* watchpoints */
		vr += CPUV8_DBG_WVR_BASE;
		cr += CPUV8_DBG_WCR_BASE;
	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);
	/* value register first, then control register to arm the unit */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap, vr, addr);
	if (retval != ERROR_OK)
	return mem_ap_write_atomic_u32(armv8->debug_ap, cr, control);
/* Disable one hardware breakpoint or watchpoint by clearing its control
 * register (index layout matches dpmv8_bpwp_enable). */
static int dpmv8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
	struct armv8_common *armv8 = dpm->arm->arch_info;
		cr = armv8->debug_base + CPUV8_DBG_BCR_BASE;
		cr = armv8->debug_base + CPUV8_DBG_WCR_BASE;
	LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
	/* clear control register */
	return mem_ap_write_atomic_u32(armv8->debug_ap, cr, 0);
493 * Coprocessor support
496 /* Read coprocessor */
/* Read an AArch32 coprocessor register: MRC into R0, result returned via
 * the DCC. Wraps the access in prepare()/finish(). */
static int dpmv8_mrc(struct target *target, int cpnum,
	uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
	LOG_DEBUG("MRC p%d, %d, r0, c%d, c%d, %d", cpnum,
		(int) op1, (int) CRn,
		(int) CRm, (int) op2);
	/* read coprocessor register into R0; return via DCC */
	retval = dpm->instr_read_data_r0(dpm,
		ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2),
	/* always balance prepare(), even on error */
	/* (void) */ dpm->finish(dpm);
/* Write an AArch32 coprocessor register: value staged into R0 via the DCC,
 * then MCR from R0. Wraps the access in prepare()/finish(). */
static int dpmv8_mcr(struct target *target, int cpnum,
	uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
	LOG_DEBUG("MCR p%d, %d, r0, c%d, c%d, %d", cpnum,
		(int) op1, (int) CRn,
		(int) CRm, (int) op2);
	/* read DCC into r0; then write coprocessor register from R0 */
	retval = dpm->instr_write_data_r0(dpm,
		ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2),
	/* always balance prepare(), even on error */
	/* (void) */ dpm->finish(dpm);
/* Read an AArch64 system register: build the MRS op-code field from
 * op0/op1/CRn/CRm/op2 and return the value via R0 and the DCC. */
static int dpmv8_mrs(struct target *target, uint32_t op0,
	uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
	/* pack the system-register identifier into MRS encoding bit fields */
	op_code = ((op0 & 0x3) << 19 | (op1 & 0x7) << 16 | (CRn & 0xF) << 12 |\
		(CRm & 0xF) << 8 | (op2 & 0x7) << 5);
	/* NOTE(review): log line reuses coprocessor-style formatting for a
	 * system-register access */
	LOG_DEBUG("MRS p%d, %d, r0, c%d, c%d, %d", (int)op0,
		(int) op1, (int) CRn,
		(int) CRm, (int) op2);
	/* read coprocessor register into R0; return via DCC */
	retval = dpm->instr_read_data_r0(dpm,
		ARMV8_MRS(op_code, 0),
	/* always balance prepare(), even on error */
	/* (void) */ dpm->finish(dpm);
/* Write an AArch64 system register: value staged into R0 via the DCC, then
 * MSR from R0 using the op-code field built from op0/op1/CRn/CRm/op2. */
static int dpmv8_msr(struct target *target, uint32_t op0,
	uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
	/* pack the system-register identifier into MSR encoding bit fields */
	op_code = ((op0 & 0x3) << 19 | (op1 & 0x7) << 16 | (CRn & 0xF) << 12 |\
		(CRm & 0xF) << 8 | (op2 & 0x7) << 5);
	LOG_DEBUG("MSR p%d, %d, r0, c%d, c%d, %d", (int)op0,
		(int) op1, (int) CRn,
		(int) CRm, (int) op2);
	/* read DCC into r0; then write coprocessor register from R0 */
	retval = dpm->instr_write_data_r0(dpm,
		ARMV8_MSR_GP(op_code, 0),
	/* always balance prepare(), even on error */
	/* (void) */ dpm->finish(dpm);
603 /*----------------------------------------------------------------------*/
606 * Register access utilities
609 int armv8_dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode)
611 struct armv8_common *armv8 = (struct armv8_common *)dpm->arm->arch_info;
612 int retval = ERROR_OK;
613 unsigned int target_el;
614 enum arm_state core_state;
617 /* restore previous mode */
618 if (mode == ARM_MODE_ANY) {
619 cpsr = buf_get_u32(dpm->arm->cpsr->value, 0, 32);
621 LOG_DEBUG("restoring mode, cpsr = 0x%08"PRIx32, cpsr);
624 LOG_DEBUG("setting mode 0x%"PRIx32, mode);
626 /* else force to the specified mode */
627 if (is_arm_mode(mode))
633 switch (cpsr & 0x1f) {
645 * TODO: handle ARM_MODE_HYP
655 target_el = (cpsr >> 2) & 3;
658 if (target_el > SYSTEM_CUREL_EL3) {
659 LOG_ERROR("%s: Invalid target exception level %i", __func__, target_el);
663 LOG_DEBUG("target_el = %i, last_el = %i", target_el, dpm->last_el);
664 if (target_el > dpm->last_el) {
665 retval = dpm->instr_execute(dpm,
666 armv8_opcode(armv8, ARMV8_OPC_DCPS) | target_el);
668 /* DCPS clobbers registers just like an exception taken */
669 armv8_dpm_handle_exception(dpm);
671 core_state = armv8_dpm_get_core_state(dpm);
672 if (core_state != ARM_STATE_AARCH64) {
673 /* cannot do DRPS/ERET when already in EL0 */
674 if (dpm->last_el != 0) {
675 /* load SPSR with the desired mode and execute DRPS */
676 LOG_DEBUG("SPSR = 0x%08"PRIx32, cpsr);
677 retval = dpm->instr_write_data_r0(dpm,
678 ARMV8_MSR_GP_xPSR_T1(1, 0, 15), cpsr);
679 if (retval == ERROR_OK)
680 retval = dpm->instr_execute(dpm, armv8_opcode(armv8, ARMV8_OPC_DRPS));
684 * need to execute multiple DRPS instructions until target_el
687 while (retval == ERROR_OK && dpm->last_el != target_el) {
688 unsigned int cur_el = dpm->last_el;
689 retval = dpm->instr_execute(dpm, armv8_opcode(armv8, ARMV8_OPC_DRPS));
690 if (cur_el == dpm->last_el) {
691 LOG_INFO("Cannot reach EL %i, SPSR corrupted?", target_el);
697 /* On executing DRPS, DSPSR and DLR become UNKNOWN, mark them as dirty */
698 dpm->arm->cpsr->dirty = true;
699 dpm->arm->pc->dirty = true;
702 * re-evaluate the core state, we might be in Aarch32 state now
703 * we rely on dpm->dscr being up-to-date
705 core_state = armv8_dpm_get_core_state(dpm);
706 armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
707 armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
714 * Common register read, relies on armv8_select_reg_access() having been called.
/* Read one core register through the per-state accessor and store the value
 * into the register cache entry r. Relies on armv8_select_reg_access()
 * having been called. */
static int dpmv8_read_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
	struct armv8_common *armv8 = dpm->arm->arch_info;
	retval = armv8->read_reg_u64(armv8, regnum, &value_64);
	if (retval == ERROR_OK) {
		/* store using the register's own declared width */
		buf_set_u64(r->value, 0, r->size, value_64);
		LOG_DEBUG("READ: %s, %16.8llx", r->name, (unsigned long long) value_64);
		LOG_DEBUG("READ: %s, %8.8x", r->name, (unsigned int) value_64);
737 * Common register write, relies on armv8_select_reg_access() having been called.
/* Write one core register from its cache entry r through the per-state
 * accessor. Relies on armv8_select_reg_access() having been called. */
static int dpmv8_write_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
	struct armv8_common *armv8 = dpm->arm->arch_info;
	int retval = ERROR_FAIL;
	value_64 = buf_get_u64(r->value, 0, r->size);
	retval = armv8->write_reg_u64(armv8, regnum, value_64);
	if (retval == ERROR_OK) {
		LOG_DEBUG("WRITE: %s, %16.8llx", r->name, (unsigned long long)value_64);
		LOG_DEBUG("WRITE: %s, %8.8x", r->name, (unsigned int)value_64);
 * Read basic registers of the current context: R0 to R15, and CPSR;
761 * sets the core mode (such as USR or IRQ) and state (such as ARM or Thumb).
762 * In normal operation this is called on entry to halting debug state,
763 * possibly after some other operations supporting restore of debug state
764 * or making sure the CPU is fully idle (drain write buffer, etc).
/* Snapshot the current context's registers into the cache: R0 first (it is
 * the scratch register), then CPSR (which also sets core mode/state), then
 * every remaining register reachable from the current EL. */
int armv8_dpm_read_current_registers(struct arm_dpm *dpm)
	struct arm *arm = dpm->arm;
	struct armv8_common *armv8 = (struct armv8_common *)arm->arch_info;
	struct reg_cache *cache;
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
	cache = arm->core_cache;
	/* read R0 first (it's used for scratch), then CPSR */
	r = cache->reg_list + 0;
	retval = dpmv8_read_reg(dpm, r, 0);
	if (retval != ERROR_OK)
	/* read cpsr to r0 and get it back */
	retval = dpm->instr_read_data_r0(dpm,
		armv8_opcode(armv8, READ_REG_DSPSR), &cpsr);
	if (retval != ERROR_OK)
	/* update core mode and state */
	armv8_set_cpsr(arm, cpsr);
	for (unsigned int i = 1; i < cache->num_regs ; i++) {
		struct arm_reg *arm_reg;
		r = armv8_reg_current(arm, i);
		 * Only read registers that are available from the
		 * current EL (or core mode).
		arm_reg = r->arch_info;
		if (arm_reg->mode != ARM_MODE_ANY &&
			dpm->last_el != armv8_curel_from_core_mode(arm_reg->mode))
		retval = dpmv8_read_reg(dpm, r, i);
		if (retval != ERROR_OK)
826 /* Avoid needless I/O ... leave breakpoints and watchpoints alone
827 * unless they're removed, or need updating because of single-stepping
828 * or running debugger code.
830 static int dpmv8_maybe_update_bpwp(struct arm_dpm *dpm, bool bpwp,
831 struct dpm_bpwp *xp, int *set_p)
833 int retval = ERROR_OK;
840 /* removed or startup; we must disable it */
845 /* disabled, but we must set it */
846 xp->dirty = disable = false;
851 /* set, but we must temporarily disable it */
852 xp->dirty = disable = true;
857 retval = dpm->bpwp_disable(dpm, xp->number);
859 retval = dpm->bpwp_enable(dpm, xp->number,
860 xp->address, xp->control);
862 if (retval != ERROR_OK)
863 LOG_ERROR("%s: can't %s HW %spoint %d",
864 disable ? "disable" : "enable",
865 target_name(dpm->arm->target),
866 (xp->number < 16) ? "break" : "watch",
/* Forward declaration: armv8_dpm_write_dirty_registers() compares against
 * this to tell whether the DPM manages the target's breakpoints. */
static int dpmv8_add_breakpoint(struct target *target, struct breakpoint *bp);
875 * Writes all modified core registers for all processor modes. In normal
876 * operation this is called on exit from halting debug state.
878 * @param dpm: represents the processor
879 * @param bpwp: true ensures breakpoints and watchpoints are set,
880 * false ensures they are cleared
/* Flush all modified cached registers back to the core on debug-state exit,
 * update hardware break/watchpoints (when this DPM manages them), restore
 * the original core mode, and finally rewrite CPSR, PC and scratch R0. */
int armv8_dpm_write_dirty_registers(struct arm_dpm *dpm, bool bpwp)
	struct arm *arm = dpm->arm;
	struct reg_cache *cache = arm->core_cache;
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
	/* If we're managing hardware breakpoints for this core, enable
	 * or disable them as requested.
	 * REVISIT We don't yet manage them for ANY cores. Eventually
	 * we should be able to assume we handle them; but until then,
	 * cope with the hand-crafted breakpoint code.
	if (arm->target->type->add_breakpoint == dpmv8_add_breakpoint) {
		for (unsigned i = 0; i < dpm->nbp; i++) {
			struct dpm_bp *dbp = dpm->dbp + i;
			struct breakpoint *bp = dbp->bp;
			retval = dpmv8_maybe_update_bpwp(dpm, bpwp, &dbp->bpwp,
				bp ? &bp->set : NULL);
			if (retval != ERROR_OK)
	/* enable/disable watchpoints */
	for (unsigned i = 0; i < dpm->nwp; i++) {
		struct dpm_wp *dwp = dpm->dwp + i;
		struct watchpoint *wp = dwp->wp;
		retval = dpmv8_maybe_update_bpwp(dpm, bpwp, &dwp->bpwp,
			wp ? &wp->set : NULL);
		if (retval != ERROR_OK)
	/* NOTE: writes to breakpoint and watchpoint registers might
	 * be queued, and need (efficient/batched) flushing later.
	/* Restore original core mode and state */
	retval = armv8_dpm_modeswitch(dpm, ARM_MODE_ANY);
	if (retval != ERROR_OK)
	/* check everything except our scratch register R0 */
	for (unsigned i = 1; i < cache->num_regs; i++) {
		/* skip PC and CPSR */
		if (i == ARMV8_PC || i == ARMV8_xPSR)
		if (!cache->reg_list[i].valid)
		if (!cache->reg_list[i].dirty)
		/* skip all registers not on the current EL */
		r = cache->reg_list[i].arch_info;
		if (r->mode != ARM_MODE_ANY &&
			dpm->last_el != armv8_curel_from_core_mode(r->mode))
		retval = dpmv8_write_reg(dpm, &cache->reg_list[i], i);
		if (retval != ERROR_OK)
	/* flush CPSR and PC */
	if (retval == ERROR_OK)
		retval = dpmv8_write_reg(dpm, &cache->reg_list[ARMV8_xPSR], ARMV8_xPSR);
	if (retval == ERROR_OK)
		retval = dpmv8_write_reg(dpm, &cache->reg_list[ARMV8_PC], ARMV8_PC);
	/* flush R0 -- it's *very* dirty by now */
	if (retval == ERROR_OK)
		retval = dpmv8_write_reg(dpm, &cache->reg_list[0], 0);
	if (retval == ERROR_OK)
		dpm->instr_cpsr_sync(dpm);
972 * Standard ARM register accessors ... there are three methods
973 * in "struct arm", to support individual read/write and bulk read
/* struct arm read_core_reg hook: read one register (by cache index) into
 * its cache entry, wrapped in prepare()/finish(). */
static int armv8_dpm_read_core_reg(struct target *target, struct reg *r,
	int regnum, enum arm_mode mode)
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = target_to_arm(target)->dpm;
	int max = arm->core_cache->num_regs;
	/* valid cache indices are 0 .. num_regs-1 */
	if (regnum < 0 || regnum >= max)
		return ERROR_COMMAND_SYNTAX_ERROR;
	 * REVISIT what happens if we try to read SPSR in a core mode
	 * which has no such register?
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
	retval = dpmv8_read_reg(dpm, r, regnum);
	if (retval != ERROR_OK)
	/* always balance prepare(), even on error */
	/* (void) */ dpm->finish(dpm);
1005 static int armv8_dpm_write_core_reg(struct target *target, struct reg *r,
1006 int regnum, enum arm_mode mode, uint8_t *value)
1008 struct arm *arm = target_to_arm(target);
1009 struct arm_dpm *dpm = target_to_arm(target)->dpm;
1011 int max = arm->core_cache->num_regs;
1013 if (regnum < 0 || regnum > max)
1014 return ERROR_COMMAND_SYNTAX_ERROR;
1016 /* REVISIT what happens if we try to write SPSR in a core mode
1017 * which has no such register?
1020 retval = dpm->prepare(dpm);
1021 if (retval != ERROR_OK)
1024 retval = dpmv8_write_reg(dpm, r, regnum);
1026 /* always clean up, regardless of error */
/* struct arm full_context hook: read every register in the cache, switching
 * core mode as needed to reach banked registers, then restore the original
 * mode. Assumes armv8_dpm_read_current_registers() already ran. */
static int armv8_dpm_full_context(struct target *target)
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	struct reg_cache *cache = arm->core_cache;
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
	enum arm_mode mode = ARM_MODE_ANY;
	/* We "know" arm_dpm_read_current_registers() was called so
	 * the unmapped registers (R0..R7, PC, AND CPSR) and some
	 * view of R8..R14 are current. We also "know" oddities of
	 * register mapping: special cases for R8..R12 and SPSR.
	 * Pick some mode with unread registers and read them all.
	 * Repeat until done.
	for (unsigned i = 0; i < cache->num_regs; i++) {
		if (cache->reg_list[i].valid)
		r = cache->reg_list[i].arch_info;
		/* may need to pick a mode and set CPSR */
		/* For regular (ARM_MODE_ANY) R8..R12
		 * in case we've entered debug state
		 * in FIQ mode we need to patch mode.
		if (mode != ARM_MODE_ANY)
			retval = armv8_dpm_modeswitch(dpm, mode);
			retval = armv8_dpm_modeswitch(dpm, ARM_MODE_USR);
		if (retval != ERROR_OK)
		if (r->mode != mode)
		/* CPSR was read, so "R16" must mean SPSR */
		retval = dpmv8_read_reg(dpm,
			&cache->reg_list[i],
			(r->num == 16) ? 17 : r->num);
		if (retval != ERROR_OK)
	/* back to the mode we started in, then balance prepare() */
	retval = armv8_dpm_modeswitch(dpm, ARM_MODE_ANY);
	/* (void) */ dpm->finish(dpm);
1101 /*----------------------------------------------------------------------*/
1104 * Breakpoint and Watchpoint support.
1106 * Hardware {break,watch}points are usually left active, to minimize
1107 * debug entry/exit costs. When they are set or cleared, it's done in
1108 * batches. Also, DPM-conformant hardware can update debug registers
1109 * regardless of whether the CPU is running or halted ... though that
1110 * fact isn't currently leveraged.
/* Compute the value/control register contents for one break/watchpoint unit
 * from an address and length, storing them in xp for later programming by
 * write_dirty_registers(). Supports 1, 2 and 4 byte matches. */
static int dpmv8_bpwp_setup(struct arm_dpm *dpm, struct dpm_bpwp *xp,
	uint32_t addr, uint32_t length)
	control = (1 << 0)	/* enable */
		| (3 << 1);	/* both user and privileged access */
	/* Match 1, 2, or all 4 byte addresses in this word.
	 * FIXME: v7 hardware allows lengths up to 2 GB for BP and WP.
	 * Support larger length, when addr is suitably aligned. In
	 * particular, allow watchpoints on 8 byte "double" values.
	 * REVISIT allow watchpoints on unaligned 2-bit values; and on
	 * v7 hardware, unaligned 4-byte ones too.
	/* one byte: set the byte-address-select bit for that byte lane */
	control |= (1 << (addr & 3)) << 5;
	/* require 2-byte alignment */
	control |= (3 << (addr & 2)) << 5;
	/* require 4-byte alignment */
	control |= 0xf << 5;
	LOG_ERROR("unsupported {break,watch}point length/alignment");
	return ERROR_COMMAND_SYNTAX_ERROR;
	/* other shared control bits:
	 * bits 15:14 == 0 ... both secure and nonsecure states (v6.1+ only)
	 * bit 20 == 0 ... not linked to a context ID
	 * bit 28:24 == 0 ... not ignoring N LSBs (v7 only)
	xp->address = addr & ~3;	/* word-aligned; byte lanes select within it */
	xp->control = control;
	LOG_DEBUG("BPWP: addr %8.8" PRIx32 ", control %" PRIx32 ", number %d",
		xp->address, control, xp->number);
	/* hardware is updated in write_dirty_registers() */
/* target_type add_breakpoint hook: claim the first free hardware breakpoint
 * unit and stage its setup; hardware is programmed later by
 * write_dirty_registers(). */
static int dpmv8_add_breakpoint(struct target *target, struct breakpoint *bp)
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	return ERROR_COMMAND_SYNTAX_ERROR;
	if (!dpm->bpwp_enable)
	/* FIXME we need a generic solution for software breakpoints. */
	if (bp->type == BKPT_SOFT)
		LOG_DEBUG("using HW bkpt, not SW...");
	for (unsigned i = 0; i < dpm->nbp; i++) {
		if (!dpm->dbp[i].bp) {
			retval = dpmv8_bpwp_setup(dpm, &dpm->dbp[i].bpwp,
				bp->address, bp->length);
			if (retval == ERROR_OK)
				dpm->dbp[i].bp = bp;
/* target_type remove_breakpoint hook: release the unit holding bp and mark
 * it dirty so the hardware gets disabled on the next register flush. */
static int dpmv8_remove_breakpoint(struct target *target, struct breakpoint *bp)
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	for (unsigned i = 0; i < dpm->nbp; i++) {
		if (dpm->dbp[i].bp == bp) {
			dpm->dbp[i].bp = NULL;
			dpm->dbp[i].bpwp.dirty = true;
			/* hardware is updated in write_dirty_registers() */
/* Stage the setup of one hardware watchpoint unit for wp: address/length
 * via the common bpwp setup, then access-direction bits in control. */
static int dpmv8_watchpoint_setup(struct arm_dpm *dpm, unsigned index_t,
	struct watchpoint *wp)
	struct dpm_wp *dwp = dpm->dwp + index_t;
	/* this hardware doesn't support data value matching or masking */
	if (wp->value || wp->mask != ~(uint32_t)0) {
		LOG_DEBUG("watchpoint values and masking not supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	retval = dpmv8_bpwp_setup(dpm, &dwp->bpwp, wp->address, wp->length);
	if (retval != ERROR_OK)
	control = dwp->bpwp.control;
	dwp->bpwp.control = control;
	/* record ownership; hardware is updated in write_dirty_registers() */
	dpm->dwp[index_t].wp = wp;
/* target_type add_watchpoint hook: claim the first free hardware watchpoint
 * unit and stage its setup. */
static int dpmv8_add_watchpoint(struct target *target, struct watchpoint *wp)
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	if (dpm->bpwp_enable) {
		for (unsigned i = 0; i < dpm->nwp; i++) {
			if (!dpm->dwp[i].wp) {
				retval = dpmv8_watchpoint_setup(dpm, i, wp);
/* target_type remove_watchpoint hook: release the unit holding wp and mark
 * it dirty so the hardware gets disabled on the next register flush. */
static int dpmv8_remove_watchpoint(struct target *target, struct watchpoint *wp)
	struct arm *arm = target_to_arm(target);
	struct arm_dpm *dpm = arm->dpm;
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	for (unsigned i = 0; i < dpm->nwp; i++) {
		if (dpm->dwp[i].wp == wp) {
			dpm->dwp[i].wp = NULL;
			dpm->dwp[i].bpwp.dirty = true;
			/* hardware is updated in write_dirty_registers() */
/* Record a watchpoint fault address, adjusting for the per-state pipeline
 * offset implied by the current core state.
 * NOTE(review): the per-state adjustments are elided in this view —
 * confirm offsets against full source. */
void armv8_dpm_report_wfar(struct arm_dpm *dpm, uint64_t addr)
	switch (dpm->arm->core_state) {
	case ARM_STATE_AARCH64:
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
	case ARM_STATE_JAZELLE:
		LOG_DEBUG("Unknown core_state");
1314 * Handle exceptions taken in debug state. This happens mostly for memory
1315 * accesses that violated a MMU policy. Taking an exception while in debug
1316 * state clobbers certain state registers on the target exception level.
1317 * Just mark those registers dirty so that they get restored on resume.
1318 * This works both for Aarch32 and Aarch64 states.
1320 * This function must not perform any actions that trigger another exception
1321 * or a recursion will happen.
/* Recover from an exception taken while in debug state (typically an MMU
 * fault during a memory access): clear the sticky error, mark the state
 * registers clobbered at the target EL as dirty so they are restored on
 * resume, and re-select opcodes/accessors for the (possibly new) state.
 * Must not itself trigger another exception, or it would recurse. */
void armv8_dpm_handle_exception(struct arm_dpm *dpm)
	struct armv8_common *armv8 = dpm->arm->arch_info;
	struct reg_cache *cache = dpm->arm->core_cache;
	enum arm_state core_state;
	/* registers clobbered by an exception, indexed by (EL - 1) */
	static const int clobbered_regs_by_el[3][5] = {
		{ ARMV8_PC, ARMV8_xPSR, ARMV8_ELR_EL1, ARMV8_ESR_EL1, ARMV8_SPSR_EL1 },
		{ ARMV8_PC, ARMV8_xPSR, ARMV8_ELR_EL2, ARMV8_ESR_EL2, ARMV8_SPSR_EL2 },
		{ ARMV8_PC, ARMV8_xPSR, ARMV8_ELR_EL3, ARMV8_ESR_EL3, ARMV8_SPSR_EL3 },
	el = (dpm->dscr >> 8) & 3;
	/* safety check, must not happen since EL0 cannot be a target for an exception */
	if (el < SYSTEM_CUREL_EL1 || el > SYSTEM_CUREL_EL3) {
		LOG_ERROR("%s: EL %i is invalid, DSCR corrupted?", __func__, el);
	/* Clear sticky error */
	mem_ap_write_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	armv8->read_reg_u64(armv8, ARMV8_xPSR, &dlr);
	armv8->read_reg_u64(armv8, ARMV8_PC, &dlr);
	LOG_DEBUG("Exception taken to EL %i, DLR=0x%016"PRIx64" DSPSR=0x%08"PRIx32,
	/* mark all clobbered registers as dirty */
	for (int i = 0; i < 5; i++)
		cache->reg_list[clobbered_regs_by_el[el-1][i]].dirty = true;
	 * re-evaluate the core state, we might be in AArch64 state now
	 * we rely on dpm->dscr being up-to-date
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
1370 /*----------------------------------------------------------------------*/
1373 * Other debug and support utilities
/* Record a freshly-read DSCR value: cache the current EL and translate the
 * debug-entry method bits into the target's debug_reason. */
void armv8_dpm_report_dscr(struct arm_dpm *dpm, uint32_t dscr)
	struct target *target = dpm->arm->target;
	dpm->last_el = (dscr >> 8) & 3;		/* EDSCR.EL */
	/* Examine debug reason */
	switch (DSCR_ENTRY(dscr)) {
	/* FALL THROUGH -- assume a v6 core in abort mode */
	case DSCRV8_ENTRY_EXT_DEBUG:	/* EDBGRQ */
		target->debug_reason = DBG_REASON_DBGRQ;
	case DSCRV8_ENTRY_HALT_STEP_EXECLU:	/* HALT step */
	case DSCRV8_ENTRY_HALT_STEP_NORMAL: /* Halt step*/
	case DSCRV8_ENTRY_HALT_STEP:
		target->debug_reason = DBG_REASON_SINGLESTEP;
	case DSCRV8_ENTRY_HLT:	/* HLT instruction (software breakpoint) */
	case DSCRV8_ENTRY_BKPT:	/* SW BKPT (?) */
	case DSCRV8_ENTRY_RESET_CATCH:	/* Reset catch */
	case DSCRV8_ENTRY_OS_UNLOCK:  /*OS unlock catch*/
	case DSCRV8_ENTRY_EXCEPTION_CATCH:  /*exception catch*/
	case DSCRV8_ENTRY_SW_ACCESS_DBG: /*SW access dbg register*/
		target->debug_reason = DBG_REASON_BREAKPOINT;
	case DSCRV8_ENTRY_WATCHPOINT:	/* asynch watchpoint */
		target->debug_reason = DBG_REASON_WATCHPOINT;
		target->debug_reason = DBG_REASON_UNDEFINED;
1412 /*----------------------------------------------------------------------*/
1415 * Setup and management support.
1419 * Hooks up this DPM to its associated target; call only once.
1420 * Initially this only covers the register cache.
1422 * Oh, and watchpoints. Yeah.
/* Hook this DPM up to its target: install register/coprocessor accessors,
 * wire all instruction-execution operations, optionally take over
 * break/watchpoint handling, and size the bp/wp bookkeeping arrays from
 * the DIDR fields. Call only once per target. */
int armv8_dpm_setup(struct arm_dpm *dpm)
	struct arm *arm = dpm->arm;
	struct target *target = arm->target;
	struct reg_cache *cache;
	/* register access setup */
	arm->full_context = armv8_dpm_full_context;
	arm->read_core_reg = armv8_dpm_read_core_reg;
	arm->write_core_reg = armv8_dpm_write_core_reg;
	if (arm->core_cache == NULL) {
		cache = armv8_build_reg_cache(target);
	/* coprocessor access setup */
	arm->mrc = dpmv8_mrc;
	arm->mcr = dpmv8_mcr;
	arm->mrs = dpmv8_mrs;
	arm->msr = dpmv8_msr;
	dpm->prepare = dpmv8_dpm_prepare;
	dpm->finish = dpmv8_dpm_finish;
	dpm->instr_execute = dpmv8_instr_execute;
	dpm->instr_write_data_dcc = dpmv8_instr_write_data_dcc;
	dpm->instr_write_data_dcc_64 = dpmv8_instr_write_data_dcc_64;
	dpm->instr_write_data_r0 = dpmv8_instr_write_data_r0;
	dpm->instr_write_data_r0_64 = dpmv8_instr_write_data_r0_64;
	dpm->instr_cpsr_sync = dpmv8_instr_cpsr_sync;
	dpm->instr_read_data_dcc = dpmv8_instr_read_data_dcc;
	dpm->instr_read_data_dcc_64 = dpmv8_instr_read_data_dcc_64;
	dpm->instr_read_data_r0 = dpmv8_instr_read_data_r0;
	dpm->instr_read_data_r0_64 = dpmv8_instr_read_data_r0_64;
	dpm->arm_reg_current = armv8_reg_current;
	/* dpm->bpwp_enable = dpmv8_bpwp_enable; */
	dpm->bpwp_disable = dpmv8_bpwp_disable;
	/* breakpoint setup -- optional until it works everywhere */
	if (!target->type->add_breakpoint) {
		target->type->add_breakpoint = dpmv8_add_breakpoint;
		target->type->remove_breakpoint = dpmv8_remove_breakpoint;
	/* watchpoint setup */
	target->type->add_watchpoint = dpmv8_add_watchpoint;
	target->type->remove_watchpoint = dpmv8_remove_watchpoint;
	/* FIXME add vector catch support */
	/* unit counts come from DIDR: breakpoints in [15:12], watchpoints in [23:20] */
	dpm->nbp = 1 + ((dpm->didr >> 12) & 0xf);
	dpm->dbp = calloc(dpm->nbp, sizeof *dpm->dbp);
	dpm->nwp = 1 + ((dpm->didr >> 20) & 0xf);
	dpm->dwp = calloc(dpm->nwp, sizeof *dpm->dwp);
	if (!dpm->dbp || !dpm->dwp) {
	LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
		target_name(target), dpm->nbp, dpm->nwp);
	/* REVISIT ... and some of those breakpoints could match
	 * execution context IDs...
1503 * Reinitializes DPM state at the beginning of a new debug session
1504 * or after a reset which may have affected the debug module.
/* Re-initialize DPM state at session start or after reset: number every
 * break/watchpoint unit and force all units disabled. */
int armv8_dpm_initialize(struct arm_dpm *dpm)
	/* Disable all breakpoints and watchpoints at startup. */
	if (dpm->bpwp_disable) {
		for (i = 0; i < dpm->nbp; i++) {
			dpm->dbp[i].bpwp.number = i;
			(void) dpm->bpwp_disable(dpm, i);
		/* watchpoint unit numbers start at 16 (see dpmv8_bpwp_enable) */
		for (i = 0; i < dpm->nwp; i++) {
			dpm->dwp[i].bpwp.number = 16 + i;
			(void) dpm->bpwp_disable(dpm, 16 + i);
		LOG_WARNING("%s: can't disable breakpoints and watchpoints",
			target_name(dpm->arm->target));