1 /***************************************************************************
2  *   Copyright (C) 2005 by Dominic Rath                                    *
3  *   Dominic.Rath@gmx.de                                                   *
4  *                                                                         *
5  *   Copyright (C) 2006 by Magnus Lundin                                   *
6  *   lundin@mlu.mine.nu                                                    *
7  *                                                                         *
8  *   Copyright (C) 2008 by Spencer Oliver                                  *
9  *   spen@spen-soft.co.uk                                                  *
10  *                                                                         *
11  *   Copyright (C) 2009 by Dirk Behme                                      *
12  *   dirk.behme@gmail.com - copy from cortex_m3                            *
13  *                                                                         *
14  *   Copyright (C) 2010 Øyvind Harboe                                       *
15  *   oyvind.harboe@zylin.com                                               *
16  *                                                                         *
17  *   Copyright (C) ST-Ericsson SA 2011                                     *
18  *   michel.jaouen@stericsson.com : smp minimum support                    *
19  *                                                                         *
20  *   Copyright (C) Broadcom 2012                                           *
21  *   ehunter@broadcom.com : Cortex-R4 support                              *
22  *                                                                         *
23  *   Copyright (C) 2013 Kamal Dasu                                         *
24  *   kdasu.kdev@gmail.com                                                  *
25  *                                                                         *
26  *   This program is free software; you can redistribute it and/or modify  *
27  *   it under the terms of the GNU General Public License as published by  *
28  *   the Free Software Foundation; either version 2 of the License, or     *
29  *   (at your option) any later version.                                   *
30  *                                                                         *
31  *   This program is distributed in the hope that it will be useful,       *
32  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
33  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
34  *   GNU General Public License for more details.                          *
35  *                                                                         *
36  *   You should have received a copy of the GNU General Public License     *
37  *   along with this program.  If not, see <http://www.gnu.org/licenses/>. *
38  *                                                                         *
39  *   Cortex-A8(tm) TRM, ARM DDI 0344H                                      *
40  *   Cortex-A9(tm) TRM, ARM DDI 0407F                                      *
41  *   Cortex-R4(tm) TRM, ARM DDI 0363E                                      *
42  *   Cortex-A15(tm) TRM, ARM DDI 0438C                                     *
43  *                                                                         *
44  ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "target_request.h"
54 #include "target_type.h"
55 #include "arm_opcodes.h"
56 #include "arm_semihosting.h"
57 #include "jtag/swd.h"
58 #include <helper/time_support.h>
59
60 static int cortex_a_poll(struct target *target);
61 static int cortex_a_debug_entry(struct target *target);
62 static int cortex_a_restore_context(struct target *target, bool bpwp);
63 static int cortex_a_set_breakpoint(struct target *target,
64         struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_context_breakpoint(struct target *target,
66         struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_hybrid_breakpoint(struct target *target,
68         struct breakpoint *breakpoint);
69 static int cortex_a_unset_breakpoint(struct target *target,
70         struct breakpoint *breakpoint);
71 static int cortex_a_dap_read_coreregister_u32(struct target *target,
72         uint32_t *value, int regnum);
73 static int cortex_a_dap_write_coreregister_u32(struct target *target,
74         uint32_t value, int regnum);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_mmu_modify(struct target *target, int enable);
77 static int cortex_a_virt2phys(struct target *target,
78         target_addr_t virt, target_addr_t *phys);
79 static int cortex_a_read_cpu_memory(struct target *target,
80         uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
81
82
83 /*  restore cp15_control_reg at resume */
84 static int cortex_a_restore_cp15_control_reg(struct target *target)
85 {
86         int retval = ERROR_OK;
87         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
88         struct armv7a_common *armv7a = target_to_armv7a(target);
89
90         if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
91                 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
92                 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
93                 retval = armv7a->arm.mcr(target, 15,
94                                 0, 0,   /* op1, op2 */
95                                 1, 0,   /* CRn, CRm */
96                                 cortex_a->cp15_control_reg);
97         }
98         return retval;
99 }
100
101 /*
102  * Set up ARM core for memory access.
103  * If !phys_access, switch to SVC mode and make sure MMU is on
104  * If phys_access, switch off mmu
105  */
106 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
107 {
108         struct armv7a_common *armv7a = target_to_armv7a(target);
109         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
110         int mmu_enabled = 0;
111
112         if (phys_access == 0) {
113                 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
114                 cortex_a_mmu(target, &mmu_enabled);
115                 if (mmu_enabled)
116                         cortex_a_mmu_modify(target, 1);
117                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
118                         /* overwrite DACR to all-manager */
119                         armv7a->arm.mcr(target, 15,
120                                         0, 0, 3, 0,
121                                         0xFFFFFFFF);
122                 }
123         } else {
124                 cortex_a_mmu(target, &mmu_enabled);
125                 if (mmu_enabled)
126                         cortex_a_mmu_modify(target, 0);
127         }
128         return ERROR_OK;
129 }
130
131 /*
132  * Restore ARM core after memory access.
133  * If !phys_access, switch to previous mode
134  * If phys_access, restore MMU setting
135  */
136 static int cortex_a_post_memaccess(struct target *target, int phys_access)
137 {
138         struct armv7a_common *armv7a = target_to_armv7a(target);
139         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
140
141         if (phys_access == 0) {
142                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
143                         /* restore */
144                         armv7a->arm.mcr(target, 15,
145                                         0, 0, 3, 0,
146                                         cortex_a->cp15_dacr_reg);
147                 }
148                 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
149         } else {
150                 int mmu_enabled = 0;
151                 cortex_a_mmu(target, &mmu_enabled);
152                 if (mmu_enabled)
153                         cortex_a_mmu_modify(target, 1);
154         }
155         return ERROR_OK;
156 }
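/*
 * Illustrative pairing of the two helpers above (a sketch, not code copied
 * from this file): the memory-access paths later in this file are expected
 * to bracket each transfer with prep/post so that the processor mode and
 * MMU state are restored afterwards.
 *
 *     cortex_a_prep_memaccess(target, 0);     // virtual access: SVC, MMU kept on
 *     retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
 *     cortex_a_post_memaccess(target, 0);     // restore previous mode
 */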
157
158
159 /*  modify cp15_control_reg in order to enable or disable the mmu for:
160  *  - virt2phys address conversion
161  *  - reading or writing memory at physical or virtual addresses */
162 static int cortex_a_mmu_modify(struct target *target, int enable)
163 {
164         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
165         struct armv7a_common *armv7a = target_to_armv7a(target);
166         int retval = ERROR_OK;
167         int need_write = 0;
168
169         if (enable) {
170                 /*  refuse if the mmu was disabled when the target stopped */
171                 if (!(cortex_a->cp15_control_reg & 0x1U)) {
172                         LOG_ERROR("trying to enable the mmu on a target stopped with the mmu disabled");
173                         return ERROR_FAIL;
174                 }
175                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
176                         cortex_a->cp15_control_reg_curr |= 0x1U;
177                         need_write = 1;
178                 }
179         } else {
180                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
181                         cortex_a->cp15_control_reg_curr &= ~0x1U;
182                         need_write = 1;
183                 }
184         }
185
186         if (need_write) {
187                 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
188                         enable ? "enable mmu" : "disable mmu",
189                         cortex_a->cp15_control_reg_curr);
190
191                 retval = armv7a->arm.mcr(target, 15,
192                                 0, 0,   /* op1, op2 */
193                                 1, 0,   /* CRn, CRm */
194                                 cortex_a->cp15_control_reg_curr);
195         }
196         return retval;
197 }
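/* Note: only SCTLR.M (bit 0 of the CP15 c1 System Control Register) is
 * toggled here; cp15_control_reg_curr caches what the core currently has,
 * so redundant MCR writes are skipped. */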
198
199 /*
200  * Cortex-A basic debug access; very low level, assumes target state is saved
201  */
202 static int cortex_a_init_debug_access(struct target *target)
203 {
204         struct armv7a_common *armv7a = target_to_armv7a(target);
205         int retval;
206
207         /* lock memory-mapped access to debug registers to prevent
208          * software interference */
209         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
210                         armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
211         if (retval != ERROR_OK)
212                 return retval;
213
214         /* Disable cacheline fills and force cache write-through in debug state */
215         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
216                         armv7a->debug_base + CPUDBG_DSCCR, 0);
217         if (retval != ERROR_OK)
218                 return retval;
219
220         /* Disable TLB lookup and refill/eviction in debug state */
221         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
222                         armv7a->debug_base + CPUDBG_DSMCR, 0);
223         if (retval != ERROR_OK)
224                 return retval;
225
226         /* Enabling of instruction execution in debug mode is done in debug_entry code */
227
228         /* Resync breakpoint registers */
229
230         /* Since this is likely called from init or reset, update target state information*/
231         return cortex_a_poll(target);
232 }
233
234 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
235 {
236         /* Waits until InstrCompl becomes 1, indicating the instruction is done.
237          * Writes the final value of DSCR into *dscr. Set force to true to always
238          * read DSCR at least once. */
239         struct armv7a_common *armv7a = target_to_armv7a(target);
240         int64_t then = timeval_ms();
241         while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
242                 force = false;
243                 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
244                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
245                 if (retval != ERROR_OK) {
246                         LOG_ERROR("Could not read DSCR register");
247                         return retval;
248                 }
249                 if (timeval_ms() > then + 1000) {
250                         LOG_ERROR("Timeout waiting for InstrCompl=1");
251                         return ERROR_FAIL;
252                 }
253         }
254         return ERROR_OK;
255 }
256
257 /* To reduce needless round-trips, pass in a pointer to the current
258  * DSCR value.  Initialize it to zero if you just need to know the
259  * value on return from this function; or DSCR_INSTR_COMP if you
260  * happen to know that no instruction is pending.
261  */
262 static int cortex_a_exec_opcode(struct target *target,
263         uint32_t opcode, uint32_t *dscr_p)
264 {
265         uint32_t dscr;
266         int retval;
267         struct armv7a_common *armv7a = target_to_armv7a(target);
268
269         dscr = dscr_p ? *dscr_p : 0;
270
271         LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
272
273         /* Wait for InstrCompl bit to be set */
274         retval = cortex_a_wait_instrcmpl(target, &dscr, false);
275         if (retval != ERROR_OK)
276                 return retval;
277
278         retval = mem_ap_write_u32(armv7a->debug_ap,
279                         armv7a->debug_base + CPUDBG_ITR, opcode);
280         if (retval != ERROR_OK)
281                 return retval;
282
283         int64_t then = timeval_ms();
284         do {
285                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
286                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
287                 if (retval != ERROR_OK) {
288                         LOG_ERROR("Could not read DSCR register");
289                         return retval;
290                 }
291                 if (timeval_ms() > then + 1000) {
292                         LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
293                         return ERROR_FAIL;
294                 }
295         } while ((dscr & DSCR_INSTR_COMP) == 0);        /* Wait for InstrCompl bit to be set */
296
297         if (dscr_p)
298                 *dscr_p = dscr;
299
300         return retval;
301 }
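/* Typical call pattern (illustration only; see e.g. cortex_a_instr_write_data_r0
 * below): chain several opcodes through one cached DSCR value so later calls can
 * skip the initial DSCR read before issuing their instruction:
 *
 *     uint32_t dscr = DSCR_INSTR_COMP;
 *     retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
 *     retval = cortex_a_exec_opcode(target, opcode, &dscr);
 */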
302
303 /**************************************************************************
304 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
305 This can cause problems with the MMU active.
306 **************************************************************************/
307 static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
308         uint32_t *regfile)
309 {
310         int retval = ERROR_OK;
311         struct armv7a_common *armv7a = target_to_armv7a(target);
312
313         retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
314         if (retval != ERROR_OK)
315                 return retval;
316         retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
317         if (retval != ERROR_OK)
318                 return retval;
319         retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
320         if (retval != ERROR_OK)
321                 return retval;
322
323         retval = mem_ap_read_buf(armv7a->memory_ap,
324                         (uint8_t *)(&regfile[1]), 4, 15, address);
325
326         return retval;
327 }
328
329 static int cortex_a_dap_read_coreregister_u32(struct target *target,
330         uint32_t *value, int regnum)
331 {
332         int retval = ERROR_OK;
333         uint8_t reg = regnum&0xFF;
334         uint32_t dscr = 0;
335         struct armv7a_common *armv7a = target_to_armv7a(target);
336
337         if (reg > 17)
338                 return retval;
339
340         if (reg < 15) {
341                 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0"  0xEE00nE15 */
342                 retval = cortex_a_exec_opcode(target,
343                                 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
344                                 &dscr);
345                 if (retval != ERROR_OK)
346                         return retval;
347         } else if (reg == 15) {
348                 /* "MOV r0, r15"; then move r0 to DCCTX */
349                 retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
350                 if (retval != ERROR_OK)
351                         return retval;
352                 retval = cortex_a_exec_opcode(target,
353                                 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
354                                 &dscr);
355                 if (retval != ERROR_OK)
356                         return retval;
357         } else {
358                 /* "MRS r0, CPSR" or "MRS r0, SPSR"
359                  * then move r0 to DCCTX
360                  */
361                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
362                 if (retval != ERROR_OK)
363                         return retval;
364                 retval = cortex_a_exec_opcode(target,
365                                 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
366                                 &dscr);
367                 if (retval != ERROR_OK)
368                         return retval;
369         }
370
371         /* Wait for DTRTXfull, then read DTRTX */
372         int64_t then = timeval_ms();
373         while ((dscr & DSCR_DTR_TX_FULL) == 0) {
374                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
375                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
376                 if (retval != ERROR_OK)
377                         return retval;
378                 if (timeval_ms() > then + 1000) {
379                         LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
380                         return ERROR_FAIL;
381                 }
382         }
383
384         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
385                         armv7a->debug_base + CPUDBG_DTRTX, value);
386         LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
387
388         return retval;
389 }
390
391 static int cortex_a_dap_write_coreregister_u32(struct target *target,
392         uint32_t value, int regnum)
393 {
394         int retval = ERROR_OK;
395         uint8_t Rd = regnum&0xFF;
396         uint32_t dscr;
397         struct armv7a_common *armv7a = target_to_armv7a(target);
398
399         LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
400
401         /* Check that DCCRX is not full */
402         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
403                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
404         if (retval != ERROR_OK)
405                 return retval;
406         if (dscr & DSCR_DTR_RX_FULL) {
407                 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
408                 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode  0xEE100E15 */
409                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
410                                 &dscr);
411                 if (retval != ERROR_OK)
412                         return retval;
413         }
414
415         if (Rd > 17)
416                 return retval;
417
418         /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
419         LOG_DEBUG("write DCC 0x%08" PRIx32, value);
420         retval = mem_ap_write_u32(armv7a->debug_ap,
421                         armv7a->debug_base + CPUDBG_DTRRX, value);
422         if (retval != ERROR_OK)
423                 return retval;
424
425         if (Rd < 15) {
426                 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
427                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
428                                 &dscr);
429
430                 if (retval != ERROR_OK)
431                         return retval;
432         } else if (Rd == 15) {
433                 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
434                  * then "mov r15, r0"
435                  */
436                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
437                                 &dscr);
438                 if (retval != ERROR_OK)
439                         return retval;
440                 retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
441                 if (retval != ERROR_OK)
442                         return retval;
443         } else {
444                 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
445                  * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
446                  */
447                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
448                                 &dscr);
449                 if (retval != ERROR_OK)
450                         return retval;
451                 retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
452                                 &dscr);
453                 if (retval != ERROR_OK)
454                         return retval;
455
456                 /* "Prefetch flush" after modifying execution status in CPSR */
457                 if (Rd == 16) {
458                         retval = cortex_a_exec_opcode(target,
459                                         ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
460                                         &dscr);
461                         if (retval != ERROR_OK)
462                                 return retval;
463                 }
464         }
465
466         return retval;
467 }
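/* regnum encoding used by the two coreregister helpers above:
 *   0..14 -> R0..R14, 15 -> PC, 16 -> CPSR, 17 -> SPSR of the current mode.
 * Anything above 17 is silently ignored. */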
468
469 /* Write to memory mapped registers directly with no cache or mmu handling */
470 static int cortex_a_dap_write_memap_register_u32(struct target *target,
471         uint32_t address,
472         uint32_t value)
473 {
474         int retval;
475         struct armv7a_common *armv7a = target_to_armv7a(target);
476
477         retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
478
479         return retval;
480 }
481
482 /*
483  * Cortex-A implementation of Debug Programmer's Model
484  *
485  * NOTE the invariant:  these routines return with DSCR_INSTR_COMP set,
486  * so there's no need to poll for it before executing an instruction.
487  *
488  * NOTE that in several of these cases the "stall" mode might be useful.
489  * It'd let us queue a few operations together... prepare/finish might
490  * be the places to enable/disable that mode.
491  */
492
493 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
494 {
495         return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
496 }
497
498 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
499 {
500         LOG_DEBUG("write DCC 0x%08" PRIx32, data);
501         return mem_ap_write_u32(a->armv7a_common.debug_ap,
502                         a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
503 }
504
505 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
506         uint32_t *dscr_p)
507 {
508         uint32_t dscr = DSCR_INSTR_COMP;
509         int retval;
510
511         if (dscr_p)
512                 dscr = *dscr_p;
513
514         /* Wait for DTRRXfull */
515         int64_t then = timeval_ms();
516         while ((dscr & DSCR_DTR_TX_FULL) == 0) {
517                 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
518                                 a->armv7a_common.debug_base + CPUDBG_DSCR,
519                                 &dscr);
520                 if (retval != ERROR_OK)
521                         return retval;
522                 if (timeval_ms() > then + 1000) {
523                         LOG_ERROR("Timeout waiting for read dcc");
524                         return ERROR_FAIL;
525                 }
526         }
527
528         retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
529                         a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
530         if (retval != ERROR_OK)
531                 return retval;
532         /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
533
534         if (dscr_p)
535                 *dscr_p = dscr;
536
537         return retval;
538 }
539
540 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
541 {
542         struct cortex_a_common *a = dpm_to_a(dpm);
543         uint32_t dscr;
544         int retval;
545
546         /* set up invariant:  INSTR_COMP is set after every DPM operation */
547         int64_t then = timeval_ms();
548         for (;; ) {
549                 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
550                                 a->armv7a_common.debug_base + CPUDBG_DSCR,
551                                 &dscr);
552                 if (retval != ERROR_OK)
553                         return retval;
554                 if ((dscr & DSCR_INSTR_COMP) != 0)
555                         break;
556                 if (timeval_ms() > then + 1000) {
557                         LOG_ERROR("Timeout waiting for dpm prepare");
558                         return ERROR_FAIL;
559                 }
560         }
561
562         /* this "should never happen" ... */
563         if (dscr & DSCR_DTR_RX_FULL) {
564                 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
565                 /* Clear DCCRX */
566                 retval = cortex_a_exec_opcode(
567                                 a->armv7a_common.arm.target,
568                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
569                                 &dscr);
570                 if (retval != ERROR_OK)
571                         return retval;
572         }
573
574         return retval;
575 }
576
577 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
578 {
579         /* REVISIT what could be done here? */
580         return ERROR_OK;
581 }
582
583 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
584         uint32_t opcode, uint32_t data)
585 {
586         struct cortex_a_common *a = dpm_to_a(dpm);
587         int retval;
588         uint32_t dscr = DSCR_INSTR_COMP;
589
590         retval = cortex_a_write_dcc(a, data);
591         if (retval != ERROR_OK)
592                 return retval;
593
594         return cortex_a_exec_opcode(
595                         a->armv7a_common.arm.target,
596                         opcode,
597                         &dscr);
598 }
599
600 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
601         uint32_t opcode, uint32_t data)
602 {
603         struct cortex_a_common *a = dpm_to_a(dpm);
604         uint32_t dscr = DSCR_INSTR_COMP;
605         int retval;
606
607         retval = cortex_a_write_dcc(a, data);
608         if (retval != ERROR_OK)
609                 return retval;
610
611         /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
612         retval = cortex_a_exec_opcode(
613                         a->armv7a_common.arm.target,
614                         ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
615                         &dscr);
616         if (retval != ERROR_OK)
617                 return retval;
618
619         /* then the opcode, taking data from R0 */
620         retval = cortex_a_exec_opcode(
621                         a->armv7a_common.arm.target,
622                         opcode,
623                         &dscr);
624
625         return retval;
626 }
627
628 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
629 {
630         struct target *target = dpm->arm->target;
631         uint32_t dscr = DSCR_INSTR_COMP;
632
633         /* "Prefetch flush" after modifying execution status in CPSR */
634         return cortex_a_exec_opcode(target,
635                         ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
636                         &dscr);
637 }
638
639 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
640         uint32_t opcode, uint32_t *data)
641 {
642         struct cortex_a_common *a = dpm_to_a(dpm);
643         int retval;
644         uint32_t dscr = DSCR_INSTR_COMP;
645
646         /* the opcode, writing data to DCC */
647         retval = cortex_a_exec_opcode(
648                         a->armv7a_common.arm.target,
649                         opcode,
650                         &dscr);
651         if (retval != ERROR_OK)
652                 return retval;
653
654         return cortex_a_read_dcc(a, data, &dscr);
655 }
656
657
658 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
659         uint32_t opcode, uint32_t *data)
660 {
661         struct cortex_a_common *a = dpm_to_a(dpm);
662         uint32_t dscr = DSCR_INSTR_COMP;
663         int retval;
664
665         /* the opcode, writing data to R0 */
666         retval = cortex_a_exec_opcode(
667                         a->armv7a_common.arm.target,
668                         opcode,
669                         &dscr);
670         if (retval != ERROR_OK)
671                 return retval;
672
673         /* write R0 to DCC */
674         retval = cortex_a_exec_opcode(
675                         a->armv7a_common.arm.target,
676                         ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
677                         &dscr);
678         if (retval != ERROR_OK)
679                 return retval;
680
681         return cortex_a_read_dcc(a, data, &dscr);
682 }
683
684 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
685         uint32_t addr, uint32_t control)
686 {
687         struct cortex_a_common *a = dpm_to_a(dpm);
688         uint32_t vr = a->armv7a_common.debug_base;
689         uint32_t cr = a->armv7a_common.debug_base;
690         int retval;
691
692         switch (index_t) {
693                 case 0 ... 15:  /* breakpoints */
694                         vr += CPUDBG_BVR_BASE;
695                         cr += CPUDBG_BCR_BASE;
696                         break;
697                 case 16 ... 31: /* watchpoints */
698                         vr += CPUDBG_WVR_BASE;
699                         cr += CPUDBG_WCR_BASE;
700                         index_t -= 16;
701                         break;
702                 default:
703                         return ERROR_FAIL;
704         }
705         vr += 4 * index_t;
706         cr += 4 * index_t;
707
708         LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
709                 (unsigned) vr, (unsigned) cr);
710
711         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
712                         vr, addr);
713         if (retval != ERROR_OK)
714                 return retval;
715         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
716                         cr, control);
717         return retval;
718 }
719
720 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
721 {
722         struct cortex_a_common *a = dpm_to_a(dpm);
723         uint32_t cr;
724
725         switch (index_t) {
726                 case 0 ... 15:
727                         cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
728                         break;
729                 case 16 ... 31:
730                         cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
731                         index_t -= 16;
732                         break;
733                 default:
734                         return ERROR_FAIL;
735         }
736         cr += 4 * index_t;
737
738         LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
739
740         /* clear control register */
741         return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
742 }
743
744 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
745 {
746         struct arm_dpm *dpm = &a->armv7a_common.dpm;
747         int retval;
748
749         dpm->arm = &a->armv7a_common.arm;
750         dpm->didr = didr;
751
752         dpm->prepare = cortex_a_dpm_prepare;
753         dpm->finish = cortex_a_dpm_finish;
754
755         dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
756         dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
757         dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
758
759         dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
760         dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
761
762         dpm->bpwp_enable = cortex_a_bpwp_enable;
763         dpm->bpwp_disable = cortex_a_bpwp_disable;
764
765         retval = arm_dpm_setup(dpm);
766         if (retval == ERROR_OK)
767                 retval = arm_dpm_initialize(dpm);
768
769         return retval;
770 }
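/* The hooks installed above are the Cortex-A back-end for the generic
 * arm_dpm framework (arm_dpm.c), which uses them for register access,
 * CPSR synchronization and breakpoint/watchpoint (un)programming. */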
771 static struct target *get_cortex_a(struct target *target, int32_t coreid)
772 {
773         struct target_list *head;
774         struct target *curr;
775
776         head = target->head;
777         while (head != (struct target_list *)NULL) {
778                 curr = head->target;
779                 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
780                         return curr;
781                 head = head->next;
782         }
783         return target;
784 }
785 static int cortex_a_halt(struct target *target);
786
787 static int cortex_a_halt_smp(struct target *target)
788 {
789         int retval = 0;
790         struct target_list *head;
791         struct target *curr;
792         head = target->head;
793         while (head != (struct target_list *)NULL) {
794                 curr = head->target;
795                 if ((curr != target) && (curr->state != TARGET_HALTED)
796                         && target_was_examined(curr))
797                         retval += cortex_a_halt(curr);
798                 head = head->next;
799         }
800         return retval;
801 }
802
803 static int update_halt_gdb(struct target *target)
804 {
805         int retval = 0;
806         if (target->gdb_service && target->gdb_service->core[0] == -1) {
807                 target->gdb_service->target = target;
808                 target->gdb_service->core[0] = target->coreid;
809                 retval += cortex_a_halt_smp(target);
810         }
811         return retval;
812 }
813
814 /*
815  * Cortex-A Run control
816  */
817
818 static int cortex_a_poll(struct target *target)
819 {
820         int retval = ERROR_OK;
821         uint32_t dscr;
822         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
823         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
824         enum target_state prev_target_state = target->state;
825         /*  gdb toggles to another core as follows: */
826         /*  maint packet J core_id */
827         /*  continue */
828         /*  the next poll triggers a halt event sent to gdb */
829         if ((target->state == TARGET_HALTED) && (target->smp) &&
830                 (target->gdb_service) &&
831                 (target->gdb_service->target == NULL)) {
832                 target->gdb_service->target =
833                         get_cortex_a(target, target->gdb_service->core[1]);
834                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
835                 return retval;
836         }
837         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
838                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
839         if (retval != ERROR_OK)
840                 return retval;
841         cortex_a->cpudbg_dscr = dscr;
842
843         if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
844                 if (prev_target_state != TARGET_HALTED) {
845                         /* We have a halting debug event */
846                         LOG_DEBUG("Target halted");
847                         target->state = TARGET_HALTED;
848                         if ((prev_target_state == TARGET_RUNNING)
849                                 || (prev_target_state == TARGET_UNKNOWN)
850                                 || (prev_target_state == TARGET_RESET)) {
851                                 retval = cortex_a_debug_entry(target);
852                                 if (retval != ERROR_OK)
853                                         return retval;
854                                 if (target->smp) {
855                                         retval = update_halt_gdb(target);
856                                         if (retval != ERROR_OK)
857                                                 return retval;
858                                 }
859
860                                 if (arm_semihosting(target, &retval) != 0)
861                                         return retval;
862
863                                 target_call_event_callbacks(target,
864                                         TARGET_EVENT_HALTED);
865                         }
866                         if (prev_target_state == TARGET_DEBUG_RUNNING) {
867                                 LOG_DEBUG(" ");
868
869                                 retval = cortex_a_debug_entry(target);
870                                 if (retval != ERROR_OK)
871                                         return retval;
872                                 if (target->smp) {
873                                         retval = update_halt_gdb(target);
874                                         if (retval != ERROR_OK)
875                                                 return retval;
876                                 }
877
878                                 target_call_event_callbacks(target,
879                                         TARGET_EVENT_DEBUG_HALTED);
880                         }
881                 }
882         } else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
883                 target->state = TARGET_RUNNING;
884         else {
885                 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
886                 target->state = TARGET_UNKNOWN;
887         }
888
889         return retval;
890 }
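/* DSCR_CORE_HALTED and DSCR_CORE_RESTARTED correspond to DBGDSCR bits [0]
 * and [1]; DSCR_RUN_MODE() (see cortex_a.h) is assumed here to extract just
 * those status bits so they can be compared against the combinations above. */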
891
892 static int cortex_a_halt(struct target *target)
893 {
894         int retval = ERROR_OK;
895         uint32_t dscr;
896         struct armv7a_common *armv7a = target_to_armv7a(target);
897
898         /*
899          * Tell the core to be halted by writing DRCR with 0x1
900          * and then wait for the core to be halted.
901          */
902         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
903                         armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
904         if (retval != ERROR_OK)
905                 return retval;
906
907         /*
908          * enter halting debug mode
909          */
910         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
911                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
912         if (retval != ERROR_OK)
913                 return retval;
914
915         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
916                         armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
917         if (retval != ERROR_OK)
918                 return retval;
919
920         int64_t then = timeval_ms();
921         for (;; ) {
922                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
923                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
924                 if (retval != ERROR_OK)
925                         return retval;
926                 if ((dscr & DSCR_CORE_HALTED) != 0)
927                         break;
928                 if (timeval_ms() > then + 1000) {
929                         LOG_ERROR("Timeout waiting for halt");
930                         return ERROR_FAIL;
931                 }
932         }
933
934         target->debug_reason = DBG_REASON_DBGRQ;
935
936         return ERROR_OK;
937 }
938
939 static int cortex_a_internal_restore(struct target *target, int current,
940         target_addr_t *address, int handle_breakpoints, int debug_execution)
941 {
942         struct armv7a_common *armv7a = target_to_armv7a(target);
943         struct arm *arm = &armv7a->arm;
944         int retval;
945         uint32_t resume_pc;
946
947         if (!debug_execution)
948                 target_free_all_working_areas(target);
949
950 #if 0
951         if (debug_execution) {
952                 /* Disable interrupts */
953                 /* We disable interrupts in the PRIMASK register instead of
954                  * masking with C_MASKINTS,
955                  * This is probably the same issue as Cortex-M3 Errata 377493:
956                  * C_MASKINTS in parallel with disabled interrupts can cause
957                  * local faults to not be taken. */
958                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
959                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
960                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
961
962                 /* Make sure we are in Thumb mode */
963                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
964                         buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
965                         32) | (1 << 24));
966                 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
967                 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
968         }
969 #endif
970
971         /* current = 1: continue on current pc, otherwise continue at <address> */
972         resume_pc = buf_get_u32(arm->pc->value, 0, 32);
973         if (!current)
974                 resume_pc = *address;
975         else
976                 *address = resume_pc;
977
978         /* Make sure that the ARMv7 gdb thumb fixups do not
979          * kill the return address
980          */
981         switch (arm->core_state) {
982                 case ARM_STATE_ARM:
983                         resume_pc &= 0xFFFFFFFC;
984                         break;
985                 case ARM_STATE_THUMB:
986                 case ARM_STATE_THUMB_EE:
987                         /* When the return address is loaded into PC
988                          * bit 0 must be 1 to stay in Thumb state
989                          */
990                         resume_pc |= 0x1;
991                         break;
992                 case ARM_STATE_JAZELLE:
993                         LOG_ERROR("How do I resume into Jazelle state??");
994                         return ERROR_FAIL;
995                 case ARM_STATE_AARCH64:
996                         LOG_ERROR("Shouldn't be in AArch64 state");
997                         return ERROR_FAIL;
998         }
999         LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
1000         buf_set_u32(arm->pc->value, 0, 32, resume_pc);
1001         arm->pc->dirty = 1;
1002         arm->pc->valid = 1;
1003
1004         /* restore dpm_mode at system halt */
1005         dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1006         /* call it now, before restoring context, because it uses cpu
1007          * register r0 for restoring the cp15 control register */
1008         retval = cortex_a_restore_cp15_control_reg(target);
1009         if (retval != ERROR_OK)
1010                 return retval;
1011         retval = cortex_a_restore_context(target, handle_breakpoints);
1012         if (retval != ERROR_OK)
1013                 return retval;
1014         target->debug_reason = DBG_REASON_NOTHALTED;
1015         target->state = TARGET_RUNNING;
1016
1017         /* registers are now invalid */
1018         register_cache_invalidate(arm->core_cache);
1019
1020 #if 0
1021         /* the front-end may request us not to handle breakpoints */
1022         if (handle_breakpoints) {
1023                 /* Single step past breakpoint at current address */
1024                 breakpoint = breakpoint_find(target, resume_pc);
1025                 if (breakpoint) {
1026                         LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1027                         cortex_m3_unset_breakpoint(target, breakpoint);
1028                         cortex_m3_single_step_core(target);
1029                         cortex_m3_set_breakpoint(target, breakpoint);
1030                 }
1031         }
1032
1033 #endif
1034         return retval;
1035 }
1036
1037 static int cortex_a_internal_restart(struct target *target)
1038 {
1039         struct armv7a_common *armv7a = target_to_armv7a(target);
1040         struct arm *arm = &armv7a->arm;
1041         int retval;
1042         uint32_t dscr;
1043         /*
1044          * Restart core and wait for it to be started.  Clear ITRen and sticky
1045          * exception flags: see ARMv7 ARM, C5.9.
1046          *
1047          * REVISIT: for single stepping, we probably want to
1048          * disable IRQs by default, with optional override...
1049          */
1050
1051         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1052                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1053         if (retval != ERROR_OK)
1054                 return retval;
1055
1056         if ((dscr & DSCR_INSTR_COMP) == 0)
1057                 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1058
1059         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1060                         armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
1061         if (retval != ERROR_OK)
1062                 return retval;
1063
1064         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1065                         armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
1066                         DRCR_CLEAR_EXCEPTIONS);
1067         if (retval != ERROR_OK)
1068                 return retval;
1069
1070         int64_t then = timeval_ms();
1071         for (;; ) {
1072                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1073                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1074                 if (retval != ERROR_OK)
1075                         return retval;
1076                 if ((dscr & DSCR_CORE_RESTARTED) != 0)
1077                         break;
1078                 if (timeval_ms() > then + 1000) {
1079                         LOG_ERROR("Timeout waiting for resume");
1080                         return ERROR_FAIL;
1081                 }
1082         }
1083
1084         target->debug_reason = DBG_REASON_NOTHALTED;
1085         target->state = TARGET_RUNNING;
1086
1087         /* registers are now invalid */
1088         register_cache_invalidate(arm->core_cache);
1089
1090         return ERROR_OK;
1091 }
1092
1093 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1094 {
1095         int retval = 0;
1096         struct target_list *head;
1097         struct target *curr;
1098         target_addr_t address;
1099         head = target->head;
1100         while (head != (struct target_list *)NULL) {
1101                 curr = head->target;
1102                 if ((curr != target) && (curr->state != TARGET_RUNNING)
1103                         && target_was_examined(curr)) {
1104                         /*  resume at current address, not in step mode */
1105                         retval += cortex_a_internal_restore(curr, 1, &address,
1106                                         handle_breakpoints, 0);
1107                         retval += cortex_a_internal_restart(curr);
1108                 }
1109                 head = head->next;
1110
1111         }
1112         return retval;
1113 }
1114
1115 static int cortex_a_resume(struct target *target, int current,
1116         target_addr_t address, int handle_breakpoints, int debug_execution)
1117 {
1118         int retval = 0;
1119         /* dummy resume for smp toggle in order to reduce gdb impact  */
1120         if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1121                 /*   simulate a start and halt of target */
1122                 target->gdb_service->target = NULL;
1123                 target->gdb_service->core[0] = target->gdb_service->core[1];
1124                 /*  fake resume; at the next poll we play target core[1], see cortex_a_poll */
1125                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1126                 return 0;
1127         }
1128         cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1129         if (target->smp) {
1130                 target->gdb_service->core[0] = -1;
1131                 retval = cortex_a_restore_smp(target, handle_breakpoints);
1132                 if (retval != ERROR_OK)
1133                         return retval;
1134         }
1135         cortex_a_internal_restart(target);
1136
1137         if (!debug_execution) {
1138                 target->state = TARGET_RUNNING;
1139                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1140                 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
1141         } else {
1142                 target->state = TARGET_DEBUG_RUNNING;
1143                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1144                 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
1145         }
1146
1147         return ERROR_OK;
1148 }
1149
1150 static int cortex_a_debug_entry(struct target *target)
1151 {
1152         int i;
1153         uint32_t regfile[16], cpsr, spsr, dscr;
1154         int retval = ERROR_OK;
1155         struct working_area *regfile_working_area = NULL;
1156         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1157         struct armv7a_common *armv7a = target_to_armv7a(target);
1158         struct arm *arm = &armv7a->arm;
1159         struct reg *reg;
1160
1161         LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);
1162
1163         /* REVISIT surely we should not re-read DSCR !! */
1164         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1165                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1166         if (retval != ERROR_OK)
1167                 return retval;
1168
1169         /* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
1170          * imprecise data aborts get discarded by issuing a Data
1171          * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1172          */
1173
1174         /* Enable the ITR execution once we are in debug mode */
1175         dscr |= DSCR_ITR_EN;
1176         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1177                         armv7a->debug_base + CPUDBG_DSCR, dscr);
1178         if (retval != ERROR_OK)
1179                 return retval;
1180
1181         /* Examine debug reason */
1182         arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);
1183
1184         /* save address of instruction that triggered the watchpoint? */
1185         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1186                 uint32_t wfar;
1187
1188                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1189                                 armv7a->debug_base + CPUDBG_WFAR,
1190                                 &wfar);
1191                 if (retval != ERROR_OK)
1192                         return retval;
1193                 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1194         }
1195
1196         /* REVISIT fast_reg_read is never set ... */
1197
1198         /* Examine target state and mode */
1199         if (cortex_a->fast_reg_read)
1200                 target_alloc_working_area(target, 64, &regfile_working_area);
1201
1202
1203         /* First load registers accessible through the core debug port */
1204         if (!regfile_working_area)
1205                 retval = arm_dpm_read_current_registers(&armv7a->dpm);
1206         else {
1207                 retval = cortex_a_read_regs_through_mem(target,
1208                                 regfile_working_area->address, regfile);
1209
1210                 target_free_working_area(target, regfile_working_area);
1211                 if (retval != ERROR_OK)
1212                         return retval;
1213
1214                 /* read Current PSR */
1215                 retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
1216                 /*  store current cpsr */
1217                 if (retval != ERROR_OK)
1218                         return retval;
1219
1220                 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1221
1222                 arm_set_cpsr(arm, cpsr);
1223
1224                 /* update cache */
1225                 for (i = 0; i <= ARM_PC; i++) {
1226                         reg = arm_reg_current(arm, i);
1227
1228                         buf_set_u32(reg->value, 0, 32, regfile[i]);
1229                         reg->valid = 1;
1230                         reg->dirty = 0;
1231                 }
1232
1233                 /* Fixup PC Resume Address */
1234                 if (cpsr & (1 << 5)) {
1235                         /* T bit set for Thumb or ThumbEE state */
1236                         regfile[ARM_PC] -= 4;
1237                 } else {
1238                         /* ARM state */
1239                         regfile[ARM_PC] -= 8;
1240                 }
1241
1242                 reg = arm->pc;
1243                 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1244                 reg->dirty = reg->valid;
1245         }
1246
1247         if (arm->spsr) {
1248                 /* read Saved PSR */
1249                 retval = cortex_a_dap_read_coreregister_u32(target, &spsr, 17);
1250                 /*  store current spsr */
1251                 if (retval != ERROR_OK)
1252                         return retval;
1253
1254                 reg = arm->spsr;
1255                 buf_set_u32(reg->value, 0, 32, spsr);
1256                 reg->valid = 1;
1257                 reg->dirty = 0;
1258         }
1259
1260 #if 0
1261 /* TODO, Move this */
1262         uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1263         cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1264         LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1265
1266         cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1267         LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1268
1269         cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1270         LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1271 #endif
1272
1273         /* Are we in an exception handler */
1274 /*      armv4_5->exception_number = 0; */
1275         if (armv7a->post_debug_entry) {
1276                 retval = armv7a->post_debug_entry(target);
1277                 if (retval != ERROR_OK)
1278                         return retval;
1279         }
1280
1281         return retval;
1282 }
1283
1284 static int cortex_a_post_debug_entry(struct target *target)
1285 {
1286         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1287         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1288         int retval;
1289
1290         /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1291         retval = armv7a->arm.mrc(target, 15,
1292                         0, 0,   /* op1, op2 */
1293                         1, 0,   /* CRn, CRm */
1294                         &cortex_a->cp15_control_reg);
1295         if (retval != ERROR_OK)
1296                 return retval;
1297         LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1298         cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1299
1300         if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1301                 armv7a_identify_cache(target);
1302
1303         if (armv7a->is_armv7r) {
1304                 armv7a->armv7a_mmu.mmu_enabled = 0;
1305         } else {
1306                 armv7a->armv7a_mmu.mmu_enabled =
1307                         (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1308         }
1309         armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1310                 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1311         armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1312                 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1313         cortex_a->curr_mode = armv7a->arm.core_mode;
1314
1315         /* switch to SVC mode to read DACR */
1316         dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1317         armv7a->arm.mrc(target, 15,
1318                         0, 0, 3, 0,
1319                         &cortex_a->cp15_dacr_reg);
1320
1321         LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1322                         cortex_a->cp15_dacr_reg);
1323
1324         dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1325         return ERROR_OK;
1326 }
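/* SCTLR bits decoded above: bit 0 = M (MMU enable), bit 2 = C (data/unified
 * cache enable), bit 12 = I (instruction cache enable). */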
1327
1328 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1329 {
1330         struct armv7a_common *armv7a = target_to_armv7a(target);
1331         uint32_t dscr;
1332
1333         /* Read DSCR */
1334         int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1335                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1336         if (retval != ERROR_OK)
1337                 return retval;
1338
1339         /* clear bitfield */
1340         dscr &= ~bit_mask;
1341         /* put new value */
1342         dscr |= value & bit_mask;
1343
1344         /* write new DSCR */
1345         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1346                         armv7a->debug_base + CPUDBG_DSCR, dscr);
1347         return retval;
1348 }
1349
1350 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1351         int handle_breakpoints)
1352 {
1353         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1354         struct armv7a_common *armv7a = target_to_armv7a(target);
1355         struct arm *arm = &armv7a->arm;
1356         struct breakpoint *breakpoint = NULL;
1357         struct breakpoint stepbreakpoint;
1358         struct reg *r;
1359         int retval;
1360
1361         if (target->state != TARGET_HALTED) {
1362                 LOG_WARNING("target not halted");
1363                 return ERROR_TARGET_NOT_HALTED;
1364         }
1365
1366         /* current = 1: continue on current pc, otherwise continue at <address> */
1367         r = arm->pc;
1368         if (!current)
1369                 buf_set_u32(r->value, 0, 32, address);
1370         else
1371                 address = buf_get_u32(r->value, 0, 32);
1372
1373         /* The front-end may request us not to handle breakpoints.
1374          * But since Cortex-A uses breakpoint for single step,
1375          * we MUST handle breakpoints.
1376          */
1377         handle_breakpoints = 1;
1378         if (handle_breakpoints) {
1379                 breakpoint = breakpoint_find(target, address);
1380                 if (breakpoint)
1381                         cortex_a_unset_breakpoint(target, breakpoint);
1382         }
1383
1384         /* Setup single step breakpoint */
1385         stepbreakpoint.address = address;
1386         stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1387                 ? 2 : 4;
1388         stepbreakpoint.type = BKPT_HARD;
1389         stepbreakpoint.set = 0;
1390
1391         /* Disable interrupts during single step if requested */
1392         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1393                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1394                 if (retval != ERROR_OK)
1395                         return retval;
1396         }
1397
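        /* Matchmode 0x04 programs the BRP for instruction address *mismatch*:
         * the core resumes and halts again at the first fetch from any address
         * other than that of the instruction being stepped, i.e. after
         * executing exactly one instruction. */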
1398         /* Break on IVA mismatch */
1399         cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1400
1401         target->debug_reason = DBG_REASON_SINGLESTEP;
1402
1403         retval = cortex_a_resume(target, 1, address, 0, 0);
1404         if (retval != ERROR_OK)
1405                 return retval;
1406
1407         int64_t then = timeval_ms();
1408         while (target->state != TARGET_HALTED) {
1409                 retval = cortex_a_poll(target);
1410                 if (retval != ERROR_OK)
1411                         return retval;
1412                 if (timeval_ms() > then + 1000) {
1413                         LOG_ERROR("timeout waiting for target halt");
1414                         return ERROR_FAIL;
1415                 }
1416         }
1417
1418         cortex_a_unset_breakpoint(target, &stepbreakpoint);
1419
1420         /* Re-enable interrupts if they were disabled */
1421         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1422                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1423                 if (retval != ERROR_OK)
1424                         return retval;
1425         }
1426
1427
1428         target->debug_reason = DBG_REASON_BREAKPOINT;
1429
1430         if (breakpoint)
1431                 cortex_a_set_breakpoint(target, breakpoint, 0);
1432
1433         if (target->state == TARGET_HALTED)
1434                 LOG_DEBUG("target stepped");
1435
1436         return ERROR_OK;
1437 }
1438
1439 static int cortex_a_restore_context(struct target *target, bool bpwp)
1440 {
1441         struct armv7a_common *armv7a = target_to_armv7a(target);
1442
1443         LOG_DEBUG(" ");
1444
1445         if (armv7a->pre_restore_context)
1446                 armv7a->pre_restore_context(target);
1447
1448         return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1449 }
1450
1451 /*
1452  * Cortex-A Breakpoint and watchpoint functions
1453  */
1454
1455 /* Setup hardware Breakpoint Register Pair */
1456 static int cortex_a_set_breakpoint(struct target *target,
1457         struct breakpoint *breakpoint, uint8_t matchmode)
1458 {
1459         int retval;
1460         int brp_i = 0;
1461         uint32_t control;
1462         uint8_t byte_addr_select = 0x0F;
1463         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1464         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1465         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1466
1467         if (breakpoint->set) {
1468                 LOG_WARNING("breakpoint already set");
1469                 return ERROR_OK;
1470         }
1471
1472         if (breakpoint->type == BKPT_HARD) {
1473                 while ((brp_i < cortex_a->brp_num) && brp_list[brp_i].used)
1474                         brp_i++;
1475                 if (brp_i >= cortex_a->brp_num) {
1476                         LOG_ERROR("Cannot find free Breakpoint Register Pair");
1477                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1478                 }
1479                 breakpoint->set = brp_i + 1;
1480                 if (breakpoint->length == 2)
1481                         byte_addr_select = (3 << (breakpoint->address & 0x02));
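                /* Assemble the DBGBCR value: bits[22:20] = match mode (part of
                 * the breakpoint type field), bits[8:5] = byte address select,
                 * bits[2:1] = privileged/user access control (0b11 = both),
                 * bit[0] = breakpoint enable. */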
1482                 control = ((matchmode & 0x7) << 20)
1483                         | (byte_addr_select << 5)
1484                         | (3 << 1) | 1;
1485                 brp_list[brp_i].used = 1;
1486                 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1487                 brp_list[brp_i].control = control;
1488                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1489                                 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1490                                 brp_list[brp_i].value);
1491                 if (retval != ERROR_OK)
1492                         return retval;
1493                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1494                                 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1495                                 brp_list[brp_i].control);
1496                 if (retval != ERROR_OK)
1497                         return retval;
1498                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1499                         brp_list[brp_i].control,
1500                         brp_list[brp_i].value);
1501         } else if (breakpoint->type == BKPT_SOFT) {
1502                 uint8_t code[4];
1503                 if (breakpoint->length == 2)
1504                         buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1505                 else
1506                         buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1507                 retval = target_read_memory(target,
1508                                 breakpoint->address & 0xFFFFFFFE,
1509                                 breakpoint->length, 1,
1510                                 breakpoint->orig_instr);
1511                 if (retval != ERROR_OK)
1512                         return retval;
1513
1514                 /* make sure data cache is cleaned & invalidated down to PoC */
1515                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1516                         armv7a_cache_flush_virt(target, breakpoint->address,
1517                                                 breakpoint->length);
1518                 }
1519
1520                 retval = target_write_memory(target,
1521                                 breakpoint->address & 0xFFFFFFFE,
1522                                 breakpoint->length, 1, code);
1523                 if (retval != ERROR_OK)
1524                         return retval;
1525
1526                 /* update i-cache at breakpoint location */
1527                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1528                                         breakpoint->length);
1529                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1530                                                  breakpoint->length);
1531
1532                 breakpoint->set = 0x11; /* Any nonzero value will do */
1533         }
1534
1535         return ERROR_OK;
1536 }
1537
1538 static int cortex_a_set_context_breakpoint(struct target *target,
1539         struct breakpoint *breakpoint, uint8_t matchmode)
1540 {
1541         int retval = ERROR_FAIL;
1542         int brp_i = 0;
1543         uint32_t control;
1544         uint8_t byte_addr_select = 0x0F;
1545         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1546         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1547         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1548
1549         if (breakpoint->set) {
1550                 LOG_WARNING("breakpoint already set");
1551                 return retval;
1552         }
1553         /*check available context BRPs*/
1554         while ((brp_i < cortex_a->brp_num) && (brp_list[brp_i].used ||
1555                 (brp_list[brp_i].type != BRP_CONTEXT)))
1556                 brp_i++;
1557
1558         if (brp_i >= cortex_a->brp_num) {
1559                 LOG_ERROR("Cannot find free Breakpoint Register Pair");
1560                 return ERROR_FAIL;
1561         }
1562
1563         breakpoint->set = brp_i + 1;
1564         control = ((matchmode & 0x7) << 20)
1565                 | (byte_addr_select << 5)
1566                 | (3 << 1) | 1;
1567         brp_list[brp_i].used = 1;
1568         brp_list[brp_i].value = (breakpoint->asid);
1569         brp_list[brp_i].control = control;
1570         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1571                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1572                         brp_list[brp_i].value);
1573         if (retval != ERROR_OK)
1574                 return retval;
1575         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1576                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1577                         brp_list[brp_i].control);
1578         if (retval != ERROR_OK)
1579                 return retval;
1580         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1581                 brp_list[brp_i].control,
1582                 brp_list[brp_i].value);
1583         return ERROR_OK;
1584
1585 }
1586
1587 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1588 {
1589         int retval = ERROR_FAIL;
1590         int brp_1 = 0;  /* holds the contextID pair */
1591         int brp_2 = 0;  /* holds the IVA pair */
1592         uint32_t control_CTX, control_IVA;
1593         uint8_t CTX_byte_addr_select = 0x0F;
1594         uint8_t IVA_byte_addr_select = 0x0F;
1595         uint8_t CTX_matchmode = 0x03;
1596         uint8_t IVA_matchmode = 0x01;
1597         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1598         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1599         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1600
1601         if (breakpoint->set) {
1602                 LOG_WARNING("breakpoint already set");
1603                 return retval;
1604         }
1605         /*check available context BRPs*/
1606         while ((brp_1 < cortex_a->brp_num) && (brp_list[brp_1].used ||
1607                 (brp_list[brp_1].type != BRP_CONTEXT)))
1608                 brp_1++;
1609
1610         LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1611         if (brp_1 >= cortex_a->brp_num) {
1612                 LOG_ERROR("Cannot find free Breakpoint Register Pair");
1613                 return ERROR_FAIL;
1614         }
1615
1616         while ((brp_2 < cortex_a->brp_num) && (brp_list[brp_2].used ||
1617                 (brp_list[brp_2].type != BRP_NORMAL)))
1618                 brp_2++;
1619
1620         LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1621         if (brp_2 >= cortex_a->brp_num) {
1622                 LOG_ERROR("Cannot find free Breakpoint Register Pair");
1623                 return ERROR_FAIL;
1624         }
1625
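        /* The two BRPs are linked: the context-ID BRP carries the number of
         * the IVA BRP in its LBN field (bits[19:16]) and vice versa, so the
         * breakpoint only fires when both the instruction address and the
         * current context ID match. */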
1626         breakpoint->set = brp_1 + 1;
1627         breakpoint->linked_BRP = brp_2;
1628         control_CTX = ((CTX_matchmode & 0x7) << 20)
1629                 | (brp_2 << 16)
1630                 | (0 << 14)
1631                 | (CTX_byte_addr_select << 5)
1632                 | (3 << 1) | 1;
1633         brp_list[brp_1].used = 1;
1634         brp_list[brp_1].value = (breakpoint->asid);
1635         brp_list[brp_1].control = control_CTX;
1636         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1637                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1638                         brp_list[brp_1].value);
1639         if (retval != ERROR_OK)
1640                 return retval;
1641         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1642                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1643                         brp_list[brp_1].control);
1644         if (retval != ERROR_OK)
1645                 return retval;
1646
1647         control_IVA = ((IVA_matchmode & 0x7) << 20)
1648                 | (brp_1 << 16)
1649                 | (IVA_byte_addr_select << 5)
1650                 | (3 << 1) | 1;
1651         brp_list[brp_2].used = 1;
1652         brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1653         brp_list[brp_2].control = control_IVA;
1654         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1655                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1656                         brp_list[brp_2].value);
1657         if (retval != ERROR_OK)
1658                 return retval;
1659         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1660                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1661                         brp_list[brp_2].control);
1662         if (retval != ERROR_OK)
1663                 return retval;
1664
1665         return ERROR_OK;
1666 }
1667
1668 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1669 {
1670         int retval;
1671         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1672         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1673         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1674
1675         if (!breakpoint->set) {
1676                 LOG_WARNING("breakpoint not set");
1677                 return ERROR_OK;
1678         }
1679
1680         if (breakpoint->type == BKPT_HARD) {
1681                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1682                         int brp_i = breakpoint->set - 1;
1683                         int brp_j = breakpoint->linked_BRP;
1684                         if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1685                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1686                                 return ERROR_OK;
1687                         }
1688                         LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1689                                 brp_list[brp_i].control, brp_list[brp_i].value);
1690                         brp_list[brp_i].used = 0;
1691                         brp_list[brp_i].value = 0;
1692                         brp_list[brp_i].control = 0;
1693                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1694                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1695                                         brp_list[brp_i].control);
1696                         if (retval != ERROR_OK)
1697                                 return retval;
1698                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1699                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1700                                         brp_list[brp_i].value);
1701                         if (retval != ERROR_OK)
1702                                 return retval;
1703                         if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1704                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1705                                 return ERROR_OK;
1706                         }
1707                         LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1708                                 brp_list[brp_j].control, brp_list[brp_j].value);
1709                         brp_list[brp_j].used = 0;
1710                         brp_list[brp_j].value = 0;
1711                         brp_list[brp_j].control = 0;
1712                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1713                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1714                                         brp_list[brp_j].control);
1715                         if (retval != ERROR_OK)
1716                                 return retval;
1717                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1718                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1719                                         brp_list[brp_j].value);
1720                         if (retval != ERROR_OK)
1721                                 return retval;
1722                         breakpoint->linked_BRP = 0;
1723                         breakpoint->set = 0;
1724                         return ERROR_OK;
1725
1726                 } else {
1727                         int brp_i = breakpoint->set - 1;
1728                         if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1729                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1730                                 return ERROR_OK;
1731                         }
1732                         LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1733                                 brp_list[brp_i].control, brp_list[brp_i].value);
1734                         brp_list[brp_i].used = 0;
1735                         brp_list[brp_i].value = 0;
1736                         brp_list[brp_i].control = 0;
1737                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1738                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1739                                         brp_list[brp_i].control);
1740                         if (retval != ERROR_OK)
1741                                 return retval;
1742                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1743                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1744                                         brp_list[brp_i].value);
1745                         if (retval != ERROR_OK)
1746                                 return retval;
1747                         breakpoint->set = 0;
1748                         return ERROR_OK;
1749                 }
1750         } else {
1751
1752                 /* make sure data cache is cleaned & invalidated down to PoC */
1753                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1754                         armv7a_cache_flush_virt(target, breakpoint->address,
1755                                                 breakpoint->length);
1756                 }
1757
1758                 /* restore original instruction (kept in target endianness) */
1759                 if (breakpoint->length == 4) {
1760                         retval = target_write_memory(target,
1761                                         breakpoint->address & 0xFFFFFFFE,
1762                                         4, 1, breakpoint->orig_instr);
1763                         if (retval != ERROR_OK)
1764                                 return retval;
1765                 } else {
1766                         retval = target_write_memory(target,
1767                                         breakpoint->address & 0xFFFFFFFE,
1768                                         2, 1, breakpoint->orig_instr);
1769                         if (retval != ERROR_OK)
1770                                 return retval;
1771                 }
1772
1773                 /* update i-cache at breakpoint location */
1774                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1775                                                  breakpoint->length);
1776                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1777                                                  breakpoint->length);
1778         }
1779         breakpoint->set = 0;
1780
1781         return ERROR_OK;
1782 }
1783
1784 static int cortex_a_add_breakpoint(struct target *target,
1785         struct breakpoint *breakpoint)
1786 {
1787         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1788
1789         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1790                 LOG_INFO("no hardware breakpoint available");
1791                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1792         }
1793
1794         if (breakpoint->type == BKPT_HARD)
1795                 cortex_a->brp_num_available--;
1796
1797         return cortex_a_set_breakpoint(target, breakpoint, 0x00);       /* Exact match */
1798 }
1799
1800 static int cortex_a_add_context_breakpoint(struct target *target,
1801         struct breakpoint *breakpoint)
1802 {
1803         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1804
1805         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1806                 LOG_INFO("no hardware breakpoint available");
1807                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1808         }
1809
1810         if (breakpoint->type == BKPT_HARD)
1811                 cortex_a->brp_num_available--;
1812
1813         return cortex_a_set_context_breakpoint(target, breakpoint, 0x02);       /* asid match */
1814 }
1815
1816 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1817         struct breakpoint *breakpoint)
1818 {
1819         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1820
1821         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1822                 LOG_INFO("no hardware breakpoint available");
1823                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1824         }
1825
1826         if (breakpoint->type == BKPT_HARD)
1827                 cortex_a->brp_num_available--;
1828
1829         return cortex_a_set_hybrid_breakpoint(target, breakpoint);     /* linked context-ID + IVA match */
1830 }
1831
1832
1833 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1834 {
1835         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1836
1837 #if 0
1838 /* It is perfectly possible to remove breakpoints while the target is running */
1839         if (target->state != TARGET_HALTED) {
1840                 LOG_WARNING("target not halted");
1841                 return ERROR_TARGET_NOT_HALTED;
1842         }
1843 #endif
1844
1845         if (breakpoint->set) {
1846                 cortex_a_unset_breakpoint(target, breakpoint);
1847                 if (breakpoint->type == BKPT_HARD)
1848                         cortex_a->brp_num_available++;
1849         }
1850
1851
1852         return ERROR_OK;
1853 }
1854
1855 /*
1856  * Cortex-A Reset functions
1857  */
1858
1859 static int cortex_a_assert_reset(struct target *target)
1860 {
1861         struct armv7a_common *armv7a = target_to_armv7a(target);
1862
1863         LOG_DEBUG(" ");
1864
1865         /* FIXME when halt is requested, make it work somehow... */
1866
1867         /* This function can be called in "target not examined" state */
1868
1869         /* Issue some kind of warm reset. */
1870         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1871                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1872         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1873                 /* REVISIT handle "pulls" cases, if there's
1874                  * hardware that needs them to work.
1875                  */
1876
1877                 /*
1878                  * FIXME: fix reset when transport is SWD. This is a temporary
1879                  * work-around for release v0.10 that is not intended to stay!
1880                  */
1881                 if (transport_is_swd() ||
1882                                 (target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
1883                         jtag_add_reset(0, 1);
1884
1885         } else {
1886                 LOG_ERROR("%s: how to reset?", target_name(target));
1887                 return ERROR_FAIL;
1888         }
1889
1890         /* registers are now invalid */
1891         if (target_was_examined(target))
1892                 register_cache_invalidate(armv7a->arm.core_cache);
1893
1894         target->state = TARGET_RESET;
1895
1896         return ERROR_OK;
1897 }
1898
1899 static int cortex_a_deassert_reset(struct target *target)
1900 {
1901         int retval;
1902
1903         LOG_DEBUG(" ");
1904
1905         /* be certain SRST is off */
1906         jtag_add_reset(0, 0);
1907
1908         if (target_was_examined(target)) {
1909                 retval = cortex_a_poll(target);
1910                 if (retval != ERROR_OK)
1911                         return retval;
1912         }
1913
1914         if (target->reset_halt) {
1915                 if (target->state != TARGET_HALTED) {
1916                         LOG_WARNING("%s: ran after reset and before halt ...",
1917                                 target_name(target));
1918                         if (target_was_examined(target)) {
1919                                 retval = target_halt(target);
1920                                 if (retval != ERROR_OK)
1921                                         return retval;
1922                         } else
1923                                 target->state = TARGET_UNKNOWN;
1924                 }
1925         }
1926
1927         return ERROR_OK;
1928 }
1929
1930 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1931 {
1932         /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1933          * The new desired mode must be passed in 'mode'; the current value of
1934          * DSCR must be in *dscr, which is updated with the new value.
1935          *
1936          * This function elides actually sending the mode-change over the debug
1937          * interface if the mode is already set as desired.
1938          */
1939         uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1940         if (new_dscr != *dscr) {
1941                 struct armv7a_common *armv7a = target_to_armv7a(target);
1942                 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1943                                 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1944                 if (retval == ERROR_OK)
1945                         *dscr = new_dscr;
1946                 return retval;
1947         } else {
1948                 return ERROR_OK;
1949         }
1950 }
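
/* Note on DCC modes: the CPU memory helpers below use non-blocking mode
 * (DSCR_EXT_DCC_NON_BLOCKING) for the per-object slow paths and fast mode
 * (DSCR_EXT_DCC_FAST_MODE) for bulk word transfers, where every DTR access
 * re-issues the instruction latched in the ITR. */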
1951
1952 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1953         uint32_t value, uint32_t *dscr)
1954 {
1955         /* Waits until the specified bit(s) of DSCR take on a specified value. */
1956         struct armv7a_common *armv7a = target_to_armv7a(target);
1957         int64_t then = timeval_ms();
1958         int retval;
1959
1960         while ((*dscr & mask) != value) {
1961                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1962                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
1963                 if (retval != ERROR_OK)
1964                         return retval;
1965                 if (timeval_ms() > then + 1000) {
1966                         LOG_ERROR("timeout waiting for DSCR bit change");
1967                         return ERROR_FAIL;
1968                 }
1969         }
1970         return ERROR_OK;
1971 }
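
/* Typical use: wait for the "DTRTX full" latch after asking the core to move
 * a value into the DCC, e.g.
 *
 *     retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
 *                     DSCR_DTRTX_FULL_LATCHED, &dscr);
 */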
1972
1973 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
1974         uint32_t *data, uint32_t *dscr)
1975 {
1976         int retval;
1977         struct armv7a_common *armv7a = target_to_armv7a(target);
1978
1979         /* Move from coprocessor to R0. */
1980         retval = cortex_a_exec_opcode(target, opcode, dscr);
1981         if (retval != ERROR_OK)
1982                 return retval;
1983
1984         /* Move from R0 to DTRTX. */
1985         retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
1986         if (retval != ERROR_OK)
1987                 return retval;
1988
1989         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
1990          * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1991          * must also check TXfull_l). Most of the time this will be free
1992          * because TXfull_l will be set immediately and cached in dscr. */
1993         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
1994                         DSCR_DTRTX_FULL_LATCHED, dscr);
1995         if (retval != ERROR_OK)
1996                 return retval;
1997
1998         /* Read the value transferred to DTRTX. */
1999         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2000                         armv7a->debug_base + CPUDBG_DTRTX, data);
2001         if (retval != ERROR_OK)
2002                 return retval;
2003
2004         return ERROR_OK;
2005 }
2006
2007 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2008         uint32_t *dfsr, uint32_t *dscr)
2009 {
2010         int retval;
2011
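        /* DFAR lives in CP15 c6,c0,0 and DFSR in CP15 c5,c0,0; the opcodes
         * below encode MRC p15, 0, R0, c6, c0, 0 and MRC p15, 0, R0, c5, c0, 0. */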
2012         if (dfar) {
2013                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2014                 if (retval != ERROR_OK)
2015                         return retval;
2016         }
2017
2018         if (dfsr) {
2019                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2020                 if (retval != ERROR_OK)
2021                         return retval;
2022         }
2023
2024         return ERROR_OK;
2025 }
2026
2027 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2028         uint32_t data, uint32_t *dscr)
2029 {
2030         int retval;
2031         struct armv7a_common *armv7a = target_to_armv7a(target);
2032
2033         /* Write the value into DTRRX. */
2034         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2035                         armv7a->debug_base + CPUDBG_DTRRX, data);
2036         if (retval != ERROR_OK)
2037                 return retval;
2038
2039         /* Move from DTRRX to R0. */
2040         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2041         if (retval != ERROR_OK)
2042                 return retval;
2043
2044         /* Move from R0 to coprocessor. */
2045         retval = cortex_a_exec_opcode(target, opcode, dscr);
2046         if (retval != ERROR_OK)
2047                 return retval;
2048
2049         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2050          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2051          * check RXfull_l). Most of the time this will be free because RXfull_l
2052          * will be cleared immediately and cached in dscr. */
2053         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2054         if (retval != ERROR_OK)
2055                 return retval;
2056
2057         return ERROR_OK;
2058 }
2059
2060 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2061         uint32_t dfsr, uint32_t *dscr)
2062 {
2063         int retval;
2064
2065         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2066         if (retval != ERROR_OK)
2067                 return retval;
2068
2069         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2070         if (retval != ERROR_OK)
2071                 return retval;
2072
2073         return ERROR_OK;
2074 }
2075
2076 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2077 {
2078         uint32_t status, upper4;
2079
2080         if (dfsr & (1 << 9)) {
2081                 /* LPAE format. */
2082                 status = dfsr & 0x3f;
2083                 upper4 = status >> 2;
2084                 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2085                         return ERROR_TARGET_TRANSLATION_FAULT;
2086                 else if (status == 33)
2087                         return ERROR_TARGET_UNALIGNED_ACCESS;
2088                 else
2089                         return ERROR_TARGET_DATA_ABORT;
2090         } else {
2091                 /* Normal format. */
2092                 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2093                 if (status == 1)
2094                         return ERROR_TARGET_UNALIGNED_ACCESS;
2095                 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2096                                 status == 9 || status == 11 || status == 13 || status == 15)
2097                         return ERROR_TARGET_TRANSLATION_FAULT;
2098                 else
2099                         return ERROR_TARGET_DATA_ABORT;
2100         }
2101 }
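
/* Example: a short-descriptor format DFSR of 0x005 (FS = 0b00101, section
 * translation fault) maps to ERROR_TARGET_TRANSLATION_FAULT, while 0x001
 * (FS = 0b00001, alignment fault) maps to ERROR_TARGET_UNALIGNED_ACCESS. */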
2102
2103 static int cortex_a_write_cpu_memory_slow(struct target *target,
2104         uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2105 {
2106         /* Writes count objects of size size from *buffer. Old value of DSCR must
2107          * be in *dscr; updated to new value. This is slow because it works for
2108          * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2109          * the address is aligned, cortex_a_write_cpu_memory_fast should be
2110          * preferred.
2111          * Preconditions:
2112          * - Address is in R0.
2113          * - R0 is marked dirty.
2114          */
2115         struct armv7a_common *armv7a = target_to_armv7a(target);
2116         struct arm *arm = &armv7a->arm;
2117         int retval;
2118
2119         /* Mark register R1 as dirty, to use for transferring data. */
2120         arm_reg_current(arm, 1)->dirty = true;
2121
2122         /* Switch to non-blocking mode if not already in that mode. */
2123         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2124         if (retval != ERROR_OK)
2125                 return retval;
2126
2127         /* Go through the objects. */
2128         while (count) {
2129                 /* Write the value to store into DTRRX. */
2130                 uint32_t data, opcode;
2131                 if (size == 1)
2132                         data = *buffer;
2133                 else if (size == 2)
2134                         data = target_buffer_get_u16(target, buffer);
2135                 else
2136                         data = target_buffer_get_u32(target, buffer);
2137                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2138                                 armv7a->debug_base + CPUDBG_DTRRX, data);
2139                 if (retval != ERROR_OK)
2140                         return retval;
2141
2142                 /* Transfer the value from DTRRX to R1. */
2143                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
2144                 if (retval != ERROR_OK)
2145                         return retval;
2146
2147                 /* Write the value transferred to R1 into memory. */
2148                 if (size == 1)
2149                         opcode = ARMV4_5_STRB_IP(1, 0);
2150                 else if (size == 2)
2151                         opcode = ARMV4_5_STRH_IP(1, 0);
2152                 else
2153                         opcode = ARMV4_5_STRW_IP(1, 0);
2154                 retval = cortex_a_exec_opcode(target, opcode, dscr);
2155                 if (retval != ERROR_OK)
2156                         return retval;
2157
2158                 /* Check for faults and return early. */
2159                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2160                         return ERROR_OK; /* A data fault is not considered a system failure. */
2161
2162                 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
2163                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2164                  * must also check RXfull_l). Most of the time this will be free
2165                  * because RXfull_l will be cleared immediately and cached in dscr. */
2166                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2167                 if (retval != ERROR_OK)
2168                         return retval;
2169
2170                 /* Advance. */
2171                 buffer += size;
2172                 --count;
2173         }
2174
2175         return ERROR_OK;
2176 }
2177
2178 static int cortex_a_write_cpu_memory_fast(struct target *target,
2179         uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2180 {
2181         /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2182          * in *dscr; updated to new value. This is fast but only works for
2183          * word-sized objects at aligned addresses.
2184          * Preconditions:
2185          * - Address is in R0 and must be a multiple of 4.
2186          * - R0 is marked dirty.
2187          */
2188         struct armv7a_common *armv7a = target_to_armv7a(target);
2189         int retval;
2190
2191         /* Switch to fast mode if not already in that mode. */
2192         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2193         if (retval != ERROR_OK)
2194                 return retval;
2195
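        /* The latched opcode is STC p14, c5, [R0], #4: store the word received
         * in DTRRX to [R0] and post-increment R0 by 4. In fast mode every
         * subsequent write to DTRRX re-executes this instruction, so streaming
         * the buffer into DTRRX writes consecutive words of target memory. */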
2196         /* Latch STC instruction. */
2197         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2198                         armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2199         if (retval != ERROR_OK)
2200                 return retval;
2201
2202         /* Transfer all the data and issue all the instructions. */
2203         return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2204                         4, count, armv7a->debug_base + CPUDBG_DTRRX);
2205 }
2206
2207 static int cortex_a_write_cpu_memory(struct target *target,
2208         uint32_t address, uint32_t size,
2209         uint32_t count, const uint8_t *buffer)
2210 {
2211         /* Write memory through the CPU. */
2212         int retval, final_retval;
2213         struct armv7a_common *armv7a = target_to_armv7a(target);
2214         struct arm *arm = &armv7a->arm;
2215         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2216
2217         LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2218                           address, size, count);
2219         if (target->state != TARGET_HALTED) {
2220                 LOG_WARNING("target not halted");
2221                 return ERROR_TARGET_NOT_HALTED;
2222         }
2223
2224         if (!count)
2225                 return ERROR_OK;
2226
2227         /* Clear any abort. */
2228         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2229                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2230         if (retval != ERROR_OK)
2231                 return retval;
2232
2233         /* Read DSCR. */
2234         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2235                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2236         if (retval != ERROR_OK)
2237                 return retval;
2238
2239         /* Switch to non-blocking mode if not already in that mode. */
2240         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2241         if (retval != ERROR_OK)
2242                 goto out;
2243
2244         /* Mark R0 as dirty. */
2245         arm_reg_current(arm, 0)->dirty = true;
2246
2247         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2248         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2249         if (retval != ERROR_OK)
2250                 goto out;
2251
2252         /* Get the memory address into R0. */
2253         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2254                         armv7a->debug_base + CPUDBG_DTRRX, address);
2255         if (retval != ERROR_OK)
2256                 goto out;
2257         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2258         if (retval != ERROR_OK)
2259                 goto out;
2260
2261         if (size == 4 && (address % 4) == 0) {
2262                 /* We are doing a word-aligned transfer, so use fast mode. */
2263                 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2264         } else {
2265                 /* Use slow path. */
2266                 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2267         }
2268
2269 out:
2270         final_retval = retval;
2271
2272         /* Switch to non-blocking mode if not already in that mode. */
2273         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2274         if (final_retval == ERROR_OK)
2275                 final_retval = retval;
2276
2277         /* Wait for last issued instruction to complete. */
2278         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2279         if (final_retval == ERROR_OK)
2280                 final_retval = retval;
2281
2282         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2283          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2284          * check RXfull_l). Most of the time this will be free because RXfull_l
2285          * will be cleared immediately and cached in dscr. However, don't do this
2286          * if there is fault, because then the instruction might not have completed
2287          * successfully. */
2288         if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2289                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2290                 if (retval != ERROR_OK)
2291                         return retval;
2292         }
2293
2294         /* If there were any sticky abort flags, clear them. */
2295         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2296                 fault_dscr = dscr;
2297                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2298                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2299                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2300         } else {
2301                 fault_dscr = 0;
2302         }
2303
2304         /* Handle synchronous data faults. */
2305         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2306                 if (final_retval == ERROR_OK) {
2307                         /* Final return value will reflect cause of fault. */
2308                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2309                         if (retval == ERROR_OK) {
2310                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2311                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2312                         } else
2313                                 final_retval = retval;
2314                 }
2315                 /* Fault destroyed DFAR/DFSR; restore them. */
2316                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2317                 if (retval != ERROR_OK)
2318                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2319         }
2320
2321         /* Handle asynchronous data faults. */
2322         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2323                 if (final_retval == ERROR_OK)
2324                         /* No other error has been recorded so far, so keep this one. */
2325                         final_retval = ERROR_TARGET_DATA_ABORT;
2326         }
2327
2328         /* If the DCC is nonempty, clear it. */
2329         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2330                 uint32_t dummy;
2331                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2332                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2333                 if (final_retval == ERROR_OK)
2334                         final_retval = retval;
2335         }
2336         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2337                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2338                 if (final_retval == ERROR_OK)
2339                         final_retval = retval;
2340         }
2341
2342         /* Done. */
2343         return final_retval;
2344 }
2345
2346 static int cortex_a_read_cpu_memory_slow(struct target *target,
2347         uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2348 {
2349         /* Reads count objects of size size into *buffer. Old value of DSCR must be
2350          * in *dscr; updated to new value. This is slow because it works for
2351          * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2352          * the address is aligned, cortex_a_read_cpu_memory_fast should be
2353          * preferred.
2354          * Preconditions:
2355          * - Address is in R0.
2356          * - R0 is marked dirty.
2357          */
2358         struct armv7a_common *armv7a = target_to_armv7a(target);
2359         struct arm *arm = &armv7a->arm;
2360         int retval;
2361
2362         /* Mark register R1 as dirty, to use for transferring data. */
2363         arm_reg_current(arm, 1)->dirty = true;
2364
2365         /* Switch to non-blocking mode if not already in that mode. */
2366         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2367         if (retval != ERROR_OK)
2368                 return retval;
2369
2370         /* Go through the objects. */
2371         while (count) {
2372                 /* Issue a load of the appropriate size to R1. */
2373                 uint32_t opcode, data;
2374                 if (size == 1)
2375                         opcode = ARMV4_5_LDRB_IP(1, 0);
2376                 else if (size == 2)
2377                         opcode = ARMV4_5_LDRH_IP(1, 0);
2378                 else
2379                         opcode = ARMV4_5_LDRW_IP(1, 0);
2380                 retval = cortex_a_exec_opcode(target, opcode, dscr);
2381                 if (retval != ERROR_OK)
2382                         return retval;
2383
2384                 /* Issue a write of R1 to DTRTX. */
2385                 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2386                 if (retval != ERROR_OK)
2387                         return retval;
2388
2389                 /* Check for faults and return early. */
2390                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2391                         return ERROR_OK; /* A data fault is not considered a system failure. */
2392
2393                 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2394                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2395                  * must also check TXfull_l). Most of the time this will be free
2396                  * because TXfull_l will be set immediately and cached in dscr. */
2397                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2398                                 DSCR_DTRTX_FULL_LATCHED, dscr);
2399                 if (retval != ERROR_OK)
2400                         return retval;
2401
2402                 /* Read the value transferred to DTRTX into the buffer. */
2403                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2404                                 armv7a->debug_base + CPUDBG_DTRTX, &data);
2405                 if (retval != ERROR_OK)
2406                         return retval;
2407                 if (size == 1)
2408                         *buffer = (uint8_t) data;
2409                 else if (size == 2)
2410                         target_buffer_set_u16(target, buffer, (uint16_t) data);
2411                 else
2412                         target_buffer_set_u32(target, buffer, data);
2413
2414                 /* Advance. */
2415                 buffer += size;
2416                 --count;
2417         }
2418
2419         return ERROR_OK;
2420 }
2421
2422 static int cortex_a_read_cpu_memory_fast(struct target *target,
2423         uint32_t count, uint8_t *buffer, uint32_t *dscr)
2424 {
2425         /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2426          * *dscr; updated to new value. This is fast but only works for word-sized
2427          * objects at aligned addresses.
2428          * Preconditions:
2429          * - Address is in R0 and must be a multiple of 4.
2430          * - R0 is marked dirty.
2431          */
2432         struct armv7a_common *armv7a = target_to_armv7a(target);
2433         uint32_t u32;
2434         int retval;
2435
2436         /* Switch to non-blocking mode if not already in that mode. */
2437         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2438         if (retval != ERROR_OK)
2439                 return retval;
2440
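        /* The opcode is LDC p14, c5, [R0], #4: load a word from [R0] into
         * DTRTX and post-increment R0 by 4; the debugger then drains DTRTX
         * over the debug APB. */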
2441         /* Issue the LDC instruction via a write to ITR. */
2442         retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2443         if (retval != ERROR_OK)
2444                 return retval;
2445
2446         count--;
2447
2448         if (count > 0) {
2449                 /* Switch to fast mode if not already in that mode. */
2450                 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2451                 if (retval != ERROR_OK)
2452                         return retval;
2453
2454                 /* Latch LDC instruction. */
2455                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2456                                 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2457                 if (retval != ERROR_OK)
2458                         return retval;
2459
2460                 /* Read the value transferred to DTRTX into the buffer. Due to fast
2461                  * mode rules, this blocks until the instruction finishes executing and
2462                  * then reissues the read instruction to read the next word from
2463                  * memory. The last read of DTRTX in this call reads the second-to-last
2464                  * word from memory and issues the read instruction for the last word.
2465                  */
2466                 retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2467                                 4, count, armv7a->debug_base + CPUDBG_DTRTX);
2468                 if (retval != ERROR_OK)
2469                         return retval;
2470
2471                 /* Advance. */
2472                 buffer += count * 4;
2473         }
2474
2475         /* Wait for last issued instruction to complete. */
2476         retval = cortex_a_wait_instrcmpl(target, dscr, false);
2477         if (retval != ERROR_OK)
2478                 return retval;
2479
2480         /* Switch to non-blocking mode if not already in that mode. */
2481         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2482         if (retval != ERROR_OK)
2483                 return retval;
2484
2485         /* Check for faults and return early. */
2486         if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2487                 return ERROR_OK; /* A data fault is not considered a system failure. */
2488
2489         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2490          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2491          * check TXfull_l). Most of the time this will be free because TXfull_l
2492          * will be set immediately and cached in dscr. */
2493         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2494                         DSCR_DTRTX_FULL_LATCHED, dscr);
2495         if (retval != ERROR_OK)
2496                 return retval;
2497
2498         /* Read the value transferred to DTRTX into the buffer. This is the last
2499          * word. */
2500         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2501                         armv7a->debug_base + CPUDBG_DTRTX, &u32);
2502         if (retval != ERROR_OK)
2503                 return retval;
2504         target_buffer_set_u32(target, buffer, u32);
2505
2506         return ERROR_OK;
2507 }
2508
2509 static int cortex_a_read_cpu_memory(struct target *target,
2510         uint32_t address, uint32_t size,
2511         uint32_t count, uint8_t *buffer)
2512 {
2513         /* Read memory through the CPU. */
2514         int retval, final_retval;
2515         struct armv7a_common *armv7a = target_to_armv7a(target);
2516         struct arm *arm = &armv7a->arm;
2517         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2518
2519         LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2520                           address, size, count);
2521         if (target->state != TARGET_HALTED) {
2522                 LOG_WARNING("target not halted");
2523                 return ERROR_TARGET_NOT_HALTED;
2524         }
2525
2526         if (!count)
2527                 return ERROR_OK;
2528
2529         /* Clear any abort. */
2530         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2531                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2532         if (retval != ERROR_OK)
2533                 return retval;
2534
2535         /* Read DSCR */
2536         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2537                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2538         if (retval != ERROR_OK)
2539                 return retval;
2540
2541         /* Switch to non-blocking mode if not already in that mode. */
2542         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2543         if (retval != ERROR_OK)
2544                 goto out;
2545
2546         /* Mark R0 as dirty. */
2547         arm_reg_current(arm, 0)->dirty = true;
2548
2549         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2550         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2551         if (retval != ERROR_OK)
2552                 goto out;
2553
2554         /* Get the memory address into R0. */
2555         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2556                         armv7a->debug_base + CPUDBG_DTRRX, address);
2557         if (retval != ERROR_OK)
2558                 goto out;
2559         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2560         if (retval != ERROR_OK)
2561                 goto out;
2562
2563         if (size == 4 && (address % 4) == 0) {
2564                 /* We are doing a word-aligned transfer, so use fast mode. */
2565                 retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2566         } else {
2567                 /* Use slow path. */
2568                 retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2569         }
2570
2571 out:
2572         final_retval = retval;
2573
2574         /* Switch to non-blocking mode if not already in that mode. */
2575         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2576         if (final_retval == ERROR_OK)
2577                 final_retval = retval;
2578
2579         /* Wait for last issued instruction to complete. */
2580         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2581         if (final_retval == ERROR_OK)
2582                 final_retval = retval;
2583
2584         /* If there were any sticky abort flags, clear them. */
2585         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2586                 fault_dscr = dscr;
2587                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2588                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2589                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2590         } else {
2591                 fault_dscr = 0;
2592         }
2593
2594         /* Handle synchronous data faults. */
2595         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2596                 if (final_retval == ERROR_OK) {
2597                         /* Final return value will reflect cause of fault. */
2598                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2599                         if (retval == ERROR_OK) {
2600                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2601                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2602                         } else
2603                                 final_retval = retval;
2604                 }
2605                 /* Fault destroyed DFAR/DFSR; restore them. */
2606                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2607                 if (retval != ERROR_OK)
2608                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2609         }
2610
2611         /* Handle asynchronous data faults. */
2612         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2613                 if (final_retval == ERROR_OK)
2614                         /* No other error has been recorded so far, so keep this one. */
2615                         final_retval = ERROR_TARGET_DATA_ABORT;
2616         }
2617
2618         /* If the DCC is nonempty, clear it. */
2619         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2620                 uint32_t dummy;
2621                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2622                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2623                 if (final_retval == ERROR_OK)
2624                         final_retval = retval;
2625         }
2626         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2627                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2628                 if (final_retval == ERROR_OK)
2629                         final_retval = retval;
2630         }
2631
2632         /* Done. */
2633         return final_retval;
2634 }
2635
2636
2637 /*
2638  * Cortex-A Memory access
2639  *
2640  * This is the same as for Cortex-M3, but we must also use the
2641  * correct AP number for every access.
2642  */
2643
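/* Note on AP selection in the helpers below: the *_phys_memory routines use
 * the memory AP (AHB-AP) directly only when it is available and currently
 * selected; otherwise they fall back to going through the CPU with address
 * translation disabled.  Assuming a typical setup, the selection can be
 * changed from the Tcl/telnet prompt, e.g. (illustrative, the AP number is
 * board specific):
 *
 *     dap apsel 0    ;# select the AHB-AP so physical accesses use the bus
 */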
2644 static int cortex_a_read_phys_memory(struct target *target,
2645         target_addr_t address, uint32_t size,
2646         uint32_t count, uint8_t *buffer)
2647 {
2648         struct armv7a_common *armv7a = target_to_armv7a(target);
2649         struct adiv5_dap *swjdp = armv7a->arm.dap;
2650         uint8_t apsel = swjdp->apsel;
2651         int retval;
2652
2653         if (!count || !buffer)
2654                 return ERROR_COMMAND_SYNTAX_ERROR;
2655
2656         LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2657                 address, size, count);
2658
2659         if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2660                 return mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2661
2662         /* read memory through the CPU */
2663         cortex_a_prep_memaccess(target, 1);
2664         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2665         cortex_a_post_memaccess(target, 1);
2666
2667         return retval;
2668 }
2669
2670 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2671         uint32_t size, uint32_t count, uint8_t *buffer)
2672 {
2673         int retval;
2674
2675         /* cortex_a handles unaligned memory access */
2676         LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2677                 address, size, count);
2678
2679         cortex_a_prep_memaccess(target, 0);
2680         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2681         cortex_a_post_memaccess(target, 0);
2682
2683         return retval;
2684 }
2685
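/* Sketch of the AHB-AP path below: if the memory AP is selected and the MMU
 * was enabled when the core stopped, the virtual address is first translated
 * (e.g. "Translating v:0xc0008000 to r:0x80008000" in the debug log, numbers
 * purely illustrative) and the access then goes out over mem_ap_read_buf();
 * otherwise the generic target_read_memory() path is used. */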
2686 static int cortex_a_read_memory_ahb(struct target *target, target_addr_t address,
2687         uint32_t size, uint32_t count, uint8_t *buffer)
2688 {
2689         int mmu_enabled = 0;
2690         target_addr_t virt, phys;
2691         int retval;
2692         struct armv7a_common *armv7a = target_to_armv7a(target);
2693         struct adiv5_dap *swjdp = armv7a->arm.dap;
2694         uint8_t apsel = swjdp->apsel;
2695
2696         if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2697                 return target_read_memory(target, address, size, count, buffer);
2698
2699         /* cortex_a handles unaligned memory access */
2700         LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2701                 address, size, count);
2702
2703         /* determine if MMU was enabled on target stop */
2704         if (!armv7a->is_armv7r) {
2705                 retval = cortex_a_mmu(target, &mmu_enabled);
2706                 if (retval != ERROR_OK)
2707                         return retval;
2708         }
2709
2710         if (mmu_enabled) {
2711                 virt = address;
2712                 retval = cortex_a_virt2phys(target, virt, &phys);
2713                 if (retval != ERROR_OK)
2714                         return retval;
2715
2716                 LOG_DEBUG("Reading at virtual address. "
2717                           "Translating v:" TARGET_ADDR_FMT " to r:" TARGET_ADDR_FMT,
2718                           virt, phys);
2719                 address = phys;
2720         }
2721
2722         if (!count || !buffer)
2723                 return ERROR_COMMAND_SYNTAX_ERROR;
2724
2725         retval = mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2726
2727         return retval;
2728 }
2729
2730 static int cortex_a_write_phys_memory(struct target *target,
2731         target_addr_t address, uint32_t size,
2732         uint32_t count, const uint8_t *buffer)
2733 {
2734         struct armv7a_common *armv7a = target_to_armv7a(target);
2735         struct adiv5_dap *swjdp = armv7a->arm.dap;
2736         uint8_t apsel = swjdp->apsel;
2737         int retval;
2738
2739         if (!count || !buffer)
2740                 return ERROR_COMMAND_SYNTAX_ERROR;
2741
2742         LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2743                 address, size, count);
2744
2745         if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2746                 return mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2747
2748         /* write memory through the CPU */
2749         cortex_a_prep_memaccess(target, 1);
2750         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2751         cortex_a_post_memaccess(target, 1);
2752
2753         return retval;
2754 }
2755
2756 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2757         uint32_t size, uint32_t count, const uint8_t *buffer)
2758 {
2759         int retval;
2760
2761         /* cortex_a handles unaligned memory access */
2762         LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2763                 address, size, count);
2764
2765         /* memory writes bypass the caches, must flush before writing */
2766         armv7a_cache_auto_flush_on_write(target, address, size * count);
2767
2768         cortex_a_prep_memaccess(target, 0);
2769         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2770         cortex_a_post_memaccess(target, 0);
2771         return retval;
2772 }
2773
2774 static int cortex_a_write_memory_ahb(struct target *target, target_addr_t address,
2775         uint32_t size, uint32_t count, const uint8_t *buffer)
2776 {
2777         int mmu_enabled = 0;
2778         target_addr_t virt, phys;
2779         int retval;
2780         struct armv7a_common *armv7a = target_to_armv7a(target);
2781         struct adiv5_dap *swjdp = armv7a->arm.dap;
2782         uint8_t apsel = swjdp->apsel;
2783
2784         if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2785                 return target_write_memory(target, address, size, count, buffer);
2786
2787         /* cortex_a handles unaligned memory access */
2788         LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2789                 address, size, count);
2790
2791         /* determine if MMU was enabled on target stop */
2792         if (!armv7a->is_armv7r) {
2793                 retval = cortex_a_mmu(target, &mmu_enabled);
2794                 if (retval != ERROR_OK)
2795                         return retval;
2796         }
2797
2798         if (mmu_enabled) {
2799                 virt = address;
2800                 retval = cortex_a_virt2phys(target, virt, &phys);
2801                 if (retval != ERROR_OK)
2802                         return retval;
2803
2804                 LOG_DEBUG("Writing to virtual address. "
2805                           "Translating v:" TARGET_ADDR_FMT " to r:" TARGET_ADDR_FMT,
2806                           virt,
2807                           phys);
2808                 address = phys;
2809         }
2810
2811         if (!count || !buffer)
2812                 return ERROR_COMMAND_SYNTAX_ERROR;
2813
2814         retval = mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2815
2816         return retval;
2817 }
2818
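/* Worked example of the alignment scheme shared by the two buffer helpers
 * below (numbers purely illustrative): for address = 0x1001 and count = 7 the
 * head loop emits a 1-byte access at 0x1001 and a 2-byte access at 0x1002,
 * and the tail loop then emits one 4-byte access at 0x1004:
 *
 *     0x1001  size 1  count 1    (head: address & 1)
 *     0x1002  size 2  count 1    (head: address & 2)
 *     0x1004  size 4  count 1    (tail: largest aligned chunk)
 *
 * When count is not a multiple of the final size, the tail loop finishes the
 * remainder with progressively smaller accesses. */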
2819 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2820                                 uint32_t count, uint8_t *buffer)
2821 {
2822         uint32_t size;
2823
2824         /* Align up to a maximum access size of 4 bytes. The loop condition makes
2825          * sure the next, larger-sized pass still has at least one access left to do. */
2826         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2827                 if (address & size) {
2828                         int retval = cortex_a_read_memory_ahb(target, address, size, 1, buffer);
2829                         if (retval != ERROR_OK)
2830                                 return retval;
2831                         address += size;
2832                         count -= size;
2833                         buffer += size;
2834                 }
2835         }
2836
2837         /* Read the data with as large access size as possible. */
2838         for (; size > 0; size /= 2) {
2839                 uint32_t aligned = count - count % size;
2840                 if (aligned > 0) {
2841                         int retval = cortex_a_read_memory_ahb(target, address, size, aligned / size, buffer);
2842                         if (retval != ERROR_OK)
2843                                 return retval;
2844                         address += aligned;
2845                         count -= aligned;
2846                         buffer += aligned;
2847                 }
2848         }
2849
2850         return ERROR_OK;
2851 }
2852
2853 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2854                                  uint32_t count, const uint8_t *buffer)
2855 {
2856         uint32_t size;
2857
2858         /* Align up to a maximum access size of 4 bytes. The loop condition makes
2859          * sure the next, larger-sized pass still has at least one access left to do. */
2860         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2861                 if (address & size) {
2862                         int retval = cortex_a_write_memory_ahb(target, address, size, 1, buffer);
2863                         if (retval != ERROR_OK)
2864                                 return retval;
2865                         address += size;
2866                         count -= size;
2867                         buffer += size;
2868                 }
2869         }
2870
2871         /* Write the data with as large access size as possible. */
2872         for (; size > 0; size /= 2) {
2873                 uint32_t aligned = count - count % size;
2874                 if (aligned > 0) {
2875                         int retval = cortex_a_write_memory_ahb(target, address, size, aligned / size, buffer);
2876                         if (retval != ERROR_OK)
2877                                 return retval;
2878                         address += aligned;
2879                         count -= aligned;
2880                         buffer += aligned;
2881                 }
2882         }
2883
2884         return ERROR_OK;
2885 }
2886
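/* The timer callback below services the target-to-host debug message channel:
 * while the core is running it drains words that firmware has pushed into
 * DTRTX and hands them to target_request().  Assuming the standard OpenOCD
 * command set, the channel is switched on from the Tcl/telnet prompt with
 * something like (illustrative):
 *
 *     target_request debugmsgs enable
 *
 * which is what sets the dbg_msg_enabled flag tested below. */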
2887 static int cortex_a_handle_target_request(void *priv)
2888 {
2889         struct target *target = priv;
2890         struct armv7a_common *armv7a = target_to_armv7a(target);
2891         int retval;
2892
2893         if (!target_was_examined(target))
2894                 return ERROR_OK;
2895         if (!target->dbg_msg_enabled)
2896                 return ERROR_OK;
2897
2898         if (target->state == TARGET_RUNNING) {
2899                 uint32_t request;
2900                 uint32_t dscr;
2901                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2902                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2903
2904                 /* check if we have data */
2905                 int64_t then = timeval_ms();
2906                 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2907                         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2908                                         armv7a->debug_base + CPUDBG_DTRTX, &request);
2909                         if (retval == ERROR_OK) {
2910                                 target_request(target, request);
2911                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2912                                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2913                         }
2914                         if (timeval_ms() > then + 1000) {
2915                                 LOG_ERROR("Timeout waiting for dtr tx full");
2916                                 return ERROR_FAIL;
2917                         }
2918                 }
2919         }
2920
2921         return ERROR_OK;
2922 }
2923
2924 /*
2925  * Cortex-A target information and configuration
2926  */
2927
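/* Configuration note: cortex_a_examine_first() below tries to discover the
 * core's debug base address from the ROM table.  When that lookup fails, the
 * base can be supplied explicitly in the target config, e.g. (a sketch with
 * illustrative names and address, matching common board files of this era):
 *
 *     target create $_TARGETNAME cortex_a -chain-position $_CHIPNAME.dap \
 *             -coreid 0 -dbgbase 0x80010000
 *
 * which sets the dbgbase_set/dbgbase fields checked below. */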
2928 static int cortex_a_examine_first(struct target *target)
2929 {
2930         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2931         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2932         struct adiv5_dap *swjdp = armv7a->arm.dap;
2933
2934         int i;
2935         int retval = ERROR_OK;
2936         uint32_t didr, cpuid, dbg_osreg;
2937
2938         retval = dap_dp_init(swjdp);
2939         if (retval != ERROR_OK) {
2940                 LOG_ERROR("Could not initialize the debug port");
2941                 return retval;
2942         }
2943
2944         /* Search for the APB-AP - it is needed for access to debug registers */
2945         retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2946         if (retval != ERROR_OK) {
2947                 LOG_ERROR("Could not find APB-AP for debug access");
2948                 return retval;
2949         }
2950
2951         retval = mem_ap_init(armv7a->debug_ap);
2952         if (retval != ERROR_OK) {
2953                 LOG_ERROR("Could not initialize the APB-AP");
2954                 return retval;
2955         }
2956
2957         armv7a->debug_ap->memaccess_tck = 80;
2958
2959         /* Search for the AHB-AP.
2960          * REVISIT: We should search for AXI-AP as well and make sure the AP's MEMTYPE says it
2961          * can access system memory. */
2962         armv7a->memory_ap_available = false;
2963         retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2964         if (retval == ERROR_OK) {
2965                 retval = mem_ap_init(armv7a->memory_ap);
2966                 if (retval == ERROR_OK)
2967                         armv7a->memory_ap_available = true;
2968         }
2969         if (retval != ERROR_OK) {
2970                 /* AHB-AP not found or unavailable - use the CPU */
2971                 LOG_DEBUG("No AHB-AP available for memory access");
2972         }
2973
2974         if (!target->dbgbase_set) {
2975                 uint32_t dbgbase;
2976                 /* Get ROM Table base */
2977                 uint32_t apid;
2978                 int32_t coreidx = target->coreid;
2979                 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2980                           target->cmd_name);
2981                 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2982                 if (retval != ERROR_OK)
2983                         return retval;
2984                 /* Lookup 0x15 -- Processor DAP */
2985                 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2986                                 &armv7a->debug_base, &coreidx);
2987                 if (retval != ERROR_OK) {
2988                         LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2989                                   target->cmd_name);
2990                         return retval;
2991                 }
2992                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2993                           target->coreid, armv7a->debug_base);
2994         } else
2995                 armv7a->debug_base = target->dbgbase;
2996
2997         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2998                         armv7a->debug_base + CPUDBG_DIDR, &didr);
2999         if (retval != ERROR_OK) {
3000                 LOG_DEBUG("Examine %s failed", "DIDR");
3001                 return retval;
3002         }
3003
3004         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3005                         armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3006         if (retval != ERROR_OK) {
3007                 LOG_DEBUG("Examine %s failed", "CPUID");
3008                 return retval;
3009         }
3010
3011         LOG_DEBUG("didr = 0x%08" PRIx32, didr);
3012         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
3013
3014         cortex_a->didr = didr;
3015         cortex_a->cpuid = cpuid;
3016
3017         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3018                                     armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
3019         if (retval != ERROR_OK)
3020                 return retval;
3021         LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR  0x%" PRIx32, target->coreid, dbg_osreg);
3022
3023         if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
3024                 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
3025                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3026                 return ERROR_TARGET_INIT_FAILED;
3027         }
3028
3029         if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
3030                 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
3031
3032         /* Read DBGOSLSR and check if OSLK is implemented */
3033         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3034                                 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3035         if (retval != ERROR_OK)
3036                 return retval;
3037         LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
3038
3039         /* check if OS Lock is implemented */
3040         if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
3041                 /* check if OS Lock is set */
3042                 if (dbg_osreg & OSLSR_OSLK) {
3043                         LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
3044
3045                         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3046                                                         armv7a->debug_base + CPUDBG_OSLAR,
3047                                                         0);
3048                         if (retval == ERROR_OK)
3049                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3050                                                         armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3051
3052                         /* if we fail to access the register or cannot reset the OSLK bit, bail out */
3053                         if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
3054                                 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
3055                                                 target->coreid);
3056                                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3057                                 return ERROR_TARGET_INIT_FAILED;
3058                         }
3059                 }
3060         }
3061
3062         armv7a->arm.core_type = ARM_MODE_MON;
3063
3064         /* Avoid recreating the registers cache */
3065         if (!target_was_examined(target)) {
3066                 retval = cortex_a_dpm_setup(cortex_a, didr);
3067                 if (retval != ERROR_OK)
3068                         return retval;
3069         }
3070
3071         /* Setup Breakpoint Register Pairs */
3072         cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3073         cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3074         cortex_a->brp_num_available = cortex_a->brp_num;
3075         free(cortex_a->brp_list);
3076         cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3077 /*      cortex_a->brb_enabled = ????; */
3078         for (i = 0; i < cortex_a->brp_num; i++) {
3079                 cortex_a->brp_list[i].used = 0;
3080                 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3081                         cortex_a->brp_list[i].type = BRP_NORMAL;
3082                 else
3083                         cortex_a->brp_list[i].type = BRP_CONTEXT;
3084                 cortex_a->brp_list[i].value = 0;
3085                 cortex_a->brp_list[i].control = 0;
3086                 cortex_a->brp_list[i].BRPn = i;
3087         }
3088
3089         LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3090
3091         /* select debug_ap as default */
3092         swjdp->apsel = armv7a->debug_ap->ap_num;
3093
3094         target_set_examined(target);
3095         return ERROR_OK;
3096 }
3097
3098 static int cortex_a_examine(struct target *target)
3099 {
3100         int retval = ERROR_OK;
3101
3102         /* Reestablish communication after target reset */
3103         retval = cortex_a_examine_first(target);
3104
3105         /* Configure core debug access */
3106         if (retval == ERROR_OK)
3107                 retval = cortex_a_init_debug_access(target);
3108
3109         return retval;
3110 }
3111
3112 /*
3113  *      Cortex-A target creation and initialization
3114  */
3115
3116 static int cortex_a_init_target(struct command_context *cmd_ctx,
3117         struct target *target)
3118 {
3119         /* examine_first() already does most of the target setup */
3120         arm_semihosting_init(target);
3121         return ERROR_OK;
3122 }
3123
3124 static int cortex_a_init_arch_info(struct target *target,
3125         struct cortex_a_common *cortex_a, struct jtag_tap *tap)
3126 {
3127         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3128
3129         /* Setup struct cortex_a_common */
3130         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3131
3132         /*  if the TAP has no DAP initialized yet, create one */
3133         if (!tap->dap) {
3134                 tap->dap = dap_init();
3135
3136                 /* Leave (only) generic DAP stuff for debugport_init() */
3137                 tap->dap->tap = tap;
3138         }
3139
3140         armv7a->arm.dap = tap->dap;
3141
3142         cortex_a->fast_reg_read = 0;
3143
3144         /* register arch-specific functions */
3145         armv7a->examine_debug_reason = NULL;
3146
3147         armv7a->post_debug_entry = cortex_a_post_debug_entry;
3148
3149         armv7a->pre_restore_context = NULL;
3150
3151         armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3152
3153
3154 /*      arm7_9->handle_target_request = cortex_a_handle_target_request; */
3155
3156         /* REVISIT v7a setup should be in a v7a-specific routine */
3157         armv7a_init_arch_info(target, armv7a);
3158         target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3159
3160         return ERROR_OK;
3161 }
3162
3163 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3164 {
3165         struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3166
3167         cortex_a->armv7a_common.is_armv7r = false;
3168
3169         return cortex_a_init_arch_info(target, cortex_a, target->tap);
3170 }
3171
3172 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3173 {
3174         struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3175
3176         cortex_a->armv7a_common.is_armv7r = true;
3177
3178         return cortex_a_init_arch_info(target, cortex_a, target->tap);
3179 }
3180
3181 static void cortex_a_deinit_target(struct target *target)
3182 {
3183         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3184         struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
3185
3186         free(cortex_a->brp_list);
3187         free(dpm->dbp);
3188         free(dpm->dwp);
3189         free(cortex_a);
3190 }
3191
3192 static int cortex_a_mmu(struct target *target, int *enabled)
3193 {
3194         struct armv7a_common *armv7a = target_to_armv7a(target);
3195
3196         if (target->state != TARGET_HALTED) {
3197                 LOG_ERROR("%s: target not halted", __func__);
3198                 return ERROR_TARGET_INVALID;
3199         }
3200
3201         if (armv7a->is_armv7r)
3202                 *enabled = 0;
3203         else
3204                 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3205
3206         return ERROR_OK;
3207 }
3208
3209 static int cortex_a_virt2phys(struct target *target,
3210         target_addr_t virt, target_addr_t *phys)
3211 {
3212         int retval = ERROR_FAIL;
3213         struct armv7a_common *armv7a = target_to_armv7a(target);
3214         struct adiv5_dap *swjdp = armv7a->arm.dap;
3215         uint8_t apsel = swjdp->apsel;
3216         if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num)) {
3217                 uint32_t ret;
3218                 retval = armv7a_mmu_translate_va(target,
3219                                 virt, &ret);
3220                 if (retval != ERROR_OK)
3221                         goto done;
3222                 *phys = ret;
3223         } else {/*  use this method if armv7a->memory_ap is not selected;
3224                  *  the MMU must be enabled in order to get a correct translation */
3225                 retval = cortex_a_mmu_modify(target, 1);
3226                 if (retval != ERROR_OK)
3227                         goto done;
3228                 retval = armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
3229                                                     (uint32_t *)phys, 1);
3230         }
3231 done:
3232         return retval;
3233 }
3234
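/* Usage note: the translation above backs the generic "virt2phys" target
 * command, so assuming a halted target it can be exercised from the prompt
 * with e.g. (illustrative address):
 *
 *     virt2phys 0xc0008000
 */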
3235 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3236 {
3237         struct target *target = get_current_target(CMD_CTX);
3238         struct armv7a_common *armv7a = target_to_armv7a(target);
3239
3240         return armv7a_handle_cache_info_command(CMD_CTX,
3241                         &armv7a->armv7a_mmu.armv7a_cache);
3242 }
3243
3244
3245 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3246 {
3247         struct target *target = get_current_target(CMD_CTX);
3248         if (!target_was_examined(target)) {
3249                 LOG_ERROR("target not examined yet");
3250                 return ERROR_FAIL;
3251         }
3252
3253         return cortex_a_init_debug_access(target);
3254 }
3255 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3256 {
3257         struct target *target = get_current_target(CMD_CTX);
3258         /* check target is an smp target */
3259         struct target_list *head;
3260         struct target *curr;
3261         head = target->head;
3262         target->smp = 0;
3263         if (head != (struct target_list *)NULL) {
3264                 while (head != (struct target_list *)NULL) {
3265                         curr = head->target;
3266                         curr->smp = 0;
3267                         head = head->next;
3268                 }
3269                 /*  point the gdb service back at this target */
3270                 target->gdb_service->target = target;
3271         }
3272         return ERROR_OK;
3273 }
3274
3275 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3276 {
3277         struct target *target = get_current_target(CMD_CTX);
3278         struct target_list *head;
3279         struct target *curr;
3280         head = target->head;
3281         if (head != (struct target_list *)NULL) {
3282                 target->smp = 1;
3283                 while (head != (struct target_list *)NULL) {
3284                         curr = head->target;
3285                         curr->smp = 1;
3286                         head = head->next;
3287                 }
3288         }
3289         return ERROR_OK;
3290 }
3291
3292 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3293 {
3294         struct target *target = get_current_target(CMD_CTX);
3295         int retval = ERROR_OK;
3296         struct target_list *head;
3297         head = target->head;
3298         if (head != (struct target_list *)NULL) {
3299                 if (CMD_ARGC == 1) {
3300                         int coreid = 0;
3301                         COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3302                         if (ERROR_OK != retval)
3303                                 return retval;
3304                         target->gdb_service->core[1] = coreid;
3305
3306                 }
3307                 command_print(CMD_CTX, "gdb coreid  %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
3308                         , target->gdb_service->core[1]);
3309         }
3310         return ERROR_OK;
3311 }
3312
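/* Usage note: the three SMP handlers above are registered as subcommands of
 * the "cortex_a" group, e.g. (illustrative):
 *
 *     cortex_a smp_off        ;# stop SMP handling, debug cores independently
 *     cortex_a smp_on         ;# restart SMP handling
 *     cortex_a smp_gdb 1      ;# show/set the core presented to gdb
 */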
3313 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3314 {
3315         struct target *target = get_current_target(CMD_CTX);
3316         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3317
3318         static const Jim_Nvp nvp_maskisr_modes[] = {
3319                 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3320                 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3321                 { .name = NULL, .value = -1 },
3322         };
3323         const Jim_Nvp *n;
3324
3325         if (CMD_ARGC > 0) {
3326                 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3327                 if (n->name == NULL) {
3328                         LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3329                         return ERROR_COMMAND_SYNTAX_ERROR;
3330                 }
3331
3332                 cortex_a->isrmasking_mode = n->value;
3333         }
3334
3335         n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3336         command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3337
3338         return ERROR_OK;
3339 }
3340
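/* Usage note: with the registration below, the handler above is reached as
 * "cortex_a maskisr", e.g. (illustrative):
 *
 *     cortex_a maskisr on     ;# mask cortex_a interrupts
 *     cortex_a maskisr        ;# query the current setting
 */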
3341 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3342 {
3343         struct target *target = get_current_target(CMD_CTX);
3344         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3345
3346         static const Jim_Nvp nvp_dacrfixup_modes[] = {
3347                 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3348                 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3349                 { .name = NULL, .value = -1 },
3350         };
3351         const Jim_Nvp *n;
3352
3353         if (CMD_ARGC > 0) {
3354                 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3355                 if (n->name == NULL)
3356                         return ERROR_COMMAND_SYNTAX_ERROR;
3357                 cortex_a->dacrfixup_mode = n->value;
3358
3359         }
3360
3361         n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3362         command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3363
3364         return ERROR_OK;
3365 }
3366
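/* Usage note: likewise reachable as "cortex_a dacrfixup", e.g. (illustrative):
 *
 *     cortex_a dacrfixup on   ;# force DACR to all-manager around memory accesses
 */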
3367 static const struct command_registration cortex_a_exec_command_handlers[] = {
3368         {
3369                 .name = "cache_info",
3370                 .handler = cortex_a_handle_cache_info_command,
3371                 .mode = COMMAND_EXEC,
3372                 .help = "display information about target caches",
3373                 .usage = "",
3374         },
3375         {
3376                 .name = "dbginit",
3377                 .handler = cortex_a_handle_dbginit_command,
3378                 .mode = COMMAND_EXEC,
3379                 .help = "Initialize core debug",
3380                 .usage = "",
3381         },
3382         {
3383                 .name = "smp_off",
3384                 .handler = cortex_a_handle_smp_off_command,
3385                 .mode = COMMAND_EXEC,
3386                 .help = "Stop smp handling",
                .usage = "",
        },
3387         {
3388                 .name = "smp_on",
3389                 .handler = cortex_a_handle_smp_on_command,
3390                 .mode = COMMAND_EXEC,
3391                 .help = "Restart smp handling",
3392                 .usage = "",
3393         },
3394         {
3395                 .name = "smp_gdb",
3396                 .handler = cortex_a_handle_smp_gdb_command,
3397                 .mode = COMMAND_EXEC,
3398                 .help = "display or set the current core reported to gdb",
3399                 .usage = "",
3400         },
3401         {
3402                 .name = "maskisr",
3403                 .handler = handle_cortex_a_mask_interrupts_command,
3404                 .mode = COMMAND_ANY,
3405                 .help = "mask cortex_a interrupts",
3406                 .usage = "['on'|'off']",
3407         },
3408         {
3409                 .name = "dacrfixup",
3410                 .handler = handle_cortex_a_dacrfixup_command,
3411                 .mode = COMMAND_EXEC,
3412                 .help = "set domain access control (DACR) to all-manager "
3413                         "on memory access",
3414                 .usage = "['on'|'off']",
3415         },
3416
3417         COMMAND_REGISTRATION_DONE
3418 };
3419 static const struct command_registration cortex_a_command_handlers[] = {
3420         {
3421                 .chain = arm_command_handlers,
3422         },
3423         {
3424                 .chain = armv7a_command_handlers,
3425         },
3426         {
3427                 .name = "cortex_a",
3428                 .mode = COMMAND_ANY,
3429                 .help = "Cortex-A command group",
3430                 .usage = "",
3431                 .chain = cortex_a_exec_command_handlers,
3432         },
3433         COMMAND_REGISTRATION_DONE
3434 };
3435
3436 struct target_type cortexa_target = {
3437         .name = "cortex_a",
3438         .deprecated_name = "cortex_a8",
3439
3440         .poll = cortex_a_poll,
3441         .arch_state = armv7a_arch_state,
3442
3443         .halt = cortex_a_halt,
3444         .resume = cortex_a_resume,
3445         .step = cortex_a_step,
3446
3447         .assert_reset = cortex_a_assert_reset,
3448         .deassert_reset = cortex_a_deassert_reset,
3449
3450         /* REVISIT allow exporting VFP3 registers ... */
3451         .get_gdb_reg_list = arm_get_gdb_reg_list,
3452
3453         .read_memory = cortex_a_read_memory,
3454         .write_memory = cortex_a_write_memory,
3455
3456         .read_buffer = cortex_a_read_buffer,
3457         .write_buffer = cortex_a_write_buffer,
3458
3459         .checksum_memory = arm_checksum_memory,
3460         .blank_check_memory = arm_blank_check_memory,
3461
3462         .run_algorithm = armv4_5_run_algorithm,
3463
3464         .add_breakpoint = cortex_a_add_breakpoint,
3465         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3466         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3467         .remove_breakpoint = cortex_a_remove_breakpoint,
3468         .add_watchpoint = NULL,
3469         .remove_watchpoint = NULL,
3470
3471         .commands = cortex_a_command_handlers,
3472         .target_create = cortex_a_target_create,
3473         .init_target = cortex_a_init_target,
3474         .examine = cortex_a_examine,
3475         .deinit_target = cortex_a_deinit_target,
3476
3477         .read_phys_memory = cortex_a_read_phys_memory,
3478         .write_phys_memory = cortex_a_write_phys_memory,
3479         .mmu = cortex_a_mmu,
3480         .virt2phys = cortex_a_virt2phys,
3481 };
3482
3483 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3484         {
3485                 .name = "cache_info",
3486                 .handler = cortex_a_handle_cache_info_command,
3487                 .mode = COMMAND_EXEC,
3488                 .help = "display information about target caches",
3489                 .usage = "",
3490         },
3491         {
3492                 .name = "dbginit",
3493                 .handler = cortex_a_handle_dbginit_command,
3494                 .mode = COMMAND_EXEC,
3495                 .help = "Initialize core debug",
3496                 .usage = "",
3497         },
3498         {
3499                 .name = "maskisr",
3500                 .handler = handle_cortex_a_mask_interrupts_command,
3501                 .mode = COMMAND_EXEC,
3502                 .help = "mask cortex_r4 interrupts",
3503                 .usage = "['on'|'off']",
3504         },
3505
3506         COMMAND_REGISTRATION_DONE
3507 };
3508 static const struct command_registration cortex_r4_command_handlers[] = {
3509         {
3510                 .chain = arm_command_handlers,
3511         },
3512         {
3513                 .chain = armv7a_command_handlers,
3514         },
3515         {
3516                 .name = "cortex_r4",
3517                 .mode = COMMAND_ANY,
3518                 .help = "Cortex-R4 command group",
3519                 .usage = "",
3520                 .chain = cortex_r4_exec_command_handlers,
3521         },
3522         COMMAND_REGISTRATION_DONE
3523 };
3524
3525 struct target_type cortexr4_target = {
3526         .name = "cortex_r4",
3527
3528         .poll = cortex_a_poll,
3529         .arch_state = armv7a_arch_state,
3530
3531         .halt = cortex_a_halt,
3532         .resume = cortex_a_resume,
3533         .step = cortex_a_step,
3534
3535         .assert_reset = cortex_a_assert_reset,
3536         .deassert_reset = cortex_a_deassert_reset,
3537
3538         /* REVISIT allow exporting VFP3 registers ... */
3539         .get_gdb_reg_list = arm_get_gdb_reg_list,
3540
3541         .read_memory = cortex_a_read_phys_memory,
3542         .write_memory = cortex_a_write_phys_memory,
3543
3544         .checksum_memory = arm_checksum_memory,
3545         .blank_check_memory = arm_blank_check_memory,
3546
3547         .run_algorithm = armv4_5_run_algorithm,
3548
3549         .add_breakpoint = cortex_a_add_breakpoint,
3550         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3551         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3552         .remove_breakpoint = cortex_a_remove_breakpoint,
3553         .add_watchpoint = NULL,
3554         .remove_watchpoint = NULL,
3555
3556         .commands = cortex_r4_command_handlers,
3557         .target_create = cortex_r4_target_create,
3558         .init_target = cortex_a_init_target,
3559         .examine = cortex_a_examine,
3560         .deinit_target = cortex_a_deinit_target,
3561 };