1 /***************************************************************************
2  *   Copyright (C) 2005 by Dominic Rath                                    *
3  *   Dominic.Rath@gmx.de                                                   *
4  *                                                                         *
5  *   Copyright (C) 2006 by Magnus Lundin                                   *
6  *   lundin@mlu.mine.nu                                                    *
7  *                                                                         *
8  *   Copyright (C) 2008 by Spencer Oliver                                  *
9  *   spen@spen-soft.co.uk                                                  *
10  *                                                                         *
11  *   Copyright (C) 2009 by Dirk Behme                                      *
12  *   dirk.behme@gmail.com - copy from cortex_m3                            *
13  *                                                                         *
14  *   Copyright (C) 2010 Øyvind Harboe                                       *
15  *   oyvind.harboe@zylin.com                                               *
16  *                                                                         *
17  *   Copyright (C) ST-Ericsson SA 2011                                     *
18  *   michel.jaouen@stericsson.com : smp minimum support                    *
19  *                                                                         *
20  *   Copyright (C) Broadcom 2012                                           *
21  *   ehunter@broadcom.com : Cortex-R4 support                              *
22  *                                                                         *
23  *   Copyright (C) 2013 Kamal Dasu                                         *
24  *   kdasu.kdev@gmail.com                                                  *
25  *                                                                         *
26  *   This program is free software; you can redistribute it and/or modify  *
27  *   it under the terms of the GNU General Public License as published by  *
28  *   the Free Software Foundation; either version 2 of the License, or     *
29  *   (at your option) any later version.                                   *
30  *                                                                         *
31  *   This program is distributed in the hope that it will be useful,       *
32  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
33  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
34  *   GNU General Public License for more details.                          *
35  *                                                                         *
36  *   You should have received a copy of the GNU General Public License     *
37  *   along with this program.  If not, see <http://www.gnu.org/licenses/>. *
38  *                                                                         *
39  *   Cortex-A8(tm) TRM, ARM DDI 0344H                                      *
40  *   Cortex-A9(tm) TRM, ARM DDI 0407F                                      *
41  *   Cortex-R4(tm) TRM, ARM DDI 0363E                                      *
42  *   Cortex-A15(tm) TRM, ARM DDI 0438C                                     *
43  *                                                                         *
44  ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "target_request.h"
54 #include "target_type.h"
55 #include "arm_opcodes.h"
56 #include "arm_semihosting.h"
57 #include "transport/transport.h"
58 #include <helper/time_support.h>
59
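/* Iterate over every target in an SMP group, starting from the list head */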
60 #define foreach_smp_target(pos, head) \
61         for (pos = head; (pos != NULL); pos = pos->next)
62
63 static int cortex_a_poll(struct target *target);
64 static int cortex_a_debug_entry(struct target *target);
65 static int cortex_a_restore_context(struct target *target, bool bpwp);
66 static int cortex_a_set_breakpoint(struct target *target,
67         struct breakpoint *breakpoint, uint8_t matchmode);
68 static int cortex_a_set_context_breakpoint(struct target *target,
69         struct breakpoint *breakpoint, uint8_t matchmode);
70 static int cortex_a_set_hybrid_breakpoint(struct target *target,
71         struct breakpoint *breakpoint);
72 static int cortex_a_unset_breakpoint(struct target *target,
73         struct breakpoint *breakpoint);
74 static int cortex_a_dap_read_coreregister_u32(struct target *target,
75         uint32_t *value, int regnum);
76 static int cortex_a_dap_write_coreregister_u32(struct target *target,
77         uint32_t value, int regnum);
78 static int cortex_a_mmu(struct target *target, int *enabled);
79 static int cortex_a_mmu_modify(struct target *target, int enable);
80 static int cortex_a_virt2phys(struct target *target,
81         target_addr_t virt, target_addr_t *phys);
82 static int cortex_a_read_cpu_memory(struct target *target,
83         uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
84
85
86 /*  restore cp15_control_reg at resume */
87 static int cortex_a_restore_cp15_control_reg(struct target *target)
88 {
89         int retval = ERROR_OK;
90         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
91         struct armv7a_common *armv7a = target_to_armv7a(target);
92
93         if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
94                 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
95                 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
96                 retval = armv7a->arm.mcr(target, 15,
97                                 0, 0,   /* op1, op2 */
98                                 1, 0,   /* CRn, CRm */
99                                 cortex_a->cp15_control_reg);
100         }
101         return retval;
102 }
103
104 /*
105  * Set up ARM core for memory access.
106  * If !phys_access, switch to SVC mode and make sure MMU is on
107  * If phys_access, switch off mmu
108  */
109 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
110 {
111         struct armv7a_common *armv7a = target_to_armv7a(target);
112         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
113         int mmu_enabled = 0;
114
115         if (phys_access == 0) {
116                 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
117                 cortex_a_mmu(target, &mmu_enabled);
118                 if (mmu_enabled)
119                         cortex_a_mmu_modify(target, 1);
120                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
121                         /* overwrite DACR to all-manager */
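                        /* (manager access for all domains, so domain permission checks cannot block debugger accesses) */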
122                         armv7a->arm.mcr(target, 15,
123                                         0, 0, 3, 0,
124                                         0xFFFFFFFF);
125                 }
126         } else {
127                 cortex_a_mmu(target, &mmu_enabled);
128                 if (mmu_enabled)
129                         cortex_a_mmu_modify(target, 0);
130         }
131         return ERROR_OK;
132 }
133
134 /*
135  * Restore ARM core after memory access.
136  * If !phys_access, switch to previous mode
137  * If phys_access, restore MMU setting
138  */
139 static int cortex_a_post_memaccess(struct target *target, int phys_access)
140 {
141         struct armv7a_common *armv7a = target_to_armv7a(target);
142         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
143
144         if (phys_access == 0) {
145                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
146                         /* restore */
147                         armv7a->arm.mcr(target, 15,
148                                         0, 0, 3, 0,
149                                         cortex_a->cp15_dacr_reg);
150                 }
151                 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
152         } else {
153                 int mmu_enabled = 0;
154                 cortex_a_mmu(target, &mmu_enabled);
155                 if (mmu_enabled)
156                         cortex_a_mmu_modify(target, 1);
157         }
158         return ERROR_OK;
159 }
160
161
162 /*  modify cp15_control_reg in order to enable or disable the MMU for:
163  *  - virt2phys address conversion
164  *  - read or write memory in phys or virt address */
165 static int cortex_a_mmu_modify(struct target *target, int enable)
166 {
167         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
168         struct armv7a_common *armv7a = target_to_armv7a(target);
169         int retval = ERROR_OK;
170         int need_write = 0;
171
172         if (enable) {
173                 /*  error if the MMU was not enabled when the target stopped */
174                 if (!(cortex_a->cp15_control_reg & 0x1U)) {
175                         LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
176                         return ERROR_FAIL;
177                 }
178                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
179                         cortex_a->cp15_control_reg_curr |= 0x1U;
180                         need_write = 1;
181                 }
182         } else {
183                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
184                         cortex_a->cp15_control_reg_curr &= ~0x1U;
185                         need_write = 1;
186                 }
187         }
188
189         if (need_write) {
190                 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
191                         enable ? "enable mmu" : "disable mmu",
192                         cortex_a->cp15_control_reg_curr);
193
194                 retval = armv7a->arm.mcr(target, 15,
195                                 0, 0,   /* op1, op2 */
196                                 1, 0,   /* CRn, CRm */
197                                 cortex_a->cp15_control_reg_curr);
198         }
199         return retval;
200 }
201
202 /*
203  * Cortex-A basic debug access; very low level, assumes state is saved
204  */
205 static int cortex_a_init_debug_access(struct target *target)
206 {
207         struct armv7a_common *armv7a = target_to_armv7a(target);
208         int retval;
209
210         /* lock memory-mapped access to debug registers to prevent
211          * software interference */
212         retval = mem_ap_write_u32(armv7a->debug_ap,
213                         armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
214         if (retval != ERROR_OK)
215                 return retval;
216
217         /* Disable cacheline fills and force cache write-through in debug state */
218         retval = mem_ap_write_u32(armv7a->debug_ap,
219                         armv7a->debug_base + CPUDBG_DSCCR, 0);
220         if (retval != ERROR_OK)
221                 return retval;
222
223         /* Disable TLB lookup and refill/eviction in debug state */
224         retval = mem_ap_write_u32(armv7a->debug_ap,
225                         armv7a->debug_base + CPUDBG_DSMCR, 0);
226         if (retval != ERROR_OK)
227                 return retval;
228
229         retval = dap_run(armv7a->debug_ap->dap);
230         if (retval != ERROR_OK)
231                 return retval;
232
233         /* Enabling of instruction execution in debug mode is done in debug_entry code */
234
235         /* Resync breakpoint registers */
236
237         /* Since this is likely called from init or reset, update target state information*/
238         return cortex_a_poll(target);
239 }
240
241 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
242 {
243         /* Waits until InstrCompl becomes 1, indicating the instruction is done.
244          * Writes the final value of DSCR into *dscr. Pass force=true to always
245          * read DSCR at least once. */
246         struct armv7a_common *armv7a = target_to_armv7a(target);
247         int64_t then = timeval_ms();
248         while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
249                 force = false;
250                 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
251                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
252                 if (retval != ERROR_OK) {
253                         LOG_ERROR("Could not read DSCR register");
254                         return retval;
255                 }
256                 if (timeval_ms() > then + 1000) {
257                         LOG_ERROR("Timeout waiting for InstrCompl=1");
258                         return ERROR_FAIL;
259                 }
260         }
261         return ERROR_OK;
262 }
263
264 /* To reduce needless round-trips, pass in a pointer to the current
265  * DSCR value.  Initialize it to zero if you just need to know the
266  * value on return from this function; or DSCR_INSTR_COMP if you
267  * happen to know that no instruction is pending.
268  */
269 static int cortex_a_exec_opcode(struct target *target,
270         uint32_t opcode, uint32_t *dscr_p)
271 {
272         uint32_t dscr;
273         int retval;
274         struct armv7a_common *armv7a = target_to_armv7a(target);
275
276         dscr = dscr_p ? *dscr_p : 0;
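        /* work on a local copy of DSCR so callers may pass NULL for dscr_p */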
277
278         LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
279
280         /* Wait for InstrCompl bit to be set */
281         retval = cortex_a_wait_instrcmpl(target, &dscr, false);
282         if (retval != ERROR_OK)
283                 return retval;
284
285         retval = mem_ap_write_u32(armv7a->debug_ap,
286                         armv7a->debug_base + CPUDBG_ITR, opcode);
287         if (retval != ERROR_OK)
288                 return retval;
289
290         int64_t then = timeval_ms();
291         do {
292                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
293                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
294                 if (retval != ERROR_OK) {
295                         LOG_ERROR("Could not read DSCR register");
296                         return retval;
297                 }
298                 if (timeval_ms() > then + 1000) {
299                         LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
300                         return ERROR_FAIL;
301                 }
302         } while ((dscr & DSCR_INSTR_COMP) == 0);        /* Wait for InstrCompl bit to be set */
303
304         if (dscr_p)
305                 *dscr_p = dscr;
306
307         return retval;
308 }
309
310 /**************************************************************************
311 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
312 This can cause problems with the MMU active.
313 **************************************************************************/
314 static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
315         uint32_t *regfile)
316 {
317         int retval = ERROR_OK;
318         struct armv7a_common *armv7a = target_to_armv7a(target);
319
320         retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
321         if (retval != ERROR_OK)
322                 return retval;
323         retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
324         if (retval != ERROR_OK)
325                 return retval;
326         retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
327         if (retval != ERROR_OK)
328                 return retval;
329
330         retval = mem_ap_read_buf(armv7a->memory_ap,
331                         (uint8_t *)(&regfile[1]), 4, 15, address);
332
333         return retval;
334 }
335
336 static int cortex_a_dap_read_coreregister_u32(struct target *target,
337         uint32_t *value, int regnum)
338 {
339         int retval = ERROR_OK;
340         uint8_t reg = regnum&0xFF;
341         uint32_t dscr = 0;
342         struct armv7a_common *armv7a = target_to_armv7a(target);
343
344         if (reg > 17)
345                 return retval;
346
347         if (reg < 15) {
348                 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0"  0xEE00nE15 */
349                 retval = cortex_a_exec_opcode(target,
350                                 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
351                                 &dscr);
352                 if (retval != ERROR_OK)
353                         return retval;
354         } else if (reg == 15) {
355                 /* "MOV r0, r15"; then move r0 to DCCTX */
356                 retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
357                 if (retval != ERROR_OK)
358                         return retval;
359                 retval = cortex_a_exec_opcode(target,
360                                 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
361                                 &dscr);
362                 if (retval != ERROR_OK)
363                         return retval;
364         } else {
365                 /* "MRS r0, CPSR" or "MRS r0, SPSR"
366                  * then move r0 to DCCTX
367                  */
368                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
369                 if (retval != ERROR_OK)
370                         return retval;
371                 retval = cortex_a_exec_opcode(target,
372                                 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
373                                 &dscr);
374                 if (retval != ERROR_OK)
375                         return retval;
376         }
377
378         /* Wait for DTRTXfull then read DTRTX */
379         int64_t then = timeval_ms();
380         while ((dscr & DSCR_DTR_TX_FULL) == 0) {
381                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
382                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
383                 if (retval != ERROR_OK)
384                         return retval;
385                 if (timeval_ms() > then + 1000) {
386                         LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
387                         return ERROR_FAIL;
388                 }
389         }
390
391         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
392                         armv7a->debug_base + CPUDBG_DTRTX, value);
393         LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
394
395         return retval;
396 }
397
398 static int cortex_a_dap_write_coreregister_u32(struct target *target,
399         uint32_t value, int regnum)
400 {
401         int retval = ERROR_OK;
402         uint8_t Rd = regnum&0xFF;
403         uint32_t dscr;
404         struct armv7a_common *armv7a = target_to_armv7a(target);
405
406         LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
407
408         /* Check that DCCRX is not full */
409         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
410                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
411         if (retval != ERROR_OK)
412                 return retval;
413         if (dscr & DSCR_DTR_RX_FULL) {
414                 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
415                 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode  0xEE100E15 */
416                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
417                                 &dscr);
418                 if (retval != ERROR_OK)
419                         return retval;
420         }
421
422         if (Rd > 17)
423                 return retval;
424
425         /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
426         LOG_DEBUG("write DCC 0x%08" PRIx32, value);
427         retval = mem_ap_write_u32(armv7a->debug_ap,
428                         armv7a->debug_base + CPUDBG_DTRRX, value);
429         if (retval != ERROR_OK)
430                 return retval;
431
432         if (Rd < 15) {
433                 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
434                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
435                                 &dscr);
436
437                 if (retval != ERROR_OK)
438                         return retval;
439         } else if (Rd == 15) {
440                 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
441                  * then "mov r15, r0"
442                  */
443                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
444                                 &dscr);
445                 if (retval != ERROR_OK)
446                         return retval;
447                 retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
448                 if (retval != ERROR_OK)
449                         return retval;
450         } else {
451                 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
452                  * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
453                  */
454                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
455                                 &dscr);
456                 if (retval != ERROR_OK)
457                         return retval;
458                 retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
459                                 &dscr);
460                 if (retval != ERROR_OK)
461                         return retval;
462
463                 /* "Prefetch flush" after modifying execution status in CPSR */
464                 if (Rd == 16) {
465                         retval = cortex_a_exec_opcode(target,
466                                         ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
467                                         &dscr);
468                         if (retval != ERROR_OK)
469                                 return retval;
470                 }
471         }
472
473         return retval;
474 }
475
476 /* Write to memory mapped registers directly with no cache or mmu handling */
477 static int cortex_a_dap_write_memap_register_u32(struct target *target,
478         uint32_t address,
479         uint32_t value)
480 {
481         int retval;
482         struct armv7a_common *armv7a = target_to_armv7a(target);
483
484         retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
485
486         return retval;
487 }
488
489 /*
490  * Cortex-A implementation of Debug Programmer's Model
491  *
492  * NOTE the invariant:  these routines return with DSCR_INSTR_COMP set,
493  * so there's no need to poll for it before executing an instruction.
494  *
495  * NOTE that in several of these cases the "stall" mode might be useful.
496  * It'd let us queue a few operations together... prepare/finish might
497  * be the places to enable/disable that mode.
498  */
499
500 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
501 {
502         return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
503 }
504
505 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
506 {
507         LOG_DEBUG("write DCC 0x%08" PRIx32, data);
508         return mem_ap_write_u32(a->armv7a_common.debug_ap,
509                         a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
510 }
511
512 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
513         uint32_t *dscr_p)
514 {
515         uint32_t dscr = DSCR_INSTR_COMP;
516         int retval;
517
518         if (dscr_p)
519                 dscr = *dscr_p;
520
521         /* Wait for DTRTXfull */
522         int64_t then = timeval_ms();
523         while ((dscr & DSCR_DTR_TX_FULL) == 0) {
524                 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
525                                 a->armv7a_common.debug_base + CPUDBG_DSCR,
526                                 &dscr);
527                 if (retval != ERROR_OK)
528                         return retval;
529                 if (timeval_ms() > then + 1000) {
530                         LOG_ERROR("Timeout waiting for read dcc");
531                         return ERROR_FAIL;
532                 }
533         }
534
535         retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
536                         a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
537         if (retval != ERROR_OK)
538                 return retval;
539         /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
540
541         if (dscr_p)
542                 *dscr_p = dscr;
543
544         return retval;
545 }
546
547 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
548 {
549         struct cortex_a_common *a = dpm_to_a(dpm);
550         uint32_t dscr;
551         int retval;
552
553         /* set up invariant:  INSTR_COMP is set after every DPM operation */
554         int64_t then = timeval_ms();
555         for (;; ) {
556                 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
557                                 a->armv7a_common.debug_base + CPUDBG_DSCR,
558                                 &dscr);
559                 if (retval != ERROR_OK)
560                         return retval;
561                 if ((dscr & DSCR_INSTR_COMP) != 0)
562                         break;
563                 if (timeval_ms() > then + 1000) {
564                         LOG_ERROR("Timeout waiting for dpm prepare");
565                         return ERROR_FAIL;
566                 }
567         }
568
569         /* this "should never happen" ... */
570         if (dscr & DSCR_DTR_RX_FULL) {
571                 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
572                 /* Clear DCCRX */
573                 retval = cortex_a_exec_opcode(
574                                 a->armv7a_common.arm.target,
575                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
576                                 &dscr);
577                 if (retval != ERROR_OK)
578                         return retval;
579         }
580
581         return retval;
582 }
583
584 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
585 {
586         /* REVISIT what could be done here? */
587         return ERROR_OK;
588 }
589
590 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
591         uint32_t opcode, uint32_t data)
592 {
593         struct cortex_a_common *a = dpm_to_a(dpm);
594         int retval;
595         uint32_t dscr = DSCR_INSTR_COMP;
596
597         retval = cortex_a_write_dcc(a, data);
598         if (retval != ERROR_OK)
599                 return retval;
600
601         return cortex_a_exec_opcode(
602                         a->armv7a_common.arm.target,
603                         opcode,
604                         &dscr);
605 }
606
607 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
608         uint32_t opcode, uint32_t data)
609 {
610         struct cortex_a_common *a = dpm_to_a(dpm);
611         uint32_t dscr = DSCR_INSTR_COMP;
612         int retval;
613
614         retval = cortex_a_write_dcc(a, data);
615         if (retval != ERROR_OK)
616                 return retval;
617
618         /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
619         retval = cortex_a_exec_opcode(
620                         a->armv7a_common.arm.target,
621                         ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
622                         &dscr);
623         if (retval != ERROR_OK)
624                 return retval;
625
626         /* then the opcode, taking data from R0 */
627         retval = cortex_a_exec_opcode(
628                         a->armv7a_common.arm.target,
629                         opcode,
630                         &dscr);
631
632         return retval;
633 }
634
635 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
636 {
637         struct target *target = dpm->arm->target;
638         uint32_t dscr = DSCR_INSTR_COMP;
639
640         /* "Prefetch flush" after modifying execution status in CPSR */
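        /* (CP15ISB: "MCR p15, 0, Rt, c7, c5, 4" flushes the prefetch buffer) */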
641         return cortex_a_exec_opcode(target,
642                         ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
643                         &dscr);
644 }
645
646 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
647         uint32_t opcode, uint32_t *data)
648 {
649         struct cortex_a_common *a = dpm_to_a(dpm);
650         int retval;
651         uint32_t dscr = DSCR_INSTR_COMP;
652
653         /* the opcode, writing data to DCC */
654         retval = cortex_a_exec_opcode(
655                         a->armv7a_common.arm.target,
656                         opcode,
657                         &dscr);
658         if (retval != ERROR_OK)
659                 return retval;
660
661         return cortex_a_read_dcc(a, data, &dscr);
662 }
663
664
665 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
666         uint32_t opcode, uint32_t *data)
667 {
668         struct cortex_a_common *a = dpm_to_a(dpm);
669         uint32_t dscr = DSCR_INSTR_COMP;
670         int retval;
671
672         /* the opcode, writing data to R0 */
673         retval = cortex_a_exec_opcode(
674                         a->armv7a_common.arm.target,
675                         opcode,
676                         &dscr);
677         if (retval != ERROR_OK)
678                 return retval;
679
680         /* write R0 to DCC */
681         retval = cortex_a_exec_opcode(
682                         a->armv7a_common.arm.target,
683                         ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
684                         &dscr);
685         if (retval != ERROR_OK)
686                 return retval;
687
688         return cortex_a_read_dcc(a, data, &dscr);
689 }
690
691 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
692         uint32_t addr, uint32_t control)
693 {
694         struct cortex_a_common *a = dpm_to_a(dpm);
695         uint32_t vr = a->armv7a_common.debug_base;
696         uint32_t cr = a->armv7a_common.debug_base;
697         int retval;
698
699         switch (index_t) {
700                 case 0 ... 15:  /* breakpoints */
701                         vr += CPUDBG_BVR_BASE;
702                         cr += CPUDBG_BCR_BASE;
703                         break;
704                 case 16 ... 31: /* watchpoints */
705                         vr += CPUDBG_WVR_BASE;
706                         cr += CPUDBG_WCR_BASE;
707                         index_t -= 16;
708                         break;
709                 default:
710                         return ERROR_FAIL;
711         }
712         vr += 4 * index_t;
713         cr += 4 * index_t;
714
715         LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
716                 (unsigned) vr, (unsigned) cr);
717
718         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
719                         vr, addr);
720         if (retval != ERROR_OK)
721                 return retval;
722         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
723                         cr, control);
724         return retval;
725 }
726
727 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
728 {
729         struct cortex_a_common *a = dpm_to_a(dpm);
730         uint32_t cr;
731
732         switch (index_t) {
733                 case 0 ... 15:
734                         cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
735                         break;
736                 case 16 ... 31:
737                         cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
738                         index_t -= 16;
739                         break;
740                 default:
741                         return ERROR_FAIL;
742         }
743         cr += 4 * index_t;
744
745         LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
746
747         /* clear control register */
748         return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
749 }
750
751 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
752 {
753         struct arm_dpm *dpm = &a->armv7a_common.dpm;
754         int retval;
755
756         dpm->arm = &a->armv7a_common.arm;
757         dpm->didr = didr;
758
759         dpm->prepare = cortex_a_dpm_prepare;
760         dpm->finish = cortex_a_dpm_finish;
761
762         dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
763         dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
764         dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
765
766         dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
767         dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
768
769         dpm->bpwp_enable = cortex_a_bpwp_enable;
770         dpm->bpwp_disable = cortex_a_bpwp_disable;
771
772         retval = arm_dpm_setup(dpm);
773         if (retval == ERROR_OK)
774                 retval = arm_dpm_initialize(dpm);
775
776         return retval;
777 }
778 static struct target *get_cortex_a(struct target *target, int32_t coreid)
779 {
780         struct target_list *head;
781         struct target *curr;
782
783         head = target->head;
784         while (head != (struct target_list *)NULL) {
785                 curr = head->target;
786                 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
787                         return curr;
788                 head = head->next;
789         }
790         return target;
791 }
792 static int cortex_a_halt(struct target *target);
793
794 static int cortex_a_halt_smp(struct target *target)
795 {
796         int retval = 0;
797         struct target_list *head;
798         struct target *curr;
799         head = target->head;
800         while (head != (struct target_list *)NULL) {
801                 curr = head->target;
802                 if ((curr != target) && (curr->state != TARGET_HALTED)
803                         && target_was_examined(curr))
804                         retval += cortex_a_halt(curr);
805                 head = head->next;
806         }
807         return retval;
808 }
809
810 static int update_halt_gdb(struct target *target)
811 {
812         struct target *gdb_target = NULL;
813         struct target_list *head;
814         struct target *curr;
815         int retval = 0;
816
817         if (target->gdb_service && target->gdb_service->core[0] == -1) {
818                 target->gdb_service->target = target;
819                 target->gdb_service->core[0] = target->coreid;
820                 retval += cortex_a_halt_smp(target);
821         }
822
823         if (target->gdb_service)
824                 gdb_target = target->gdb_service->target;
825
826         foreach_smp_target(head, target->head) {
827                 curr = head->target;
828                 /* skip calling context */
829                 if (curr == target)
830                         continue;
831                 if (!target_was_examined(curr))
832                         continue;
833                 /* skip targets that were already halted */
834                 if (curr->state == TARGET_HALTED)
835                         continue;
836                 /* Skip gdb_target; it alerts GDB so has to be polled as last one */
837                 if (curr == gdb_target)
838                         continue;
839
840                 /* avoid recursion in cortex_a_poll() */
841                 curr->smp = 0;
842                 cortex_a_poll(curr);
843                 curr->smp = 1;
844         }
845
846         /* after all targets were updated, poll the gdb serving target */
847         if (gdb_target != NULL && gdb_target != target)
848                 cortex_a_poll(gdb_target);
849         return retval;
850 }
851
852 /*
853  * Cortex-A Run control
854  */
855
856 static int cortex_a_poll(struct target *target)
857 {
858         int retval = ERROR_OK;
859         uint32_t dscr;
860         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
861         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
862         enum target_state prev_target_state = target->state;
863         /*  toggling to another core is done by gdb as follows: */
864         /*  maint packet J core_id */
865         /*  continue */
866         /*  the next poll triggers a halt event sent to gdb */
867         if ((target->state == TARGET_HALTED) && (target->smp) &&
868                 (target->gdb_service) &&
869                 (target->gdb_service->target == NULL)) {
870                 target->gdb_service->target =
871                         get_cortex_a(target, target->gdb_service->core[1]);
872                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
873                 return retval;
874         }
875         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
876                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
877         if (retval != ERROR_OK)
878                 return retval;
879         cortex_a->cpudbg_dscr = dscr;
880
881         if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
882                 if (prev_target_state != TARGET_HALTED) {
883                         /* We have a halting debug event */
884                         LOG_DEBUG("Target halted");
885                         target->state = TARGET_HALTED;
886                         if ((prev_target_state == TARGET_RUNNING)
887                                 || (prev_target_state == TARGET_UNKNOWN)
888                                 || (prev_target_state == TARGET_RESET)) {
889                                 retval = cortex_a_debug_entry(target);
890                                 if (retval != ERROR_OK)
891                                         return retval;
892                                 if (target->smp) {
893                                         retval = update_halt_gdb(target);
894                                         if (retval != ERROR_OK)
895                                                 return retval;
896                                 }
897
898                                 if (arm_semihosting(target, &retval) != 0)
899                                         return retval;
900
901                                 target_call_event_callbacks(target,
902                                         TARGET_EVENT_HALTED);
903                         }
904                         if (prev_target_state == TARGET_DEBUG_RUNNING) {
905                                 LOG_DEBUG(" ");
906
907                                 retval = cortex_a_debug_entry(target);
908                                 if (retval != ERROR_OK)
909                                         return retval;
910                                 if (target->smp) {
911                                         retval = update_halt_gdb(target);
912                                         if (retval != ERROR_OK)
913                                                 return retval;
914                                 }
915
916                                 target_call_event_callbacks(target,
917                                         TARGET_EVENT_DEBUG_HALTED);
918                         }
919                 }
920         } else
921                 target->state = TARGET_RUNNING;
922
923         return retval;
924 }
925
926 static int cortex_a_halt(struct target *target)
927 {
928         int retval = ERROR_OK;
929         uint32_t dscr;
930         struct armv7a_common *armv7a = target_to_armv7a(target);
931
932         /*
933          * Tell the core to be halted by writing DRCR with 0x1
934          * and then wait for the core to be halted.
935          */
936         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
937                         armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
938         if (retval != ERROR_OK)
939                 return retval;
940
941         /*
942          * enter halting debug mode
943          */
944         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
945                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
946         if (retval != ERROR_OK)
947                 return retval;
948
949         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
950                         armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
951         if (retval != ERROR_OK)
952                 return retval;
953
954         int64_t then = timeval_ms();
955         for (;; ) {
956                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
957                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
958                 if (retval != ERROR_OK)
959                         return retval;
960                 if ((dscr & DSCR_CORE_HALTED) != 0)
961                         break;
962                 if (timeval_ms() > then + 1000) {
963                         LOG_ERROR("Timeout waiting for halt");
964                         return ERROR_FAIL;
965                 }
966         }
967
968         target->debug_reason = DBG_REASON_DBGRQ;
969
970         return ERROR_OK;
971 }
972
973 static int cortex_a_internal_restore(struct target *target, int current,
974         target_addr_t *address, int handle_breakpoints, int debug_execution)
975 {
976         struct armv7a_common *armv7a = target_to_armv7a(target);
977         struct arm *arm = &armv7a->arm;
978         int retval;
979         uint32_t resume_pc;
980
981         if (!debug_execution)
982                 target_free_all_working_areas(target);
983
984 #if 0
985         if (debug_execution) {
986                 /* Disable interrupts */
987                 /* We disable interrupts in the PRIMASK register instead of
988                  * masking with C_MASKINTS,
989                  * This is probably the same issue as Cortex-M3 Errata 377493:
990                  * C_MASKINTS in parallel with disabled interrupts can cause
991                  * local faults to not be taken. */
992                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
993                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
994                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
995
996                 /* Make sure we are in Thumb mode */
997                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
998                         buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
999                         32) | (1 << 24));
1000                 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
1001                 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
1002         }
1003 #endif
1004
1005         /* current = 1: continue on current pc, otherwise continue at <address> */
1006         resume_pc = buf_get_u32(arm->pc->value, 0, 32);
1007         if (!current)
1008                 resume_pc = *address;
1009         else
1010                 *address = resume_pc;
1011
1012         /* Make sure that the ARMv7 gdb thumb fixup does not
1013          * kill the return address
1014          */
1015         switch (arm->core_state) {
1016                 case ARM_STATE_ARM:
1017                         resume_pc &= 0xFFFFFFFC;
1018                         break;
1019                 case ARM_STATE_THUMB:
1020                 case ARM_STATE_THUMB_EE:
1021                         /* When the return address is loaded into PC
1022                          * bit 0 must be 1 to stay in Thumb state
1023                          */
1024                         resume_pc |= 0x1;
1025                         break;
1026                 case ARM_STATE_JAZELLE:
1027                         LOG_ERROR("How do I resume into Jazelle state??");
1028                         return ERROR_FAIL;
1029                 case ARM_STATE_AARCH64:
1030                         LOG_ERROR("Shouldn't be in AArch64 state");
1031                         return ERROR_FAIL;
1032         }
1033         LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
1034         buf_set_u32(arm->pc->value, 0, 32, resume_pc);
1035         arm->pc->dirty = 1;
1036         arm->pc->valid = 1;
1037
1038         /* restore dpm_mode at system halt */
1039         dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1040         /* call it now, before restoring context, because it uses cpu
1041          * register r0 to restore the cp15 control register */
1042         retval = cortex_a_restore_cp15_control_reg(target);
1043         if (retval != ERROR_OK)
1044                 return retval;
1045         retval = cortex_a_restore_context(target, handle_breakpoints);
1046         if (retval != ERROR_OK)
1047                 return retval;
1048         target->debug_reason = DBG_REASON_NOTHALTED;
1049         target->state = TARGET_RUNNING;
1050
1051         /* registers are now invalid */
1052         register_cache_invalidate(arm->core_cache);
1053
1054 #if 0
1055         /* the front-end may request us not to handle breakpoints */
1056         if (handle_breakpoints) {
1057                 /* Single step past breakpoint at current address */
1058                 breakpoint = breakpoint_find(target, resume_pc);
1059                 if (breakpoint) {
1060                         LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1061                         cortex_m3_unset_breakpoint(target, breakpoint);
1062                         cortex_m3_single_step_core(target);
1063                         cortex_m3_set_breakpoint(target, breakpoint);
1064                 }
1065         }
1066
1067 #endif
1068         return retval;
1069 }
1070
1071 static int cortex_a_internal_restart(struct target *target)
1072 {
1073         struct armv7a_common *armv7a = target_to_armv7a(target);
1074         struct arm *arm = &armv7a->arm;
1075         int retval;
1076         uint32_t dscr;
1077         /*
1078          * Restart core and wait for it to be started.  Clear ITRen and sticky
1079          * exception flags: see ARMv7 ARM, C5.9.
1080          *
1081          * REVISIT: for single stepping, we probably want to
1082          * disable IRQs by default, with optional override...
1083          */
1084
1085         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1086                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1087         if (retval != ERROR_OK)
1088                 return retval;
1089
1090         if ((dscr & DSCR_INSTR_COMP) == 0)
1091                 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1092
1093         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1094                         armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
1095         if (retval != ERROR_OK)
1096                 return retval;
1097
1098         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1099                         armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
1100                         DRCR_CLEAR_EXCEPTIONS);
1101         if (retval != ERROR_OK)
1102                 return retval;
1103
1104         int64_t then = timeval_ms();
1105         for (;; ) {
1106                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1107                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1108                 if (retval != ERROR_OK)
1109                         return retval;
1110                 if ((dscr & DSCR_CORE_RESTARTED) != 0)
1111                         break;
1112                 if (timeval_ms() > then + 1000) {
1113                         LOG_ERROR("Timeout waiting for resume");
1114                         return ERROR_FAIL;
1115                 }
1116         }
1117
1118         target->debug_reason = DBG_REASON_NOTHALTED;
1119         target->state = TARGET_RUNNING;
1120
1121         /* registers are now invalid */
1122         register_cache_invalidate(arm->core_cache);
1123
1124         return ERROR_OK;
1125 }
1126
1127 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1128 {
1129         int retval = 0;
1130         struct target_list *head;
1131         struct target *curr;
1132         target_addr_t address;
1133         head = target->head;
1134         while (head != (struct target_list *)NULL) {
1135                 curr = head->target;
1136                 if ((curr != target) && (curr->state != TARGET_RUNNING)
1137                         && target_was_examined(curr)) {
1138                         /*  resume at current address, not in step mode */
1139                         retval += cortex_a_internal_restore(curr, 1, &address,
1140                                         handle_breakpoints, 0);
1141                         retval += cortex_a_internal_restart(curr);
1142                 }
1143                 head = head->next;
1144
1145         }
1146         return retval;
1147 }
1148
1149 static int cortex_a_resume(struct target *target, int current,
1150         target_addr_t address, int handle_breakpoints, int debug_execution)
1151 {
1152         int retval = 0;
1153         /* dummy resume for smp toggle in order to reduce gdb impact  */
1154         if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1155                 /*   simulate a start and halt of target */
1156                 target->gdb_service->target = NULL;
1157                 target->gdb_service->core[0] = target->gdb_service->core[1];
1158                 /*  fake resume; at the next poll we play target core[1], see poll */
1159                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1160                 return 0;
1161         }
1162         cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1163         if (target->smp) {
1164                 target->gdb_service->core[0] = -1;
1165                 retval = cortex_a_restore_smp(target, handle_breakpoints);
1166                 if (retval != ERROR_OK)
1167                         return retval;
1168         }
1169         cortex_a_internal_restart(target);
1170
1171         if (!debug_execution) {
1172                 target->state = TARGET_RUNNING;
1173                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1174                 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
1175         } else {
1176                 target->state = TARGET_DEBUG_RUNNING;
1177                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1178                 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
1179         }
1180
1181         return ERROR_OK;
1182 }
1183
1184 static int cortex_a_debug_entry(struct target *target)
1185 {
1186         int i;
1187         uint32_t regfile[16], cpsr, spsr, dscr;
1188         int retval = ERROR_OK;
1189         struct working_area *regfile_working_area = NULL;
1190         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1191         struct armv7a_common *armv7a = target_to_armv7a(target);
1192         struct arm *arm = &armv7a->arm;
1193         struct reg *reg;
1194
1195         LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);
1196
1197         /* REVISIT surely we should not re-read DSCR !! */
1198         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1199                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1200         if (retval != ERROR_OK)
1201                 return retval;
1202
1203         /* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
1204          * imprecise data aborts get discarded by issuing a Data
1205          * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1206          */
1207
1208         /* Enable the ITR execution once we are in debug mode */
1209         dscr |= DSCR_ITR_EN;
1210         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1211                         armv7a->debug_base + CPUDBG_DSCR, dscr);
1212         if (retval != ERROR_OK)
1213                 return retval;
1214
1215         /* Examine debug reason */
1216         arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);
1217
1218         /* save address of instruction that triggered the watchpoint? */
1219         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1220                 uint32_t wfar;
1221
1222                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1223                                 armv7a->debug_base + CPUDBG_WFAR,
1224                                 &wfar);
1225                 if (retval != ERROR_OK)
1226                         return retval;
1227                 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1228         }
1229
1230         /* REVISIT fast_reg_read is never set ... */
1231
1232         /* Examine target state and mode */
1233         if (cortex_a->fast_reg_read)
1234                 target_alloc_working_area(target, 64, &regfile_working_area);
1235
1236
1237         /* First load registers accessible through the core debug port */
1238         if (!regfile_working_area)
1239                 retval = arm_dpm_read_current_registers(&armv7a->dpm);
1240         else {
1241                 retval = cortex_a_read_regs_through_mem(target,
1242                                 regfile_working_area->address, regfile);
1243
1244                 target_free_working_area(target, regfile_working_area);
1245                 if (retval != ERROR_OK)
1246                         return retval;
1247
1248                 /* read Current PSR */
1249                 retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
1250                 /*  store current cpsr */
1251                 if (retval != ERROR_OK)
1252                         return retval;
1253
1254                 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1255
1256                 arm_set_cpsr(arm, cpsr);
1257
1258                 /* update cache */
1259                 for (i = 0; i <= ARM_PC; i++) {
1260                         reg = arm_reg_current(arm, i);
1261
1262                         buf_set_u32(reg->value, 0, 32, regfile[i]);
1263                         reg->valid = 1;
1264                         reg->dirty = 0;
1265                 }
1266
1267                 /* Fixup PC Resume Address */
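                /* the PC value read back through r15 is ahead of the instruction that
                 * was executing by the pipeline offset: 4 bytes in Thumb/ThumbEE
                 * state, 8 bytes in ARM state */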
1268                 if (cpsr & (1 << 5)) {
1269                         /* T bit set for Thumb or ThumbEE state */
1270                         regfile[ARM_PC] -= 4;
1271                 } else {
1272                         /* ARM state */
1273                         regfile[ARM_PC] -= 8;
1274                 }
1275
1276                 reg = arm->pc;
1277                 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1278                 reg->dirty = reg->valid;
1279         }
1280
1281         if (arm->spsr) {
1282                 /* read Saved PSR */
1283                 retval = cortex_a_dap_read_coreregister_u32(target, &spsr, 17);
1284                 /*  store current spsr */
1285                 if (retval != ERROR_OK)
1286                         return retval;
1287
1288                 reg = arm->spsr;
1289                 buf_set_u32(reg->value, 0, 32, spsr);
1290                 reg->valid = 1;
1291                 reg->dirty = 0;
1292         }
1293
1294 #if 0
1295 /* TODO, Move this */
1296         uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1297         cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1298         LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1299
1300         cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1301         LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1302
1303         cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1304         LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1305 #endif
1306
1307         /* Are we in an exception handler */
1308 /*      armv4_5->exception_number = 0; */
1309         if (armv7a->post_debug_entry) {
1310                 retval = armv7a->post_debug_entry(target);
1311                 if (retval != ERROR_OK)
1312                         return retval;
1313         }
1314
1315         return retval;
1316 }
1317
1318 static int cortex_a_post_debug_entry(struct target *target)
1319 {
1320         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1321         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1322         int retval;
1323
1324         /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1325         retval = armv7a->arm.mrc(target, 15,
1326                         0, 0,   /* op1, op2 */
1327                         1, 0,   /* CRn, CRm */
1328                         &cortex_a->cp15_control_reg);
1329         if (retval != ERROR_OK)
1330                 return retval;
1331         LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1332         cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1333
1334         if (!armv7a->is_armv7r)
1335                 armv7a_read_ttbcr(target);
1336
1337         if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1338                 armv7a_identify_cache(target);
1339
1340         if (armv7a->is_armv7r) {
1341                 armv7a->armv7a_mmu.mmu_enabled = 0;
1342         } else {
1343                 armv7a->armv7a_mmu.mmu_enabled =
1344                         (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1345         }
1346         armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1347                 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1348         armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1349                 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1350         cortex_a->curr_mode = armv7a->arm.core_mode;
1351
1352         /* switch to SVC mode to read DACR */
1353         dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1354         armv7a->arm.mrc(target, 15,
1355                         0, 0, 3, 0,
1356                         &cortex_a->cp15_dacr_reg);
1357
1358         LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1359                         cortex_a->cp15_dacr_reg);
1360
1361         dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1362         return ERROR_OK;
1363 }
1364
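/* Read-modify-write of DSCR: clear the bits selected by bit_mask, then set
 * those of them that are set in value, leaving all other bits untouched. */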
1365 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1366 {
1367         struct armv7a_common *armv7a = target_to_armv7a(target);
1368         uint32_t dscr;
1369
1370         /* Read DSCR */
1371         int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1372                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1373         if (retval != ERROR_OK)
1374                 return retval;
1375
1376         /* clear bitfield */
1377         dscr &= ~bit_mask;
1378         /* put new value */
1379         dscr |= value & bit_mask;
1380
1381         /* write new DSCR */
1382         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1383                         armv7a->debug_base + CPUDBG_DSCR, dscr);
1384         return retval;
1385 }
1386
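/* Single step: plant a temporary hardware breakpoint that matches on IVA
 * mismatch (matchmode 0x04), resume, wait for the core to halt again, then
 * remove the temporary breakpoint and re-arm any breakpoint that was set on
 * the step address. */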
1387 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1388         int handle_breakpoints)
1389 {
1390         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1391         struct armv7a_common *armv7a = target_to_armv7a(target);
1392         struct arm *arm = &armv7a->arm;
1393         struct breakpoint *breakpoint = NULL;
1394         struct breakpoint stepbreakpoint;
1395         struct reg *r;
1396         int retval;
1397
1398         if (target->state != TARGET_HALTED) {
1399                 LOG_WARNING("target not halted");
1400                 return ERROR_TARGET_NOT_HALTED;
1401         }
1402
1403         /* current = 1: continue on current pc, otherwise continue at <address> */
1404         r = arm->pc;
1405         if (!current)
1406                 buf_set_u32(r->value, 0, 32, address);
1407         else
1408                 address = buf_get_u32(r->value, 0, 32);
1409
1410         /* The front-end may request us not to handle breakpoints.
1411          * But since Cortex-A uses a breakpoint for single step,
1412          * we MUST handle breakpoints.
1413          */
1414         handle_breakpoints = 1;
1415         if (handle_breakpoints) {
1416                 breakpoint = breakpoint_find(target, address);
1417                 if (breakpoint)
1418                         cortex_a_unset_breakpoint(target, breakpoint);
1419         }
1420
1421         /* Setup single step breakpoint */
1422         stepbreakpoint.address = address;
1423         stepbreakpoint.asid = 0;
1424         stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1425                 ? 2 : 4;
1426         stepbreakpoint.type = BKPT_HARD;
1427         stepbreakpoint.set = 0;
1428
1429         /* Disable interrupts during single step if requested */
1430         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1431                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1432                 if (retval != ERROR_OK)
1433                         return retval;
1434         }
1435
1436         /* Break on IVA mismatch */
1437         cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1438
1439         target->debug_reason = DBG_REASON_SINGLESTEP;
1440
1441         retval = cortex_a_resume(target, 1, address, 0, 0);
1442         if (retval != ERROR_OK)
1443                 return retval;
1444
1445         int64_t then = timeval_ms();
1446         while (target->state != TARGET_HALTED) {
1447                 retval = cortex_a_poll(target);
1448                 if (retval != ERROR_OK)
1449                         return retval;
1450                 if (timeval_ms() > then + 1000) {
1451                         LOG_ERROR("timeout waiting for target halt");
1452                         return ERROR_FAIL;
1453                 }
1454         }
1455
1456         cortex_a_unset_breakpoint(target, &stepbreakpoint);
1457
1458         /* Re-enable interrupts if they were disabled */
1459         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1460                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1461                 if (retval != ERROR_OK)
1462                         return retval;
1463         }
1464
1465
1466         target->debug_reason = DBG_REASON_BREAKPOINT;
1467
1468         if (breakpoint)
1469                 cortex_a_set_breakpoint(target, breakpoint, 0);
1470
1471         if (target->state == TARGET_HALTED)
1472                 LOG_DEBUG("target stepped");
1473
1474         return ERROR_OK;
1475 }
1476
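/* Write back dirty, cached register values before the core resumes, after
 * giving the armv7a layer a chance to run its pre_restore_context hook. */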
1477 static int cortex_a_restore_context(struct target *target, bool bpwp)
1478 {
1479         struct armv7a_common *armv7a = target_to_armv7a(target);
1480
1481         LOG_DEBUG(" ");
1482
1483         if (armv7a->pre_restore_context)
1484                 armv7a->pre_restore_context(target);
1485
1486         return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1487 }
1488
1489 /*
1490  * Cortex-A Breakpoint and watchpoint functions
1491  */
1492
1493 /* Setup hardware Breakpoint Register Pair */
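/* matchmode 0x00 requests an exact IVA match (normal breakpoint), 0x04 an
 * IVA mismatch as used by cortex_a_step. Soft breakpoints save the original
 * instruction and replace it with a BKPT opcode instead. */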
1494 static int cortex_a_set_breakpoint(struct target *target,
1495         struct breakpoint *breakpoint, uint8_t matchmode)
1496 {
1497         int retval;
1498         int brp_i = 0;
1499         uint32_t control;
1500         uint8_t byte_addr_select = 0x0F;
1501         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1502         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1503         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1504
1505         if (breakpoint->set) {
1506                 LOG_WARNING("breakpoint already set");
1507                 return ERROR_OK;
1508         }
1509
1510         if (breakpoint->type == BKPT_HARD) {
1511                 while ((brp_i < cortex_a->brp_num) && brp_list[brp_i].used)
1512                         brp_i++;
1513                 if (brp_i >= cortex_a->brp_num) {
1514                         LOG_ERROR("Cannot find free Breakpoint Register Pair");
1515                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1516                 }
1517                 breakpoint->set = brp_i + 1;
1518                 if (breakpoint->length == 2)
1519                         byte_addr_select = (3 << (breakpoint->address & 0x02));
1520                 control = ((matchmode & 0x7) << 20)
1521                         | (byte_addr_select << 5)
1522                         | (3 << 1) | 1;
1523                 brp_list[brp_i].used = 1;
1524                 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1525                 brp_list[brp_i].control = control;
1526                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1527                                 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1528                                 brp_list[brp_i].value);
1529                 if (retval != ERROR_OK)
1530                         return retval;
1531                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1532                                 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1533                                 brp_list[brp_i].control);
1534                 if (retval != ERROR_OK)
1535                         return retval;
1536                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1537                         brp_list[brp_i].control,
1538                         brp_list[brp_i].value);
1539         } else if (breakpoint->type == BKPT_SOFT) {
1540                 uint8_t code[4];
1541                 /* length == 2: Thumb breakpoint */
1542                 if (breakpoint->length == 2)
1543                         buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1544                 else if (breakpoint->length == 3) {
1545                         /* length == 3: Thumb-2 breakpoint, actual encoding is
1546                          * a regular Thumb BKPT instruction but we replace a
1547                          * 32bit Thumb-2 instruction, so fix-up the breakpoint
1548                          * length
1549                          */
1550                         buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1551                         breakpoint->length = 4;
1552                 } else {
1553                         /* length == 4, normal ARM breakpoint */
1554                         buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1555                 }
1556
1557                 retval = target_read_memory(target,
1558                                 breakpoint->address & 0xFFFFFFFE,
1559                                 breakpoint->length, 1,
1560                                 breakpoint->orig_instr);
1561                 if (retval != ERROR_OK)
1562                         return retval;
1563
1564                 /* make sure data cache is cleaned & invalidated down to PoC */
1565                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1566                         armv7a_cache_flush_virt(target, breakpoint->address,
1567                                                 breakpoint->length);
1568                 }
1569
1570                 retval = target_write_memory(target,
1571                                 breakpoint->address & 0xFFFFFFFE,
1572                                 breakpoint->length, 1, code);
1573                 if (retval != ERROR_OK)
1574                         return retval;
1575
1576                 /* update i-cache at breakpoint location */
1577                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1578                                         breakpoint->length);
1579                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1580                                                  breakpoint->length);
1581
1582                 breakpoint->set = 0x11; /* Any nice value but 0 */
1583         }
1584
1585         return ERROR_OK;
1586 }
1587
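/* Program a context-ID breakpoint in a BRP of type BRP_CONTEXT: it matches
 * on the ASID in breakpoint->asid rather than on an instruction address. */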
1588 static int cortex_a_set_context_breakpoint(struct target *target,
1589         struct breakpoint *breakpoint, uint8_t matchmode)
1590 {
1591         int retval = ERROR_FAIL;
1592         int brp_i = 0;
1593         uint32_t control;
1594         uint8_t byte_addr_select = 0x0F;
1595         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1596         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1597         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1598
1599         if (breakpoint->set) {
1600                 LOG_WARNING("breakpoint already set");
1601                 return retval;
1602         }
1603         /* check available context BRPs */
1604         while ((brp_i < cortex_a->brp_num) && (brp_list[brp_i].used ||
1605                 (brp_list[brp_i].type != BRP_CONTEXT)))
1606                 brp_i++;
1607
1608         if (brp_i >= cortex_a->brp_num) {
1609                 LOG_ERROR("Cannot find free Breakpoint Register Pair");
1610                 return ERROR_FAIL;
1611         }
1612
1613         breakpoint->set = brp_i + 1;
1614         control = ((matchmode & 0x7) << 20)
1615                 | (byte_addr_select << 5)
1616                 | (3 << 1) | 1;
1617         brp_list[brp_i].used = 1;
1618         brp_list[brp_i].value = (breakpoint->asid);
1619         brp_list[brp_i].control = control;
1620         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1621                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1622                         brp_list[brp_i].value);
1623         if (retval != ERROR_OK)
1624                 return retval;
1625         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1626                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1627                         brp_list[brp_i].control);
1628         if (retval != ERROR_OK)
1629                 return retval;
1630         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1631                 brp_list[brp_i].control,
1632                 brp_list[brp_i].value);
1633         return ERROR_OK;
1634
1635 }
1636
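/* Hybrid breakpoint: claims one context-ID BRP and one IVA BRP and links
 * them via the BCR linked-BRP fields, so that the address match is intended
 * to trigger only for the given ASID. */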
1637 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1638 {
1639         int retval = ERROR_FAIL;
1640         int brp_1 = 0;  /* holds the contextID pair */
1641         int brp_2 = 0;  /* holds the IVA pair */
1642         uint32_t control_CTX, control_IVA;
1643         uint8_t CTX_byte_addr_select = 0x0F;
1644         uint8_t IVA_byte_addr_select = 0x0F;
1645         uint8_t CTX_machmode = 0x03;
1646         uint8_t IVA_machmode = 0x01;
1647         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1648         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1649         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1650
1651         if (breakpoint->set) {
1652                 LOG_WARNING("breakpoint already set");
1653                 return retval;
1654         }
1655         /* check available context BRPs */
1656         while ((brp_1 < cortex_a->brp_num) && (brp_list[brp_1].used ||
1657                 (brp_list[brp_1].type != BRP_CONTEXT)))
1658                 brp_1++;
1659
1660         LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1661         if (brp_1 >= cortex_a->brp_num) {
1662                 LOG_ERROR("Cannot find free Breakpoint Register Pair");
1663                 return ERROR_FAIL;
1664         }
1665
1666         while ((brp_2 < cortex_a->brp_num) && (brp_list[brp_2].used ||
1667                 (brp_list[brp_2].type != BRP_NORMAL)))
1668                 brp_2++;
1669
1670         LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1671         if (brp_2 >= cortex_a->brp_num) {
1672                 LOG_ERROR("Cannot find free Breakpoint Register Pair");
1673                 return ERROR_FAIL;
1674         }
1675
1676         breakpoint->set = brp_1 + 1;
1677         breakpoint->linked_BRP = brp_2;
1678         control_CTX = ((CTX_machmode & 0x7) << 20)
1679                 | (brp_2 << 16)
1680                 | (0 << 14)
1681                 | (CTX_byte_addr_select << 5)
1682                 | (3 << 1) | 1;
1683         brp_list[brp_1].used = 1;
1684         brp_list[brp_1].value = (breakpoint->asid);
1685         brp_list[brp_1].control = control_CTX;
1686         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1687                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1688                         brp_list[brp_1].value);
1689         if (retval != ERROR_OK)
1690                 return retval;
1691         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1692                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1693                         brp_list[brp_1].control);
1694         if (retval != ERROR_OK)
1695                 return retval;
1696
1697         control_IVA = ((IVA_machmode & 0x7) << 20)
1698                 | (brp_1 << 16)
1699                 | (IVA_byte_addr_select << 5)
1700                 | (3 << 1) | 1;
1701         brp_list[brp_2].used = 1;
1702         brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1703         brp_list[brp_2].control = control_IVA;
1704         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1705                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1706                         brp_list[brp_2].value);
1707         if (retval != ERROR_OK)
1708                 return retval;
1709         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1710                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1711                         brp_list[brp_2].control);
1712         if (retval != ERROR_OK)
1713                 return retval;
1714
1715         return ERROR_OK;
1716 }
1717
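/* Undo cortex_a_set_*_breakpoint: release the BRP (or linked BRP pair of a
 * hybrid breakpoint) of a hard breakpoint, or restore the original
 * instruction of a soft breakpoint. */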
1718 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1719 {
1720         int retval;
1721         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1722         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1723         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1724
1725         if (!breakpoint->set) {
1726                 LOG_WARNING("breakpoint not set");
1727                 return ERROR_OK;
1728         }
1729
1730         if (breakpoint->type == BKPT_HARD) {
1731                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1732                         int brp_i = breakpoint->set - 1;
1733                         int brp_j = breakpoint->linked_BRP;
1734                         if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1735                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1736                                 return ERROR_OK;
1737                         }
1738                         LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1739                                 brp_list[brp_i].control, brp_list[brp_i].value);
1740                         brp_list[brp_i].used = 0;
1741                         brp_list[brp_i].value = 0;
1742                         brp_list[brp_i].control = 0;
1743                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1744                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1745                                         brp_list[brp_i].control);
1746                         if (retval != ERROR_OK)
1747                                 return retval;
1748                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1749                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1750                                         brp_list[brp_i].value);
1751                         if (retval != ERROR_OK)
1752                                 return retval;
1753                         if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1754                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1755                                 return ERROR_OK;
1756                         }
1757                         LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1758                                 brp_list[brp_j].control, brp_list[brp_j].value);
1759                         brp_list[brp_j].used = 0;
1760                         brp_list[brp_j].value = 0;
1761                         brp_list[brp_j].control = 0;
1762                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1763                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1764                                         brp_list[brp_j].control);
1765                         if (retval != ERROR_OK)
1766                                 return retval;
1767                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1768                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1769                                         brp_list[brp_j].value);
1770                         if (retval != ERROR_OK)
1771                                 return retval;
1772                         breakpoint->linked_BRP = 0;
1773                         breakpoint->set = 0;
1774                         return ERROR_OK;
1775
1776                 } else {
1777                         int brp_i = breakpoint->set - 1;
1778                         if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1779                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1780                                 return ERROR_OK;
1781                         }
1782                         LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1783                                 brp_list[brp_i].control, brp_list[brp_i].value);
1784                         brp_list[brp_i].used = 0;
1785                         brp_list[brp_i].value = 0;
1786                         brp_list[brp_i].control = 0;
1787                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1788                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1789                                         brp_list[brp_i].control);
1790                         if (retval != ERROR_OK)
1791                                 return retval;
1792                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1793                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1794                                         brp_list[brp_i].value);
1795                         if (retval != ERROR_OK)
1796                                 return retval;
1797                         breakpoint->set = 0;
1798                         return ERROR_OK;
1799                 }
1800         } else {
1801
1802                 /* make sure data cache is cleaned & invalidated down to PoC */
1803                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1804                         armv7a_cache_flush_virt(target, breakpoint->address,
1805                                                 breakpoint->length);
1806                 }
1807
1808                 /* restore original instruction (kept in target endianness) */
1809                 if (breakpoint->length == 4) {
1810                         retval = target_write_memory(target,
1811                                         breakpoint->address & 0xFFFFFFFE,
1812                                         4, 1, breakpoint->orig_instr);
1813                         if (retval != ERROR_OK)
1814                                 return retval;
1815                 } else {
1816                         retval = target_write_memory(target,
1817                                         breakpoint->address & 0xFFFFFFFE,
1818                                         2, 1, breakpoint->orig_instr);
1819                         if (retval != ERROR_OK)
1820                                 return retval;
1821                 }
1822
1823                 /* update i-cache at breakpoint location */
1824                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1825                                                  breakpoint->length);
1826                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1827                                                  breakpoint->length);
1828         }
1829         breakpoint->set = 0;
1830
1831         return ERROR_OK;
1832 }
1833
1834 static int cortex_a_add_breakpoint(struct target *target,
1835         struct breakpoint *breakpoint)
1836 {
1837         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1838
1839         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1840                 LOG_INFO("no hardware breakpoint available");
1841                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1842         }
1843
1844         if (breakpoint->type == BKPT_HARD)
1845                 cortex_a->brp_num_available--;
1846
1847         return cortex_a_set_breakpoint(target, breakpoint, 0x00);       /* Exact match */
1848 }
1849
1850 static int cortex_a_add_context_breakpoint(struct target *target,
1851         struct breakpoint *breakpoint)
1852 {
1853         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1854
1855         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1856                 LOG_INFO("no hardware breakpoint available");
1857                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1858         }
1859
1860         if (breakpoint->type == BKPT_HARD)
1861                 cortex_a->brp_num_available--;
1862
1863         return cortex_a_set_context_breakpoint(target, breakpoint, 0x02);       /* asid match */
1864 }
1865
1866 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1867         struct breakpoint *breakpoint)
1868 {
1869         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1870
1871         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1872                 LOG_INFO("no hardware breakpoint available");
1873                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1874         }
1875
1876         if (breakpoint->type == BKPT_HARD)
1877                 cortex_a->brp_num_available--;
1878
1879         return cortex_a_set_hybrid_breakpoint(target, breakpoint);      /* ??? */
1880 }
1881
1882
1883 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1884 {
1885         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1886
1887 #if 0
1888 /* It is perfectly possible to remove breakpoints while the target is running */
1889         if (target->state != TARGET_HALTED) {
1890                 LOG_WARNING("target not halted");
1891                 return ERROR_TARGET_NOT_HALTED;
1892         }
1893 #endif
1894
1895         if (breakpoint->set) {
1896                 cortex_a_unset_breakpoint(target, breakpoint);
1897                 if (breakpoint->type == BKPT_HARD)
1898                         cortex_a->brp_num_available++;
1899         }
1900
1901
1902         return ERROR_OK;
1903 }
1904
1905 /*
1906  * Cortex-A Reset functions
1907  */
1908
1909 static int cortex_a_assert_reset(struct target *target)
1910 {
1911         struct armv7a_common *armv7a = target_to_armv7a(target);
1912
1913         LOG_DEBUG(" ");
1914
1915         /* FIXME when halt is requested, make it work somehow... */
1916
1917         /* This function can be called in "target not examined" state */
1918
1919         /* Issue some kind of warm reset. */
1920         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1921                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1922         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1923                 /* REVISIT handle "pulls" cases, if there's
1924                  * hardware that needs them to work.
1925                  */
1926
1927                 /*
1928                  * FIXME: fix reset when transport is SWD. This is a temporary
1929                  * work-around for release v0.10 that is not intended to stay!
1930                  */
1931                 if (transport_is_swd() ||
1932                                 (target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
1933                         jtag_add_reset(0, 1);
1934
1935         } else {
1936                 LOG_ERROR("%s: how to reset?", target_name(target));
1937                 return ERROR_FAIL;
1938         }
1939
1940         /* registers are now invalid */
1941         if (target_was_examined(target))
1942                 register_cache_invalidate(armv7a->arm.core_cache);
1943
1944         target->state = TARGET_RESET;
1945
1946         return ERROR_OK;
1947 }
1948
1949 static int cortex_a_deassert_reset(struct target *target)
1950 {
1951         int retval;
1952
1953         LOG_DEBUG(" ");
1954
1955         /* be certain SRST is off */
1956         jtag_add_reset(0, 0);
1957
1958         if (target_was_examined(target)) {
1959                 retval = cortex_a_poll(target);
1960                 if (retval != ERROR_OK)
1961                         return retval;
1962         }
1963
1964         if (target->reset_halt) {
1965                 if (target->state != TARGET_HALTED) {
1966                         LOG_WARNING("%s: ran after reset and before halt ...",
1967                                 target_name(target));
1968                         if (target_was_examined(target)) {
1969                                 retval = target_halt(target);
1970                                 if (retval != ERROR_OK)
1971                                         return retval;
1972                         } else
1973                                 target->state = TARGET_UNKNOWN;
1974                 }
1975         }
1976
1977         return ERROR_OK;
1978 }
1979
1980 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1981 {
1982         /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1983          * New desired mode must be in mode. Current value of DSCR must be in
1984          * *dscr, which is updated with new value.
1985          *
1986          * This function elides actually sending the mode-change over the debug
1987          * interface if the mode is already set as desired.
1988          */
1989         uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1990         if (new_dscr != *dscr) {
1991                 struct armv7a_common *armv7a = target_to_armv7a(target);
1992                 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1993                                 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1994                 if (retval == ERROR_OK)
1995                         *dscr = new_dscr;
1996                 return retval;
1997         } else {
1998                 return ERROR_OK;
1999         }
2000 }
2001
2002 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
2003         uint32_t value, uint32_t *dscr)
2004 {
2005         /* Waits until the specified bit(s) of DSCR take on a specified value. */
2006         struct armv7a_common *armv7a = target_to_armv7a(target);
2007         int64_t then = timeval_ms();
2008         int retval;
2009
2010         while ((*dscr & mask) != value) {
2011                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2012                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
2013                 if (retval != ERROR_OK)
2014                         return retval;
2015                 if (timeval_ms() > then + 1000) {
2016                         LOG_ERROR("timeout waiting for DSCR bit change");
2017                         return ERROR_FAIL;
2018                 }
2019         }
2020         return ERROR_OK;
2021 }
2022
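/* Read a coprocessor register: execute the given MRC opcode (result in R0),
 * move R0 to DTRTX and fetch the value over the debug APB. */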
2023 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
2024         uint32_t *data, uint32_t *dscr)
2025 {
2026         int retval;
2027         struct armv7a_common *armv7a = target_to_armv7a(target);
2028
2029         /* Move from coprocessor to R0. */
2030         retval = cortex_a_exec_opcode(target, opcode, dscr);
2031         if (retval != ERROR_OK)
2032                 return retval;
2033
2034         /* Move from R0 to DTRTX. */
2035         retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
2036         if (retval != ERROR_OK)
2037                 return retval;
2038
2039         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2040          * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2041          * must also check TXfull_l). Most of the time this will be free
2042          * because TXfull_l will be set immediately and cached in dscr. */
2043         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2044                         DSCR_DTRTX_FULL_LATCHED, dscr);
2045         if (retval != ERROR_OK)
2046                 return retval;
2047
2048         /* Read the value transferred to DTRTX. */
2049         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2050                         armv7a->debug_base + CPUDBG_DTRTX, data);
2051         if (retval != ERROR_OK)
2052                 return retval;
2053
2054         return ERROR_OK;
2055 }
2056
2057 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2058         uint32_t *dfsr, uint32_t *dscr)
2059 {
2060         int retval;
2061
2062         if (dfar) {
2063                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2064                 if (retval != ERROR_OK)
2065                         return retval;
2066         }
2067
2068         if (dfsr) {
2069                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2070                 if (retval != ERROR_OK)
2071                         return retval;
2072         }
2073
2074         return ERROR_OK;
2075 }
2076
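/* Write a coprocessor register: place the value in DTRRX, move it into R0
 * on the core, then execute the given MCR opcode. */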
2077 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2078         uint32_t data, uint32_t *dscr)
2079 {
2080         int retval;
2081         struct armv7a_common *armv7a = target_to_armv7a(target);
2082
2083         /* Write the value into DTRRX. */
2084         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2085                         armv7a->debug_base + CPUDBG_DTRRX, data);
2086         if (retval != ERROR_OK)
2087                 return retval;
2088
2089         /* Move from DTRRX to R0. */
2090         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2091         if (retval != ERROR_OK)
2092                 return retval;
2093
2094         /* Move from R0 to coprocessor. */
2095         retval = cortex_a_exec_opcode(target, opcode, dscr);
2096         if (retval != ERROR_OK)
2097                 return retval;
2098
2099         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2100          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2101          * check RXfull_l). Most of the time this will be free because RXfull_l
2102          * will be cleared immediately and cached in dscr. */
2103         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2104         if (retval != ERROR_OK)
2105                 return retval;
2106
2107         return ERROR_OK;
2108 }
2109
2110 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2111         uint32_t dfsr, uint32_t *dscr)
2112 {
2113         int retval;
2114
2115         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2116         if (retval != ERROR_OK)
2117                 return retval;
2118
2119         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2120         if (retval != ERROR_OK)
2121                 return retval;
2122
2123         return ERROR_OK;
2124 }
2125
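/* Translate a DFSR value (LPAE or short-descriptor format, selected by
 * bit 9) into an OpenOCD target error code. */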
2126 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2127 {
2128         uint32_t status, upper4;
2129
2130         if (dfsr & (1 << 9)) {
2131                 /* LPAE format. */
2132                 status = dfsr & 0x3f;
2133                 upper4 = status >> 2;
2134                 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2135                         return ERROR_TARGET_TRANSLATION_FAULT;
2136                 else if (status == 33)
2137                         return ERROR_TARGET_UNALIGNED_ACCESS;
2138                 else
2139                         return ERROR_TARGET_DATA_ABORT;
2140         } else {
2141                 /* Normal format. */
2142                 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2143                 if (status == 1)
2144                         return ERROR_TARGET_UNALIGNED_ACCESS;
2145                 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2146                                 status == 9 || status == 11 || status == 13 || status == 15)
2147                         return ERROR_TARGET_TRANSLATION_FAULT;
2148                 else
2149                         return ERROR_TARGET_DATA_ABORT;
2150         }
2151 }
2152
2153 static int cortex_a_write_cpu_memory_slow(struct target *target,
2154         uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2155 {
2156         /* Writes count objects of size size from *buffer. Old value of DSCR must
2157          * be in *dscr; updated to new value. This is slow because it works for
2158          * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2159          * the address is aligned, cortex_a_write_cpu_memory_fast should be
2160          * preferred.
2161          * Preconditions:
2162          * - Address is in R0.
2163          * - R0 is marked dirty.
2164          */
2165         struct armv7a_common *armv7a = target_to_armv7a(target);
2166         struct arm *arm = &armv7a->arm;
2167         int retval;
2168
2169         /* Mark register R1 as dirty, to use for transferring data. */
2170         arm_reg_current(arm, 1)->dirty = true;
2171
2172         /* Switch to non-blocking mode if not already in that mode. */
2173         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2174         if (retval != ERROR_OK)
2175                 return retval;
2176
2177         /* Go through the objects. */
2178         while (count) {
2179                 /* Write the value to store into DTRRX. */
2180                 uint32_t data, opcode;
2181                 if (size == 1)
2182                         data = *buffer;
2183                 else if (size == 2)
2184                         data = target_buffer_get_u16(target, buffer);
2185                 else
2186                         data = target_buffer_get_u32(target, buffer);
2187                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2188                                 armv7a->debug_base + CPUDBG_DTRRX, data);
2189                 if (retval != ERROR_OK)
2190                         return retval;
2191
2192                 /* Transfer the value from DTRRX to R1. */
2193                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
2194                 if (retval != ERROR_OK)
2195                         return retval;
2196
2197                 /* Write the value transferred to R1 into memory. */
2198                 if (size == 1)
2199                         opcode = ARMV4_5_STRB_IP(1, 0);
2200                 else if (size == 2)
2201                         opcode = ARMV4_5_STRH_IP(1, 0);
2202                 else
2203                         opcode = ARMV4_5_STRW_IP(1, 0);
2204                 retval = cortex_a_exec_opcode(target, opcode, dscr);
2205                 if (retval != ERROR_OK)
2206                         return retval;
2207
2208                 /* Check for faults and return early. */
2209                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2210                         return ERROR_OK; /* A data fault is not considered a system failure. */
2211
2212                 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
2213                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2214                  * must also check RXfull_l). Most of the time this will be free
2215                  * because RXfull_l will be cleared immediately and cached in dscr. */
2216                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2217                 if (retval != ERROR_OK)
2218                         return retval;
2219
2220                 /* Advance. */
2221                 buffer += size;
2222                 --count;
2223         }
2224
2225         return ERROR_OK;
2226 }
2227
2228 static int cortex_a_write_cpu_memory_fast(struct target *target,
2229         uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2230 {
2231         /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2232          * in *dscr; updated to new value. This is fast but only works for
2233          * word-sized objects at aligned addresses.
2234          * Preconditions:
2235          * - Address is in R0 and must be a multiple of 4.
2236          * - R0 is marked dirty.
2237          */
2238         struct armv7a_common *armv7a = target_to_armv7a(target);
2239         int retval;
2240
2241         /* Switch to fast mode if not already in that mode. */
2242         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2243         if (retval != ERROR_OK)
2244                 return retval;
2245
2246         /* Latch STC instruction. */
2247         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2248                         armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2249         if (retval != ERROR_OK)
2250                 return retval;
2251
2252         /* Transfer all the data and issue all the instructions. */
2253         return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2254                         4, count, armv7a->debug_base + CPUDBG_DTRRX);
2255 }
2256
2257 static int cortex_a_write_cpu_memory(struct target *target,
2258         uint32_t address, uint32_t size,
2259         uint32_t count, const uint8_t *buffer)
2260 {
2261         /* Write memory through the CPU. */
2262         int retval, final_retval;
2263         struct armv7a_common *armv7a = target_to_armv7a(target);
2264         struct arm *arm = &armv7a->arm;
2265         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2266
2267         LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2268                           address, size, count);
2269         if (target->state != TARGET_HALTED) {
2270                 LOG_WARNING("target not halted");
2271                 return ERROR_TARGET_NOT_HALTED;
2272         }
2273
2274         if (!count)
2275                 return ERROR_OK;
2276
2277         /* Clear any abort. */
2278         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2279                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2280         if (retval != ERROR_OK)
2281                 return retval;
2282
2283         /* Read DSCR. */
2284         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2285                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2286         if (retval != ERROR_OK)
2287                 return retval;
2288
2289         /* Switch to non-blocking mode if not already in that mode. */
2290         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2291         if (retval != ERROR_OK)
2292                 goto out;
2293
2294         /* Mark R0 as dirty. */
2295         arm_reg_current(arm, 0)->dirty = true;
2296
2297         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2298         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2299         if (retval != ERROR_OK)
2300                 goto out;
2301
2302         /* Get the memory address into R0. */
2303         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2304                         armv7a->debug_base + CPUDBG_DTRRX, address);
2305         if (retval != ERROR_OK)
2306                 goto out;
2307         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2308         if (retval != ERROR_OK)
2309                 goto out;
2310
2311         if (size == 4 && (address % 4) == 0) {
2312                 /* We are doing a word-aligned transfer, so use fast mode. */
2313                 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2314         } else {
2315                 /* Use slow path. */
2316                 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2317         }
2318
2319 out:
2320         final_retval = retval;
2321
2322         /* Switch to non-blocking mode if not already in that mode. */
2323         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2324         if (final_retval == ERROR_OK)
2325                 final_retval = retval;
2326
2327         /* Wait for last issued instruction to complete. */
2328         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2329         if (final_retval == ERROR_OK)
2330                 final_retval = retval;
2331
2332         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2333          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2334          * check RXfull_l). Most of the time this will be free because RXfull_l
2335          * will be cleared immediately and cached in dscr. However, don't do this
2336          * if there is fault, because then the instruction might not have completed
2337          * successfully. */
2338         if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2339                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2340                 if (retval != ERROR_OK)
2341                         return retval;
2342         }
2343
2344         /* If there were any sticky abort flags, clear them. */
2345         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2346                 fault_dscr = dscr;
2347                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2348                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2349                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2350         } else {
2351                 fault_dscr = 0;
2352         }
2353
2354         /* Handle synchronous data faults. */
2355         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2356                 if (final_retval == ERROR_OK) {
2357                         /* Final return value will reflect cause of fault. */
2358                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2359                         if (retval == ERROR_OK) {
2360                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2361                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2362                         } else
2363                                 final_retval = retval;
2364                 }
2365                 /* Fault destroyed DFAR/DFSR; restore them. */
2366                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2367                 if (retval != ERROR_OK)
2368                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2369         }
2370
2371         /* Handle asynchronous data faults. */
2372         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2373                 if (final_retval == ERROR_OK)
2374                         /* No other error has been recorded so far, so keep this one. */
2375                         final_retval = ERROR_TARGET_DATA_ABORT;
2376         }
2377
2378         /* If the DCC is nonempty, clear it. */
2379         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2380                 uint32_t dummy;
2381                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2382                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2383                 if (final_retval == ERROR_OK)
2384                         final_retval = retval;
2385         }
2386         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2387                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2388                 if (final_retval == ERROR_OK)
2389                         final_retval = retval;
2390         }
2391
2392         /* Done. */
2393         return final_retval;
2394 }
2395
2396 static int cortex_a_read_cpu_memory_slow(struct target *target,
2397         uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2398 {
2399         /* Reads count objects of size size into *buffer. Old value of DSCR must be
2400          * in *dscr; updated to new value. This is slow because it works for
2401          * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2402          * the address is aligned, cortex_a_read_cpu_memory_fast should be
2403          * preferred.
2404          * Preconditions:
2405          * - Address is in R0.
2406          * - R0 is marked dirty.
2407          */
2408         struct armv7a_common *armv7a = target_to_armv7a(target);
2409         struct arm *arm = &armv7a->arm;
2410         int retval;
2411
2412         /* Mark register R1 as dirty, to use for transferring data. */
2413         arm_reg_current(arm, 1)->dirty = true;
2414
2415         /* Switch to non-blocking mode if not already in that mode. */
2416         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2417         if (retval != ERROR_OK)
2418                 return retval;
2419
2420         /* Go through the objects. */
2421         while (count) {
2422                 /* Issue a load of the appropriate size to R1. */
2423                 uint32_t opcode, data;
2424                 if (size == 1)
2425                         opcode = ARMV4_5_LDRB_IP(1, 0);
2426                 else if (size == 2)
2427                         opcode = ARMV4_5_LDRH_IP(1, 0);
2428                 else
2429                         opcode = ARMV4_5_LDRW_IP(1, 0);
2430                 retval = cortex_a_exec_opcode(target, opcode, dscr);
2431                 if (retval != ERROR_OK)
2432                         return retval;
2433
2434                 /* Issue a write of R1 to DTRTX. */
2435                 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2436                 if (retval != ERROR_OK)
2437                         return retval;
2438
2439                 /* Check for faults and return early. */
2440                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2441                         return ERROR_OK; /* A data fault is not considered a system failure. */
2442
2443                 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2444                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2445                  * must also check TXfull_l). Most of the time this will be free
2446                  * because TXfull_l will be set immediately and cached in dscr. */
2447                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2448                                 DSCR_DTRTX_FULL_LATCHED, dscr);
2449                 if (retval != ERROR_OK)
2450                         return retval;
2451
2452                 /* Read the value transferred to DTRTX into the buffer. */
2453                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2454                                 armv7a->debug_base + CPUDBG_DTRTX, &data);
2455                 if (retval != ERROR_OK)
2456                         return retval;
2457                 if (size == 1)
2458                         *buffer = (uint8_t) data;
2459                 else if (size == 2)
2460                         target_buffer_set_u16(target, buffer, (uint16_t) data);
2461                 else
2462                         target_buffer_set_u32(target, buffer, data);
2463
2464                 /* Advance. */
2465                 buffer += size;
2466                 --count;
2467         }
2468
2469         return ERROR_OK;
2470 }
2471
2472 static int cortex_a_read_cpu_memory_fast(struct target *target,
2473         uint32_t count, uint8_t *buffer, uint32_t *dscr)
2474 {
2475         /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2476          * *dscr; updated to new value. This is fast but only works for word-sized
2477          * objects at aligned addresses.
2478          * Preconditions:
2479          * - Address is in R0 and must be a multiple of 4.
2480          * - R0 is marked dirty.
2481          */
2482         struct armv7a_common *armv7a = target_to_armv7a(target);
2483         uint32_t u32;
2484         int retval;
2485
2486         /* Switch to non-blocking mode if not already in that mode. */
2487         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2488         if (retval != ERROR_OK)
2489                 return retval;
2490
2491         /* Issue the LDC instruction via a write to ITR. */
2492         retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2493         if (retval != ERROR_OK)
2494                 return retval;
2495
2496         count--;
2497
2498         if (count > 0) {
2499                 /* Switch to fast mode if not already in that mode. */
2500                 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2501                 if (retval != ERROR_OK)
2502                         return retval;
2503
2504                 /* Latch LDC instruction. */
2505                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2506                                 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2507                 if (retval != ERROR_OK)
2508                         return retval;
2509
2510                 /* Read the value transferred to DTRTX into the buffer. Due to fast
2511                  * mode rules, this blocks until the instruction finishes executing and
2512                  * then reissues the read instruction to read the next word from
2513                  * memory. The last read of DTRTX in this call reads the second-to-last
2514                  * word from memory and issues the read instruction for the last word.
2515                  */
2516                 retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2517                                 4, count, armv7a->debug_base + CPUDBG_DTRTX);
2518                 if (retval != ERROR_OK)
2519                         return retval;
2520
2521                 /* Advance. */
2522                 buffer += count * 4;
2523         }
2524
2525         /* Wait for last issued instruction to complete. */
2526         retval = cortex_a_wait_instrcmpl(target, dscr, false);
2527         if (retval != ERROR_OK)
2528                 return retval;
2529
2530         /* Switch to non-blocking mode if not already in that mode. */
2531         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2532         if (retval != ERROR_OK)
2533                 return retval;
2534
2535         /* Check for faults and return early. */
2536         if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2537                 return ERROR_OK; /* A data fault is not considered a system failure. */
2538
2539         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2540          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2541          * check TXfull_l). Most of the time this will be free because TXfull_l
2542          * will be set immediately and cached in dscr. */
2543         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2544                         DSCR_DTRTX_FULL_LATCHED, dscr);
2545         if (retval != ERROR_OK)
2546                 return retval;
2547
2548         /* Read the value transferred to DTRTX into the buffer. This is the last
2549          * word. */
2550         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2551                         armv7a->debug_base + CPUDBG_DTRTX, &u32);
2552         if (retval != ERROR_OK)
2553                 return retval;
2554         target_buffer_set_u32(target, buffer, u32);
2555
2556         return ERROR_OK;
2557 }
2558
2559 static int cortex_a_read_cpu_memory(struct target *target,
2560         uint32_t address, uint32_t size,
2561         uint32_t count, uint8_t *buffer)
2562 {
2563         /* Read memory through the CPU. */
2564         int retval, final_retval;
2565         struct armv7a_common *armv7a = target_to_armv7a(target);
2566         struct arm *arm = &armv7a->arm;
2567         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2568
2569         LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2570                           address, size, count);
2571         if (target->state != TARGET_HALTED) {
2572                 LOG_WARNING("target not halted");
2573                 return ERROR_TARGET_NOT_HALTED;
2574         }
2575
2576         if (!count)
2577                 return ERROR_OK;
2578
2579         /* Clear any abort. */
2580         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2581                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2582         if (retval != ERROR_OK)
2583                 return retval;
2584
2585         /* Read DSCR */
2586         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2587                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2588         if (retval != ERROR_OK)
2589                 return retval;
2590
2591         /* Switch to non-blocking mode if not already in that mode. */
2592         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2593         if (retval != ERROR_OK)
2594                 goto out;
2595
2596         /* Mark R0 as dirty. */
2597         arm_reg_current(arm, 0)->dirty = true;
2598
2599         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2600         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2601         if (retval != ERROR_OK)
2602                 goto out;
2603
2604         /* Get the memory address into R0. */
2605         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2606                         armv7a->debug_base + CPUDBG_DTRRX, address);
2607         if (retval != ERROR_OK)
2608                 goto out;
2609         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2610         if (retval != ERROR_OK)
2611                 goto out;
2612
2613         if (size == 4 && (address % 4) == 0) {
2614                 /* We are doing a word-aligned transfer, so use fast mode. */
2615                 retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2616         } else {
2617                 /* Use slow path. */
2618                 retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2619         }
2620
2621 out:
2622         final_retval = retval;
2623
2624         /* Switch to non-blocking mode if not already in that mode. */
2625         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2626         if (final_retval == ERROR_OK)
2627                 final_retval = retval;
2628
2629         /* Wait for last issued instruction to complete. */
2630         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2631         if (final_retval == ERROR_OK)
2632                 final_retval = retval;
2633
2634         /* If there were any sticky abort flags, clear them. */
2635         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2636                 fault_dscr = dscr;
2637                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2638                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2639                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2640         } else {
2641                 fault_dscr = 0;
2642         }
2643
2644         /* Handle synchronous data faults. */
2645         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2646                 if (final_retval == ERROR_OK) {
2647                         /* Final return value will reflect cause of fault. */
2648                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2649                         if (retval == ERROR_OK) {
2650                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2651                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2652                         } else
2653                                 final_retval = retval;
2654                 }
2655                 /* Fault destroyed DFAR/DFSR; restore them. */
2656                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2657                 if (retval != ERROR_OK)
2658                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2659         }
2660
2661         /* Handle asynchronous data faults. */
2662         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2663                 if (final_retval == ERROR_OK)
2664                         /* No other error has been recorded so far, so keep this one. */
2665                         final_retval = ERROR_TARGET_DATA_ABORT;
2666         }
2667
2668         /* If the DCC is nonempty, clear it. */
2669         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2670                 uint32_t dummy;
2671                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2672                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2673                 if (final_retval == ERROR_OK)
2674                         final_retval = retval;
2675         }
2676         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2677                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2678                 if (final_retval == ERROR_OK)
2679                         final_retval = retval;
2680         }
2681
2682         /* Done. */
2683         return final_retval;
2684 }
2685
2686
2687 /*
2688  * Cortex-A Memory access
2689  *
2690  * This is the same as for Cortex-M3, but we must also use the correct
2691  * AP number for every access.
2692  */
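
/*
 * Access path summary (illustrative sketch, mirroring the functions below):
 * when an AHB/memory AP is available and is the currently selected AP,
 * transfers go directly through mem_ap_read_buf()/mem_ap_write_buf();
 * otherwise the access is performed by the core itself, bracketed by the
 * MMU/cache preparation helpers, e.g. for a physical read:
 *
 *	cortex_a_prep_memaccess(target, 1);
 *	retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
 *	cortex_a_post_memaccess(target, 1);
 */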
2693
2694 static int cortex_a_read_phys_memory(struct target *target,
2695         target_addr_t address, uint32_t size,
2696         uint32_t count, uint8_t *buffer)
2697 {
2698         struct armv7a_common *armv7a = target_to_armv7a(target);
2699         struct adiv5_dap *swjdp = armv7a->arm.dap;
2700         uint8_t apsel = swjdp->apsel;
2701         int retval;
2702
2703         if (!count || !buffer)
2704                 return ERROR_COMMAND_SYNTAX_ERROR;
2705
2706                 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2707                 address, size, count);
2708
2709         if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2710                 return mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2711
2712         /* read memory through the CPU */
2713         cortex_a_prep_memaccess(target, 1);
2714         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2715         cortex_a_post_memaccess(target, 1);
2716
2717         return retval;
2718 }
2719
2720 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2721         uint32_t size, uint32_t count, uint8_t *buffer)
2722 {
2723         int retval;
2724
2725         /* cortex_a handles unaligned memory access */
2726         LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2727                 address, size, count);
2728
2729         cortex_a_prep_memaccess(target, 0);
2730         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2731         cortex_a_post_memaccess(target, 0);
2732
2733         return retval;
2734 }
2735
2736 static int cortex_a_read_memory_ahb(struct target *target, target_addr_t address,
2737         uint32_t size, uint32_t count, uint8_t *buffer)
2738 {
2739         int mmu_enabled = 0;
2740         target_addr_t virt, phys;
2741         int retval;
2742         struct armv7a_common *armv7a = target_to_armv7a(target);
2743         struct adiv5_dap *swjdp = armv7a->arm.dap;
2744         uint8_t apsel = swjdp->apsel;
2745
2746         if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2747                 return target_read_memory(target, address, size, count, buffer);
2748
2749         /* cortex_a handles unaligned memory access */
2750         LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2751                 address, size, count);
2752
2753         /* determine if MMU was enabled on target stop */
2754         if (!armv7a->is_armv7r) {
2755                 retval = cortex_a_mmu(target, &mmu_enabled);
2756                 if (retval != ERROR_OK)
2757                         return retval;
2758         }
2759
2760         if (mmu_enabled) {
2761                 virt = address;
2762                 retval = cortex_a_virt2phys(target, virt, &phys);
2763                 if (retval != ERROR_OK)
2764                         return retval;
2765
2766                 LOG_DEBUG("Reading at virtual address. "
2767                           "Translating v:" TARGET_ADDR_FMT " to r:" TARGET_ADDR_FMT,
2768                           virt, phys);
2769                 address = phys;
2770         }
2771
2772         if (!count || !buffer)
2773                 return ERROR_COMMAND_SYNTAX_ERROR;
2774
2775         retval = mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2776
2777         return retval;
2778 }
2779
2780 static int cortex_a_write_phys_memory(struct target *target,
2781         target_addr_t address, uint32_t size,
2782         uint32_t count, const uint8_t *buffer)
2783 {
2784         struct armv7a_common *armv7a = target_to_armv7a(target);
2785         struct adiv5_dap *swjdp = armv7a->arm.dap;
2786         uint8_t apsel = swjdp->apsel;
2787         int retval;
2788
2789         if (!count || !buffer)
2790                 return ERROR_COMMAND_SYNTAX_ERROR;
2791
2792         LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2793                 address, size, count);
2794
2795         if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2796                 return mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2797
2798         /* write memory through the CPU */
2799         cortex_a_prep_memaccess(target, 1);
2800         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2801         cortex_a_post_memaccess(target, 1);
2802
2803         return retval;
2804 }
2805
2806 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2807         uint32_t size, uint32_t count, const uint8_t *buffer)
2808 {
2809         int retval;
2810
2811         /* cortex_a handles unaligned memory access */
2812         LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2813                 address, size, count);
2814
2815         /* memory writes bypass the caches, must flush before writing */
2816         armv7a_cache_auto_flush_on_write(target, address, size * count);
2817
2818         cortex_a_prep_memaccess(target, 0);
2819         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2820         cortex_a_post_memaccess(target, 0);
2821         return retval;
2822 }
2823
2824 static int cortex_a_write_memory_ahb(struct target *target, target_addr_t address,
2825         uint32_t size, uint32_t count, const uint8_t *buffer)
2826 {
2827         int mmu_enabled = 0;
2828         target_addr_t virt, phys;
2829         int retval;
2830         struct armv7a_common *armv7a = target_to_armv7a(target);
2831         struct adiv5_dap *swjdp = armv7a->arm.dap;
2832         uint8_t apsel = swjdp->apsel;
2833
2834         if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2835                 return target_write_memory(target, address, size, count, buffer);
2836
2837         /* cortex_a handles unaligned memory access */
2838         LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2839                 address, size, count);
2840
2841         /* determine if MMU was enabled on target stop */
2842         if (!armv7a->is_armv7r) {
2843                 retval = cortex_a_mmu(target, &mmu_enabled);
2844                 if (retval != ERROR_OK)
2845                         return retval;
2846         }
2847
2848         if (mmu_enabled) {
2849                 virt = address;
2850                 retval = cortex_a_virt2phys(target, virt, &phys);
2851                 if (retval != ERROR_OK)
2852                         return retval;
2853
2854                 LOG_DEBUG("Writing to virtual address. "
2855                           "Translating v:" TARGET_ADDR_FMT " to r:" TARGET_ADDR_FMT,
2856                           virt,
2857                           phys);
2858                 address = phys;
2859         }
2860
2861         if (!count || !buffer)
2862                 return ERROR_COMMAND_SYNTAX_ERROR;
2863
2864         retval = mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2865
2866         return retval;
2867 }
2868
2869 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2870                                 uint32_t count, uint8_t *buffer)
2871 {
2872         uint32_t size;
2873
2874         /* Align the address up to the maximum access size of 4 bytes. The loop condition
2875          * makes sure that, after each pass, at least one access at the next larger size is left to do. */
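        /* Worked example (illustrative): for address = 0x1001 and count = 11,
         * this loop issues one byte read at 0x1001 and one halfword read at
         * 0x1002; the loop below then reads two words starting at 0x1004. */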
2876         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2877                 if (address & size) {
2878                         int retval = cortex_a_read_memory_ahb(target, address, size, 1, buffer);
2879                         if (retval != ERROR_OK)
2880                                 return retval;
2881                         address += size;
2882                         count -= size;
2883                         buffer += size;
2884                 }
2885         }
2886
2887         /* Read the data with as large access size as possible. */
2888         for (; size > 0; size /= 2) {
2889                 uint32_t aligned = count - count % size;
2890                 if (aligned > 0) {
2891                         int retval = cortex_a_read_memory_ahb(target, address, size, aligned / size, buffer);
2892                         if (retval != ERROR_OK)
2893                                 return retval;
2894                         address += aligned;
2895                         count -= aligned;
2896                         buffer += aligned;
2897                 }
2898         }
2899
2900         return ERROR_OK;
2901 }
2902
2903 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2904                                  uint32_t count, const uint8_t *buffer)
2905 {
2906         uint32_t size;
2907
2908         /* Align the address up to the maximum access size of 4 bytes. The loop condition
2909          * makes sure that, after each pass, at least one access at the next larger size is left to do. */
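        /* Worked example (illustrative): for address = 0x2002 and count = 7,
         * this loop issues one halfword write at 0x2002; the loop below then
         * writes one word at 0x2004 followed by one byte at 0x2008. */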
2910         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2911                 if (address & size) {
2912                         int retval = cortex_a_write_memory_ahb(target, address, size, 1, buffer);
2913                         if (retval != ERROR_OK)
2914                                 return retval;
2915                         address += size;
2916                         count -= size;
2917                         buffer += size;
2918                 }
2919         }
2920
2921         /* Write the data with as large an access size as possible. */
2922         for (; size > 0; size /= 2) {
2923                 uint32_t aligned = count - count % size;
2924                 if (aligned > 0) {
2925                         int retval = cortex_a_write_memory_ahb(target, address, size, aligned / size, buffer);
2926                         if (retval != ERROR_OK)
2927                                 return retval;
2928                         address += aligned;
2929                         count -= aligned;
2930                         buffer += aligned;
2931                 }
2932         }
2933
2934         return ERROR_OK;
2935 }
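
/* Usage sketch (illustrative, error handling trimmed): the buffer helpers
 * above accept arbitrary alignment and choose the access sizes themselves:
 *
 *	uint8_t data[11];
 *	retval = cortex_a_read_buffer(target, 0x1001, sizeof(data), data);
 *	if (retval == ERROR_OK)
 *		retval = cortex_a_write_buffer(target, 0x2002, sizeof(data), data);
 */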
2936
2937 static int cortex_a_handle_target_request(void *priv)
2938 {
2939         struct target *target = priv;
2940         struct armv7a_common *armv7a = target_to_armv7a(target);
2941         int retval;
2942
2943         if (!target_was_examined(target))
2944                 return ERROR_OK;
2945         if (!target->dbg_msg_enabled)
2946                 return ERROR_OK;
2947
2948         if (target->state == TARGET_RUNNING) {
2949                 uint32_t request;
2950                 uint32_t dscr;
2951                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2952                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2953
2954                 /* check if we have data */
2955                 int64_t then = timeval_ms();
2956                 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2957                         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2958                                         armv7a->debug_base + CPUDBG_DTRTX, &request);
2959                         if (retval == ERROR_OK) {
2960                                 target_request(target, request);
2961                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2962                                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2963                         }
2964                         if (timeval_ms() > then + 1000) {
2965                         LOG_ERROR("Timeout waiting for dtr tx to drain");
2966                                 return ERROR_FAIL;
2967                         }
2968                 }
2969         }
2970
2971         return ERROR_OK;
2972 }
2973
2974 /*
2975  * Cortex-A target information and configuration
2976  */
2977
2978 static int cortex_a_examine_first(struct target *target)
2979 {
2980         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2981         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2982         struct adiv5_dap *swjdp = armv7a->arm.dap;
2983
2984         int i;
2985         int retval = ERROR_OK;
2986         uint32_t didr, cpuid, dbg_osreg;
2987
2988         /* Search for the APB-AP - it is needed for access to debug registers */
2989         retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2990         if (retval != ERROR_OK) {
2991                 LOG_ERROR("Could not find APB-AP for debug access");
2992                 return retval;
2993         }
2994
2995         retval = mem_ap_init(armv7a->debug_ap);
2996         if (retval != ERROR_OK) {
2997                 LOG_ERROR("Could not initialize the APB-AP");
2998                 return retval;
2999         }
3000
3001         armv7a->debug_ap->memaccess_tck = 80;
3002
3003         /* Search for the AHB-AP.
3004          * REVISIT: We should search for AXI-AP as well and make sure the AP's MEMTYPE says it
3005          * can access system memory. */
3006         armv7a->memory_ap_available = false;
3007         retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
3008         if (retval == ERROR_OK) {
3009                 retval = mem_ap_init(armv7a->memory_ap);
3010                 if (retval == ERROR_OK)
3011                         armv7a->memory_ap_available = true;
3012         }
3013         if (retval != ERROR_OK) {
3014                 /* AHB-AP not found or unavailable - use the CPU */
3015                 LOG_DEBUG("No AHB-AP available for memory access");
3016         }
3017
3018         if (!target->dbgbase_set) {
3019                 uint32_t dbgbase;
3020                 /* Get ROM Table base */
3021                 uint32_t apid;
3022                 int32_t coreidx = target->coreid;
3023                 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
3024                           target->cmd_name);
3025                 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
3026                 if (retval != ERROR_OK)
3027                         return retval;
3028                 /* Lookup 0x15 -- Processor DAP */
3029                 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
3030                                 &armv7a->debug_base, &coreidx);
3031                 if (retval != ERROR_OK) {
3032                         LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
3033                                   target->cmd_name);
3034                         return retval;
3035                 }
3036                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
3037                           target->coreid, armv7a->debug_base);
3038         } else
3039                 armv7a->debug_base = target->dbgbase;
3040
3041         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3042                         armv7a->debug_base + CPUDBG_DIDR, &didr);
3043         if (retval != ERROR_OK) {
3044                 LOG_DEBUG("Examine %s failed", "DIDR");
3045                 return retval;
3046         }
3047
3048         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3049                         armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3050         if (retval != ERROR_OK) {
3051                 LOG_DEBUG("Examine %s failed", "CPUID");
3052                 return retval;
3053         }
3054
3055         LOG_DEBUG("didr = 0x%08" PRIx32, didr);
3056         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
3057
3058         cortex_a->didr = didr;
3059         cortex_a->cpuid = cpuid;
3060
3061         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3062                                     armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
3063         if (retval != ERROR_OK)
3064                 return retval;
3065         LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR  0x%" PRIx32, target->coreid, dbg_osreg);
3066
3067         if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
3068                 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
3069                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3070                 return ERROR_TARGET_INIT_FAILED;
3071         }
3072
3073         if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
3074                 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
3075
3076         /* Read DBGOSLSR and check if OSLK is implemented */
3077         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3078                                 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3079         if (retval != ERROR_OK)
3080                 return retval;
3081         LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
3082
3083         /* check if OS Lock is implemented */
3084         if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
3085                 /* check if OS Lock is set */
3086                 if (dbg_osreg & OSLSR_OSLK) {
3087                         LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
3088
3089                         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3090                                                         armv7a->debug_base + CPUDBG_OSLAR,
3091                                                         0);
3092                         if (retval == ERROR_OK)
3093                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3094                                                         armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3095
3096                         /* if we fail to access the register or cannot reset the OSLK bit, bail out */
3097                         if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
3098                                 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
3099                                                 target->coreid);
3100                                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3101                                 return ERROR_TARGET_INIT_FAILED;
3102                         }
3103                 }
3104         }
3105
3106         armv7a->arm.core_type = ARM_MODE_MON;
3107
3108         /* Avoid recreating the register cache */
3109         if (!target_was_examined(target)) {
3110                 retval = cortex_a_dpm_setup(cortex_a, didr);
3111                 if (retval != ERROR_OK)
3112                         return retval;
3113         }
3114
3115         /* Setup Breakpoint Register Pairs */
3116         cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3117         cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3118         cortex_a->brp_num_available = cortex_a->brp_num;
3119         free(cortex_a->brp_list);
3120         cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3121 /*      cortex_a->brb_enabled = ????; */
3122         for (i = 0; i < cortex_a->brp_num; i++) {
3123                 cortex_a->brp_list[i].used = 0;
3124                 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3125                         cortex_a->brp_list[i].type = BRP_NORMAL;
3126                 else
3127                         cortex_a->brp_list[i].type = BRP_CONTEXT;
3128                 cortex_a->brp_list[i].value = 0;
3129                 cortex_a->brp_list[i].control = 0;
3130                 cortex_a->brp_list[i].BRPn = i;
3131         }
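        /* Worked example (illustrative): a DIDR with BRPs == 5 and CTX_CMPs == 1
         * gives brp_num == 6 and brp_num_context == 2, so entries 0..3 are
         * BRP_NORMAL and entries 4..5 are BRP_CONTEXT. */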
3132
3133         LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3134
3135         /* select debug_ap as default */
3136         swjdp->apsel = armv7a->debug_ap->ap_num;
3137
3138         target_set_examined(target);
3139         return ERROR_OK;
3140 }
3141
3142 static int cortex_a_examine(struct target *target)
3143 {
3144         int retval = ERROR_OK;
3145
3146         /* Reestablish communication after target reset */
3147         retval = cortex_a_examine_first(target);
3148
3149         /* Configure core debug access */
3150         if (retval == ERROR_OK)
3151                 retval = cortex_a_init_debug_access(target);
3152
3153         return retval;
3154 }
3155
3156 /*
3157  *      Cortex-A target creation and initialization
3158  */
3159
3160 static int cortex_a_init_target(struct command_context *cmd_ctx,
3161         struct target *target)
3162 {
3163         /* most of this initialization is already done by examine_first() */
3164         arm_semihosting_init(target);
3165         return ERROR_OK;
3166 }
3167
3168 static int cortex_a_init_arch_info(struct target *target,
3169         struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
3170 {
3171         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3172
3173         /* Setup struct cortex_a_common */
3174         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3175         armv7a->arm.dap = dap;
3176
3177         cortex_a->fast_reg_read = 0;
3178
3179         /* register arch-specific functions */
3180         armv7a->examine_debug_reason = NULL;
3181
3182         armv7a->post_debug_entry = cortex_a_post_debug_entry;
3183
3184         armv7a->pre_restore_context = NULL;
3185
3186         armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3187
3188
3189 /*      arm7_9->handle_target_request = cortex_a_handle_target_request; */
3190
3191         /* REVISIT v7a setup should be in a v7a-specific routine */
3192         armv7a_init_arch_info(target, armv7a);
3193         target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3194
3195         return ERROR_OK;
3196 }
3197
3198 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3199 {
3200         struct cortex_a_common *cortex_a;
3201         struct adiv5_private_config *pc;
3202
3203         if (target->private_config == NULL)
3204                 return ERROR_FAIL;
3205         pc = (struct adiv5_private_config *)target->private_config;
3206
3207         cortex_a = calloc(1, sizeof(struct cortex_a_common));
3208
3209         cortex_a->armv7a_common.is_armv7r = false;
3210
3211         cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
3212
3213         return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3214 }
3215
3216 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3217 {
3218         struct cortex_a_common *cortex_a;
3219         struct adiv5_private_config *pc;
3220
3221         pc = (struct adiv5_private_config *)target->private_config;
3222         if (adiv5_verify_config(pc) != ERROR_OK)
3223                 return ERROR_FAIL;
3224
3225         cortex_a = calloc(1, sizeof(struct cortex_a_common));
3226         cortex_a->armv7a_common.is_armv7r = true;
3227
3228         return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3229 }
3230
3231 static void cortex_a_deinit_target(struct target *target)
3232 {
3233         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3234         struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
3235
3236         free(cortex_a->brp_list);
3237         free(dpm->dbp);
3238         free(dpm->dwp);
3239         free(target->private_config);
3240         free(cortex_a);
3241 }
3242
3243 static int cortex_a_mmu(struct target *target, int *enabled)
3244 {
3245         struct armv7a_common *armv7a = target_to_armv7a(target);
3246
3247         if (target->state != TARGET_HALTED) {
3248                 LOG_ERROR("%s: target not halted", __func__);
3249                 return ERROR_TARGET_INVALID;
3250         }
3251
3252         if (armv7a->is_armv7r)
3253                 *enabled = 0;
3254         else
3255                 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3256
3257         return ERROR_OK;
3258 }
3259
3260 static int cortex_a_virt2phys(struct target *target,
3261         target_addr_t virt, target_addr_t *phys)
3262 {
3263         int retval = ERROR_FAIL;
3264         struct armv7a_common *armv7a = target_to_armv7a(target);
3265         struct adiv5_dap *swjdp = armv7a->arm.dap;
3266         uint8_t apsel = swjdp->apsel;
3267         int mmu_enabled = 0;
3268
3269         /*
3270          * If the MMU was not enabled at debug entry, there is no
3271          * way of knowing if there was ever a valid configuration
3272          * for it and thus it's not safe to enable it. In this case,
3273          * just return the virtual address as physical.
3274          */
3275         cortex_a_mmu(target, &mmu_enabled);
3276         if (!mmu_enabled) {
3277                 *phys = virt;
3278                 return ERROR_OK;
3279         }
3280
3281         if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num)) {
3282                 uint32_t ret;
3283                 retval = armv7a_mmu_translate_va(target,
3284                                 virt, &ret);
3285                 if (retval != ERROR_OK)
3286                         goto done;
3287                 *phys = ret;
3288         } else {/*  use this method if armv7a->memory_ap is not selected;
3289                  *  the MMU must be enabled in order to get a correct translation */
3290                 retval = cortex_a_mmu_modify(target, 1);
3291                 if (retval != ERROR_OK)
3292                         goto done;
3293                 retval = armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
3294                                                     (uint32_t *)phys, 1);
3295         }
3296 done:
3297         return retval;
3298 }
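
/* Usage sketch (illustrative): code needing a physical access for a virtual
 * address translates first, much like cortex_a_read_memory_ahb() above:
 *
 *	target_addr_t phys;
 *	retval = cortex_a_virt2phys(target, virt, &phys);
 *	if (retval == ERROR_OK)
 *		retval = cortex_a_read_phys_memory(target, phys, 4, 1, buffer);
 */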
3299
3300 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3301 {
3302         struct target *target = get_current_target(CMD_CTX);
3303         struct armv7a_common *armv7a = target_to_armv7a(target);
3304
3305         return armv7a_handle_cache_info_command(CMD_CTX,
3306                         &armv7a->armv7a_mmu.armv7a_cache);
3307 }
3308
3309
3310 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3311 {
3312         struct target *target = get_current_target(CMD_CTX);
3313         if (!target_was_examined(target)) {
3314                 LOG_ERROR("target not examined yet");
3315                 return ERROR_FAIL;
3316         }
3317
3318         return cortex_a_init_debug_access(target);
3319 }
3320 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3321 {
3322         struct target *target = get_current_target(CMD_CTX);
3323         /* check whether the target is an SMP target */
3324         struct target_list *head;
3325         struct target *curr;
3326         head = target->head;
3327         target->smp = 0;
3328         if (head != (struct target_list *)NULL) {
3329                 while (head != (struct target_list *)NULL) {
3330                         curr = head->target;
3331                         curr->smp = 0;
3332                         head = head->next;
3333                 }
3334                 /*  fix the target presented to the debugger */
3335                 target->gdb_service->target = target;
3336         }
3337         return ERROR_OK;
3338 }
3339
3340 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3341 {
3342         struct target *target = get_current_target(CMD_CTX);
3343         struct target_list *head;
3344         struct target *curr;
3345         head = target->head;
3346         if (head != (struct target_list *)NULL) {
3347                 target->smp = 1;
3348                 while (head != (struct target_list *)NULL) {
3349                         curr = head->target;
3350                         curr->smp = 1;
3351                         head = head->next;
3352                 }
3353         }
3354         return ERROR_OK;
3355 }
3356
3357 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3358 {
3359         struct target *target = get_current_target(CMD_CTX);
3360         int retval = ERROR_OK;
3361         struct target_list *head;
3362         head = target->head;
3363         if (head != (struct target_list *)NULL) {
3364                 if (CMD_ARGC == 1) {
3365                         int coreid = 0;
3366                         COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3367                         if (ERROR_OK != retval)
3368                                 return retval;
3369                         target->gdb_service->core[1] = coreid;
3370
3371                 }
3372                 command_print(CMD_CTX, "gdb coreid  %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
3373                         , target->gdb_service->core[1]);
3374         }
3375         return ERROR_OK;
3376 }
3377
3378 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3379 {
3380         struct target *target = get_current_target(CMD_CTX);
3381         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3382
3383         static const Jim_Nvp nvp_maskisr_modes[] = {
3384                 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3385                 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3386                 { .name = NULL, .value = -1 },
3387         };
3388         const Jim_Nvp *n;
3389
3390         if (CMD_ARGC > 0) {
3391                 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3392                 if (n->name == NULL) {
3393                         LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3394                         return ERROR_COMMAND_SYNTAX_ERROR;
3395                 }
3396
3397                 cortex_a->isrmasking_mode = n->value;
3398         }
3399
3400         n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3401         command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3402
3403         return ERROR_OK;
3404 }
3405
3406 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3407 {
3408         struct target *target = get_current_target(CMD_CTX);
3409         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3410
3411         static const Jim_Nvp nvp_dacrfixup_modes[] = {
3412                 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3413                 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3414                 { .name = NULL, .value = -1 },
3415         };
3416         const Jim_Nvp *n;
3417
3418         if (CMD_ARGC > 0) {
3419                 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3420                 if (n->name == NULL)
3421                         return ERROR_COMMAND_SYNTAX_ERROR;
3422                 cortex_a->dacrfixup_mode = n->value;
3423
3424         }
3425
3426         n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3427         command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3428
3429         return ERROR_OK;
3430 }
3431
3432 static const struct command_registration cortex_a_exec_command_handlers[] = {
3433         {
3434                 .name = "cache_info",
3435                 .handler = cortex_a_handle_cache_info_command,
3436                 .mode = COMMAND_EXEC,
3437                 .help = "display information about target caches",
3438                 .usage = "",
3439         },
3440         {
3441                 .name = "dbginit",
3442                 .handler = cortex_a_handle_dbginit_command,
3443                 .mode = COMMAND_EXEC,
3444                 .help = "Initialize core debug",
3445                 .usage = "",
3446         },
3447         {   .name = "smp_off",
3448             .handler = cortex_a_handle_smp_off_command,
3449             .mode = COMMAND_EXEC,
3450             .help = "Stop smp handling",
3451             .usage = "",},
3452         {
3453                 .name = "smp_on",
3454                 .handler = cortex_a_handle_smp_on_command,
3455                 .mode = COMMAND_EXEC,
3456                 .help = "Restart smp handling",
3457                 .usage = "",
3458         },
3459         {
3460                 .name = "smp_gdb",
3461                 .handler = cortex_a_handle_smp_gdb_command,
3462                 .mode = COMMAND_EXEC,
3463                 .help = "display or set the current core exposed to gdb",
3464                 .usage = "",
3465         },
3466         {
3467                 .name = "maskisr",
3468                 .handler = handle_cortex_a_mask_interrupts_command,
3469                 .mode = COMMAND_ANY,
3470                 .help = "mask cortex_a interrupts",
3471                 .usage = "['on'|'off']",
3472         },
3473         {
3474                 .name = "dacrfixup",
3475                 .handler = handle_cortex_a_dacrfixup_command,
3476                 .mode = COMMAND_ANY,
3477                 .help = "set domain access control (DACR) to all-manager "
3478                         "on memory access",
3479                 .usage = "['on'|'off']",
3480         },
3481
3482         COMMAND_REGISTRATION_DONE
3483 };
3484 static const struct command_registration cortex_a_command_handlers[] = {
3485         {
3486                 .chain = arm_command_handlers,
3487         },
3488         {
3489                 .chain = armv7a_command_handlers,
3490         },
3491         {
3492                 .name = "cortex_a",
3493                 .mode = COMMAND_ANY,
3494                 .help = "Cortex-A command group",
3495                 .usage = "",
3496                 .chain = cortex_a_exec_command_handlers,
3497         },
3498         COMMAND_REGISTRATION_DONE
3499 };
3500
3501 struct target_type cortexa_target = {
3502         .name = "cortex_a",
3503         .deprecated_name = "cortex_a8",
3504
3505         .poll = cortex_a_poll,
3506         .arch_state = armv7a_arch_state,
3507
3508         .halt = cortex_a_halt,
3509         .resume = cortex_a_resume,
3510         .step = cortex_a_step,
3511
3512         .assert_reset = cortex_a_assert_reset,
3513         .deassert_reset = cortex_a_deassert_reset,
3514
3515         /* REVISIT allow exporting VFP3 registers ... */
3516         .get_gdb_reg_list = arm_get_gdb_reg_list,
3517
3518         .read_memory = cortex_a_read_memory,
3519         .write_memory = cortex_a_write_memory,
3520
3521         .read_buffer = cortex_a_read_buffer,
3522         .write_buffer = cortex_a_write_buffer,
3523
3524         .checksum_memory = arm_checksum_memory,
3525         .blank_check_memory = arm_blank_check_memory,
3526
3527         .run_algorithm = armv4_5_run_algorithm,
3528
3529         .add_breakpoint = cortex_a_add_breakpoint,
3530         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3531         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3532         .remove_breakpoint = cortex_a_remove_breakpoint,
3533         .add_watchpoint = NULL,
3534         .remove_watchpoint = NULL,
3535
3536         .commands = cortex_a_command_handlers,
3537         .target_create = cortex_a_target_create,
3538         .target_jim_configure = adiv5_jim_configure,
3539         .init_target = cortex_a_init_target,
3540         .examine = cortex_a_examine,
3541         .deinit_target = cortex_a_deinit_target,
3542
3543         .read_phys_memory = cortex_a_read_phys_memory,
3544         .write_phys_memory = cortex_a_write_phys_memory,
3545         .mmu = cortex_a_mmu,
3546         .virt2phys = cortex_a_virt2phys,
3547 };
3548
3549 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3550         {
3551                 .name = "dbginit",
3552                 .handler = cortex_a_handle_dbginit_command,
3553                 .mode = COMMAND_EXEC,
3554                 .help = "Initialize core debug",
3555                 .usage = "",
3556         },
3557         {
3558                 .name = "maskisr",
3559                 .handler = handle_cortex_a_mask_interrupts_command,
3560                 .mode = COMMAND_EXEC,
3561                 .help = "mask cortex_r4 interrupts",
3562                 .usage = "['on'|'off']",
3563         },
3564
3565         COMMAND_REGISTRATION_DONE
3566 };
3567 static const struct command_registration cortex_r4_command_handlers[] = {
3568         {
3569                 .chain = arm_command_handlers,
3570         },
3571         {
3572                 .name = "cortex_r4",
3573                 .mode = COMMAND_ANY,
3574                 .help = "Cortex-R4 command group",
3575                 .usage = "",
3576                 .chain = cortex_r4_exec_command_handlers,
3577         },
3578         COMMAND_REGISTRATION_DONE
3579 };
3580
3581 struct target_type cortexr4_target = {
3582         .name = "cortex_r4",
3583
3584         .poll = cortex_a_poll,
3585         .arch_state = armv7a_arch_state,
3586
3587         .halt = cortex_a_halt,
3588         .resume = cortex_a_resume,
3589         .step = cortex_a_step,
3590
3591         .assert_reset = cortex_a_assert_reset,
3592         .deassert_reset = cortex_a_deassert_reset,
3593
3594         /* REVISIT allow exporting VFP3 registers ... */
3595         .get_gdb_reg_list = arm_get_gdb_reg_list,
3596
3597         .read_memory = cortex_a_read_phys_memory,
3598         .write_memory = cortex_a_write_phys_memory,
3599
3600         .checksum_memory = arm_checksum_memory,
3601         .blank_check_memory = arm_blank_check_memory,
3602
3603         .run_algorithm = armv4_5_run_algorithm,
3604
3605         .add_breakpoint = cortex_a_add_breakpoint,
3606         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3607         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3608         .remove_breakpoint = cortex_a_remove_breakpoint,
3609         .add_watchpoint = NULL,
3610         .remove_watchpoint = NULL,
3611
3612         .commands = cortex_r4_command_handlers,
3613         .target_create = cortex_r4_target_create,
3614         .target_jim_configure = adiv5_jim_configure,
3615         .init_target = cortex_a_init_target,
3616         .examine = cortex_a_examine,
3617         .deinit_target = cortex_a_deinit_target,
3618 };