1 /***************************************************************************
2  *   Copyright (C) 2015 by David Ung                                       *
3  *                                                                         *
4  *   This program is free software; you can redistribute it and/or modify  *
5  *   it under the terms of the GNU General Public License as published by  *
6  *   the Free Software Foundation; either version 2 of the License, or     *
7  *   (at your option) any later version.                                   *
8  *                                                                         *
9  *   This program is distributed in the hope that it will be useful,       *
10  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
11  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
12  *   GNU General Public License for more details.                          *
13  *                                                                         *
14  *   You should have received a copy of the GNU General Public License     *
15  *   along with this program; if not, write to the                         *
16  *   Free Software Foundation, Inc.,                                       *
17  *                                                                         *
18  ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include <helper/time_support.h>
31
32 static int aarch64_poll(struct target *target);
33 static int aarch64_debug_entry(struct target *target);
34 static int aarch64_restore_context(struct target *target, bool bpwp);
35 static int aarch64_set_breakpoint(struct target *target,
36         struct breakpoint *breakpoint, uint8_t matchmode);
37 static int aarch64_set_context_breakpoint(struct target *target,
38         struct breakpoint *breakpoint, uint8_t matchmode);
39 static int aarch64_set_hybrid_breakpoint(struct target *target,
40         struct breakpoint *breakpoint);
41 static int aarch64_unset_breakpoint(struct target *target,
42         struct breakpoint *breakpoint);
43 static int aarch64_mmu(struct target *target, int *enabled);
44 static int aarch64_virt2phys(struct target *target,
45         target_addr_t virt, target_addr_t *phys);
46 static int aarch64_read_apb_ap_memory(struct target *target,
47         uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
48 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
49         uint32_t opcode, uint32_t data);
50
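/* Write the saved SCTLR_EL1 value back to the core if it differs from the
 * value currently programmed; 0xd5181000 encodes "MSR SCTLR_EL1, X0". */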
51 static int aarch64_restore_system_control_reg(struct target *target)
52 {
53         int retval = ERROR_OK;
54
55         struct aarch64_common *aarch64 = target_to_aarch64(target);
56         struct armv8_common *armv8 = target_to_armv8(target);
57
58         if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
59                 aarch64->system_control_reg_curr = aarch64->system_control_reg;
60                 retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
61                                                      0xd5181000,
62                                                      aarch64->system_control_reg);
63         }
64
65         return retval;
66 }
67
68 /*  check address before an aarch64 APB read/write access with the MMU on,
69  *  to avoid a predictable APB data abort */
70 static int aarch64_check_address(struct target *target, uint32_t address)
71 {
72         /* TODO */
73         return ERROR_OK;
74 }
75 /*  modify system_control_reg to enable or disable the MMU for:
76  *  - virt2phys address conversion
77  *  - reading or writing memory at a physical or virtual address */
78 static int aarch64_mmu_modify(struct target *target, int enable)
79 {
80         struct aarch64_common *aarch64 = target_to_aarch64(target);
81         struct armv8_common *armv8 = &aarch64->armv8_common;
82         int retval = ERROR_OK;
83
84         if (enable) {
85                 /*  refuse to enable the MMU if it was disabled when the target stopped */
86                 if (!(aarch64->system_control_reg & 0x1U)) {
87                         LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
88                         return ERROR_FAIL;
89                 }
90                 if (!(aarch64->system_control_reg_curr & 0x1U)) {
91                         aarch64->system_control_reg_curr |= 0x1U;
92                         retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
93                                                              0xd5181000,
94                                                              aarch64->system_control_reg_curr);
95                 }
96         } else {
97                 if (aarch64->system_control_reg_curr & 0x4U) {
98                         /*  data cache is active */
99                         aarch64->system_control_reg_curr &= ~0x4U;
100                         /* flush the data cache via the architecture handler before disabling it */
101                         if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
102                                 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
103                 }
104                 if ((aarch64->system_control_reg_curr & 0x1U)) {
105                         aarch64->system_control_reg_curr &= ~0x1U;
106                         retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
107                                                              0xd5181000,
108                                                              aarch64->system_control_reg_curr);
109                 }
110         }
111         return retval;
112 }
113
114 /*
115  * Basic debug access; very low level, assumes state is saved
116  */
117 static int aarch64_init_debug_access(struct target *target)
118 {
119         struct armv8_common *armv8 = target_to_armv8(target);
120         int retval;
121         uint32_t dummy;
122
123         LOG_DEBUG(" ");
124
125         /* Unlock the debug registers for modification.
126          * The debug port might be uninitialised, so try twice. */
127         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
128                              armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
129         if (retval != ERROR_OK) {
130                 /* try again */
131                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
132                              armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
133                 if (retval == ERROR_OK)
134                         LOG_USER("Unlocking debug access failed on first, but succeeded on second try.");
135         }
136         if (retval != ERROR_OK)
137                 return retval;
138         /* Clear Sticky Power Down status Bit in PRSR to enable access to
139            the registers in the Core Power Domain */
140         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
141                         armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
142         if (retval != ERROR_OK)
143                 return retval;
144
145         /* Enabling of instruction execution in debug mode is done in debug_entry code */
146
147         /* Resync breakpoint registers */
148
149         /* Since this is likely called from init or reset, update target state information*/
150         return aarch64_poll(target);
151 }
152
153 /* To reduce needless round-trips, pass in a pointer to the current
154  * DSCR value.  Initialize it to zero if you just need to know the
155  * value on return from this function; or DSCR_ITE if you
156  * happen to know that no instruction is pending.
157  */
158 static int aarch64_exec_opcode(struct target *target,
159         uint32_t opcode, uint32_t *dscr_p)
160 {
161         uint32_t dscr;
162         int retval;
163         struct armv8_common *armv8 = target_to_armv8(target);
164         dscr = dscr_p ? *dscr_p : 0;
165
166         LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
167
168         /* Wait for InstrCompl bit to be set */
169         long long then = timeval_ms();
170         while ((dscr & DSCR_ITE) == 0) {
171                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
172                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
173                 if (retval != ERROR_OK) {
174                         LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
175                         return retval;
176                 }
177                 if (timeval_ms() > then + 1000) {
178                         LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
179                         return ERROR_FAIL;
180                 }
181         }
182
183         retval = mem_ap_write_u32(armv8->debug_ap,
184                         armv8->debug_base + CPUV8_DBG_ITR, opcode);
185         if (retval != ERROR_OK)
186                 return retval;
187
188         then = timeval_ms();
189         do {
190                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
191                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
192                 if (retval != ERROR_OK) {
193                         LOG_ERROR("Could not read DSCR register");
194                         return retval;
195                 }
196                 if (timeval_ms() > then + 1000) {
197                         LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
198                         return ERROR_FAIL;
199                 }
200         } while ((dscr & DSCR_ITE) == 0);       /* Wait for InstrCompl bit to be set */
201
202         if (dscr_p)
203                 *dscr_p = dscr;
204
205         return retval;
206 }
207
208 /* Write to memory mapped registers directly with no cache or mmu handling */
209 static int aarch64_dap_write_memap_register_u32(struct target *target,
210         uint32_t address,
211         uint32_t value)
212 {
213         int retval;
214         struct armv8_common *armv8 = target_to_armv8(target);
215
216         retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
217
218         return retval;
219 }
220
221 /*
222  * AARCH64 implementation of Debug Programmer's Model
223  *
224  * NOTE the invariant:  these routines return with DSCR_ITE set,
225  * so there's no need to poll for it before executing an instruction.
226  *
227  * NOTE that in several of these cases the "stall" mode might be useful.
228  * It'd let us queue a few operations together... prepare/finish might
229  * be the places to enable/disable that mode.
230  */
231
232 static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
233 {
234         return container_of(dpm, struct aarch64_common, armv8_common.dpm);
235 }
236
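/* DCC transfers: the host writes data for the target into DTRRX and reads
 * data produced by the target from DTRTX. */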
237 static int aarch64_write_dcc(struct armv8_common *armv8, uint32_t data)
238 {
239         LOG_DEBUG("write DCC 0x%08" PRIx32, data);
240         return mem_ap_write_u32(armv8->debug_ap,
241                                 armv8->debug_base + CPUV8_DBG_DTRRX, data);
242 }
243
244 static int aarch64_write_dcc_64(struct armv8_common *armv8, uint64_t data)
245 {
246         int ret;
247         LOG_DEBUG("write DCC low word 0x%08" PRIx32, (unsigned)data);
248         LOG_DEBUG("write DCC high word 0x%08" PRIx32, (unsigned)(data >> 32));
249         ret = mem_ap_write_u32(armv8->debug_ap,
250                                armv8->debug_base + CPUV8_DBG_DTRRX, data);
251         ret += mem_ap_write_u32(armv8->debug_ap,
252                                 armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
253         return ret;
254 }
255
256 static int aarch64_read_dcc(struct armv8_common *armv8, uint32_t *data,
257         uint32_t *dscr_p)
258 {
259         uint32_t dscr = DSCR_ITE;
260         int retval;
261
262         if (dscr_p)
263                 dscr = *dscr_p;
264
265         /* Wait for DSCR TXfull (data ready in DTRTX) */
266         long long then = timeval_ms();
267         while ((dscr & DSCR_DTR_TX_FULL) == 0) {
268                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
269                                 armv8->debug_base + CPUV8_DBG_DSCR,
270                                 &dscr);
271                 if (retval != ERROR_OK)
272                         return retval;
273                 if (timeval_ms() > then + 1000) {
274                         LOG_ERROR("Timeout waiting for read dcc");
275                         return ERROR_FAIL;
276                 }
277         }
278
279         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
280                                             armv8->debug_base + CPUV8_DBG_DTRTX,
281                                             data);
282         if (retval != ERROR_OK)
283                 return retval;
284         LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
285
286         if (dscr_p)
287                 *dscr_p = dscr;
288
289         return retval;
290 }
291
292 static int aarch64_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
293         uint32_t *dscr_p)
294 {
295         uint32_t dscr = DSCR_ITE;
296         uint32_t higher;
297         int retval;
298
299         if (dscr_p)
300                 dscr = *dscr_p;
301
302         /* Wait for DSCR TXfull (data ready in DTRTX) */
303         long long then = timeval_ms();
304         while ((dscr & DSCR_DTR_TX_FULL) == 0) {
305                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
306                                 armv8->debug_base + CPUV8_DBG_DSCR,
307                                 &dscr);
308                 if (retval != ERROR_OK)
309                         return retval;
310                 if (timeval_ms() > then + 1000) {
311                         LOG_ERROR("Timeout waiting for read dcc");
312                         return ERROR_FAIL;
313                 }
314         }
315
316         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
317                                             armv8->debug_base + CPUV8_DBG_DTRTX,
318                                             (uint32_t *)data);
319         if (retval != ERROR_OK)
320                 return retval;
321
322         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
323                                             armv8->debug_base + CPUV8_DBG_DTRRX,
324                                             &higher);
325         if (retval != ERROR_OK)
326                 return retval;
327
328         *data = *(uint32_t *)data | (uint64_t)higher << 32;
329         LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
330
331         if (dscr_p)
332                 *dscr_p = dscr;
333
334         return retval;
335 }
336
337 static int aarch64_dpm_prepare(struct arm_dpm *dpm)
338 {
339         struct aarch64_common *a8 = dpm_to_a8(dpm);
340         uint32_t dscr;
341         int retval;
342
343         /* set up invariant:  INSTR_COMP is set after every DPM operation */
344         long long then = timeval_ms();
345         for (;; ) {
346                 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
347                                 a8->armv8_common.debug_base + CPUV8_DBG_DSCR,
348                                 &dscr);
349                 if (retval != ERROR_OK)
350                         return retval;
351                 if ((dscr & DSCR_ITE) != 0)
352                         break;
353                 if (timeval_ms() > then + 1000) {
354                         LOG_ERROR("Timeout waiting for dpm prepare");
355                         return ERROR_FAIL;
356                 }
357         }
358
359         /* this "should never happen" ... */
360         if (dscr & DSCR_DTR_RX_FULL) {
361                 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
362                 /* Clear DCCRX */
363                 retval = mem_ap_read_u32(a8->armv8_common.debug_ap,
364                         a8->armv8_common.debug_base + CPUV8_DBG_DTRRX, &dscr);
365                 if (retval != ERROR_OK)
366                         return retval;
367
368                 /* Clear sticky error */
369                 retval = mem_ap_write_u32(a8->armv8_common.debug_ap,
370                         a8->armv8_common.debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
371                 if (retval != ERROR_OK)
372                         return retval;
373         }
374
375         return retval;
376 }
377
378 static int aarch64_dpm_finish(struct arm_dpm *dpm)
379 {
380         /* REVISIT what could be done here? */
381         return ERROR_OK;
382 }
383
384 static int aarch64_instr_execute(struct arm_dpm *dpm,
385         uint32_t opcode)
386 {
387         struct aarch64_common *a8 = dpm_to_a8(dpm);
388         uint32_t dscr = DSCR_ITE;
389
390         return aarch64_exec_opcode(
391                         a8->armv8_common.arm.target,
392                         opcode,
393                         &dscr);
394 }
395
396 static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
397         uint32_t opcode, uint32_t data)
398 {
399         struct aarch64_common *a8 = dpm_to_a8(dpm);
400         int retval;
401         uint32_t dscr = DSCR_ITE;
402
403         retval = aarch64_write_dcc(&a8->armv8_common, data);
404         if (retval != ERROR_OK)
405                 return retval;
406
407         return aarch64_exec_opcode(
408                         a8->armv8_common.arm.target,
409                         opcode,
410                         &dscr);
411 }
412
413 static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
414         uint32_t opcode, uint64_t data)
415 {
416         struct aarch64_common *a8 = dpm_to_a8(dpm);
417         int retval;
418         uint32_t dscr = DSCR_ITE;
419
420         retval = aarch64_write_dcc_64(&a8->armv8_common, data);
421         if (retval != ERROR_OK)
422                 return retval;
423
424         return aarch64_exec_opcode(
425                         a8->armv8_common.arm.target,
426                         opcode,
427                         &dscr);
428 }
429
430 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
431         uint32_t opcode, uint32_t data)
432 {
433         struct aarch64_common *a8 = dpm_to_a8(dpm);
434         uint32_t dscr = DSCR_ITE;
435         int retval;
436
437         retval = aarch64_write_dcc(&a8->armv8_common, data);
438         if (retval != ERROR_OK)
439                 return retval;
440
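        /* move the DCC value into X0: 0xd5330500 encodes "MRS X0, DBGDTRRX_EL0" */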
441         retval = aarch64_exec_opcode(
442                         a8->armv8_common.arm.target,
443                         0xd5330500,
444                         &dscr);
445         if (retval != ERROR_OK)
446                 return retval;
447
448         /* then the opcode, taking data from R0 */
449         retval = aarch64_exec_opcode(
450                         a8->armv8_common.arm.target,
451                         opcode,
452                         &dscr);
453
454         return retval;
455 }
456
457 static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
458         uint32_t opcode, uint64_t data)
459 {
460         struct aarch64_common *a8 = dpm_to_a8(dpm);
461         uint32_t dscr = DSCR_ITE;
462         int retval;
463
464         retval = aarch64_write_dcc_64(&a8->armv8_common, data);
465         if (retval != ERROR_OK)
466                 return retval;
467
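        /* move the 64-bit DCC value into X0: 0xd5330400 encodes "MRS X0, DBGDTR_EL0" */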
468         retval = aarch64_exec_opcode(
469                         a8->armv8_common.arm.target,
470                         0xd5330400,
471                         &dscr);
472         if (retval != ERROR_OK)
473                 return retval;
474
475         /* then the opcode, taking data from R0 */
476         retval = aarch64_exec_opcode(
477                         a8->armv8_common.arm.target,
478                         opcode,
479                         &dscr);
480
481         return retval;
482 }
483
484 static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
485 {
486         struct target *target = dpm->arm->target;
487         uint32_t dscr = DSCR_ITE;
488
489         /* "Prefetch flush" after modifying execution status in CPSR */
490         return aarch64_exec_opcode(target,
491                         ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
492                         &dscr);
493 }
494
495 static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
496         uint32_t opcode, uint32_t *data)
497 {
498         struct aarch64_common *a8 = dpm_to_a8(dpm);
499         int retval;
500         uint32_t dscr = DSCR_ITE;
501
502         /* the opcode, writing data to DCC */
503         retval = aarch64_exec_opcode(
504                         a8->armv8_common.arm.target,
505                         opcode,
506                         &dscr);
507         if (retval != ERROR_OK)
508                 return retval;
509
510         return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
511 }
512
513 static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
514         uint32_t opcode, uint64_t *data)
515 {
516         struct aarch64_common *a8 = dpm_to_a8(dpm);
517         int retval;
518         uint32_t dscr = DSCR_ITE;
519
520         /* the opcode, writing data to DCC */
521         retval = aarch64_exec_opcode(
522                         a8->armv8_common.arm.target,
523                         opcode,
524                         &dscr);
525         if (retval != ERROR_OK)
526                 return retval;
527
528         return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
529 }
530
531 static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
532         uint32_t opcode, uint32_t *data)
533 {
534         struct aarch64_common *a8 = dpm_to_a8(dpm);
535         uint32_t dscr = DSCR_ITE;
536         int retval;
537
538         /* the opcode, writing data to R0 */
539         retval = aarch64_exec_opcode(
540                         a8->armv8_common.arm.target,
541                         opcode,
542                         &dscr);
543         if (retval != ERROR_OK)
544                 return retval;
545
546         /* write R0 to DCC */
547         retval = aarch64_exec_opcode(
548                         a8->armv8_common.arm.target,
549                         0xd5130400,  /* msr dbgdtr_el0, x0 */
550                         &dscr);
551         if (retval != ERROR_OK)
552                 return retval;
553
554         return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
555 }
556
557 static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
558         uint32_t opcode, uint64_t *data)
559 {
560         struct aarch64_common *a8 = dpm_to_a8(dpm);
561         uint32_t dscr = DSCR_ITE;
562         int retval;
563
564         /* the opcode, writing data to R0 */
565         retval = aarch64_exec_opcode(
566                         a8->armv8_common.arm.target,
567                         opcode,
568                         &dscr);
569         if (retval != ERROR_OK)
570                 return retval;
571
572         /* write R0 to DCC */
573         retval = aarch64_exec_opcode(
574                         a8->armv8_common.arm.target,
575                         0xd5130400,  /* msr dbgdtr_el0, x0 */
576                         &dscr);
577         if (retval != ERROR_OK)
578                 return retval;
579
580         return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
581 }
582
583 static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
584         uint32_t addr, uint32_t control)
585 {
586         struct aarch64_common *a8 = dpm_to_a8(dpm);
587         uint32_t vr = a8->armv8_common.debug_base;
588         uint32_t cr = a8->armv8_common.debug_base;
589         int retval;
590
591         switch (index_t) {
592                 case 0 ... 15:  /* breakpoints */
593                         vr += CPUV8_DBG_BVR_BASE;
594                         cr += CPUV8_DBG_BCR_BASE;
595                         break;
596                 case 16 ... 31: /* watchpoints */
597                         vr += CPUV8_DBG_WVR_BASE;
598                         cr += CPUV8_DBG_WCR_BASE;
599                         index_t -= 16;
600                         break;
601                 default:
602                         return ERROR_FAIL;
603         }
604         vr += 4 * index_t;
605         cr += 4 * index_t;
606
607         LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
608                 (unsigned) vr, (unsigned) cr);
609
610         retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
611                         vr, addr);
612         if (retval != ERROR_OK)
613                 return retval;
614         retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
615                         cr, control);
616         return retval;
617 }
618
619 static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
620 {
621         return ERROR_OK;
622
623 #if 0
624         struct aarch64_common *a = dpm_to_a8(dpm);
625         uint32_t cr;
626
627         switch (index_t) {
628                 case 0 ... 15:
629                         cr = a->armv8_common.debug_base + CPUV8_DBG_BCR_BASE;
630                         break;
631                 case 16 ... 31:
632                         cr = a->armv8_common.debug_base + CPUV8_DBG_WCR_BASE;
633                         index_t -= 16;
634                         break;
635                 default:
636                         return ERROR_FAIL;
637         }
638         cr += 4 * index_t;
639
640         LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
641
642         /* clear control register */
643         return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
644 #endif
645 }
646
647 static int aarch64_dpm_setup(struct aarch64_common *a8, uint32_t debug)
648 {
649         struct arm_dpm *dpm = &a8->armv8_common.dpm;
650         int retval;
651
652         dpm->arm = &a8->armv8_common.arm;
653         dpm->didr = debug;
654
655         dpm->prepare = aarch64_dpm_prepare;
656         dpm->finish = aarch64_dpm_finish;
657
658         dpm->instr_execute = aarch64_instr_execute;
659         dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
660         dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
661         dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
662         dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
663         dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;
664
665         dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
666         dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
667         dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
668         dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;
669
670         dpm->arm_reg_current = armv8_reg_current;
671
672         dpm->bpwp_enable = aarch64_bpwp_enable;
673         dpm->bpwp_disable = aarch64_bpwp_disable;
674
675         retval = armv8_dpm_setup(dpm);
676         if (retval == ERROR_OK)
677                 retval = armv8_dpm_initialize(dpm);
678
679         return retval;
680 }
681 static struct target *get_aarch64(struct target *target, int32_t coreid)
682 {
683         struct target_list *head;
684         struct target *curr;
685
686         head = target->head;
687         while (head != (struct target_list *)NULL) {
688                 curr = head->target;
689                 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
690                         return curr;
691                 head = head->next;
692         }
693         return target;
694 }
695 static int aarch64_halt(struct target *target);
696
697 static int aarch64_halt_smp(struct target *target)
698 {
699         int retval = 0;
700         struct target_list *head;
701         struct target *curr;
702         head = target->head;
703         while (head != (struct target_list *)NULL) {
704                 curr = head->target;
705                 if ((curr != target) && (curr->state != TARGET_HALTED))
706                         retval += aarch64_halt(curr);
707                 head = head->next;
708         }
709         return retval;
710 }
711
712 static int update_halt_gdb(struct target *target)
713 {
714         int retval = 0;
715         if (target->gdb_service && target->gdb_service->core[0] == -1) {
716                 target->gdb_service->target = target;
717                 target->gdb_service->core[0] = target->coreid;
718                 retval += aarch64_halt_smp(target);
719         }
720         return retval;
721 }
722
723 /*
724  * AArch64 Run control
725  */
726
727 static int aarch64_poll(struct target *target)
728 {
729         int retval = ERROR_OK;
730         uint32_t dscr;
731         struct aarch64_common *aarch64 = target_to_aarch64(target);
732         struct armv8_common *armv8 = &aarch64->armv8_common;
733         enum target_state prev_target_state = target->state;
734         /*  toggling to another core is done by gdb as follows: */
735         /*  maint packet J core_id */
736         /*  continue */
737         /*  the next poll triggers a halt event sent to gdb */
738         if ((target->state == TARGET_HALTED) && (target->smp) &&
739                 (target->gdb_service) &&
740                 (target->gdb_service->target == NULL)) {
741                 target->gdb_service->target =
742                         get_aarch64(target, target->gdb_service->core[1]);
743                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
744                 return retval;
745         }
746         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
747                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
748         if (retval != ERROR_OK)
749                 return retval;
750         aarch64->cpudbg_dscr = dscr;
751
752         if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
753                 if (prev_target_state != TARGET_HALTED) {
754                         /* We have a halting debug event */
755                         LOG_DEBUG("Target halted");
756                         target->state = TARGET_HALTED;
757                         if ((prev_target_state == TARGET_RUNNING)
758                                 || (prev_target_state == TARGET_UNKNOWN)
759                                 || (prev_target_state == TARGET_RESET)) {
760                                 retval = aarch64_debug_entry(target);
761                                 if (retval != ERROR_OK)
762                                         return retval;
763                                 if (target->smp) {
764                                         retval = update_halt_gdb(target);
765                                         if (retval != ERROR_OK)
766                                                 return retval;
767                                 }
768                                 target_call_event_callbacks(target,
769                                         TARGET_EVENT_HALTED);
770                         }
771                         if (prev_target_state == TARGET_DEBUG_RUNNING) {
772                                 LOG_DEBUG(" ");
773
774                                 retval = aarch64_debug_entry(target);
775                                 if (retval != ERROR_OK)
776                                         return retval;
777                                 if (target->smp) {
778                                         retval = update_halt_gdb(target);
779                                         if (retval != ERROR_OK)
780                                                 return retval;
781                                 }
782
783                                 target_call_event_callbacks(target,
784                                         TARGET_EVENT_DEBUG_HALTED);
785                         }
786                 }
787         } else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
788                 target->state = TARGET_RUNNING;
789         else {
790                 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
791                 target->state = TARGET_UNKNOWN;
792         }
793
794         return retval;
795 }
796
797 static int aarch64_halt(struct target *target)
798 {
799         int retval = ERROR_OK;
800         uint32_t dscr;
801         struct armv8_common *armv8 = target_to_armv8(target);
802
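        /* Halting is requested through the Cross Trigger Interface: enable the
         * CTI, open the gate for channels 0 and 1, route channel 0 to the debug
         * request trigger and channel 1 to the restart trigger, then pulse
         * channel 0 to ask the core to halt. */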
803         /* enable CTI*/
804         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
805                         armv8->cti_base + CTI_CTR, 1);
806         if (retval != ERROR_OK)
807                 return retval;
808
809         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
810                         armv8->cti_base + CTI_GATE, 3);
811         if (retval != ERROR_OK)
812                 return retval;
813
814         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
815                         armv8->cti_base + CTI_OUTEN0, 1);
816         if (retval != ERROR_OK)
817                 return retval;
818
819         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
820                         armv8->cti_base + CTI_OUTEN1, 2);
821         if (retval != ERROR_OK)
822                 return retval;
823
824         /*
825          * add HDE in halting debug mode
826          */
827         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
828                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
829         if (retval != ERROR_OK)
830                 return retval;
831
832         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
833                         armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
834         if (retval != ERROR_OK)
835                 return retval;
836
837         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
838                         armv8->cti_base + CTI_APPPULSE, 1);
839         if (retval != ERROR_OK)
840                 return retval;
841
842         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
843                         armv8->cti_base + CTI_INACK, 1);
844         if (retval != ERROR_OK)
845                 return retval;
846
847
848         long long then = timeval_ms();
849         for (;; ) {
850                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
851                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
852                 if (retval != ERROR_OK)
853                         return retval;
854                 if ((dscr & DSCRV8_HALT_MASK) != 0)
855                         break;
856                 if (timeval_ms() > then + 1000) {
857                         LOG_ERROR("Timeout waiting for halt");
858                         return ERROR_FAIL;
859                 }
860         }
861
862         target->debug_reason = DBG_REASON_DBGRQ;
863
864         return ERROR_OK;
865 }
866
867 static int aarch64_internal_restore(struct target *target, int current,
868         uint64_t *address, int handle_breakpoints, int debug_execution)
869 {
870         struct armv8_common *armv8 = target_to_armv8(target);
871         struct arm *arm = &armv8->arm;
872         int retval;
873         uint64_t resume_pc;
874
875         if (!debug_execution)
876                 target_free_all_working_areas(target);
877
878         /* current = 1: continue on current pc, otherwise continue at <address> */
879         resume_pc = buf_get_u64(arm->pc->value, 0, 64);
880         if (!current)
881                 resume_pc = *address;
882         else
883                 *address = resume_pc;
884
885         /* Make sure that the Armv7 gdb thumb fixups do not
886          * kill the return address
887          */
888         switch (arm->core_state) {
889                 case ARM_STATE_ARM:
890                         resume_pc &= 0xFFFFFFFC;
891                         break;
892                 case ARM_STATE_AARCH64:
893                         resume_pc &= 0xFFFFFFFFFFFFFFFC;
894                         break;
895                 case ARM_STATE_THUMB:
896                 case ARM_STATE_THUMB_EE:
897                         /* When the return address is loaded into PC
898                          * bit 0 must be 1 to stay in Thumb state
899                          */
900                         resume_pc |= 0x1;
901                         break;
902                 case ARM_STATE_JAZELLE:
903                         LOG_ERROR("How do I resume into Jazelle state??");
904                         return ERROR_FAIL;
905         }
906         LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
907         buf_set_u64(arm->pc->value, 0, 64, resume_pc);
908         arm->pc->dirty = 1;
909         arm->pc->valid = 1;
910         dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);
911
912         /* call it now, before restoring context, because it uses cpu
913          * register r0 to restore the system control register */
914         retval = aarch64_restore_system_control_reg(target);
915         if (retval != ERROR_OK)
916                 return retval;
917         retval = aarch64_restore_context(target, handle_breakpoints);
918         if (retval != ERROR_OK)
919                 return retval;
920         target->debug_reason = DBG_REASON_NOTHALTED;
921         target->state = TARGET_RUNNING;
922
923         /* registers are now invalid */
924         register_cache_invalidate(arm->core_cache);
925
926 #if 0
927         /* the front-end may request us not to handle breakpoints */
928         if (handle_breakpoints) {
929                 /* Single step past breakpoint at current address */
930                 breakpoint = breakpoint_find(target, resume_pc);
931                 if (breakpoint) {
932                         LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
933                         cortex_m3_unset_breakpoint(target, breakpoint);
934                         cortex_m3_single_step_core(target);
935                         cortex_m3_set_breakpoint(target, breakpoint);
936                 }
937         }
938 #endif
939
940         return retval;
941 }
942
943 static int aarch64_internal_restart(struct target *target)
944 {
945         struct armv8_common *armv8 = target_to_armv8(target);
946         struct arm *arm = &armv8->arm;
947         int retval;
948         uint32_t dscr;
949         /*
950          * Restart core and wait for it to be started.  Clear ITRen and sticky
951          * exception flags: see ARMv7 ARM, C5.9.
952          *
953          * REVISIT: for single stepping, we probably want to
954          * disable IRQs by default, with optional override...
955          */
956
957         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
958                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
959         if (retval != ERROR_OK)
960                 return retval;
961
962         if ((dscr & DSCR_ITE) == 0)
963                 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
964
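        /* pulse CTI channel 1 to request a restart of the core */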
965         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
966                         armv8->cti_base + CTI_APPPULSE, 2);
967         if (retval != ERROR_OK)
968                 return retval;
969
970         long long then = timeval_ms();
971         for (;; ) {
972                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
973                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
974                 if (retval != ERROR_OK)
975                         return retval;
976                 if ((dscr & DSCR_HDE) != 0)
977                         break;
978                 if (timeval_ms() > then + 1000) {
979                         LOG_ERROR("Timeout waiting for resume");
980                         return ERROR_FAIL;
981                 }
982         }
983
984         target->debug_reason = DBG_REASON_NOTHALTED;
985         target->state = TARGET_RUNNING;
986
987         /* registers are now invalid */
988         register_cache_invalidate(arm->core_cache);
989
990         return ERROR_OK;
991 }
992
993 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
994 {
995         int retval = 0;
996         struct target_list *head;
997         struct target *curr;
998         uint64_t address;
999         head = target->head;
1000         while (head != (struct target_list *)NULL) {
1001                 curr = head->target;
1002                 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1003                         /*  resume at current address, not in step mode */
1004                         retval += aarch64_internal_restore(curr, 1, &address,
1005                                         handle_breakpoints, 0);
1006                         retval += aarch64_internal_restart(curr);
1007                 }
1008                 head = head->next;
1009
1010         }
1011         return retval;
1012 }
1013
1014 static int aarch64_resume(struct target *target, int current,
1015         target_addr_t address, int handle_breakpoints, int debug_execution)
1016 {
1017         int retval = 0;
1018         uint64_t addr = address;
1019
1020         /* dummy resume for smp toggle in order to reduce gdb impact  */
1021         if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1022                 /*   simulate a start and halt of target */
1023                 target->gdb_service->target = NULL;
1024                 target->gdb_service->core[0] = target->gdb_service->core[1];
1025                 /*  fake resume: at the next poll we switch to target core[1], see poll */
1026                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1027                 return 0;
1028         }
1029         aarch64_internal_restore(target, current, &addr, handle_breakpoints,
1030                                  debug_execution);
1031         if (target->smp) {
1032                 target->gdb_service->core[0] = -1;
1033                 retval = aarch64_restore_smp(target, handle_breakpoints);
1034                 if (retval != ERROR_OK)
1035                         return retval;
1036         }
1037         aarch64_internal_restart(target);
1038
1039         if (!debug_execution) {
1040                 target->state = TARGET_RUNNING;
1041                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1042                 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
1043         } else {
1044                 target->state = TARGET_DEBUG_RUNNING;
1045                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1046                 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
1047         }
1048
1049         return ERROR_OK;
1050 }
1051
1052 static int aarch64_debug_entry(struct target *target)
1053 {
1054         uint32_t dscr;
1055         int retval = ERROR_OK;
1056         struct aarch64_common *aarch64 = target_to_aarch64(target);
1057         struct armv8_common *armv8 = target_to_armv8(target);
1058         uint32_t tmp;
1059
1060         LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
1061
1062         /* REVISIT surely we should not re-read DSCR !! */
1063         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1064                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1065         if (retval != ERROR_OK)
1066                 return retval;
1067
1068         /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1069          * imprecise data aborts get discarded by issuing a Data
1070          * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1071          */
1072
1073         /* Enable the ITR execution once we are in debug mode */
1074         dscr |= DSCR_ITR_EN;
1075         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1076                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1077         if (retval != ERROR_OK)
1078                 return retval;
1079
1080         /* Examine debug reason */
1081         arm_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
1082         mem_ap_read_atomic_u32(armv8->debug_ap,
1083                                    armv8->debug_base + CPUV8_DBG_EDESR, &tmp);
1084         if ((tmp & 0x7) == 0x4)
1085                 target->debug_reason = DBG_REASON_SINGLESTEP;
1086
1087         /* save address of instruction that triggered the watchpoint? */
1088         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1089                 uint32_t wfar;
1090
1091                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1092                                 armv8->debug_base + CPUV8_DBG_WFAR0,
1093                                 &wfar);
1094                 if (retval != ERROR_OK)
1095                         return retval;
1096                 arm_dpm_report_wfar(&armv8->dpm, wfar);
1097         }
1098
1099         retval = armv8_dpm_read_current_registers(&armv8->dpm);
1100
1101         if (armv8->post_debug_entry) {
1102                 retval = armv8->post_debug_entry(target);
1103                 if (retval != ERROR_OK)
1104                         return retval;
1105         }
1106
1107         return retval;
1108 }
1109
1110 static int aarch64_post_debug_entry(struct target *target)
1111 {
1112         struct aarch64_common *aarch64 = target_to_aarch64(target);
1113         struct armv8_common *armv8 = &aarch64->armv8_common;
1114         struct armv8_mmu_common *armv8_mmu = &armv8->armv8_mmu;
1115         uint32_t sctlr_el1 = 0;
1116         int retval;
1117
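        /* clear the sticky error bit in DRCR (CSE, bit 2) before reading system registers */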
1118         mem_ap_write_atomic_u32(armv8->debug_ap,
1119                                 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
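        /* read SCTLR_EL1 into R0 and then over DCC: 0xd5381000 encodes "MRS X0, SCTLR_EL1" */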
1120         retval = aarch64_instr_read_data_r0(armv8->arm.dpm,
1121                                             0xd5381000, &sctlr_el1);
1122         if (retval != ERROR_OK)
1123                 return retval;
1124
1125         LOG_DEBUG("sctlr_el1 = %#8.8x", sctlr_el1);
1126         aarch64->system_control_reg = sctlr_el1;
1127         aarch64->system_control_reg_curr = sctlr_el1;
1128         aarch64->curr_mode = armv8->arm.core_mode;
1129
1130         armv8_mmu->mmu_enabled = sctlr_el1 & 0x1U ? 1 : 0;
1131         armv8_mmu->armv8_cache.d_u_cache_enabled = sctlr_el1 & 0x4U ? 1 : 0;
1132         armv8_mmu->armv8_cache.i_cache_enabled = sctlr_el1 & 0x1000U ? 1 : 0;
1133
1134 #if 0
1135         if (armv8->armv8_mmu.armv8_cache.ctype == -1)
1136                 armv8_identify_cache(target);
1137 #endif
1138
1139         return ERROR_OK;
1140 }
1141
1142 static int aarch64_step(struct target *target, int current, target_addr_t address,
1143         int handle_breakpoints)
1144 {
1145         struct armv8_common *armv8 = target_to_armv8(target);
1146         int retval;
1147         uint32_t tmp;
1148
1149         if (target->state != TARGET_HALTED) {
1150                 LOG_WARNING("target not halted");
1151                 return ERROR_TARGET_NOT_HALTED;
1152         }
1153
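        /* set the halting step bit (EDECR bit 2) so the core halts again after one instruction */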
1154         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1155                         armv8->debug_base + CPUV8_DBG_EDECR, &tmp);
1156         if (retval != ERROR_OK)
1157                 return retval;
1158
1159         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1160                         armv8->debug_base + CPUV8_DBG_EDECR, (tmp|0x4));
1161         if (retval != ERROR_OK)
1162                 return retval;
1163
1164         target->debug_reason = DBG_REASON_SINGLESTEP;
1165         retval = aarch64_resume(target, 1, address, 0, 0);
1166         if (retval != ERROR_OK)
1167                 return retval;
1168
1169         long long then = timeval_ms();
1170         while (target->state != TARGET_HALTED) {
1171                 mem_ap_read_atomic_u32(armv8->debug_ap,
1172                         armv8->debug_base + CPUV8_DBG_EDESR, &tmp);
1173                 LOG_DEBUG("DESR = %#x", tmp);
1174                 retval = aarch64_poll(target);
1175                 if (retval != ERROR_OK)
1176                         return retval;
1177                 if (timeval_ms() > then + 1000) {
1178                         LOG_ERROR("timeout waiting for target halt");
1179                         return ERROR_FAIL;
1180                 }
1181         }
1182
1183         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1184                         armv8->debug_base + CPUV8_DBG_EDECR, (tmp&(~0x4)));
1185         if (retval != ERROR_OK)
1186                 return retval;
1187
1188         target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1189         if (target->state == TARGET_HALTED)
1190                 LOG_DEBUG("target stepped");
1191
1192         return ERROR_OK;
1193 }
1194
1195 static int aarch64_restore_context(struct target *target, bool bpwp)
1196 {
1197         struct armv8_common *armv8 = target_to_armv8(target);
1198
1199         LOG_DEBUG(" ");
1200
1201         if (armv8->pre_restore_context)
1202                 armv8->pre_restore_context(target);
1203
1204         return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1205
1206 }
1207
1208 /*
1209  * AArch64 Breakpoint and watchpoint functions
1210  */
1211
1212 /* Setup hardware Breakpoint Register Pair */
1213 static int aarch64_set_breakpoint(struct target *target,
1214         struct breakpoint *breakpoint, uint8_t matchmode)
1215 {
1216         int retval;
1217         int brp_i = 0;
1218         uint32_t control;
1219         uint8_t byte_addr_select = 0x0F;
1220         struct aarch64_common *aarch64 = target_to_aarch64(target);
1221         struct armv8_common *armv8 = &aarch64->armv8_common;
1222         struct aarch64_brp *brp_list = aarch64->brp_list;
1223         uint32_t dscr;
1224
1225         if (breakpoint->set) {
1226                 LOG_WARNING("breakpoint already set");
1227                 return ERROR_OK;
1228         }
1229
1230         if (breakpoint->type == BKPT_HARD) {
1231                 int64_t bpt_value;
1232                 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1233                         brp_i++;
1234                 if (brp_i >= aarch64->brp_num) {
1235                         LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1236                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1237                 }
1238                 breakpoint->set = brp_i + 1;
1239                 if (breakpoint->length == 2)
1240                         byte_addr_select = (3 << (breakpoint->address & 0x02));
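                /* assemble DBGBCR: breakpoint type in [23:20], HMC bit 13,
                 * byte address select in [8:5], privilege control in [2:1], enable bit 0 */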
1241                 control = ((matchmode & 0x7) << 20)
1242                         | (1 << 13)
1243                         | (byte_addr_select << 5)
1244                         | (3 << 1) | 1;
1245                 brp_list[brp_i].used = 1;
1246                 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1247                 brp_list[brp_i].control = control;
1248                 bpt_value = brp_list[brp_i].value;
1249
1250                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1251                                 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1252                                 (uint32_t)(bpt_value & 0xFFFFFFFF));
1253                 if (retval != ERROR_OK)
1254                         return retval;
1255                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1256                                 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1257                                 (uint32_t)(bpt_value >> 32));
1258                 if (retval != ERROR_OK)
1259                         return retval;
1260
1261                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1262                                 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1263                                 brp_list[brp_i].control);
1264                 if (retval != ERROR_OK)
1265                         return retval;
1266                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1267                         brp_list[brp_i].control,
1268                         brp_list[brp_i].value);
1269
1270         } else if (breakpoint->type == BKPT_SOFT) {
1271                 uint8_t code[4];
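                /* 0xD4400000 is the A64 "HLT #0" instruction, used as the software breakpoint */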
1272                 buf_set_u32(code, 0, 32, 0xD4400000);
1273
1274                 retval = target_read_memory(target,
1275                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1276                                 breakpoint->length, 1,
1277                                 breakpoint->orig_instr);
1278                 if (retval != ERROR_OK)
1279                         return retval;
1280                 retval = target_write_memory(target,
1281                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1282                                 breakpoint->length, 1, code);
1283                 if (retval != ERROR_OK)
1284                         return retval;
1285                 breakpoint->set = 0x11; /* Any nice value but 0 */
1286         }
1287
1288         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1289                                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1290         /* Ensure that halting debug mode is enabled */
1291         dscr = dscr | DSCR_HDE;
1292         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1293                                          armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1294         if (retval != ERROR_OK) {
1295                 LOG_DEBUG("Failed to set DSCR.HDE");
1296                 return retval;
1297         }
1298
1299         return ERROR_OK;
1300 }
1301
1302 static int aarch64_set_context_breakpoint(struct target *target,
1303         struct breakpoint *breakpoint, uint8_t matchmode)
1304 {
1305         int retval = ERROR_FAIL;
1306         int brp_i = 0;
1307         uint32_t control;
1308         uint8_t byte_addr_select = 0x0F;
1309         struct aarch64_common *aarch64 = target_to_aarch64(target);
1310         struct armv8_common *armv8 = &aarch64->armv8_common;
1311         struct aarch64_brp *brp_list = aarch64->brp_list;
1312
1313         if (breakpoint->set) {
1314                 LOG_WARNING("breakpoint already set");
1315                 return retval;
1316         }
1317         /* check for an available context BRP */
1318         while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
1319                 (brp_list[brp_i].type != BRP_CONTEXT)))
1320                 brp_i++;
1321
1322         if (brp_i >= aarch64->brp_num) {
1323                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1324                 return ERROR_FAIL;
1325         }
1326
1327         breakpoint->set = brp_i + 1;
1328         control = ((matchmode & 0x7) << 20)
1329                 | (1 << 13)
1330                 | (byte_addr_select << 5)
1331                 | (3 << 1) | 1;
1332         brp_list[brp_i].used = 1;
1333         brp_list[brp_i].value = (breakpoint->asid);
1334         brp_list[brp_i].control = control;
1335         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1336                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1337                         brp_list[brp_i].value);
1338         if (retval != ERROR_OK)
1339                 return retval;
1340         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1341                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1342                         brp_list[brp_i].control);
1343         if (retval != ERROR_OK)
1344                 return retval;
1345         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1346                 brp_list[brp_i].control,
1347                 brp_list[brp_i].value);
1348         return ERROR_OK;
1349
1350 }
1351
1352 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1353 {
1354         int retval = ERROR_FAIL;
1355         int brp_1 = 0;  /* holds the contextID pair */
1356         int brp_2 = 0;  /* holds the IVA pair */
1357         uint32_t control_CTX, control_IVA;
1358         uint8_t CTX_byte_addr_select = 0x0F;
1359         uint8_t IVA_byte_addr_select = 0x0F;
1360         uint8_t CTX_machmode = 0x03;
1361         uint8_t IVA_machmode = 0x01;
1362         struct aarch64_common *aarch64 = target_to_aarch64(target);
1363         struct armv8_common *armv8 = &aarch64->armv8_common;
1364         struct aarch64_brp *brp_list = aarch64->brp_list;
1365
1366         if (breakpoint->set) {
1367                 LOG_WARNING("breakpoint already set");
1368                 return retval;
1369         }
1370         /* check for an available context BRP */
1371         while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
1372                 (brp_list[brp_1].type != BRP_CONTEXT)))
1373                 brp_1++;
1374
1375         LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1376         if (brp_1 >= aarch64->brp_num) {
1377                 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1378                 return ERROR_FAIL;
1379         }
1380
1381         while ((brp_2 < aarch64->brp_num) &&
1382                 (brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
1383                 brp_2++;
1384
1385         LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1386         if (brp_2 >= aarch64->brp_num) {
1387                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1388                 return ERROR_FAIL;
1389         }
1390
1391         breakpoint->set = brp_1 + 1;
1392         breakpoint->linked_BRP = brp_2;
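        /* A hybrid breakpoint links two BRPs: the context-matching BRP holds
         * the ASID and the address-matching BRP holds the IVA; each control
         * word carries the partner's BRP number in the LBN field (bits
         * [19:16]), so the address match only fires when the context ID
         * matches as well.
         */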
1393         control_CTX = ((CTX_matchmode & 0x7) << 20)
1394                 | (brp_2 << 16)
1395                 | (0 << 14)
1396                 | (CTX_byte_addr_select << 5)
1397                 | (3 << 1) | 1;
1398         brp_list[brp_1].used = 1;
1399         brp_list[brp_1].value = (breakpoint->asid);
1400         brp_list[brp_1].control = control_CTX;
1401         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1402                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1403                         brp_list[brp_1].value);
1404         if (retval != ERROR_OK)
1405                 return retval;
1406         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1407                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1408                         brp_list[brp_1].control);
1409         if (retval != ERROR_OK)
1410                 return retval;
1411
1412         control_IVA = ((IVA_matchmode & 0x7) << 20)
1413                 | (brp_1 << 16)
1414                 | (1 << 13)
1415                 | (IVA_byte_addr_select << 5)
1416                 | (3 << 1) | 1;
1417         brp_list[brp_2].used = 1;
1418         brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1419         brp_list[brp_2].control = control_IVA;
1420         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1421                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1422                         brp_list[brp_2].value & 0xFFFFFFFF);
1423         if (retval != ERROR_OK)
1424                 return retval;
1425         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1426                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1427                         brp_list[brp_2].value >> 32);
1428         if (retval != ERROR_OK)
1429                 return retval;
1430         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1431                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1432                         brp_list[brp_2].control);
1433         if (retval != ERROR_OK)
1434                 return retval;
1435
1436         return ERROR_OK;
1437 }
1438
1439 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1440 {
1441         int retval;
1442         struct aarch64_common *aarch64 = target_to_aarch64(target);
1443         struct armv8_common *armv8 = &aarch64->armv8_common;
1444         struct aarch64_brp *brp_list = aarch64->brp_list;
1445
1446         if (!breakpoint->set) {
1447                 LOG_WARNING("breakpoint not set");
1448                 return ERROR_OK;
1449         }
1450
1451         if (breakpoint->type == BKPT_HARD) {
1452                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1453                         int brp_i = breakpoint->set - 1;
1454                         int brp_j = breakpoint->linked_BRP;
1455                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1456                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1457                                 return ERROR_OK;
1458                         }
1459                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1460                                 brp_list[brp_i].control, brp_list[brp_i].value);
1461                         brp_list[brp_i].used = 0;
1462                         brp_list[brp_i].value = 0;
1463                         brp_list[brp_i].control = 0;
1464                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1465                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1466                                         brp_list[brp_i].control);
1467                         if (retval != ERROR_OK)
1468                                 return retval;
1469                         if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1470                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1471                                 return ERROR_OK;
1472                         }
1473                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1474                                 brp_list[brp_j].control, brp_list[brp_j].value);
1475                         brp_list[brp_j].used = 0;
1476                         brp_list[brp_j].value = 0;
1477                         brp_list[brp_j].control = 0;
1478                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1479                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1480                                         brp_list[brp_j].control);
1481                         if (retval != ERROR_OK)
1482                                 return retval;
1483                         breakpoint->linked_BRP = 0;
1484                         breakpoint->set = 0;
1485                         return ERROR_OK;
1486
1487                 } else {
1488                         int brp_i = breakpoint->set - 1;
1489                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1490                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1491                                 return ERROR_OK;
1492                         }
1493                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1494                                 brp_list[brp_i].control, brp_list[brp_i].value);
1495                         brp_list[brp_i].used = 0;
1496                         brp_list[brp_i].value = 0;
1497                         brp_list[brp_i].control = 0;
1498                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1499                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1500                                         brp_list[brp_i].control);
1501                         if (retval != ERROR_OK)
1502                                 return retval;
1503                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1504                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1505                                         brp_list[brp_i].value);
1506                         if (retval != ERROR_OK)
1507                                 return retval;
1508                         breakpoint->set = 0;
1509                         return ERROR_OK;
1510                 }
1511         } else {
1512                 /* restore original instruction (kept in target endianness) */
1513                 if (breakpoint->length == 4) {
1514                         retval = target_write_memory(target,
1515                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1516                                         4, 1, breakpoint->orig_instr);
1517                         if (retval != ERROR_OK)
1518                                 return retval;
1519                 } else {
1520                         retval = target_write_memory(target,
1521                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1522                                         2, 1, breakpoint->orig_instr);
1523                         if (retval != ERROR_OK)
1524                                 return retval;
1525                 }
1526         }
1527         breakpoint->set = 0;
1528
1529         return ERROR_OK;
1530 }
1531
1532 static int aarch64_add_breakpoint(struct target *target,
1533         struct breakpoint *breakpoint)
1534 {
1535         struct aarch64_common *aarch64 = target_to_aarch64(target);
1536
1537         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1538                 LOG_INFO("no hardware breakpoint available");
1539                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1540         }
1541
1542         if (breakpoint->type == BKPT_HARD)
1543                 aarch64->brp_num_available--;
1544
1545         return aarch64_set_breakpoint(target, breakpoint, 0x00);        /* Exact match */
1546 }
1547
1548 static int aarch64_add_context_breakpoint(struct target *target,
1549         struct breakpoint *breakpoint)
1550 {
1551         struct aarch64_common *aarch64 = target_to_aarch64(target);
1552
1553         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1554                 LOG_INFO("no hardware breakpoint available");
1555                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1556         }
1557
1558         if (breakpoint->type == BKPT_HARD)
1559                 aarch64->brp_num_available--;
1560
1561         return aarch64_set_context_breakpoint(target, breakpoint, 0x02);        /* asid match */
1562 }
1563
1564 static int aarch64_add_hybrid_breakpoint(struct target *target,
1565         struct breakpoint *breakpoint)
1566 {
1567         struct aarch64_common *aarch64 = target_to_aarch64(target);
1568
1569         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1570                 LOG_INFO("no hardware breakpoint available");
1571                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1572         }
1573
1574         if (breakpoint->type == BKPT_HARD)
1575                 aarch64->brp_num_available--;
1576
1577         return aarch64_set_hybrid_breakpoint(target, breakpoint);       /* context ID + IVA match */
1578 }
1579
1580
1581 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1582 {
1583         struct aarch64_common *aarch64 = target_to_aarch64(target);
1584
1585 #if 0
1586 /* It is perfectly possible to remove breakpoints while the target is running */
1587         if (target->state != TARGET_HALTED) {
1588                 LOG_WARNING("target not halted");
1589                 return ERROR_TARGET_NOT_HALTED;
1590         }
1591 #endif
1592
1593         if (breakpoint->set) {
1594                 aarch64_unset_breakpoint(target, breakpoint);
1595                 if (breakpoint->type == BKPT_HARD)
1596                         aarch64->brp_num_available++;
1597         }
1598
1599         return ERROR_OK;
1600 }
1601
1602 /*
1603  * AArch64 reset functions
1604  */
1605
1606 static int aarch64_assert_reset(struct target *target)
1607 {
1608         struct armv8_common *armv8 = target_to_armv8(target);
1609
1610         LOG_DEBUG(" ");
1611
1612         /* FIXME when halt is requested, make it work somehow... */
1613
1614         /* Issue some kind of warm reset. */
1615         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1616                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1617         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1618                 /* REVISIT handle "pulls" cases, if there's
1619                  * hardware that needs them to work.
1620                  */
1621                 jtag_add_reset(0, 1);
1622         } else {
1623                 LOG_ERROR("%s: how to reset?", target_name(target));
1624                 return ERROR_FAIL;
1625         }
1626
1627         /* registers are now invalid */
1628         register_cache_invalidate(armv8->arm.core_cache);
1629
1630         target->state = TARGET_RESET;
1631
1632         return ERROR_OK;
1633 }
1634
1635 static int aarch64_deassert_reset(struct target *target)
1636 {
1637         int retval;
1638
1639         LOG_DEBUG(" ");
1640
1641         /* be certain SRST is off */
1642         jtag_add_reset(0, 0);
1643
1644         retval = aarch64_poll(target);
1645         if (retval != ERROR_OK)
1646                 return retval;
1647
1648         if (target->reset_halt) {
1649                 if (target->state != TARGET_HALTED) {
1650                         LOG_WARNING("%s: ran after reset and before halt ...",
1651                                 target_name(target));
1652                         retval = target_halt(target);
1653                         if (retval != ERROR_OK)
1654                                 return retval;
1655                 }
1656         }
1657
1658         return ERROR_OK;
1659 }
1660
1661 static int aarch64_write_apb_ap_memory(struct target *target,
1662         uint64_t address, uint32_t size,
1663         uint32_t count, const uint8_t *buffer)
1664 {
1665         /* write memory through APB-AP */
1666         int retval = ERROR_COMMAND_SYNTAX_ERROR;
1667         struct armv8_common *armv8 = target_to_armv8(target);
1668         struct arm *arm = &armv8->arm;
1669         int total_bytes = count * size;
1670         int total_u32;
1671         int start_byte = address & 0x3;
1672         int end_byte   = (address + total_bytes) & 0x3;
1673         struct reg *reg;
1674         uint32_t dscr;
1675         uint8_t *tmp_buff = NULL;
1676
1677         LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count %" PRIu32,
1678                           address, size, count);
1679         if (target->state != TARGET_HALTED) {
1680                 LOG_WARNING("target not halted");
1681                 return ERROR_TARGET_NOT_HALTED;
1682         }
1683
1684         total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1685
1686         /* Mark registers X0 and X1 as dirty, as they will be used
1687          * for transferring the data.
1688          * They will be restored automatically when exiting
1689          * debug mode
1690          */
1691         reg = armv8_reg_current(arm, 1);
1692         reg->dirty = true;
1693
1694         reg = armv8_reg_current(arm, 0);
1695         reg->dirty = true;
1696
1697         /*  clear any abort  */
1698         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1699                         armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1700         if (retval != ERROR_OK)
1701                 return retval;
1702
1703
1704         /* This algorithm comes from DDI0487A.g, chapter J9.1 */
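        /* Roughly: X0 is loaded with the (word-aligned) target address and
         * EDSCR.MA is set; while in memory access mode every word pushed into
         * DBGDTRRX_EL0 is stored by the PE at [X0], which is post-incremented
         * by 4, so the whole aligned buffer can be streamed with one
         * non-incrementing MEM-AP write to the DTRRX location.
         */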
1705
1706         /* The algorithm only copies 32 bit words, so the buffer
1707          * should be expanded to include the words at either end.
1708          * The first and last words will be read first to avoid
1709          * corruption if needed.
1710          */
1711         tmp_buff = malloc(total_u32 * 4);
             if (tmp_buff == NULL)
                     return ERROR_FAIL;
1712
1713         if ((start_byte != 0) && (total_u32 > 1)) {
1714                 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1715                  * the other bytes in the word.
1716                  */
1717                 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1718                 if (retval != ERROR_OK)
1719                         goto error_free_buff_w;
1720         }
1721
1722         /* If end of write is not aligned, or the write is less than 4 bytes */
1723         if ((end_byte != 0) ||
1724                 ((total_u32 == 1) && (total_bytes != 4))) {
1725
1726                 /* Read the last word to avoid corruption during 32 bit write */
1727                 int mem_offset = (total_u32-1) * 4;
1728                 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1729                 if (retval != ERROR_OK)
1730                         goto error_free_buff_w;
1731         }
1732
1733         /* Copy the write buffer over the top of the temporary buffer */
1734         memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1735
1736         /* We now have a 32 bit aligned buffer that can be written */
1737
1738         /* Read DSCR */
1739         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1740                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1741         if (retval != ERROR_OK)
1742                 goto error_free_buff_w;
1743
1744         /* Set Normal access mode  */
1745         dscr = (dscr & ~DSCR_MA);
1746         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1747                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1748
1749         if (arm->core_state == ARM_STATE_AARCH64) {
1750                 /* Write X0 with value 'address' using write procedure */
1751                 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1752                 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1753                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1754                 retval += aarch64_exec_opcode(target,
1755                                 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1756         } else {
1757                 /* Write R0 with value 'address' using write procedure */
1758                 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1759                 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1760                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1761                 retval += aarch64_exec_opcode(target,
1762                                 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1763
1764         }
1765         /* Step 1.d   - Change DCC to memory mode */
1766         dscr = dscr | DSCR_MA;
1767         retval +=  mem_ap_write_atomic_u32(armv8->debug_ap,
1768                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1769         if (retval != ERROR_OK)
1770                 goto error_unset_dtr_w;
1771
1772
1773         /* Step 2.a   - Do the write */
1774         retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1775                                         tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1776         if (retval != ERROR_OK)
1777                 goto error_unset_dtr_w;
1778
1779         /* Step 3.a   - Switch DTR mode back to Normal mode */
1780         dscr = (dscr & ~DSCR_MA);
1781         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1782                                 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1783         if (retval != ERROR_OK)
1784                 goto error_unset_dtr_w;
1785
1786         /* Check for sticky abort flags in the DSCR */
1787         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1788                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1789         if (retval != ERROR_OK)
1790                 goto error_free_buff_w;
1791         if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1792                 /* Abort occurred - clear it and exit */
1793                 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1794                 mem_ap_write_atomic_u32(armv8->debug_ap,
1795                                         armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1796                 goto error_free_buff_w;
1797         }
1798
1799         /* Done */
1800         free(tmp_buff);
1801         return ERROR_OK;
1802
1803 error_unset_dtr_w:
1804         /* Unset DTR mode */
1805         mem_ap_read_atomic_u32(armv8->debug_ap,
1806                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1807         dscr = (dscr & ~DSCR_MA);
1808         mem_ap_write_atomic_u32(armv8->debug_ap,
1809                                 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1810 error_free_buff_w:
1811         LOG_ERROR("error while writing memory through APB-AP");
1812         free(tmp_buff);
1813         return ERROR_FAIL;
1814 }
1815
1816 static int aarch64_read_apb_ap_memory(struct target *target,
1817         target_addr_t address, uint32_t size,
1818         uint32_t count, uint8_t *buffer)
1819 {
1820         /* read memory through APB-AP */
1821         int retval = ERROR_COMMAND_SYNTAX_ERROR;
1822         struct armv8_common *armv8 = target_to_armv8(target);
1823         struct arm *arm = &armv8->arm;
1824         int total_bytes = count * size;
1825         int total_u32;
1826         int start_byte = address & 0x3;
1827         int end_byte   = (address + total_bytes) & 0x3;
1828         struct reg *reg;
1829         uint32_t dscr;
1830         uint8_t *tmp_buff = NULL;
1831         uint8_t *u8buf_ptr;
1832         uint32_t value;
1833
1834         LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count %" PRIu32,
1835                           address, size, count);
1836         if (target->state != TARGET_HALTED) {
1837                 LOG_WARNING("target not halted");
1838                 return ERROR_TARGET_NOT_HALTED;
1839         }
1840
1841         total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1842         /* Mark registers X0 and X1 as dirty, as they will be used
1843          * for transferring the data.
1844          * They will be restored automatically when exiting
1845          * debug mode
1846          */
1847         reg = armv8_reg_current(arm, 1);
1848         reg->dirty = true;
1849
1850         reg = armv8_reg_current(arm, 0);
1851         reg->dirty = true;
1852
1853         /*      clear any abort  */
1854         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1855                                 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1856         if (retval != ERROR_OK)
1857                 goto error_free_buff_r;
1858
1859         /* Read DSCR */
1860         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1861                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1862
1863         /* This algorithm comes from DDI0487A.g, chapter J9.1 */
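        /* Roughly: X0 is loaded with the (word-aligned) address and EDSCR.MA
         * is set; in memory access mode every read of DBGDTRTX_EL0 makes the
         * PE load the word at [X0], post-increment X0 by 4 and return the
         * value, which is why only total_u32 - 1 words are drained with MA
         * set and the final word is collected after MA is cleared again.
         */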
1864
1865         /* Set Normal access mode  */
1866         dscr = (dscr & ~DSCR_MA);
1867         retval +=  mem_ap_write_atomic_u32(armv8->debug_ap,
1868                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1869
1870         if (arm->core_state == ARM_STATE_AARCH64) {
1871                 /* Write X0 with value 'address' using write procedure */
1872                 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1873                 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1874                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1875                 retval += aarch64_exec_opcode(target, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1876                 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1877                 retval += aarch64_exec_opcode(target, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1878                 /* Step 1.e - Change DCC to memory mode */
1879                 dscr = dscr | DSCR_MA;
1880                 retval +=  mem_ap_write_atomic_u32(armv8->debug_ap,
1881                                 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1882                 /* Step 1.f - read DBGDTRTX and discard the value */
1883                 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1884                                 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1885         } else {
1886                 /* Write R0 with value 'address' using write procedure */
1887                 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1888                 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1889                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1890                 retval += aarch64_exec_opcode(target,
1891                                 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1892                 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1893                 retval += aarch64_exec_opcode(target,
1894                                 T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr);
1895                 /* Step 1.e - Change DCC to memory mode */
1896                 dscr = dscr | DSCR_MA;
1897                 retval +=  mem_ap_write_atomic_u32(armv8->debug_ap,
1898                                 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1899                 /* Step 1.f - read DBGDTRTX and discard the value */
1900                 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1901                                 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1902
1903         }
1904         if (retval != ERROR_OK)
1905                 goto error_unset_dtr_r;
1906
1907         /* Optimize the read as much as we can, either way we read in a single pass  */
1908         if ((start_byte) || (end_byte)) {
1909                 /* The algorithm only copies 32 bit words, so the buffer
1910                  * should be expanded to include the words at either end.
1911                  * The first and last words will be read into a temp buffer
1912                  * to avoid corruption
1913                  */
1914                 tmp_buff = malloc(total_u32 * 4);
1915                 if (!tmp_buff)
1916                         goto error_unset_dtr_r;
1917
1918                 /* use the tmp buffer to read the entire data */
1919                 u8buf_ptr = tmp_buff;
1920         } else
1921                 /* address and read length are aligned so read directly into the passed buffer */
1922                 u8buf_ptr = buffer;
1923
1924         /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1925          * Abort flags are sticky, so can be read at end of transactions
1926          *
1927          * This data is read in aligned to 32 bit boundary.
1928          */
1929
1930         /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1931          * increments X0 by 4. */
1932         retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
1933                                                                         armv8->debug_base + CPUV8_DBG_DTRTX);
1934         if (retval != ERROR_OK)
1935                 goto error_unset_dtr_r;
1936
1937         /* Step 3.a - set DTR access mode back to Normal mode   */
1938         dscr = (dscr & ~DSCR_MA);
1939         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1940                                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1941         if (retval != ERROR_OK)
1942                 goto error_free_buff_r;
1943
1944         /* Step 3.b - read DBGDTRTX for the final value */
1945         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1946                         armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1947         memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);
1948
1949         /* Check for sticky abort flags in the DSCR */
1950         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1951                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1952         if (retval != ERROR_OK)
1953                 goto error_free_buff_r;
1954         if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1955                 /* Abort occurred - clear it and exit */
1956                 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1957                 mem_ap_write_atomic_u32(armv8->debug_ap,
1958                                         armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1959                 goto error_free_buff_r;
1960         }
1961
1962         /* check if we need to copy aligned data by applying any shift necessary */
1963         if (tmp_buff) {
1964                 memcpy(buffer, tmp_buff + start_byte, total_bytes);
1965                 free(tmp_buff);
1966         }
1967
1968         /* Done */
1969         return ERROR_OK;
1970
1971 error_unset_dtr_r:
1972         /* Unset DTR mode */
1973         mem_ap_read_atomic_u32(armv8->debug_ap,
1974                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1975         dscr = (dscr & ~DSCR_MA);
1976         mem_ap_write_atomic_u32(armv8->debug_ap,
1977                                 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1978 error_free_buff_r:
1979         LOG_ERROR("error while reading memory through APB-AP");
1980         free(tmp_buff);
1981         return ERROR_FAIL;
1982 }
1983
1984 static int aarch64_read_phys_memory(struct target *target,
1985         target_addr_t address, uint32_t size,
1986         uint32_t count, uint8_t *buffer)
1987 {
1988         struct armv8_common *armv8 = target_to_armv8(target);
1989         int retval = ERROR_COMMAND_SYNTAX_ERROR;
1990         struct adiv5_dap *swjdp = armv8->arm.dap;
1991         uint8_t apsel = swjdp->apsel;
1992         LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
1993                 address, size, count);
1994
1995         if (count && buffer) {
1996
1997                 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
1998
1999                         /* read memory through AHB-AP */
2000                         retval = mem_ap_read_buf(armv8->memory_ap, buffer, size, count, address);
2001                 } else {
2002                         /* read memory through APB-AP */
2003                         retval = aarch64_mmu_modify(target, 0);
2004                         if (retval != ERROR_OK)
2005                                 return retval;
2006                         retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2007                 }
2008         }
2009         return retval;
2010 }
2011
2012 static int aarch64_read_memory(struct target *target, target_addr_t address,
2013         uint32_t size, uint32_t count, uint8_t *buffer)
2014 {
2015         int mmu_enabled = 0;
2016         target_addr_t virt, phys;
2017         int retval;
2018         struct armv8_common *armv8 = target_to_armv8(target);
2019         struct adiv5_dap *swjdp = armv8->arm.dap;
2020         uint8_t apsel = swjdp->apsel;
2021
2022         /* aarch64 handles unaligned memory access */
2023         LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2024                 size, count);
2025
2026         /* determine if MMU was enabled on target stop */
2027         if (!armv8->is_armv7r) {
2028                 retval = aarch64_mmu(target, &mmu_enabled);
2029                 if (retval != ERROR_OK)
2030                         return retval;
2031         }
2032
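        /* Route the access: go through the AHB-AP (physical) when it is
         * available and selected, translating the address first if the MMU
         * is on; otherwise fall back to the slower APB-AP path through the
         * core, re-enabling the MMU in case a previous physical access had
         * turned it off.
         */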
2033         if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2034                 if (mmu_enabled) {
2035                         virt = address;
2036                         retval = aarch64_virt2phys(target, virt, &phys);
2037                         if (retval != ERROR_OK)
2038                                 return retval;
2039
2040                         LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR,
2041                                   virt, phys);
2042                         address = phys;
2043                 }
2044                 retval = aarch64_read_phys_memory(target, address, size, count,
2045                                                   buffer);
2046         } else {
2047                 if (mmu_enabled) {
2048                         retval = aarch64_check_address(target, address);
2049                         if (retval != ERROR_OK)
2050                                 return retval;
2051                         /* enable MMU as we could have disabled it for phys
2052                            access */
2053                         retval = aarch64_mmu_modify(target, 1);
2054                         if (retval != ERROR_OK)
2055                                 return retval;
2056                 }
2057                 retval = aarch64_read_apb_ap_memory(target, address, size,
2058                                                     count, buffer);
2059         }
2060         return retval;
2061 }
2062
2063 static int aarch64_write_phys_memory(struct target *target,
2064         target_addr_t address, uint32_t size,
2065         uint32_t count, const uint8_t *buffer)
2066 {
2067         struct armv8_common *armv8 = target_to_armv8(target);
2068         struct adiv5_dap *swjdp = armv8->arm.dap;
2069         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2070         uint8_t apsel = swjdp->apsel;
2071
2072         LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2073                 size, count);
2074
2075         if (count && buffer) {
2076
2077                 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2078
2079                         /* write memory through AHB-AP */
2080                         retval = mem_ap_write_buf(armv8->memory_ap, buffer, size, count, address);
2081                 } else {
2082
2083                         /* write memory through APB-AP */
2084                         if (!armv8->is_armv7r) {
2085                                 retval = aarch64_mmu_modify(target, 0);
2086                                 if (retval != ERROR_OK)
2087                                         return retval;
2088                         }
2089                         return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2090                 }
2091         }
2092
2093
2094         /* REVISIT this op is generic ARMv7-A/R stuff */
2095         if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2096                 struct arm_dpm *dpm = armv8->arm.dpm;
2097
2098                 retval = dpm->prepare(dpm);
2099                 if (retval != ERROR_OK)
2100                         return retval;
2101
2102                 /* The Cache handling will NOT work with MMU active, the
2103                  * wrong addresses will be invalidated!
2104                  *
2105                  * For both ICache and DCache, walk all cache lines in the
2106                  * address range. Cortex-A8 has fixed 64 byte line length.
2107                  *
2108                  * REVISIT per ARMv7, these may trigger watchpoints ...
2109                  */
2110
2111                 /* invalidate I-Cache */
2112                 if (armv8->armv8_mmu.armv8_cache.i_cache_enabled) {
2113                         /* ICIMVAU - Invalidate Cache single entry
2114                          * with MVA to PoU
2115                          *      MCR p15, 0, r0, c7, c5, 1
2116                          */
2117                         for (uint32_t cacheline = address;
2118                                 cacheline < address + size * count;
2119                                 cacheline += 64) {
2120                                 retval = dpm->instr_write_data_r0(dpm,
2121                                                 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2122                                                 cacheline);
2123                                 if (retval != ERROR_OK)
2124                                         return retval;
2125                         }
2126                 }
2127
2128                 /* invalidate D-Cache */
2129                 if (armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
2130                         /* DCIMVAC - Invalidate data Cache line
2131                          * with MVA to PoC
2132                          *      MCR p15, 0, r0, c7, c6, 1
2133                          */
2134                         for (uint32_t cacheline = address;
2135                                 cacheline < address + size * count;
2136                                 cacheline += 64) {
2137                                 retval = dpm->instr_write_data_r0(dpm,
2138                                                 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2139                                                 cacheline);
2140                                 if (retval != ERROR_OK)
2141                                         return retval;
2142                         }
2143                 }
2144
2145                 /* (void) */ dpm->finish(dpm);
2146         }
2147
2148         return retval;
2149 }
2150
2151 static int aarch64_write_memory(struct target *target, target_addr_t address,
2152         uint32_t size, uint32_t count, const uint8_t *buffer)
2153 {
2154         int mmu_enabled = 0;
2155         target_addr_t virt, phys;
2156         int retval;
2157         struct armv8_common *armv8 = target_to_armv8(target);
2158         struct adiv5_dap *swjdp = armv8->arm.dap;
2159         uint8_t apsel = swjdp->apsel;
2160
2161         /* aarch64 handles unaligned memory access */
2162         LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
2163                   "; count %" PRId32, address, size, count);
2164
2165         /* determine if MMU was enabled on target stop */
2166         if (!armv8->is_armv7r) {
2167                 retval = aarch64_mmu(target, &mmu_enabled);
2168                 if (retval != ERROR_OK)
2169                         return retval;
2170         }
2171
2172         if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2173                 LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR "; size %"
2174                           PRId32 "; count %" PRId32, address, size, count);
2175                 if (mmu_enabled) {
2176                         virt = address;
2177                         retval = aarch64_virt2phys(target, virt, &phys);
2178                         if (retval != ERROR_OK)
2179                                 return retval;
2180
2181                         LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2182                                   TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR, virt, phys);
2183                         address = phys;
2184                 }
2185                 retval = aarch64_write_phys_memory(target, address, size,
2186                                 count, buffer);
2187         } else {
2188                 if (mmu_enabled) {
2189                         retval = aarch64_check_address(target, address);
2190                         if (retval != ERROR_OK)
2191                                 return retval;
2192                         /* enable MMU as we could have disabled it for phys access */
2193                         retval = aarch64_mmu_modify(target, 1);
2194                         if (retval != ERROR_OK)
2195                                 return retval;
2196                 }
2197                 retval = aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2198         }
2199         return retval;
2200 }
2201
2202 static int aarch64_handle_target_request(void *priv)
2203 {
2204         struct target *target = priv;
2205         struct armv8_common *armv8 = target_to_armv8(target);
2206         int retval;
2207
2208         if (!target_was_examined(target))
2209                 return ERROR_OK;
2210         if (!target->dbg_msg_enabled)
2211                 return ERROR_OK;
2212
2213         if (target->state == TARGET_RUNNING) {
2214                 uint32_t request;
2215                 uint32_t dscr;
2216                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2217                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2218
2219                 /* check if we have data */
2220                 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2221                         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2222                                         armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2223                         if (retval == ERROR_OK) {
2224                                 target_request(target, request);
2225                                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2226                                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2227                         }
2228                 }
2229         }
2230
2231         return ERROR_OK;
2232 }
2233
2234 static int aarch64_examine_first(struct target *target)
2235 {
2236         struct aarch64_common *aarch64 = target_to_aarch64(target);
2237         struct armv8_common *armv8 = &aarch64->armv8_common;
2238         struct adiv5_dap *swjdp = armv8->arm.dap;
2239         int retval = ERROR_OK;
2240         uint32_t pfr, debug, ctypr, ttypr, cpuid;
2241         int i;
2242
2243         /* Initialize the debug port before looking for and configuring
2244          * the access ports
2245          */
2246         retval = dap_dp_init(swjdp);
2247         if (retval != ERROR_OK)
2248                 return retval;
2249
2250         /* Search for the APB-AP - it is needed for access to debug registers */
2251         retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2252         if (retval != ERROR_OK) {
2253                 LOG_ERROR("Could not find APB-AP for debug access");
2254                 return retval;
2255         }
2256
2257         retval = mem_ap_init(armv8->debug_ap);
2258         if (retval != ERROR_OK) {
2259                 LOG_ERROR("Could not initialize the APB-AP");
2260                 return retval;
2261         }
2262
2263         armv8->debug_ap->memaccess_tck = 80;
2264
2265         /* Search for the AHB-AP */
2266         armv8->memory_ap_available = false;
2267         retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv8->memory_ap);
2268         if (retval == ERROR_OK) {
2269                 retval = mem_ap_init(armv8->memory_ap);
2270                 if (retval == ERROR_OK)
2271                         armv8->memory_ap_available = true;
2272         }
2273         if (retval != ERROR_OK) {
2274                 /* AHB-AP not found or unavailable - use the CPU */
2275                 LOG_DEBUG("No AHB-AP available for memory access");
2276         }
2277
2278
2279         if (!target->dbgbase_set) {
2280                 uint32_t dbgbase;
2281                 /* Get ROM Table base */
2282                 uint32_t apid;
2283                 int32_t coreidx = target->coreid;
2284                 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2285                 if (retval != ERROR_OK)
2286                         return retval;
2287                 /* Lookup 0x15 -- Processor DAP */
2288                 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2289                                 &armv8->debug_base, &coreidx);
2290                 if (retval != ERROR_OK)
2291                         return retval;
2292                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2293                           coreidx, armv8->debug_base);
2294         } else
2295                 armv8->debug_base = target->dbgbase;
2296
2297         LOG_DEBUG("Target ctibase is 0x%x", target->ctibase);
2298         if (target->ctibase == 0)
2299                 armv8->cti_base = target->ctibase = armv8->debug_base + 0x1000;
2300         else
2301                 armv8->cti_base = target->ctibase;
2302
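        /* Unlock the debug registers for memory-mapped access by writing the
         * CoreSight lock access key (0xC5ACCE55) to the LOCKACCESS (LAR)
         * register.
         */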
2303         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2304                         armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
2305         if (retval != ERROR_OK) {
2306                 LOG_DEBUG("Examine %s failed", "oslock");
2307                 return retval;
2308         }
2309
2310         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2311                         armv8->debug_base + 0x88, &cpuid);
2312         LOG_DEBUG("0x88 = %x", cpuid);
2313
2314         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2315                         armv8->debug_base + 0x314, &cpuid);
2316         LOG_DEBUG("0x314 = %x", cpuid);
2317
2318         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2319                         armv8->debug_base + 0x310, &cpuid);
2320         LOG_DEBUG("0x310 = %x", cpuid);
2321         if (retval != ERROR_OK)
2322                 return retval;
2323
2324         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2325                         armv8->debug_base + CPUDBG_CPUID, &cpuid);
2326         if (retval != ERROR_OK) {
2327                 LOG_DEBUG("Examine %s failed", "CPUID");
2328                 return retval;
2329         }
2330
2331         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2332                         armv8->debug_base + CPUDBG_CTYPR, &ctypr);
2333         if (retval != ERROR_OK) {
2334                 LOG_DEBUG("Examine %s failed", "CTYPR");
2335                 return retval;
2336         }
2337
2338         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2339                         armv8->debug_base + CPUDBG_TTYPR, &ttypr);
2340         if (retval != ERROR_OK) {
2341                 LOG_DEBUG("Examine %s failed", "TTYPR");
2342                 return retval;
2343         }
2344
2345         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2346                         armv8->debug_base + ID_AA64PFR0_EL1, &pfr);
2347         if (retval != ERROR_OK) {
2348                 LOG_DEBUG("Examine %s failed", "ID_AA64PFR0_EL1");
2349                 return retval;
2350         }
2351         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2352                         armv8->debug_base + ID_AA64DFR0_EL1, &debug);
2353         if (retval != ERROR_OK) {
2354                 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2355                 return retval;
2356         }
2357
2358         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2359         LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2360         LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2361         LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32, pfr);
2362         LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32, debug);
2363
2364         armv8->arm.core_type = ARM_MODE_MON;
2365         armv8->arm.core_state = ARM_STATE_AARCH64;
2366         retval = aarch64_dpm_setup(aarch64, debug);
2367         if (retval != ERROR_OK)
2368                 return retval;
2369
2370         /* Setup Breakpoint Register Pairs */
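        /* ID_AA64DFR0_EL1 encodes the counts minus one: BRPs in bits [15:12]
         * and CTX_CMPs (context-aware breakpoints) in bits [31:28].
         */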
2371         aarch64->brp_num = ((debug >> 12) & 0x0F) + 1;
2372         aarch64->brp_num_context = ((debug >> 28) & 0x0F) + 1;
2373
2374         /* hack - no context bpt support yet */
2375         aarch64->brp_num_context = 0;
2376
2377         aarch64->brp_num_available = aarch64->brp_num;
2378         aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2379         for (i = 0; i < aarch64->brp_num; i++) {
2380                 aarch64->brp_list[i].used = 0;
2381                 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2382                         aarch64->brp_list[i].type = BRP_NORMAL;
2383                 else
2384                         aarch64->brp_list[i].type = BRP_CONTEXT;
2385                 aarch64->brp_list[i].value = 0;
2386                 aarch64->brp_list[i].control = 0;
2387                 aarch64->brp_list[i].BRPn = i;
2388         }
2389
2390         LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2391
2392         target_set_examined(target);
2393         return ERROR_OK;
2394 }
2395
2396 static int aarch64_examine(struct target *target)
2397 {
2398         int retval = ERROR_OK;
2399
2400         /* don't re-probe hardware after each reset */
2401         if (!target_was_examined(target))
2402                 retval = aarch64_examine_first(target);
2403
2404         /* Configure core debug access */
2405         if (retval == ERROR_OK)
2406                 retval = aarch64_init_debug_access(target);
2407
2408         return retval;
2409 }
2410
2411 /*
2412  *      AArch64 target creation and initialization
2413  */
2414
2415 static int aarch64_init_target(struct command_context *cmd_ctx,
2416         struct target *target)
2417 {
2418         /* examine_first() does a bunch of this */
2419         return ERROR_OK;
2420 }
2421
2422 static int aarch64_init_arch_info(struct target *target,
2423         struct aarch64_common *aarch64, struct jtag_tap *tap)
2424 {
2425         struct armv8_common *armv8 = &aarch64->armv8_common;
2426         struct adiv5_dap *dap = armv8->arm.dap;
2427
2428         armv8->arm.dap = dap;
2429
2430         /* Setup struct aarch64_common */
2431         aarch64->common_magic = AARCH64_COMMON_MAGIC;
2432         /*  tap has no dap initialized */
2433         if (!tap->dap) {
2434                 tap->dap = dap_init();
2435
2436                 /* Leave (only) generic DAP stuff for debugport_init() */
2437                 tap->dap->tap = tap;
2438         }
2439
2440         armv8->arm.dap = tap->dap;
2441
2442         aarch64->fast_reg_read = 0;
2443
2444         /* register arch-specific functions */
2445         armv8->examine_debug_reason = NULL;
2446
2447         armv8->post_debug_entry = aarch64_post_debug_entry;
2448
2449         armv8->pre_restore_context = NULL;
2450
2451         armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2452
2453         /* REVISIT v7a setup should be in a v7a-specific routine */
2454         armv8_init_arch_info(target, armv8);
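        /* Poll for debug-channel (target_request) messages roughly every
         * millisecond while OpenOCD is running.
         */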
2455         target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2456
2457         return ERROR_OK;
2458 }
2459
2460 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2461 {
2462         struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2463
2464         aarch64->armv8_common.is_armv7r = false;
2465
2466         return aarch64_init_arch_info(target, aarch64, target->tap);
2467 }
2468
2469 static int aarch64_mmu(struct target *target, int *enabled)
2470 {
2471         if (target->state != TARGET_HALTED) {
2472                 LOG_ERROR("%s: target not halted", __func__);
2473                 return ERROR_TARGET_INVALID;
2474         }
2475
2476         *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2477         return ERROR_OK;
2478 }
2479
2480 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2481                              target_addr_t *phys)
2482 {
2483         int retval = ERROR_FAIL;
2484         struct armv8_common *armv8 = target_to_armv8(target);
2485         struct adiv5_dap *swjdp = armv8->arm.dap;
2486         uint8_t apsel = swjdp->apsel;
2487         if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2488                 uint32_t ret;
2489                 retval = armv8_mmu_translate_va(target,
2490                                 virt, &ret);
2491                 if (retval != ERROR_OK)
2492                         goto done;
2493                 *phys = ret;
2494         } else {/*  use this method if armv8->memory_ap is not selected;
2495                  *  the MMU must be enabled in order to get a correct translation */
2496                 retval = aarch64_mmu_modify(target, 1);
2497                 if (retval != ERROR_OK)
2498                         goto done;
2499                 retval = armv8_mmu_translate_va_pa(target, virt,  phys, 1);
2500         }
2501 done:
2502         return retval;
2503 }
2504
2505 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2506 {
2507         struct target *target = get_current_target(CMD_CTX);
2508         struct armv8_common *armv8 = target_to_armv8(target);
2509
2510         return armv8_handle_cache_info_command(CMD_CTX,
2511                         &armv8->armv8_mmu.armv8_cache);
2512 }
2513
2514
2515 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2516 {
2517         struct target *target = get_current_target(CMD_CTX);
2518         if (!target_was_examined(target)) {
2519                 LOG_ERROR("target not examined yet");
2520                 return ERROR_FAIL;
2521         }
2522
2523         return aarch64_init_debug_access(target);
2524 }
2525 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2526 {
2527         struct target *target = get_current_target(CMD_CTX);
2528         /* check target is an smp target */
2529         struct target_list *head;
2530         struct target *curr;
2531         head = target->head;
2532         target->smp = 0;
2533         if (head != (struct target_list *)NULL) {
2534                 while (head != (struct target_list *)NULL) {
2535                         curr = head->target;
2536                         curr->smp = 0;
2537                         head = head->next;
2538                 }
2539                 /*  fixes the target displayed to the debugger */
2540                 target->gdb_service->target = target;
2541         }
2542         return ERROR_OK;
2543 }
2544
2545 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2546 {
2547         struct target *target = get_current_target(CMD_CTX);
2548         struct target_list *head;
2549         struct target *curr;
2550         head = target->head;
2551         if (head != (struct target_list *)NULL) {
2552                 target->smp = 1;
2553                 while (head != (struct target_list *)NULL) {
2554                         curr = head->target;
2555                         curr->smp = 1;
2556                         head = head->next;
2557                 }
2558         }
2559         return ERROR_OK;
2560 }
2561
2562 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2563 {
2564         struct target *target = get_current_target(CMD_CTX);
2565         int retval = ERROR_OK;
2566         struct target_list *head;
2567         head = target->head;
2568         if (head != (struct target_list *)NULL) {
2569                 if (CMD_ARGC == 1) {
2570                         int coreid = 0;
2571                         COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2572                         if (ERROR_OK != retval)
2573                                 return retval;
2574                         target->gdb_service->core[1] = coreid;
2575
2576                 }
2577                 command_print(CMD_CTX, "gdb coreid  %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2578                         , target->gdb_service->core[1]);
2579         }
2580         return ERROR_OK;
2581 }
2582
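/* These handlers are chained under the "cortex_a" command group registered
 * below, so they are invoked e.g. as "cortex_a dbginit" or "cortex_a
 * smp_gdb <coreid>" from the OpenOCD command line or a configuration script.
 */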
2583 static const struct command_registration aarch64_exec_command_handlers[] = {
2584         {
2585                 .name = "cache_info",
2586                 .handler = aarch64_handle_cache_info_command,
2587                 .mode = COMMAND_EXEC,
2588                 .help = "display information about target caches",
2589                 .usage = "",
2590         },
2591         {
2592                 .name = "dbginit",
2593                 .handler = aarch64_handle_dbginit_command,
2594                 .mode = COMMAND_EXEC,
2595                 .help = "Initialize core debug",
2596                 .usage = "",
2597         },
2598         {       .name = "smp_off",
2599                 .handler = aarch64_handle_smp_off_command,
2600                 .mode = COMMAND_EXEC,
2601                 .help = "Stop smp handling",
2602                 .usage = "",
2603         },
2604         {
2605                 .name = "smp_on",
2606                 .handler = aarch64_handle_smp_on_command,
2607                 .mode = COMMAND_EXEC,
2608                 .help = "Restart smp handling",
2609                 .usage = "",
2610         },
2611         {
2612                 .name = "smp_gdb",
2613                 .handler = aarch64_handle_smp_gdb_command,
2614                 .mode = COMMAND_EXEC,
2615                 .help = "display or set the current core reported to gdb",
2616                 .usage = "",
2617         },
2618
2619
2620         COMMAND_REGISTRATION_DONE
2621 };
2622 static const struct command_registration aarch64_command_handlers[] = {
2623         {
2624                 .chain = arm_command_handlers,
2625         },
2626         {
2627                 .chain = armv8_command_handlers,
2628         },
2629         {
2630                 .name = "cortex_a",
2631                 .mode = COMMAND_ANY,
2632                 .help = "Cortex-A command group",
2633                 .usage = "",
2634                 .chain = aarch64_exec_command_handlers,
2635         },
2636         COMMAND_REGISTRATION_DONE
2637 };
2638
2639 struct target_type aarch64_target = {
2640         .name = "aarch64",
2641
2642         .poll = aarch64_poll,
2643         .arch_state = armv8_arch_state,
2644
2645         .halt = aarch64_halt,
2646         .resume = aarch64_resume,
2647         .step = aarch64_step,
2648
2649         .assert_reset = aarch64_assert_reset,
2650         .deassert_reset = aarch64_deassert_reset,
2651
2652         /* REVISIT allow exporting VFP3 registers ... */
2653         .get_gdb_reg_list = armv8_get_gdb_reg_list,
2654
2655         .read_memory = aarch64_read_memory,
2656         .write_memory = aarch64_write_memory,
2657
2658         .checksum_memory = arm_checksum_memory,
2659         .blank_check_memory = arm_blank_check_memory,
2660
2661         .run_algorithm = armv4_5_run_algorithm,
2662
2663         .add_breakpoint = aarch64_add_breakpoint,
2664         .add_context_breakpoint = aarch64_add_context_breakpoint,
2665         .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2666         .remove_breakpoint = aarch64_remove_breakpoint,
2667         .add_watchpoint = NULL,
2668         .remove_watchpoint = NULL,
2669
2670         .commands = aarch64_command_handlers,
2671         .target_create = aarch64_target_create,
2672         .init_target = aarch64_init_target,
2673         .examine = aarch64_examine,
2674
2675         .read_phys_memory = aarch64_read_phys_memory,
2676         .write_phys_memory = aarch64_write_phys_memory,
2677         .mmu = aarch64_mmu,
2678         .virt2phys = aarch64_virt2phys,
2679 };