1 /***************************************************************************
2  *   Copyright (C) 2015 by David Ung                                       *
3  *                                                                         *
4  *   This program is free software; you can redistribute it and/or modify  *
5  *   it under the terms of the GNU General Public License as published by  *
6  *   the Free Software Foundation; either version 2 of the License, or     *
7  *   (at your option) any later version.                                   *
8  *                                                                         *
9  *   This program is distributed in the hope that it will be useful,       *
10  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
11  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
12  *   GNU General Public License for more details.                          *
13  *                                                                         *
14  *   You should have received a copy of the GNU General Public License     *
15  *   along with this program; if not, write to the                         *
16  *   Free Software Foundation, Inc.,                                       *
17  *                                                                         *
18  ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 enum restart_mode {
34         RESTART_LAZY,
35         RESTART_SYNC,
36 };
37
38 enum halt_mode {
39         HALT_LAZY,
40         HALT_SYNC,
41 };
42
43 struct aarch64_private_config {
44         struct arm_cti *cti;
45 };
46
47 static int aarch64_poll(struct target *target);
48 static int aarch64_debug_entry(struct target *target);
49 static int aarch64_restore_context(struct target *target, bool bpwp);
50 static int aarch64_set_breakpoint(struct target *target,
51         struct breakpoint *breakpoint, uint8_t matchmode);
52 static int aarch64_set_context_breakpoint(struct target *target,
53         struct breakpoint *breakpoint, uint8_t matchmode);
54 static int aarch64_set_hybrid_breakpoint(struct target *target,
55         struct breakpoint *breakpoint);
56 static int aarch64_unset_breakpoint(struct target *target,
57         struct breakpoint *breakpoint);
58 static int aarch64_mmu(struct target *target, int *enabled);
59 static int aarch64_virt2phys(struct target *target,
60         target_addr_t virt, target_addr_t *phys);
61 static int aarch64_read_cpu_memory(struct target *target,
62         uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
63
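/* iterate over all entries of an SMP target list */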
64 #define foreach_smp_target(pos, head) \
65         for (pos = head; (pos != NULL); pos = pos->next)
66
67 static int aarch64_restore_system_control_reg(struct target *target)
68 {
69         enum arm_mode target_mode = ARM_MODE_ANY;
70         int retval = ERROR_OK;
71         uint32_t instr;
72
73         struct aarch64_common *aarch64 = target_to_aarch64(target);
74         struct armv8_common *armv8 = target_to_armv8(target);
75
76         if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
77                 aarch64->system_control_reg_curr = aarch64->system_control_reg;
78                 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
79
80                 switch (armv8->arm.core_mode) {
81                 case ARMV8_64_EL0T:
82                         target_mode = ARMV8_64_EL1H;
83                         /* fall through */
84                 case ARMV8_64_EL1T:
85                 case ARMV8_64_EL1H:
86                         instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
87                         break;
88                 case ARMV8_64_EL2T:
89                 case ARMV8_64_EL2H:
90                         instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
91                         break;
92                 case ARMV8_64_EL3H:
93                 case ARMV8_64_EL3T:
94                         instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
95                         break;
96
97                 case ARM_MODE_SVC:
98                 case ARM_MODE_ABT:
99                 case ARM_MODE_FIQ:
100                 case ARM_MODE_IRQ:
101                         instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
102                         break;
103
104                 default:
105                         LOG_INFO("cannot read system control register in this mode");
106                         return ERROR_FAIL;
107                 }
108
109                 if (target_mode != ARM_MODE_ANY)
110                         armv8_dpm_modeswitch(&armv8->dpm, target_mode);
111
112                 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
113                 if (retval != ERROR_OK)
114                         return retval;
115
116                 if (target_mode != ARM_MODE_ANY)
117                         armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
118         }
119
120         return retval;
121 }
122
123 /*  modify system_control_reg in order to enable or disable the MMU for:
124  *  - virt2phys address conversion
125  *  - reading or writing memory at physical or virtual addresses */
126 static int aarch64_mmu_modify(struct target *target, int enable)
127 {
128         struct aarch64_common *aarch64 = target_to_aarch64(target);
129         struct armv8_common *armv8 = &aarch64->armv8_common;
130         int retval = ERROR_OK;
131         uint32_t instr = 0;
132
133         if (enable) {
134                 /*      the MMU must already have been enabled when the target stopped */
135                 if (!(aarch64->system_control_reg & 0x1U)) {
136                         LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
137                         return ERROR_FAIL;
138                 }
139                 if (!(aarch64->system_control_reg_curr & 0x1U))
140                         aarch64->system_control_reg_curr |= 0x1U;
141         } else {
142                 if (aarch64->system_control_reg_curr & 0x4U) {
143                         /*  data cache is active */
144                         aarch64->system_control_reg_curr &= ~0x4U;
145                         /* flush the data cache via the armv8 cache callback */
146                         if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
147                                 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
148                 }
149                 if ((aarch64->system_control_reg_curr & 0x1U)) {
150                         aarch64->system_control_reg_curr &= ~0x1U;
151                 }
152         }
153
154         switch (armv8->arm.core_mode) {
155         case ARMV8_64_EL0T:
156         case ARMV8_64_EL1T:
157         case ARMV8_64_EL1H:
158                 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
159                 break;
160         case ARMV8_64_EL2T:
161         case ARMV8_64_EL2H:
162                 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
163                 break;
164         case ARMV8_64_EL3H:
165         case ARMV8_64_EL3T:
166                 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
167                 break;
168
169         case ARM_MODE_SVC:
170         case ARM_MODE_ABT:
171         case ARM_MODE_FIQ:
172         case ARM_MODE_IRQ:
173                 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
174                 break;
175
176         default:
177                 LOG_DEBUG("unknown cpu state 0x%" PRIx32, armv8->arm.core_mode);
178                 break;
179         }
180
181         retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
182                                 aarch64->system_control_reg_curr);
183         return retval;
184 }
185
186 /*
187  * Basic debug access, very low level; assumes state is saved
188  */
189 static int aarch64_init_debug_access(struct target *target)
190 {
191         struct armv8_common *armv8 = target_to_armv8(target);
192         int retval;
193         uint32_t dummy;
194
195         LOG_DEBUG("%s", target_name(target));
196
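        /* clearing the OS Lock re-enables access to the core debug registers */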
197         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
198                         armv8->debug_base + CPUV8_DBG_OSLAR, 0);
199         if (retval != ERROR_OK) {
200                 LOG_DEBUG("Examine %s failed", "oslock");
201                 return retval;
202         }
203
204         /* Clear Sticky Power Down status Bit in PRSR to enable access to
205            the registers in the Core Power Domain */
206         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
207                         armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
208         if (retval != ERROR_OK)
209                 return retval;
210
211         /*
212          * Static CTI configuration:
213          * Channel 0 -> trigger outputs HALT request to PE
214          * Channel 1 -> trigger outputs Resume request to PE
215          * Gate all channel trigger events from entering the CTM
216          */
217
218         /* Enable CTI */
219         retval = arm_cti_enable(armv8->cti, true);
220         /* By default, gate all channel events to and from the CTM */
221         if (retval == ERROR_OK)
222                 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
223         /* output halt requests to PE on channel 0 event */
224         if (retval == ERROR_OK)
225                 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
226         /* output restart requests to PE on channel 1 event */
227         if (retval == ERROR_OK)
228                 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
229         if (retval != ERROR_OK)
230                 return retval;
231
232         /* Resync breakpoint registers */
233
234         return ERROR_OK;
235 }
236
237 /* Write to memory mapped registers directly with no cache or mmu handling */
238 static int aarch64_dap_write_memap_register_u32(struct target *target,
239         uint32_t address,
240         uint32_t value)
241 {
242         int retval;
243         struct armv8_common *armv8 = target_to_armv8(target);
244
245         retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
246
247         return retval;
248 }
249
250 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
251 {
252         struct arm_dpm *dpm = &a8->armv8_common.dpm;
253         int retval;
254
255         dpm->arm = &a8->armv8_common.arm;
256         dpm->didr = debug;
257
258         retval = armv8_dpm_setup(dpm);
259         if (retval == ERROR_OK)
260                 retval = armv8_dpm_initialize(dpm);
261
262         return retval;
263 }
264
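/* set or clear bits in the DSCR (debug status and control) register */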
265 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
266 {
267         struct armv8_common *armv8 = target_to_armv8(target);
268         return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
269 }
270
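/* read PRSR and compare the masked value against val; optionally return the
 * comparison result in *p_result and the raw PRSR value in *p_prsr */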
271 static int aarch64_check_state_one(struct target *target,
272                 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
273 {
274         struct armv8_common *armv8 = target_to_armv8(target);
275         uint32_t prsr;
276         int retval;
277
278         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
279                         armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
280         if (retval != ERROR_OK)
281                 return retval;
282
283         if (p_prsr)
284                 *p_prsr = prsr;
285
286         if (p_result)
287                 *p_result = (prsr & mask) == (val & mask);
288
289         return ERROR_OK;
290 }
291
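/* busy-wait, with a one second timeout, for the target to report a halted state in PRSR */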
292 static int aarch64_wait_halt_one(struct target *target)
293 {
294         int retval = ERROR_OK;
295         uint32_t prsr;
296
297         int64_t then = timeval_ms();
298         for (;;) {
299                 int halted;
300
301                 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
302                 if (retval != ERROR_OK || halted)
303                         break;
304
305                 if (timeval_ms() > then + 1000) {
306                         retval = ERROR_TARGET_TIMEOUT;
307                         LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
308                         break;
309                 }
310         }
311         return retval;
312 }
313
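/* prepare every running target of the SMP group for halting: enable halting
 * debug mode and open the CTI gate for channel 0 so halt requests can reach the PE */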
314 static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
315 {
316         int retval = ERROR_OK;
317         struct target_list *head = target->head;
318         struct target *first = NULL;
319
320         LOG_DEBUG("target %s exc %i", target_name(target), exc_target);
321
322         while (head != NULL) {
323                 struct target *curr = head->target;
324                 struct armv8_common *armv8 = target_to_armv8(curr);
325                 head = head->next;
326
327                 if (exc_target && curr == target)
328                         continue;
329                 if (!target_was_examined(curr))
330                         continue;
331                 if (curr->state != TARGET_RUNNING)
332                         continue;
333
334                 /* HACK: mark this target as prepared for halting */
335                 curr->debug_reason = DBG_REASON_DBGRQ;
336
337                 /* open the gate for channel 0 to let HALT requests pass to the CTM */
338                 retval = arm_cti_ungate_channel(armv8->cti, 0);
339                 if (retval == ERROR_OK)
340                         retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
341                 if (retval != ERROR_OK)
342                         break;
343
344                 LOG_DEBUG("target %s prepared", target_name(curr));
345
346                 if (first == NULL)
347                         first = curr;
348         }
349
350         if (p_first) {
351                 if (exc_target && first)
352                         *p_first = first;
353                 else
354                         *p_first = target;
355         }
356
357         return retval;
358 }
359
360 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
361 {
362         int retval = ERROR_OK;
363         struct armv8_common *armv8 = target_to_armv8(target);
364
365         LOG_DEBUG("%s", target_name(target));
366
367         /* allow Halting Debug Mode */
368         retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
369         if (retval != ERROR_OK)
370                 return retval;
371
372         /* trigger an event on channel 0, this outputs a halt request to the PE */
373         retval = arm_cti_pulse_channel(armv8->cti, 0);
374         if (retval != ERROR_OK)
375                 return retval;
376
377         if (mode == HALT_SYNC) {
378                 retval = aarch64_wait_halt_one(target);
379                 if (retval != ERROR_OK) {
380                         if (retval == ERROR_TARGET_TIMEOUT)
381                                 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
382                         return retval;
383                 }
384         }
385
386         return ERROR_OK;
387 }
388
389 static int aarch64_halt_smp(struct target *target, bool exc_target)
390 {
391         struct target *next = target;
392         int retval;
393
394         /* prepare halt on all PEs of the group */
395         retval = aarch64_prepare_halt_smp(target, exc_target, &next);
396
397         if (exc_target && next == target)
398                 return retval;
399
400         /* halt the target PE */
401         if (retval == ERROR_OK)
402                 retval = aarch64_halt_one(next, HALT_LAZY);
403
404         if (retval != ERROR_OK)
405                 return retval;
406
407         /* wait for all PEs to halt */
408         int64_t then = timeval_ms();
409         for (;;) {
410                 bool all_halted = true;
411                 struct target_list *head;
412                 struct target *curr;
413
414                 foreach_smp_target(head, target->head) {
415                         int halted;
416
417                         curr = head->target;
418
419                         if (!target_was_examined(curr))
420                                 continue;
421
422                         retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
423                         if (retval != ERROR_OK || !halted) {
424                                 all_halted = false;
425                                 break;
426                         }
427                 }
428
429                 if (all_halted)
430                         break;
431
432                 if (timeval_ms() > then + 1000) {
433                         retval = ERROR_TARGET_TIMEOUT;
434                         break;
435                 }
436
437                 /*
438                  * HACK: on Hi6220 there are 8 cores organized in 2 clusters
439                  * and it looks like the CTI's are not connected by a common
440                  * trigger matrix. It seems that we need to halt one core in each
441                  * cluster explicitly. So if we find that a core has not halted
442                  * yet, we trigger an explicit halt for the second cluster.
443                  */
444                 retval = aarch64_halt_one(curr, HALT_LAZY);
445                 if (retval != ERROR_OK)
446                         break;
447         }
448
449         return retval;
450 }
451
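/* bring the remaining targets of the SMP group to the halted state and poll
 * them, leaving the target that serves the gdb connection for last */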
452 static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
453 {
454         struct target *gdb_target = NULL;
455         struct target_list *head;
456         struct target *curr;
457
458         if (debug_reason == DBG_REASON_NOTHALTED) {
459                 LOG_DEBUG("Halting remaining targets in SMP group");
460                 aarch64_halt_smp(target, true);
461         }
462
463         /* poll all targets in the group, but skip the target that serves GDB */
464         foreach_smp_target(head, target->head) {
465                 curr = head->target;
466                 /* skip calling context */
467                 if (curr == target)
468                         continue;
469                 if (!target_was_examined(curr))
470                         continue;
471                 /* skip targets that were already halted */
472                 if (curr->state == TARGET_HALTED)
473                         continue;
474                 /* remember the gdb_service->target */
475                 if (curr->gdb_service != NULL)
476                         gdb_target = curr->gdb_service->target;
477                 /* skip it */
478                 if (curr == gdb_target)
479                         continue;
480
481                 /* avoid recursion in aarch64_poll() */
482                 curr->smp = 0;
483                 aarch64_poll(curr);
484                 curr->smp = 1;
485         }
486
487         /* after all targets were updated, poll the gdb serving target */
488         if (gdb_target != NULL && gdb_target != target)
489                 aarch64_poll(gdb_target);
490
491         return ERROR_OK;
492 }
493
494 /*
495  * Aarch64 Run control
496  */
497
498 static int aarch64_poll(struct target *target)
499 {
500         enum target_state prev_target_state;
501         int retval = ERROR_OK;
502         int halted;
503
504         retval = aarch64_check_state_one(target,
505                                 PRSR_HALT, PRSR_HALT, &halted, NULL);
506         if (retval != ERROR_OK)
507                 return retval;
508
509         if (halted) {
510                 prev_target_state = target->state;
511                 if (prev_target_state != TARGET_HALTED) {
512                         enum target_debug_reason debug_reason = target->debug_reason;
513
514                         /* We have a halting debug event */
515                         target->state = TARGET_HALTED;
516                         LOG_DEBUG("Target %s halted", target_name(target));
517                         retval = aarch64_debug_entry(target);
518                         if (retval != ERROR_OK)
519                                 return retval;
520
521                         if (target->smp)
522                                 update_halt_gdb(target, debug_reason);
523
524                         switch (prev_target_state) {
525                         case TARGET_RUNNING:
526                         case TARGET_UNKNOWN:
527                         case TARGET_RESET:
528                                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
529                                 break;
530                         case TARGET_DEBUG_RUNNING:
531                                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
532                                 break;
533                         default:
534                                 break;
535                         }
536                 }
537         } else
538                 target->state = TARGET_RUNNING;
539
540         return retval;
541 }
542
543 static int aarch64_halt(struct target *target)
544 {
545         if (target->smp)
546                 return aarch64_halt_smp(target, false);
547
548         return aarch64_halt_one(target, HALT_SYNC);
549 }
550
551 static int aarch64_restore_one(struct target *target, int current,
552         uint64_t *address, int handle_breakpoints, int debug_execution)
553 {
554         struct armv8_common *armv8 = target_to_armv8(target);
555         struct arm *arm = &armv8->arm;
556         int retval;
557         uint64_t resume_pc;
558
559         LOG_DEBUG("%s", target_name(target));
560
561         if (!debug_execution)
562                 target_free_all_working_areas(target);
563
564         /* current = 1: continue on current pc, otherwise continue at <address> */
565         resume_pc = buf_get_u64(arm->pc->value, 0, 64);
566         if (!current)
567                 resume_pc = *address;
568         else
569                 *address = resume_pc;
570
571         /* Make sure that the Armv7 gdb thumb fixups do not
572          * kill the return address
573          */
574         switch (arm->core_state) {
575                 case ARM_STATE_ARM:
576                         resume_pc &= 0xFFFFFFFC;
577                         break;
578                 case ARM_STATE_AARCH64:
579                         resume_pc &= 0xFFFFFFFFFFFFFFFC;
580                         break;
581                 case ARM_STATE_THUMB:
582                 case ARM_STATE_THUMB_EE:
583                         /* When the return address is loaded into PC
584                          * bit 0 must be 1 to stay in Thumb state
585                          */
586                         resume_pc |= 0x1;
587                         break;
588                 case ARM_STATE_JAZELLE:
589                         LOG_ERROR("How do I resume into Jazelle state??");
590                         return ERROR_FAIL;
591         }
592         LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
593         buf_set_u64(arm->pc->value, 0, 64, resume_pc);
594         arm->pc->dirty = 1;
595         arm->pc->valid = 1;
596
597         /* call it now, before restoring context, because it uses cpu
598          * register r0 to restore the system control register */
599         retval = aarch64_restore_system_control_reg(target);
600         if (retval == ERROR_OK)
601                 retval = aarch64_restore_context(target, handle_breakpoints);
602
603         return retval;
604 }
605
606 /**
607  * prepare single target for restart
608  */
611 static int aarch64_prepare_restart_one(struct target *target)
612 {
613         struct armv8_common *armv8 = target_to_armv8(target);
614         int retval;
615         uint32_t dscr;
616         uint32_t tmp;
617
618         LOG_DEBUG("%s", target_name(target));
619
620         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
621                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
622         if (retval != ERROR_OK)
623                 return retval;
624
625         if ((dscr & DSCR_ITE) == 0)
626                 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
627         if ((dscr & DSCR_ERR) != 0)
628                 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
629
630         /* acknowledge a pending CTI halt event */
631         retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
632         /*
633          * open the CTI gate for channel 1 so that the restart events
634          * get passed along to all PEs. Also close gate for channel 0
635          * to isolate the PE from halt events.
636          */
637         if (retval == ERROR_OK)
638                 retval = arm_cti_ungate_channel(armv8->cti, 1);
639         if (retval == ERROR_OK)
640                 retval = arm_cti_gate_channel(armv8->cti, 0);
641
642         /* make sure that DSCR.HDE is set */
643         if (retval == ERROR_OK) {
644                 dscr |= DSCR_HDE;
645                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
646                                 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
647         }
648
649         if (retval == ERROR_OK) {
650                 /* clear sticky bits in PRSR, SDR is now 0 */
651                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
652                                 armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
653         }
654
655         return retval;
656 }
657
658 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
659 {
660         struct armv8_common *armv8 = target_to_armv8(target);
661         int retval;
662
663         LOG_DEBUG("%s", target_name(target));
664
665         /* trigger an event on channel 1, generates a restart request to the PE */
666         retval = arm_cti_pulse_channel(armv8->cti, 1);
667         if (retval != ERROR_OK)
668                 return retval;
669
670         if (mode == RESTART_SYNC) {
671                 int64_t then = timeval_ms();
672                 for (;;) {
673                         int resumed;
674                         /*
675                          * if PRSR.SDR is set now, the target did restart, even
676                          * if it's now already halted again (e.g. due to breakpoint)
677                          */
678                         retval = aarch64_check_state_one(target,
679                                                 PRSR_SDR, PRSR_SDR, &resumed, NULL);
680                         if (retval != ERROR_OK || resumed)
681                                 break;
682
683                         if (timeval_ms() > then + 1000) {
684                                 LOG_ERROR("%s: Timeout waiting for resume", target_name(target));
685                                 retval = ERROR_TARGET_TIMEOUT;
686                                 break;
687                         }
688                 }
689         }
690
691         if (retval != ERROR_OK)
692                 return retval;
693
694         target->debug_reason = DBG_REASON_NOTHALTED;
695         target->state = TARGET_RUNNING;
696
697         return ERROR_OK;
698 }
699
700 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
701 {
702         int retval;
703
704         LOG_DEBUG("%s", target_name(target));
705
706         retval = aarch64_prepare_restart_one(target);
707         if (retval == ERROR_OK)
708                 retval = aarch64_do_restart_one(target, mode);
709
710         return retval;
711 }
712
713 /*
714  * prepare all but the current target for restart
715  */
716 static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
717 {
718         int retval = ERROR_OK;
719         struct target_list *head;
720         struct target *first = NULL;
721         uint64_t address;
722
723         foreach_smp_target(head, target->head) {
724                 struct target *curr = head->target;
725
726                 /* skip calling target */
727                 if (curr == target)
728                         continue;
729                 if (!target_was_examined(curr))
730                         continue;
731                 if (curr->state != TARGET_HALTED)
732                         continue;
733
734                 /*  resume at current address, not in step mode */
735                 retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
736                 if (retval == ERROR_OK)
737                         retval = aarch64_prepare_restart_one(curr);
738                 if (retval != ERROR_OK) {
739                         LOG_ERROR("failed to restore target %s", target_name(curr));
740                         break;
741                 }
742                 /* remember the first valid target in the group */
743                 if (first == NULL)
744                         first = curr;
745         }
746
747         if (p_first)
748                 *p_first = first;
749
750         return retval;
751 }
752
753
754 static int aarch64_step_restart_smp(struct target *target)
755 {
756         int retval = ERROR_OK;
757         struct target_list *head;
758         struct target *first = NULL;
759
760         LOG_DEBUG("%s", target_name(target));
761
762         retval = aarch64_prep_restart_smp(target, 0, &first);
763         if (retval != ERROR_OK)
764                 return retval;
765
766         if (first != NULL)
767                 retval = aarch64_do_restart_one(first, RESTART_LAZY);
768         if (retval != ERROR_OK) {
769                 LOG_DEBUG("error restarting target %s", target_name(first));
770                 return retval;
771         }
772
773         int64_t then = timeval_ms();
774         for (;;) {
775                 struct target *curr = target;
776                 bool all_resumed = true;
777
778                 foreach_smp_target(head, target->head) {
779                         uint32_t prsr;
780                         int resumed;
781
782                         curr = head->target;
783
784                         if (curr == target)
785                                 continue;
786
787                         if (!target_was_examined(curr))
788                                 continue;
789
790                         retval = aarch64_check_state_one(curr,
791                                         PRSR_SDR, PRSR_SDR, &resumed, &prsr);
792                         if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
793                                 all_resumed = false;
794                                 break;
795                         }
796
797                         if (curr->state != TARGET_RUNNING) {
798                                 curr->state = TARGET_RUNNING;
799                                 curr->debug_reason = DBG_REASON_NOTHALTED;
800                                 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
801                         }
802                 }
803
804                 if (all_resumed)
805                         break;
806
807                 if (timeval_ms() > then + 1000) {
808                         LOG_ERROR("%s: timeout waiting for target resume", __func__);
809                         retval = ERROR_TARGET_TIMEOUT;
810                         break;
811                 }
812                 /*
813                  * HACK: on Hi6220 there are 8 cores organized in 2 clusters
814                  * trigger matrix. It seems that we need to restart one core in each
815                  * cluster explicitly. So if we find that a core has not resumed
816                  * yet, we trigger an explicit resume for the second cluster.
817                  * yet, we trigger an explicit resume for the second cluster.
818                  */
819                 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
820                 if (retval != ERROR_OK)
821                         break;
822         }
823
824         return retval;
825 }
826
827 static int aarch64_resume(struct target *target, int current,
828         target_addr_t address, int handle_breakpoints, int debug_execution)
829 {
830         int retval = 0;
831         uint64_t addr = address;
832
833         if (target->state != TARGET_HALTED)
834                 return ERROR_TARGET_NOT_HALTED;
835
836         /*
837          * If this target is part of an SMP group, prepare the other
838          * targets for resuming. This involves restoring the complete
839          * target register context and setting up CTI gates to accept
840          * resume events from the trigger matrix.
841          */
842         if (target->smp) {
843                 retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
844                 if (retval != ERROR_OK)
845                         return retval;
846         }
847
848         /* all targets prepared, restore and restart the current target */
849         retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
850                                  debug_execution);
851         if (retval == ERROR_OK)
852                 retval = aarch64_restart_one(target, RESTART_SYNC);
853         if (retval != ERROR_OK)
854                 return retval;
855
856         if (target->smp) {
857                 int64_t then = timeval_ms();
858                 for (;;) {
859                         struct target *curr = target;
860                         struct target_list *head;
861                         bool all_resumed = true;
862
863                         foreach_smp_target(head, target->head) {
864                                 uint32_t prsr;
865                                 int resumed;
866
867                                 curr = head->target;
868                                 if (curr == target)
869                                         continue;
870                                 if (!target_was_examined(curr))
871                                         continue;
872
873                                 retval = aarch64_check_state_one(curr,
874                                                 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
875                                 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
876                                         all_resumed = false;
877                                         break;
878                                 }
879
880                                 if (curr->state != TARGET_RUNNING) {
881                                         curr->state = TARGET_RUNNING;
882                                         curr->debug_reason = DBG_REASON_NOTHALTED;
883                                         target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
884                                 }
885                         }
886
887                         if (all_resumed)
888                                 break;
889
890                         if (timeval_ms() > then + 1000) {
891                                 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
892                                 retval = ERROR_TARGET_TIMEOUT;
893                                 break;
894                         }
895
896                         /*
897                          * HACK: on Hi6220 there are 8 cores organized in 2 clusters
898                          * and it looks like the CTI's are not connected by a common
899                          * trigger matrix. It seems that we need to halt one core in each
900                          * cluster explicitly. So if we find that a core has not halted
901                          * yet, we trigger an explicit resume for the second cluster.
902                          */
903                         retval = aarch64_do_restart_one(curr, RESTART_LAZY);
904                         if (retval != ERROR_OK)
905                                 break;
906                 }
907         }
908
909         if (retval != ERROR_OK)
910                 return retval;
911
912         target->debug_reason = DBG_REASON_NOTHALTED;
913
914         if (!debug_execution) {
915                 target->state = TARGET_RUNNING;
916                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
917                 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
918         } else {
919                 target->state = TARGET_DEBUG_RUNNING;
920                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
921                 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
922         }
923
924         return ERROR_OK;
925 }
926
927 static int aarch64_debug_entry(struct target *target)
928 {
929         int retval = ERROR_OK;
930         struct armv8_common *armv8 = target_to_armv8(target);
931         struct arm_dpm *dpm = &armv8->dpm;
932         enum arm_state core_state;
933         uint32_t dscr;
934
935         /* make sure to clear all sticky errors */
936         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
937                         armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
938         if (retval == ERROR_OK)
939                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
940                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
941         if (retval == ERROR_OK)
942                 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
943
944         if (retval != ERROR_OK)
945                 return retval;
946
947         LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);
948
949         dpm->dscr = dscr;
950         core_state = armv8_dpm_get_core_state(dpm);
951         armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
952         armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
953
954         /* close the CTI gate for all events */
955         if (retval == ERROR_OK)
956                 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
957         /* discard async exceptions */
958         if (retval == ERROR_OK)
959                 retval = dpm->instr_cpsr_sync(dpm);
960         if (retval != ERROR_OK)
961                 return retval;
962
963         /* Examine debug reason */
964         armv8_dpm_report_dscr(dpm, dscr);
965
966         /* save address of instruction that triggered the watchpoint? */
967         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
968                 uint32_t tmp;
969                 uint64_t wfar = 0;
970
971                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
972                                 armv8->debug_base + CPUV8_DBG_WFAR1,
973                                 &tmp);
974                 if (retval != ERROR_OK)
975                         return retval;
976                 wfar = tmp;
977                 wfar = (wfar << 32);
978                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
979                                 armv8->debug_base + CPUV8_DBG_WFAR0,
980                                 &tmp);
981                 if (retval != ERROR_OK)
982                         return retval;
983                 wfar |= tmp;
984                 armv8_dpm_report_wfar(&armv8->dpm, wfar);
985         }
986
987         retval = armv8_dpm_read_current_registers(&armv8->dpm);
988
989         if (retval == ERROR_OK && armv8->post_debug_entry)
990                 retval = armv8->post_debug_entry(target);
991
992         return retval;
993 }
994
995 static int aarch64_post_debug_entry(struct target *target)
996 {
997         struct aarch64_common *aarch64 = target_to_aarch64(target);
998         struct armv8_common *armv8 = &aarch64->armv8_common;
999         int retval;
1000         enum arm_mode target_mode = ARM_MODE_ANY;
1001         uint32_t instr;
1002
1003         switch (armv8->arm.core_mode) {
1004         case ARMV8_64_EL0T:
1005                 target_mode = ARMV8_64_EL1H;
1006                 /* fall through */
1007         case ARMV8_64_EL1T:
1008         case ARMV8_64_EL1H:
1009                 instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
1010                 break;
1011         case ARMV8_64_EL2T:
1012         case ARMV8_64_EL2H:
1013                 instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
1014                 break;
1015         case ARMV8_64_EL3H:
1016         case ARMV8_64_EL3T:
1017                 instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
1018                 break;
1019
1020         case ARM_MODE_SVC:
1021         case ARM_MODE_ABT:
1022         case ARM_MODE_FIQ:
1023         case ARM_MODE_IRQ:
1024                 instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1025                 break;
1026
1027         default:
1028                 LOG_INFO("cannot read system control register in this mode");
1029                 return ERROR_FAIL;
1030         }
1031
1032         if (target_mode != ARM_MODE_ANY)
1033                 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
1034
1035         retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
1036         if (retval != ERROR_OK)
1037                 return retval;
1038
1039         if (target_mode != ARM_MODE_ANY)
1040                 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
1041
1042         LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1043         aarch64->system_control_reg_curr = aarch64->system_control_reg;
1044
1045         if (armv8->armv8_mmu.armv8_cache.info == -1) {
1046                 armv8_identify_cache(armv8);
1047                 armv8_read_mpidr(armv8);
1048         }
1049
1050         armv8->armv8_mmu.mmu_enabled =
1051                         (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1052         armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1053                 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1054         armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1055                 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1056         return ERROR_OK;
1057 }
1058
1059 /*
1060  * single-step a target
1061  */
1062 static int aarch64_step(struct target *target, int current, target_addr_t address,
1063         int handle_breakpoints)
1064 {
1065         struct armv8_common *armv8 = target_to_armv8(target);
1066         struct aarch64_common *aarch64 = target_to_aarch64(target);
1067         int saved_retval = ERROR_OK;
1068         int retval;
1069         uint32_t edecr;
1070
1071         if (target->state != TARGET_HALTED) {
1072                 LOG_WARNING("target not halted");
1073                 return ERROR_TARGET_NOT_HALTED;
1074         }
1075
1076         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1077                         armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1078         /* make sure EDECR.SS is not set when restoring the register */
1079
1080         if (retval == ERROR_OK) {
1081                 edecr &= ~0x4;
1082                 /* set EDECR.SS to enter hardware step mode */
1083                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1084                                 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1085         }
1086         /* disable interrupts while stepping */
1087         if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1088                 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1089         /* bail out if stepping setup has failed */
1090         if (retval != ERROR_OK)
1091                 return retval;
1092
1093         if (target->smp && (current == 1)) {
1094                 /*
1095                  * isolate current target so that it doesn't get resumed
1096                  * together with the others
1097                  */
1098                 retval = arm_cti_gate_channel(armv8->cti, 1);
1099                 /* resume all other targets in the group */
1100                 if (retval == ERROR_OK)
1101                         retval = aarch64_step_restart_smp(target);
1102                 if (retval != ERROR_OK) {
1103                         LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1104                         return retval;
1105                 }
1106                 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1107         }
1108
1109         /* all other targets running, restore and restart the current target */
1110         retval = aarch64_restore_one(target, current, &address, 0, 0);
1111         if (retval == ERROR_OK)
1112                 retval = aarch64_restart_one(target, RESTART_LAZY);
1113
1114         if (retval != ERROR_OK)
1115                 return retval;
1116
1117         LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1118         if (!handle_breakpoints)
1119                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1120
1121         int64_t then = timeval_ms();
1122         for (;;) {
1123                 int stepped;
1124                 uint32_t prsr;
1125
1126                 retval = aarch64_check_state_one(target,
1127                                         PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1128                 if (retval != ERROR_OK || stepped)
1129                         break;
1130
1131                 if (timeval_ms() > then + 100) {
1132                         LOG_ERROR("timeout waiting for target %s halt after step",
1133                                         target_name(target));
1134                         retval = ERROR_TARGET_TIMEOUT;
1135                         break;
1136                 }
1137         }
1138
1139         /*
1140          * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1141          * causes a timeout. The core takes the step but doesn't complete it and so
1142          * debug state is never entered. However, you can manually halt the core
1143          * as an external debug request is also a WFI wakeup event.
1144          */
1145         if (retval == ERROR_TARGET_TIMEOUT)
1146                 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1147
1148         /* restore EDECR */
1149         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1150                         armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1151         if (retval != ERROR_OK)
1152                 return retval;
1153
1154         /* restore interrupts */
1155         if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1156                 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1157                 if (retval != ERROR_OK)
1158                         return retval;
1159         }
1160
1161         if (saved_retval != ERROR_OK)
1162                 return saved_retval;
1163
1164         return aarch64_poll(target);
1165 }
1166
1167 static int aarch64_restore_context(struct target *target, bool bpwp)
1168 {
1169         struct armv8_common *armv8 = target_to_armv8(target);
1170         struct arm *arm = &armv8->arm;
1171
1172         int retval;
1173
1174         LOG_DEBUG("%s", target_name(target));
1175
1176         if (armv8->pre_restore_context)
1177                 armv8->pre_restore_context(target);
1178
1179         retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1180         if (retval == ERROR_OK) {
1181                 /* registers are now invalid */
1182                 register_cache_invalidate(arm->core_cache);
1183                 register_cache_invalidate(arm->core_cache->next);
1184         }
1185
1186         return retval;
1187 }
1188
1189 /*
1190  * AArch64 Breakpoint and watchpoint functions
1191  */
1192
1193 /* Setup hardware Breakpoint Register Pair */
1194 static int aarch64_set_breakpoint(struct target *target,
1195         struct breakpoint *breakpoint, uint8_t matchmode)
1196 {
1197         int retval;
1198         int brp_i = 0;
1199         uint32_t control;
1200         uint8_t byte_addr_select = 0x0F;
1201         struct aarch64_common *aarch64 = target_to_aarch64(target);
1202         struct armv8_common *armv8 = &aarch64->armv8_common;
1203         struct aarch64_brp *brp_list = aarch64->brp_list;
1204
1205         if (breakpoint->set) {
1206                 LOG_WARNING("breakpoint already set");
1207                 return ERROR_OK;
1208         }
1209
1210         if (breakpoint->type == BKPT_HARD) {
1211                 int64_t bpt_value;
1212                 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1213                         brp_i++;
1214                 if (brp_i >= aarch64->brp_num) {
1215                         LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1216                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1217                 }
1218                 breakpoint->set = brp_i + 1;
1219                 if (breakpoint->length == 2)
1220                         byte_addr_select = (3 << (breakpoint->address & 0x02));
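                /* assemble DBGBCR: BT[23:20] = matchmode, HMC[13] = 1,
                 * BAS[8:5] = byte address select, PMC[2:1] = 0b11, E[0] = 1 */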
1221                 control = ((matchmode & 0x7) << 20)
1222                         | (1 << 13)
1223                         | (byte_addr_select << 5)
1224                         | (3 << 1) | 1;
1225                 brp_list[brp_i].used = 1;
1226                 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1227                 brp_list[brp_i].control = control;
1228                 bpt_value = brp_list[brp_i].value;
1229
1230                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1231                                 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1232                                 (uint32_t)(bpt_value & 0xFFFFFFFF));
1233                 if (retval != ERROR_OK)
1234                         return retval;
1235                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1236                                 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1237                                 (uint32_t)(bpt_value >> 32));
1238                 if (retval != ERROR_OK)
1239                         return retval;
1240
1241                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1242                                 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1243                                 brp_list[brp_i].control);
1244                 if (retval != ERROR_OK)
1245                         return retval;
1246                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1247                         brp_list[brp_i].control,
1248                         brp_list[brp_i].value);
1249
1250         } else if (breakpoint->type == BKPT_SOFT) {
1251                 uint8_t code[4];
1252
1253                 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
1254                 retval = target_read_memory(target,
1255                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1256                                 breakpoint->length, 1,
1257                                 breakpoint->orig_instr);
1258                 if (retval != ERROR_OK)
1259                         return retval;
1260
1261                 armv8_cache_d_inner_flush_virt(armv8,
1262                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1263                                 breakpoint->length);
1264
1265                 retval = target_write_memory(target,
1266                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1267                                 breakpoint->length, 1, code);
1268                 if (retval != ERROR_OK)
1269                         return retval;
1270
1271                 armv8_cache_d_inner_flush_virt(armv8,
1272                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1273                                 breakpoint->length);
1274
1275                 armv8_cache_i_inner_inval_virt(armv8,
1276                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1277                                 breakpoint->length);
1278
1279                 breakpoint->set = 0x11; /* Any nice value but 0 */
1280         }
1281
1282         /* Ensure that halting debug mode is enabled */
1283         retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1284         if (retval != ERROR_OK) {
1285                 LOG_DEBUG("Failed to set DSCR.HDE");
1286                 return retval;
1287         }
1288
1289         return ERROR_OK;
1290 }
1291
1292 static int aarch64_set_context_breakpoint(struct target *target,
1293         struct breakpoint *breakpoint, uint8_t matchmode)
1294 {
1295         int retval = ERROR_FAIL;
1296         int brp_i = 0;
1297         uint32_t control;
1298         uint8_t byte_addr_select = 0x0F;
1299         struct aarch64_common *aarch64 = target_to_aarch64(target);
1300         struct armv8_common *armv8 = &aarch64->armv8_common;
1301         struct aarch64_brp *brp_list = aarch64->brp_list;
1302
1303         if (breakpoint->set) {
1304                 LOG_WARNING("breakpoint already set");
1305                 return retval;
1306         }
1307         /*check available context BRPs*/
1308         while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
1309                 (brp_list[brp_i].type != BRP_CONTEXT)))
1310                 brp_i++;
1311
1312         if (brp_i >= aarch64->brp_num) {
1313                 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1314                 return ERROR_FAIL;
1315         }
1316
1317         breakpoint->set = brp_i + 1;
1318         control = ((matchmode & 0x7) << 20)
1319                 | (1 << 13)
1320                 | (byte_addr_select << 5)
1321                 | (3 << 1) | 1;
1322         brp_list[brp_i].used = 1;
1323         brp_list[brp_i].value = (breakpoint->asid);
1324         brp_list[brp_i].control = control;
1325         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1326                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1327                         brp_list[brp_i].value);
1328         if (retval != ERROR_OK)
1329                 return retval;
1330         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1331                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1332                         brp_list[brp_i].control);
1333         if (retval != ERROR_OK)
1334                 return retval;
1335         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1336                 brp_list[brp_i].control,
1337                 brp_list[brp_i].value);
1338         return ERROR_OK;
1339
1340 }
1341
1342 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1343 {
1344         int retval = ERROR_FAIL;
1345         int brp_1 = 0;  /* holds the contextID pair */
1346         int brp_2 = 0;  /* holds the IVA pair */
1347         uint32_t control_CTX, control_IVA;
1348         uint8_t CTX_byte_addr_select = 0x0F;
1349         uint8_t IVA_byte_addr_select = 0x0F;
1350         uint8_t CTX_machmode = 0x03;
1351         uint8_t IVA_machmode = 0x01;
1352         struct aarch64_common *aarch64 = target_to_aarch64(target);
1353         struct armv8_common *armv8 = &aarch64->armv8_common;
1354         struct aarch64_brp *brp_list = aarch64->brp_list;
1355
1356         if (breakpoint->set) {
1357                 LOG_WARNING("breakpoint already set");
1358                 return retval;
1359         }
1360         /*check available context BRPs*/
1361         while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
1362                 (brp_list[brp_1].type != BRP_CONTEXT)))
1363                 brp_1++;
1364
1365         LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1366         if (brp_1 >= aarch64->brp_num) {
1367                 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1368                 return ERROR_FAIL;
1369         }
1370
1371         while ((brp_2 < aarch64->brp_num) && (brp_list[brp_2].used ||
1372                 (brp_list[brp_2].type != BRP_NORMAL)))
1373                 brp_2++;
1374
1375         LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1376         if (brp_2 >= aarch64->brp_num) {
1377                 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1378                 return ERROR_FAIL;
1379         }
1380
1381         breakpoint->set = brp_1 + 1;
1382         breakpoint->linked_BRP = brp_2;
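        /* context-ID comparison BRP; the LBN field links it to the address BRP brp_2 */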
1383         control_CTX = ((CTX_machmode & 0x7) << 20)
1384                 | (brp_2 << 16)
1385                 | (0 << 14)
1386                 | (CTX_byte_addr_select << 5)
1387                 | (3 << 1) | 1;
1388         brp_list[brp_1].used = 1;
1389         brp_list[brp_1].value = (breakpoint->asid);
1390         brp_list[brp_1].control = control_CTX;
1391         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1392                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1393                         brp_list[brp_1].value);
1394         if (retval != ERROR_OK)
1395                 return retval;
1396         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1397                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1398                         brp_list[brp_1].control);
1399         if (retval != ERROR_OK)
1400                 return retval;
1401
1402         control_IVA = ((IVA_machmode & 0x7) << 20)
1403                 | (brp_1 << 16)
1404                 | (1 << 13)
1405                 | (IVA_byte_addr_select << 5)
1406                 | (3 << 1) | 1;
1407         brp_list[brp_2].used = 1;
1408         brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1409         brp_list[brp_2].control = control_IVA;
1410         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1411                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1412                         brp_list[brp_2].value & 0xFFFFFFFF);
1413         if (retval != ERROR_OK)
1414                 return retval;
1415         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1416                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1417                         brp_list[brp_2].value >> 32);
1418         if (retval != ERROR_OK)
1419                 return retval;
1420         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1421                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1422                         brp_list[brp_2].control);
1423         if (retval != ERROR_OK)
1424                 return retval;
1425
1426         return ERROR_OK;
1427 }
1428
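     /*
      * Remove a previously programmed breakpoint. For hardware breakpoints the
      * BRP value/control registers are cleared (both pairs in the hybrid case);
      * for software breakpoints the original instruction is written back and
      * the caches are cleaned/invalidated around the patched location.
      */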
1429 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1430 {
1431         int retval;
1432         struct aarch64_common *aarch64 = target_to_aarch64(target);
1433         struct armv8_common *armv8 = &aarch64->armv8_common;
1434         struct aarch64_brp *brp_list = aarch64->brp_list;
1435
1436         if (!breakpoint->set) {
1437                 LOG_WARNING("breakpoint not set");
1438                 return ERROR_OK;
1439         }
1440
1441         if (breakpoint->type == BKPT_HARD) {
1442                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1443                         int brp_i = breakpoint->set - 1;
1444                         int brp_j = breakpoint->linked_BRP;
1445                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1446                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1447                                 return ERROR_OK;
1448                         }
1449                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1450                                 brp_list[brp_i].control, brp_list[brp_i].value);
1451                         brp_list[brp_i].used = 0;
1452                         brp_list[brp_i].value = 0;
1453                         brp_list[brp_i].control = 0;
1454                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1455                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1456                                         brp_list[brp_i].control);
1457                         if (retval != ERROR_OK)
1458                                 return retval;
1459                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1460                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1461                                         (uint32_t)brp_list[brp_i].value);
1462                         if (retval != ERROR_OK)
1463                                 return retval;
1464                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1465                                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1466                                         (uint32_t)brp_list[brp_i].value);
1467                         if (retval != ERROR_OK)
1468                                 return retval;
1469                         if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1470                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1471                                 return ERROR_OK;
1472                         }
1473                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1474                                 brp_list[brp_j].control, brp_list[brp_j].value);
1475                         brp_list[brp_j].used = 0;
1476                         brp_list[brp_j].value = 0;
1477                         brp_list[brp_j].control = 0;
1478                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1479                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1480                                         brp_list[brp_j].control);
1481                         if (retval != ERROR_OK)
1482                                 return retval;
1483                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1484                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
1485                                         (uint32_t)brp_list[brp_j].value);
1486                         if (retval != ERROR_OK)
1487                                 return retval;
1488                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1489                                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
1490                                         (uint32_t)brp_list[brp_j].value);
1491                         if (retval != ERROR_OK)
1492                                 return retval;
1493
1494                         breakpoint->linked_BRP = 0;
1495                         breakpoint->set = 0;
1496                         return ERROR_OK;
1497
1498                 } else {
1499                         int brp_i = breakpoint->set - 1;
1500                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1501                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1502                                 return ERROR_OK;
1503                         }
1504                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1505                                 brp_list[brp_i].control, brp_list[brp_i].value);
1506                         brp_list[brp_i].used = 0;
1507                         brp_list[brp_i].value = 0;
1508                         brp_list[brp_i].control = 0;
1509                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1510                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1511                                         brp_list[brp_i].control);
1512                         if (retval != ERROR_OK)
1513                                 return retval;
1514                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1515                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1516                                         brp_list[brp_i].value);
1517                         if (retval != ERROR_OK)
1518                                 return retval;
1519
1520                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1521                                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1522                                         (uint32_t)brp_list[brp_i].value);
1523                         if (retval != ERROR_OK)
1524                                 return retval;
1525                         breakpoint->set = 0;
1526                         return ERROR_OK;
1527                 }
1528         } else {
1529                 /* restore original instruction (kept in target endianness) */
1530
1531                 armv8_cache_d_inner_flush_virt(armv8,
1532                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1533                                 breakpoint->length);
1534
1535                 if (breakpoint->length == 4) {
1536                         retval = target_write_memory(target,
1537                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1538                                         4, 1, breakpoint->orig_instr);
1539                         if (retval != ERROR_OK)
1540                                 return retval;
1541                 } else {
1542                         retval = target_write_memory(target,
1543                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1544                                         2, 1, breakpoint->orig_instr);
1545                         if (retval != ERROR_OK)
1546                                 return retval;
1547                 }
1548
1549                 armv8_cache_d_inner_flush_virt(armv8,
1550                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1551                                 breakpoint->length);
1552
1553                 armv8_cache_i_inner_inval_virt(armv8,
1554                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1555                                 breakpoint->length);
1556         }
1557         breakpoint->set = 0;
1558
1559         return ERROR_OK;
1560 }
1561
1562 static int aarch64_add_breakpoint(struct target *target,
1563         struct breakpoint *breakpoint)
1564 {
1565         struct aarch64_common *aarch64 = target_to_aarch64(target);
1566
1567         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1568                 LOG_INFO("no hardware breakpoint available");
1569                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1570         }
1571
1572         if (breakpoint->type == BKPT_HARD)
1573                 aarch64->brp_num_available--;
1574
1575         return aarch64_set_breakpoint(target, breakpoint, 0x00);        /* Exact match */
1576 }
1577
1578 static int aarch64_add_context_breakpoint(struct target *target,
1579         struct breakpoint *breakpoint)
1580 {
1581         struct aarch64_common *aarch64 = target_to_aarch64(target);
1582
1583         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1584                 LOG_INFO("no hardware breakpoint available");
1585                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1586         }
1587
1588         if (breakpoint->type == BKPT_HARD)
1589                 aarch64->brp_num_available--;
1590
1591         return aarch64_set_context_breakpoint(target, breakpoint, 0x02);        /* asid match */
1592 }
1593
1594 static int aarch64_add_hybrid_breakpoint(struct target *target,
1595         struct breakpoint *breakpoint)
1596 {
1597         struct aarch64_common *aarch64 = target_to_aarch64(target);
1598
1599         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1600                 LOG_INFO("no hardware breakpoint available");
1601                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1602         }
1603
1604         if (breakpoint->type == BKPT_HARD)
1605                 aarch64->brp_num_available--;
1606
1607         return aarch64_set_hybrid_breakpoint(target, breakpoint);      /* linked context ID + IVA match */
1608 }
1609
1610
1611 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1612 {
1613         struct aarch64_common *aarch64 = target_to_aarch64(target);
1614
1615 #if 0
1616 /* It is perfectly possible to remove breakpoints while the target is running */
1617         if (target->state != TARGET_HALTED) {
1618                 LOG_WARNING("target not halted");
1619                 return ERROR_TARGET_NOT_HALTED;
1620         }
1621 #endif
1622
1623         if (breakpoint->set) {
1624                 aarch64_unset_breakpoint(target, breakpoint);
1625                 if (breakpoint->type == BKPT_HARD)
1626                         aarch64->brp_num_available++;
1627         }
1628
1629         return ERROR_OK;
1630 }
1631
1632 /*
1633  * AArch64 Reset functions
1634  */
1635
1636 static int aarch64_assert_reset(struct target *target)
1637 {
1638         struct armv8_common *armv8 = target_to_armv8(target);
1639
1640         LOG_DEBUG(" ");
1641
1642         /* FIXME when halt is requested, make it work somehow... */
1643
1644         /* Issue some kind of warm reset. */
1645         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1646                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1647         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1648                 /* REVISIT handle "pulls" cases, if there's
1649                  * hardware that needs them to work.
1650                  */
1651                 jtag_add_reset(0, 1);
1652         } else {
1653                 LOG_ERROR("%s: how to reset?", target_name(target));
1654                 return ERROR_FAIL;
1655         }
1656
1657         /* registers are now invalid */
1658         if (target_was_examined(target)) {
1659                 register_cache_invalidate(armv8->arm.core_cache);
1660                 register_cache_invalidate(armv8->arm.core_cache->next);
1661         }
1662
1663         target->state = TARGET_RESET;
1664
1665         return ERROR_OK;
1666 }
1667
1668 static int aarch64_deassert_reset(struct target *target)
1669 {
1670         int retval;
1671
1672         LOG_DEBUG(" ");
1673
1674         /* be certain SRST is off */
1675         jtag_add_reset(0, 0);
1676
1677         if (!target_was_examined(target))
1678                 return ERROR_OK;
1679
1680         retval = aarch64_poll(target);
1681         if (retval != ERROR_OK)
1682                 return retval;
1683
1684         if (target->reset_halt) {
1685                 if (target->state != TARGET_HALTED) {
1686                         LOG_WARNING("%s: ran after reset and before halt ...",
1687                                 target_name(target));
1688                         retval = target_halt(target);
1689                         if (retval != ERROR_OK)
1690                                 return retval;
1691                 }
1692         }
1693
1694         return aarch64_init_debug_access(target);
1695 }
1696
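     /*
      * Slow path for CPU memory writes: the DCC stays in Normal mode and each
      * element is pushed through DTRRX, moved into W1/R1 on the core and then
      * stored with a post-indexed STRB/STRH/STR from the address held in X0/R0.
      */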
1697 static int aarch64_write_cpu_memory_slow(struct target *target,
1698         uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1699 {
1700         struct armv8_common *armv8 = target_to_armv8(target);
1701         struct arm_dpm *dpm = &armv8->dpm;
1702         struct arm *arm = &armv8->arm;
1703         int retval;
1704
1705         armv8_reg_current(arm, 1)->dirty = true;
1706
1707         /* change DCC to normal mode if necessary */
1708         if (*dscr & DSCR_MA) {
1709                 *dscr &= ~DSCR_MA;
1710                 retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1711                                 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1712                 if (retval != ERROR_OK)
1713                         return retval;
1714         }
1715
1716         while (count) {
1717                 uint32_t data, opcode;
1718
1719                 /* write the data to store into DTRRX */
1720                 if (size == 1)
1721                         data = *buffer;
1722                 else if (size == 2)
1723                         data = target_buffer_get_u16(target, buffer);
1724                 else
1725                         data = target_buffer_get_u32(target, buffer);
1726                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1727                                 armv8->debug_base + CPUV8_DBG_DTRRX, data);
1728                 if (retval != ERROR_OK)
1729                         return retval;
1730
1731                 if (arm->core_state == ARM_STATE_AARCH64)
1732                         retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
1733                 else
1734                         retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1735                 if (retval != ERROR_OK)
1736                         return retval;
1737
1738                 if (size == 1)
1739                         opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
1740                 else if (size == 2)
1741                         opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
1742                 else
1743                         opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
1744                 retval = dpm->instr_execute(dpm, opcode);
1745                 if (retval != ERROR_OK)
1746                         return retval;
1747
1748                 /* Advance */
1749                 buffer += size;
1750                 --count;
1751         }
1752
1753         return ERROR_OK;
1754 }
1755
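     /*
      * Fast path for word-aligned, word-sized writes: with DSCR.MA (memory
      * access mode) set, every write to DTRRX makes the core store the word to
      * [X0] and advance X0 by 4, so the whole buffer can be streamed with one
      * non-incrementing MEM-AP block write.
      */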
1756 static int aarch64_write_cpu_memory_fast(struct target *target,
1757         uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1758 {
1759         struct armv8_common *armv8 = target_to_armv8(target);
1760         struct arm *arm = &armv8->arm;
1761         int retval;
1762
1763         armv8_reg_current(arm, 1)->dirty = true;
1764
1765         /* Step 1.d   - Change DCC to memory mode */
1766         *dscr |= DSCR_MA;
1767         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1768                         armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1769         if (retval != ERROR_OK)
1770                 return retval;
1771
1772
1773         /* Step 2.a   - Do the write */
1774         retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1775                                         buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
1776         if (retval != ERROR_OK)
1777                 return retval;
1778
1779         /* Step 3.a   - Switch DTR mode back to Normal mode */
1780         *dscr &= ~DSCR_MA;
1781         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1782                                 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1783         if (retval != ERROR_OK)
1784                 return retval;
1785
1786         return ERROR_OK;
1787 }
1788
1789 static int aarch64_write_cpu_memory(struct target *target,
1790         uint64_t address, uint32_t size,
1791         uint32_t count, const uint8_t *buffer)
1792 {
1793         /* write memory through APB-AP */
1794         int retval = ERROR_COMMAND_SYNTAX_ERROR;
1795         struct armv8_common *armv8 = target_to_armv8(target);
1796         struct arm_dpm *dpm = &armv8->dpm;
1797         struct arm *arm = &armv8->arm;
1798         uint32_t dscr;
1799
1800         if (target->state != TARGET_HALTED) {
1801                 LOG_WARNING("target not halted");
1802                 return ERROR_TARGET_NOT_HALTED;
1803         }
1804
1805         /* Mark register X0 as dirty, as it will be used
1806          * for transferring the data.
1807          * It will be restored automatically when exiting
1808          * debug mode
1809          */
1810         armv8_reg_current(arm, 0)->dirty = true;
1811
1812         /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1813
1814         /* Read DSCR */
1815         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1816                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1817         if (retval != ERROR_OK)
1818                 return retval;
1819
1820         /* Set Normal access mode  */
1821         dscr = (dscr & ~DSCR_MA);
1822         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1823                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1824         if (retval != ERROR_OK)
1825                 return retval;
1826
1827         if (arm->core_state == ARM_STATE_AARCH64) {
1828                 /* Write X0 with value 'address' using write procedure */
1829                 /* Step 1.a+b - Write the address for the memory access into DBGDTR_EL0 */
1830                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1831                 retval = dpm->instr_write_data_dcc_64(dpm,
1832                                 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
1833         } else {
1834                 /* Write R0 with value 'address' using write procedure */
1835                 /* Step 1.a+b - Write the address for the memory access into DBGDTRRX */
1836                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1837                 retval = dpm->instr_write_data_dcc(dpm,
1838                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
1839         }
1840
1841         if (retval != ERROR_OK)
1842                 return retval;
1843
1844         if (size == 4 && (address % 4) == 0)
1845                 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
1846         else
1847                 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
1848
1849         if (retval != ERROR_OK) {
1850                 /* Unset DTR mode */
1851                 mem_ap_read_atomic_u32(armv8->debug_ap,
1852                                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1853                 dscr &= ~DSCR_MA;
1854                 mem_ap_write_atomic_u32(armv8->debug_ap,
1855                                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1856         }
1857
1858         /* Check for sticky abort flags in the DSCR */
1859         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1860                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1861         if (retval != ERROR_OK)
1862                 return retval;
1863
1864         dpm->dscr = dscr;
1865         if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1866                 /* Abort occurred - clear it and exit */
1867                 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1868                 armv8_dpm_handle_exception(dpm, true);
1869                 return ERROR_FAIL;
1870         }
1871
1872         /* Done */
1873         return ERROR_OK;
1874 }
1875
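     /*
      * Slow path for CPU memory reads: in Normal DCC mode each element is
      * loaded from the address in X0/R0 into W1/R1 with a post-indexed
      * LDRB/LDRH/LDR, moved to DTRTX and then fetched by the debugger.
      */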
1876 static int aarch64_read_cpu_memory_slow(struct target *target,
1877         uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1878 {
1879         struct armv8_common *armv8 = target_to_armv8(target);
1880         struct arm_dpm *dpm = &armv8->dpm;
1881         struct arm *arm = &armv8->arm;
1882         int retval;
1883
1884         armv8_reg_current(arm, 1)->dirty = true;
1885
1886         /* change DCC to normal mode (if necessary) */
1887         if (*dscr & DSCR_MA) {
1888                 *dscr &= ~DSCR_MA;
1889                 retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1890                                 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1891                 if (retval != ERROR_OK)
1892                         return retval;
1893         }
1894
1895         while (count) {
1896                 uint32_t opcode, data;
1897
1898                 if (size == 1)
1899                         opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1900                 else if (size == 2)
1901                         opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1902                 else
1903                         opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1904                 retval = dpm->instr_execute(dpm, opcode);
1905                 if (retval != ERROR_OK)
1906                         return retval;
1907
1908                 if (arm->core_state == ARM_STATE_AARCH64)
1909                         retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1910                 else
1911                         retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1912                 if (retval != ERROR_OK)
1913                         return retval;
1914
1915                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1916                                 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1917                 if (retval != ERROR_OK)
1918                         return retval;
1919
1920                 if (size == 1)
1921                         *buffer = (uint8_t)data;
1922                 else if (size == 2)
1923                         target_buffer_set_u16(target, buffer, (uint16_t)data);
1924                 else
1925                         target_buffer_set_u32(target, buffer, data);
1926
1927                 /* Advance */
1928                 buffer += size;
1929                 --count;
1930         }
1931
1932         return ERROR_OK;
1933 }
1934
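     /*
      * Fast path for word-aligned, word-sized reads: with DSCR.MA set, every
      * read of DTRTX makes the core load the next word from [X0] and advance
      * X0 by 4. The first DTRTX read only primes the pipeline and is
      * discarded, and the last word is read after leaving memory access mode,
      * which is why the block transfer below moves count - 1 words.
      */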
1935 static int aarch64_read_cpu_memory_fast(struct target *target,
1936         uint32_t count, uint8_t *buffer, uint32_t *dscr)
1937 {
1938         struct armv8_common *armv8 = target_to_armv8(target);
1939         struct arm_dpm *dpm = &armv8->dpm;
1940         struct arm *arm = &armv8->arm;
1941         int retval;
1942         uint32_t value;
1943
1944         /* Mark X1 as dirty */
1945         armv8_reg_current(arm, 1)->dirty = true;
1946
1947         if (arm->core_state == ARM_STATE_AARCH64) {
1948                 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1949                 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1950         } else {
1951                 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1952                 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1953         }
1954
1955         if (retval != ERROR_OK)
1956                 return retval;
1957
1958         /* Step 1.e - Change DCC to memory mode */
1959         *dscr |= DSCR_MA;
1960         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1961                         armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1962         if (retval != ERROR_OK)
1963                 return retval;
1964
1965         /* Step 1.f - read DBGDTRTX and discard the value */
1966         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1967                         armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1968         if (retval != ERROR_OK)
1969                 return retval;
1970
1971         count--;
1972         /* Read the data - each read of the DTRTX register causes the instruction to be reissued.
1973          * Abort flags are sticky, so they can be checked at the end of the transaction.
1974          *
1975          * The data is read in 32-bit aligned units.
1976          */
1977
1978         if (count) {
1979                 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1980                  * increments X0 by 4. */
1981                 retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
1982                                                                         armv8->debug_base + CPUV8_DBG_DTRTX);
1983                 if (retval != ERROR_OK)
1984                         return retval;
1985         }
1986
1987         /* Step 3.a - set DTR access mode back to Normal mode   */
1988         *dscr &= ~DSCR_MA;
1989         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1990                                         armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1991         if (retval != ERROR_OK)
1992                 return retval;
1993
1994         /* Step 3.b - read DBGDTRTX for the final value */
1995         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1996                         armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1997         if (retval != ERROR_OK)
1998                 return retval;
1999
2000         target_buffer_set_u32(target, buffer + count * 4, value);
2001         return retval;
2002 }
2003
2004 static int aarch64_read_cpu_memory(struct target *target,
2005         target_addr_t address, uint32_t size,
2006         uint32_t count, uint8_t *buffer)
2007 {
2008         /* read memory through APB-AP */
2009         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2010         struct armv8_common *armv8 = target_to_armv8(target);
2011         struct arm_dpm *dpm = &armv8->dpm;
2012         struct arm *arm = &armv8->arm;
2013         uint32_t dscr;
2014
2015         LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
2016                         address, size, count);
2017
2018         if (target->state != TARGET_HALTED) {
2019                 LOG_WARNING("target not halted");
2020                 return ERROR_TARGET_NOT_HALTED;
2021         }
2022
2023         /* Mark register X0 as dirty, as it will be used
2024          * for transferring the data.
2025          * It will be restored automatically when exiting
2026          * debug mode
2027          */
2028         armv8_reg_current(arm, 0)->dirty = true;
2029
2030         /* Read DSCR */
2031         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2032                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2033         if (retval != ERROR_OK)
2034                 return retval;
2035
2036         /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2037
2038         /* Set Normal access mode  */
2039         dscr &= ~DSCR_MA;
2040         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
2041                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2042         if (retval != ERROR_OK)
2043                 return retval;
2044
2045         if (arm->core_state == ARM_STATE_AARCH64) {
2046                 /* Write X0 with value 'address' using write procedure */
2047                 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2048                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2049                 retval = dpm->instr_write_data_dcc_64(dpm,
2050                                 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2051         } else {
2052                 /* Write R0 with value 'address' using write procedure */
2053                 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2054                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2055                 retval = dpm->instr_write_data_dcc(dpm,
2056                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2057         }
2058
2059         if (retval != ERROR_OK)
2060                 return retval;
2061
2062         if (size == 4 && (address % 4) == 0)
2063                 retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
2064         else
2065                 retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2066
2067         if (dscr & DSCR_MA) {
2068                 dscr &= ~DSCR_MA;
2069                 mem_ap_write_atomic_u32(armv8->debug_ap,
2070                                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2071         }
2072
2073         if (retval != ERROR_OK)
2074                 return retval;
2075
2076         /* Check for sticky abort flags in the DSCR */
2077         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2078                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2079         if (retval != ERROR_OK)
2080                 return retval;
2081
2082         dpm->dscr = dscr;
2083
2084         if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2085                 /* Abort occurred - clear it and exit */
2086                 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2087                 armv8_dpm_handle_exception(dpm, true);
2088                 return ERROR_FAIL;
2089         }
2090
2091         /* Done */
2092         return ERROR_OK;
2093 }
2094
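     /*
      * Physical memory accesses reuse the virtual-address path: the MMU is
      * temporarily switched off via aarch64_mmu_modify() and re-enabled by the
      * next aarch64_read_memory()/aarch64_write_memory() call if it was on.
      */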
2095 static int aarch64_read_phys_memory(struct target *target,
2096         target_addr_t address, uint32_t size,
2097         uint32_t count, uint8_t *buffer)
2098 {
2099         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2100
2101         if (count && buffer) {
2102                 /* read memory through APB-AP */
2103                 retval = aarch64_mmu_modify(target, 0);
2104                 if (retval != ERROR_OK)
2105                         return retval;
2106                 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2107         }
2108         return retval;
2109 }
2110
2111 static int aarch64_read_memory(struct target *target, target_addr_t address,
2112         uint32_t size, uint32_t count, uint8_t *buffer)
2113 {
2114         int mmu_enabled = 0;
2115         int retval;
2116
2117         /* determine if MMU was enabled on target stop */
2118         retval = aarch64_mmu(target, &mmu_enabled);
2119         if (retval != ERROR_OK)
2120                 return retval;
2121
2122         if (mmu_enabled) {
2123                 /* enable MMU as we could have disabled it for phys access */
2124                 retval = aarch64_mmu_modify(target, 1);
2125                 if (retval != ERROR_OK)
2126                         return retval;
2127         }
2128         return aarch64_read_cpu_memory(target, address, size, count, buffer);
2129 }
2130
2131 static int aarch64_write_phys_memory(struct target *target,
2132         target_addr_t address, uint32_t size,
2133         uint32_t count, const uint8_t *buffer)
2134 {
2135         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2136
2137         if (count && buffer) {
2138                 /* write memory through APB-AP */
2139                 retval = aarch64_mmu_modify(target, 0);
2140                 if (retval != ERROR_OK)
2141                         return retval;
2142                 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2143         }
2144
2145         return retval;
2146 }
2147
2148 static int aarch64_write_memory(struct target *target, target_addr_t address,
2149         uint32_t size, uint32_t count, const uint8_t *buffer)
2150 {
2151         int mmu_enabled = 0;
2152         int retval;
2153
2154         /* determine if MMU was enabled on target stop */
2155         retval = aarch64_mmu(target, &mmu_enabled);
2156         if (retval != ERROR_OK)
2157                 return retval;
2158
2159         if (mmu_enabled) {
2160                 /* enable MMU as we could have disabled it for phys access */
2161                 retval = aarch64_mmu_modify(target, 1);
2162                 if (retval != ERROR_OK)
2163                         return retval;
2164         }
2165         return aarch64_write_cpu_memory(target, address, size, count, buffer);
2166 }
2167
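     /*
      * Periodic timer callback: while the target is running, drain the DCC by
      * reading DTRTX whenever DSCR reports TXfull and hand each word to the
      * generic target_request() layer (debug message channel).
      */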
2168 static int aarch64_handle_target_request(void *priv)
2169 {
2170         struct target *target = priv;
2171         struct armv8_common *armv8 = target_to_armv8(target);
2172         int retval;
2173
2174         if (!target_was_examined(target))
2175                 return ERROR_OK;
2176         if (!target->dbg_msg_enabled)
2177                 return ERROR_OK;
2178
2179         if (target->state == TARGET_RUNNING) {
2180                 uint32_t request;
2181                 uint32_t dscr;
2182                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2183                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2184
2185                 /* check if we have data */
2186                 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2187                         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2188                                         armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2189                         if (retval == ERROR_OK) {
2190                                 target_request(target, request);
2191                                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2192                                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2193                         }
2194                 }
2195         }
2196
2197         return ERROR_OK;
2198 }
2199
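     /*
      * First-time examination: initialize the DAP, locate the APB-AP, find the
      * per-core debug base address (from the ROM table unless -dbgbase was
      * given), clear the OS lock, read the ID registers, attach the CTI passed
      * in via -cti and build the pool of hardware breakpoint register pairs.
      */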
2200 static int aarch64_examine_first(struct target *target)
2201 {
2202         struct aarch64_common *aarch64 = target_to_aarch64(target);
2203         struct armv8_common *armv8 = &aarch64->armv8_common;
2204         struct adiv5_dap *swjdp = armv8->arm.dap;
2205         struct aarch64_private_config *pc;
2206         int i;
2207         int retval = ERROR_OK;
2208         uint64_t debug, ttypr;
2209         uint32_t cpuid;
2210         uint32_t tmp0, tmp1, tmp2, tmp3;
2211         debug = ttypr = cpuid = 0;
2212
2213         retval = dap_dp_init(swjdp);
2214         if (retval != ERROR_OK)
2215                 return retval;
2216
2217         /* Search for the APB-AP - it is needed for access to debug registers */
2218         retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2219         if (retval != ERROR_OK) {
2220                 LOG_ERROR("Could not find APB-AP for debug access");
2221                 return retval;
2222         }
2223
2224         retval = mem_ap_init(armv8->debug_ap);
2225         if (retval != ERROR_OK) {
2226                 LOG_ERROR("Could not initialize the APB-AP");
2227                 return retval;
2228         }
2229
2230         armv8->debug_ap->memaccess_tck = 10;
2231
2232         if (!target->dbgbase_set) {
2233                 uint32_t dbgbase;
2234                 /* Get ROM Table base */
2235                 uint32_t apid;
2236                 int32_t coreidx = target->coreid;
2237                 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2238                 if (retval != ERROR_OK)
2239                         return retval;
2240                 /* Lookup 0x15 -- Processor DAP */
2241                 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2242                                 &armv8->debug_base, &coreidx);
2243                 if (retval != ERROR_OK)
2244                         return retval;
2245                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2246                                 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2247         } else
2248                 armv8->debug_base = target->dbgbase;
2249
2250         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2251                         armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2252         if (retval != ERROR_OK) {
2253                 LOG_DEBUG("Examine %s failed", "oslock");
2254                 return retval;
2255         }
2256
2257         retval = mem_ap_read_u32(armv8->debug_ap,
2258                         armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2259         if (retval != ERROR_OK) {
2260                 LOG_DEBUG("Examine %s failed", "CPUID");
2261                 return retval;
2262         }
2263
2264         retval = mem_ap_read_u32(armv8->debug_ap,
2265                         armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2266         retval += mem_ap_read_u32(armv8->debug_ap,
2267                         armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2268         if (retval != ERROR_OK) {
2269                 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2270                 return retval;
2271         }
2272         retval = mem_ap_read_u32(armv8->debug_ap,
2273                         armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2274         retval += mem_ap_read_u32(armv8->debug_ap,
2275                         armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2276         if (retval != ERROR_OK) {
2277                 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2278                 return retval;
2279         }
2280
2281         retval = dap_run(armv8->debug_ap->dap);
2282         if (retval != ERROR_OK) {
2283                 LOG_ERROR("%s: examination failed", target_name(target));
2284                 return retval;
2285         }
2286
2287         ttypr |= tmp1;
2288         ttypr = (ttypr << 32) | tmp0;
2289         debug |= tmp3;
2290         debug = (debug << 32) | tmp2;
2291
2292         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2293         LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2294         LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2295
2296         if (target->private_config == NULL)
2297                 return ERROR_FAIL;
2298
2299         pc = (struct aarch64_private_config *)target->private_config;
2300         if (pc->cti == NULL)
2301                 return ERROR_FAIL;
2302
2303         armv8->cti = pc->cti;
2304
2305         retval = aarch64_dpm_setup(aarch64, debug);
2306         if (retval != ERROR_OK)
2307                 return retval;
2308
2309         /* Setup Breakpoint Register Pairs */
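             /* ID_AA64DFR0_EL1: BRPs (bits[15:12]) and CTX_CMPs (bits[31:28]) hold
              * the number of breakpoint / context-matching pairs minus one. */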
2310         aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2311         aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2312         aarch64->brp_num_available = aarch64->brp_num;
2313         aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2314         for (i = 0; i < aarch64->brp_num; i++) {
2315                 aarch64->brp_list[i].used = 0;
2316                 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2317                         aarch64->brp_list[i].type = BRP_NORMAL;
2318                 else
2319                         aarch64->brp_list[i].type = BRP_CONTEXT;
2320                 aarch64->brp_list[i].value = 0;
2321                 aarch64->brp_list[i].control = 0;
2322                 aarch64->brp_list[i].BRPn = i;
2323         }
2324
2325         LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2326
2327         target->state = TARGET_UNKNOWN;
2328         target->debug_reason = DBG_REASON_NOTHALTED;
2329         aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2330         target_set_examined(target);
2331         return ERROR_OK;
2332 }
2333
2334 static int aarch64_examine(struct target *target)
2335 {
2336         int retval = ERROR_OK;
2337
2338         /* don't re-probe hardware after each reset */
2339         if (!target_was_examined(target))
2340                 retval = aarch64_examine_first(target);
2341
2342         /* Configure core debug access */
2343         if (retval == ERROR_OK)
2344                 retval = aarch64_init_debug_access(target);
2345
2346         return retval;
2347 }
2348
2349 /*
2350  *      AArch64 target creation and initialization
2351  */
2352
2353 static int aarch64_init_target(struct command_context *cmd_ctx,
2354         struct target *target)
2355 {
2356         /* examine_first() does a bunch of this */
2357         return ERROR_OK;
2358 }
2359
2360 static int aarch64_init_arch_info(struct target *target,
2361         struct aarch64_common *aarch64, struct jtag_tap *tap)
2362 {
2363         struct armv8_common *armv8 = &aarch64->armv8_common;
2364
2365         /* Setup struct aarch64_common */
2366         aarch64->common_magic = AARCH64_COMMON_MAGIC;
2367         /* if the TAP has no DAP attached yet, create one */
2368         if (!tap->dap) {
2369                 tap->dap = dap_init();
2370                 tap->dap->tap = tap;
2371         }
2372         armv8->arm.dap = tap->dap;
2373
2374         /* register arch-specific functions */
2375         armv8->examine_debug_reason = NULL;
2376         armv8->post_debug_entry = aarch64_post_debug_entry;
2377         armv8->pre_restore_context = NULL;
2378         armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2379
2380         armv8_init_arch_info(target, armv8);
2381         target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2382
2383         return ERROR_OK;
2384 }
2385
2386 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2387 {
2388         struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2389
2390         return aarch64_init_arch_info(target, aarch64, target->tap);
2391 }
2392
2393 static int aarch64_mmu(struct target *target, int *enabled)
2394 {
2395         if (target->state != TARGET_HALTED) {
2396                 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2397                 return ERROR_TARGET_INVALID;
2398         }
2399
2400         *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2401         return ERROR_OK;
2402 }
2403
2404 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2405                              target_addr_t *phys)
2406 {
2407         return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2408 }
2409
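     /*
      * Per-target handler for the "-cti" configure option: stores the named
      * CTI object (created through the cti command group) in the private
      * config so aarch64_examine_first() can attach it to the core.
      * Illustrative usage from a target config script (names are placeholders):
      *   $_TARGETNAME configure -cti $_CHIPNAME.cti
      *   $_TARGETNAME cget -cti
      */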
2410 static int aarch64_jim_configure(struct target *target, Jim_GetOptInfo *goi)
2411 {
2412         struct aarch64_private_config *pc;
2413         const char *arg;
2414         int e;
2415
2416         /* check if argv[0] is for us */
2417         arg = Jim_GetString(goi->argv[0], NULL);
2418         if (strcmp(arg, "-cti"))
2419                 return JIM_CONTINUE;
2420
2421         /* pop the argument from argv */
2422         e = Jim_GetOpt_String(goi, &arg, NULL);
2423         if (e != JIM_OK)
2424                 return e;
2425
2426         /* check if we have another option */
2427         if (goi->argc == 0) {
2428                 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-cti ?cti-name?");
2429                 return JIM_ERR;
2430         }
2431
2432         pc = (struct aarch64_private_config *)target->private_config;
2433
2434         if (goi->isconfigure) {
2435                 Jim_Obj *o_cti;
2436                 struct arm_cti *cti;
2437                 e = Jim_GetOpt_Obj(goi, &o_cti);
2438                 if (e != JIM_OK)
2439                         return e;
2440                 cti = cti_instance_by_jim_obj(goi->interp, o_cti);
2441                 if (cti == NULL)
2442                         return JIM_ERR;
2443
2444                 if (pc == NULL) {
2445                         pc = calloc(1, sizeof(struct aarch64_private_config));
2446                         target->private_config = pc;
2447                 }
2448                 pc->cti = cti;
2449         } else {
2450                 if (goi->argc != 0) {
2451                         Jim_WrongNumArgs(goi->interp,
2452                                         goi->argc, goi->argv,
2453                                         "NO PARAMS");
2454                         return JIM_ERR;
2455                 }
2456
2457                 if (pc == NULL || pc->cti == NULL) {
2458                         Jim_SetResultString(goi->interp, "CTI not configured", -1);
2459                         return JIM_ERR;
2460                 }
2461                 Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
2462         }
2463
2464         return JIM_OK;
2465 }
2466
2467 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2468 {
2469         struct target *target = get_current_target(CMD_CTX);
2470         struct armv8_common *armv8 = target_to_armv8(target);
2471
2472         return armv8_handle_cache_info_command(CMD_CTX,
2473                         &armv8->armv8_mmu.armv8_cache);
2474 }
2475
2476
2477 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2478 {
2479         struct target *target = get_current_target(CMD_CTX);
2480         if (!target_was_examined(target)) {
2481                 LOG_ERROR("target not examined yet");
2482                 return ERROR_FAIL;
2483         }
2484
2485         return aarch64_init_debug_access(target);
2486 }
2487 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2488 {
2489         struct target *target = get_current_target(CMD_CTX);
2490         /* check target is an smp target */
2491         struct target_list *head;
2492         struct target *curr;
2493         head = target->head;
2494         target->smp = 0;
2495         if (head != (struct target_list *)NULL) {
2496                 while (head != (struct target_list *)NULL) {
2497                         curr = head->target;
2498                         curr->smp = 0;
2499                         head = head->next;
2500                 }
2501                 /* point the gdb service back at this target */
2502                 target->gdb_service->target = target;
2503         }
2504         return ERROR_OK;
2505 }
2506
2507 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2508 {
2509         struct target *target = get_current_target(CMD_CTX);
2510         struct target_list *head;
2511         struct target *curr;
2512         head = target->head;
2513         if (head != (struct target_list *)NULL) {
2514                 target->smp = 1;
2515                 while (head != (struct target_list *)NULL) {
2516                         curr = head->target;
2517                         curr->smp = 1;
2518                         head = head->next;
2519                 }
2520         }
2521         return ERROR_OK;
2522 }
2523
2524 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2525 {
2526         struct target *target = get_current_target(CMD_CTX);
2527         struct aarch64_common *aarch64 = target_to_aarch64(target);
2528
2529         static const Jim_Nvp nvp_maskisr_modes[] = {
2530                 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2531                 { .name = "on", .value = AARCH64_ISRMASK_ON },
2532                 { .name = NULL, .value = -1 },
2533         };
2534         const Jim_Nvp *n;
2535
2536         if (CMD_ARGC > 0) {
2537                 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2538                 if (n->name == NULL) {
2539                         LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2540                         return ERROR_COMMAND_SYNTAX_ERROR;
2541                 }
2542
2543                 aarch64->isrmasking_mode = n->value;
2544         }
2545
2546         n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2547         command_print(CMD_CTX, "aarch64 interrupt mask %s", n->name);
2548
2549         return ERROR_OK;
2550 }
2551
2552 static const struct command_registration aarch64_exec_command_handlers[] = {
2553         {
2554                 .name = "cache_info",
2555                 .handler = aarch64_handle_cache_info_command,
2556                 .mode = COMMAND_EXEC,
2557                 .help = "display information about target caches",
2558                 .usage = "",
2559         },
2560         {
2561                 .name = "dbginit",
2562                 .handler = aarch64_handle_dbginit_command,
2563                 .mode = COMMAND_EXEC,
2564                 .help = "Initialize core debug",
2565                 .usage = "",
2566         },
2567         {       .name = "smp_off",
2568                 .handler = aarch64_handle_smp_off_command,
2569                 .mode = COMMAND_EXEC,
2570                 .help = "Stop smp handling",
2571                 .usage = "",
2572         },
2573         {
2574                 .name = "smp_on",
2575                 .handler = aarch64_handle_smp_on_command,
2576                 .mode = COMMAND_EXEC,
2577                 .help = "Restart smp handling",
2578                 .usage = "",
2579         },
2580         {
2581                 .name = "maskisr",
2582                 .handler = aarch64_mask_interrupts_command,
2583                 .mode = COMMAND_ANY,
2584                 .help = "mask aarch64 interrupts during single-step",
2585                 .usage = "['on'|'off']",
2586         },
2587
2588         COMMAND_REGISTRATION_DONE
2589 };
2590 static const struct command_registration aarch64_command_handlers[] = {
2591         {
2592                 .chain = armv8_command_handlers,
2593         },
2594         {
2595                 .name = "aarch64",
2596                 .mode = COMMAND_ANY,
2597                 .help = "Aarch64 command group",
2598                 .usage = "",
2599                 .chain = aarch64_exec_command_handlers,
2600         },
2601         COMMAND_REGISTRATION_DONE
2602 };
2603
2604 struct target_type aarch64_target = {
2605         .name = "aarch64",
2606
2607         .poll = aarch64_poll,
2608         .arch_state = armv8_arch_state,
2609
2610         .halt = aarch64_halt,
2611         .resume = aarch64_resume,
2612         .step = aarch64_step,
2613
2614         .assert_reset = aarch64_assert_reset,
2615         .deassert_reset = aarch64_deassert_reset,
2616
2617         /* REVISIT allow exporting VFP3 registers ... */
2618         .get_gdb_reg_list = armv8_get_gdb_reg_list,
2619
2620         .read_memory = aarch64_read_memory,
2621         .write_memory = aarch64_write_memory,
2622
2623         .add_breakpoint = aarch64_add_breakpoint,
2624         .add_context_breakpoint = aarch64_add_context_breakpoint,
2625         .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2626         .remove_breakpoint = aarch64_remove_breakpoint,
2627         .add_watchpoint = NULL,
2628         .remove_watchpoint = NULL,
2629
2630         .commands = aarch64_command_handlers,
2631         .target_create = aarch64_target_create,
2632         .target_jim_configure = aarch64_jim_configure,
2633         .init_target = aarch64_init_target,
2634         .examine = aarch64_examine,
2635
2636         .read_phys_memory = aarch64_read_phys_memory,
2637         .write_phys_memory = aarch64_write_phys_memory,
2638         .mmu = aarch64_mmu,
2639         .virt2phys = aarch64_virt2phys,
2640 };