openocd: src/target/aarch64.c
1 /***************************************************************************
2  *   Copyright (C) 2015 by David Ung                                       *
3  *                                                                         *
4  *   This program is free software; you can redistribute it and/or modify  *
5  *   it under the terms of the GNU General Public License as published by  *
6  *   the Free Software Foundation; either version 2 of the License, or     *
7  *   (at your option) any later version.                                   *
8  *                                                                         *
9  *   This program is distributed in the hope that it will be useful,       *
10  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
11  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
12  *   GNU General Public License for more details.                          *
13  *                                                                         *
14  *   You should have received a copy of the GNU General Public License     *
15  *   along with this program; if not, write to the                         *
16  *   Free Software Foundation, Inc.,                                       *
17  *                                                                         *
18  ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include "arm_semihosting.h"
32 #include <helper/time_support.h>
33
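/*
 * The run-control helpers below come in two flavours: the *_LAZY variants
 * only trigger the CTI event and return immediately, while the *_SYNC
 * variants additionally poll EDPRSR until the PE confirms the state change.
 */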
34 enum restart_mode {
35         RESTART_LAZY,
36         RESTART_SYNC,
37 };
38
39 enum halt_mode {
40         HALT_LAZY,
41         HALT_SYNC,
42 };
43
44 struct aarch64_private_config {
45         struct adiv5_private_config adiv5_config;
46         struct arm_cti *cti;
47 };
48
49 static int aarch64_poll(struct target *target);
50 static int aarch64_debug_entry(struct target *target);
51 static int aarch64_restore_context(struct target *target, bool bpwp);
52 static int aarch64_set_breakpoint(struct target *target,
53         struct breakpoint *breakpoint, uint8_t matchmode);
54 static int aarch64_set_context_breakpoint(struct target *target,
55         struct breakpoint *breakpoint, uint8_t matchmode);
56 static int aarch64_set_hybrid_breakpoint(struct target *target,
57         struct breakpoint *breakpoint);
58 static int aarch64_unset_breakpoint(struct target *target,
59         struct breakpoint *breakpoint);
60 static int aarch64_mmu(struct target *target, int *enabled);
61 static int aarch64_virt2phys(struct target *target,
62         target_addr_t virt, target_addr_t *phys);
63 static int aarch64_read_cpu_memory(struct target *target,
64         uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
65
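/* Iterate over all targets of an SMP group, starting at the list head. */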
66 #define foreach_smp_target(pos, head) \
67         for (pos = head; (pos != NULL); pos = pos->next)
68
69 static int aarch64_restore_system_control_reg(struct target *target)
70 {
71         enum arm_mode target_mode = ARM_MODE_ANY;
72         int retval = ERROR_OK;
73         uint32_t instr;
74
75         struct aarch64_common *aarch64 = target_to_aarch64(target);
76         struct armv8_common *armv8 = target_to_armv8(target);
77
78         if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
79                 aarch64->system_control_reg_curr = aarch64->system_control_reg;
80                 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
81
82                 switch (armv8->arm.core_mode) {
83                 case ARMV8_64_EL0T:
84                         target_mode = ARMV8_64_EL1H;
85                         /* fall through */
86                 case ARMV8_64_EL1T:
87                 case ARMV8_64_EL1H:
88                         instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
89                         break;
90                 case ARMV8_64_EL2T:
91                 case ARMV8_64_EL2H:
92                         instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
93                         break;
94                 case ARMV8_64_EL3H:
95                 case ARMV8_64_EL3T:
96                         instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
97                         break;
98
99                 case ARM_MODE_SVC:
100                 case ARM_MODE_ABT:
101                 case ARM_MODE_FIQ:
102                 case ARM_MODE_IRQ:
103                         instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
104                         break;
105
106                 default:
107                         LOG_INFO("cannot read system control register in this mode");
108                         return ERROR_FAIL;
109                 }
110
111                 if (target_mode != ARM_MODE_ANY)
112                         armv8_dpm_modeswitch(&armv8->dpm, target_mode);
113
114                 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
115                 if (retval != ERROR_OK)
116                         return retval;
117
118                 if (target_mode != ARM_MODE_ANY)
119                         armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
120         }
121
122         return retval;
123 }
124
125 /*  modify system_control_reg in order to enable or disable the MMU for:
126  *  - virt2phys address conversion
127  *  - reading or writing memory at physical or virtual addresses */
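/* In SCTLR_ELx, bit 0 (M) enables the MMU, bit 2 (C) the data/unified caches
 * and bit 12 (I) the instruction cache; these are the masks used below and in
 * aarch64_post_debug_entry(). */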
128 static int aarch64_mmu_modify(struct target *target, int enable)
129 {
130         struct aarch64_common *aarch64 = target_to_aarch64(target);
131         struct armv8_common *armv8 = &aarch64->armv8_common;
132         int retval = ERROR_OK;
133         uint32_t instr = 0;
134
135         if (enable) {
136                 /*      the MMU can only be re-enabled if it was enabled when the target stopped */
137                 if (!(aarch64->system_control_reg & 0x1U)) {
138                         LOG_ERROR("trying to enable the MMU on a target stopped with the MMU disabled");
139                         return ERROR_FAIL;
140                 }
141                 if (!(aarch64->system_control_reg_curr & 0x1U))
142                         aarch64->system_control_reg_curr |= 0x1U;
143         } else {
144                 if (aarch64->system_control_reg_curr & 0x4U) {
145                         /*  data cache is active */
146                         aarch64->system_control_reg_curr &= ~0x4U;
147                         /* flush the data cache before disabling it, using the armv8 cache handler */
148                         if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
149                                 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
150                 }
151                 if ((aarch64->system_control_reg_curr & 0x1U)) {
152                         aarch64->system_control_reg_curr &= ~0x1U;
153                 }
154         }
155
156         switch (armv8->arm.core_mode) {
157         case ARMV8_64_EL0T:
158         case ARMV8_64_EL1T:
159         case ARMV8_64_EL1H:
160                 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
161                 break;
162         case ARMV8_64_EL2T:
163         case ARMV8_64_EL2H:
164                 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
165                 break;
166         case ARMV8_64_EL3H:
167         case ARMV8_64_EL3T:
168                 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
169                 break;
170
171         case ARM_MODE_SVC:
172         case ARM_MODE_ABT:
173         case ARM_MODE_FIQ:
174         case ARM_MODE_IRQ:
175                 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
176                 break;
177
178         default:
179                 LOG_DEBUG("unknown cpu state 0x%" PRIx32, armv8->arm.core_mode);
180                 break;
181         }
182
183         retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
184                                 aarch64->system_control_reg_curr);
185         return retval;
186 }
187
188 /*
189  * Basic debug access; very low level, assumes the debug state has been saved
190  */
191 static int aarch64_init_debug_access(struct target *target)
192 {
193         struct armv8_common *armv8 = target_to_armv8(target);
194         int retval;
195         uint32_t dummy;
196
197         LOG_DEBUG("%s", target_name(target));
198
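        /* Clear the OS Lock first: while OSLK is set, most of the core debug
         * registers are inaccessible to the external debugger. */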
199         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
200                         armv8->debug_base + CPUV8_DBG_OSLAR, 0);
201         if (retval != ERROR_OK) {
202                 LOG_DEBUG("Examine %s failed", "oslock");
203                 return retval;
204         }
205
206         /* Clear Sticky Power Down status Bit in PRSR to enable access to
207            the registers in the Core Power Domain */
208         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
209                         armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
210         if (retval != ERROR_OK)
211                 return retval;
212
213         /*
214          * Static CTI configuration:
215          * Channel 0 -> trigger outputs HALT request to PE
216          * Channel 1 -> trigger outputs Resume request to PE
217          * Gate all channel trigger events from entering the CTM
218          */
219
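        /*
         * Per the ARMv8 CoreSight cross-trigger architecture, CTIOUTEN<n>
         * selects which CTM channel events drive trigger output <n> of this PE
         * (output 0 is the debug request, output 1 the restart request), and
         * CTIGATE controls which channels propagate to and from the CTM.
         */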
220         /* Enable CTI */
221         retval = arm_cti_enable(armv8->cti, true);
222         /* By default, gate all channel events to and from the CTM */
223         if (retval == ERROR_OK)
224                 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
225         /* output halt requests to PE on channel 0 event */
226         if (retval == ERROR_OK)
227                 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
228         /* output restart requests to PE on channel 1 event */
229         if (retval == ERROR_OK)
230                 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
231         if (retval != ERROR_OK)
232                 return retval;
233
234         /* Resync breakpoint registers */
235
236         return ERROR_OK;
237 }
238
239 /* Write to memory mapped registers directly with no cache or mmu handling */
240 static int aarch64_dap_write_memap_register_u32(struct target *target,
241         uint32_t address,
242         uint32_t value)
243 {
244         int retval;
245         struct armv8_common *armv8 = target_to_armv8(target);
246
247         retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
248
249         return retval;
250 }
251
252 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
253 {
254         struct arm_dpm *dpm = &a8->armv8_common.dpm;
255         int retval;
256
257         dpm->arm = &a8->armv8_common.arm;
258         dpm->didr = debug;
259
260         retval = armv8_dpm_setup(dpm);
261         if (retval == ERROR_OK)
262                 retval = armv8_dpm_initialize(dpm);
263
264         return retval;
265 }
266
267 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
268 {
269         struct armv8_common *armv8 = target_to_armv8(target);
270         return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
271 }
272
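/*
 * Read EDPRSR and report whether the bits selected by 'mask' match 'val'.
 * PRSR_HALT (HALTED) is set while the PE is in debug state; PRSR_SDR (sticky
 * debug restart) is set once the PE has restarted and clears on a read of PRSR.
 */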
273 static int aarch64_check_state_one(struct target *target,
274                 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
275 {
276         struct armv8_common *armv8 = target_to_armv8(target);
277         uint32_t prsr;
278         int retval;
279
280         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
281                         armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
282         if (retval != ERROR_OK)
283                 return retval;
284
285         if (p_prsr)
286                 *p_prsr = prsr;
287
288         if (p_result)
289                 *p_result = (prsr & mask) == (val & mask);
290
291         return ERROR_OK;
292 }
293
294 static int aarch64_wait_halt_one(struct target *target)
295 {
296         int retval = ERROR_OK;
297         uint32_t prsr;
298
299         int64_t then = timeval_ms();
300         for (;;) {
301                 int halted;
302
303                 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
304                 if (retval != ERROR_OK || halted)
305                         break;
306
307                 if (timeval_ms() > then + 1000) {
308                         retval = ERROR_TARGET_TIMEOUT;
309                         LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
310                         break;
311                 }
312         }
313         return retval;
314 }
315
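/*
 * Prepare all running PEs of an SMP group for halting: open their CTI gate
 * for channel 0 and enable halting debug mode, so that a single channel 0
 * pulse will halt the whole group.
 */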
316 static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
317 {
318         int retval = ERROR_OK;
319         struct target_list *head = target->head;
320         struct target *first = NULL;
321
322         LOG_DEBUG("target %s exc %i", target_name(target), exc_target);
323
324         while (head != NULL) {
325                 struct target *curr = head->target;
326                 struct armv8_common *armv8 = target_to_armv8(curr);
327                 head = head->next;
328
329                 if (exc_target && curr == target)
330                         continue;
331                 if (!target_was_examined(curr))
332                         continue;
333                 if (curr->state != TARGET_RUNNING)
334                         continue;
335
336                 /* HACK: mark this target as prepared for halting */
337                 curr->debug_reason = DBG_REASON_DBGRQ;
338
339                 /* open the gate for channel 0 to let HALT requests pass to the CTM */
340                 retval = arm_cti_ungate_channel(armv8->cti, 0);
341                 if (retval == ERROR_OK)
342                         retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
343                 if (retval != ERROR_OK)
344                         break;
345
346                 LOG_DEBUG("target %s prepared", target_name(curr));
347
348                 if (first == NULL)
349                         first = curr;
350         }
351
352         if (p_first) {
353                 if (exc_target && first)
354                         *p_first = first;
355                 else
356                         *p_first = target;
357         }
358
359         return retval;
360 }
361
362 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
363 {
364         int retval = ERROR_OK;
365         struct armv8_common *armv8 = target_to_armv8(target);
366
367         LOG_DEBUG("%s", target_name(target));
368
369         /* allow Halting Debug Mode */
370         retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
371         if (retval != ERROR_OK)
372                 return retval;
373
374         /* trigger an event on channel 0, this outputs a halt request to the PE */
375         retval = arm_cti_pulse_channel(armv8->cti, 0);
376         if (retval != ERROR_OK)
377                 return retval;
378
379         if (mode == HALT_SYNC) {
380                 retval = aarch64_wait_halt_one(target);
381                 if (retval != ERROR_OK) {
382                         if (retval == ERROR_TARGET_TIMEOUT)
383                                 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
384                         return retval;
385                 }
386         }
387
388         return ERROR_OK;
389 }
390
391 static int aarch64_halt_smp(struct target *target, bool exc_target)
392 {
393         struct target *next = target;
394         int retval;
395
396         /* prepare halt on all PEs of the group */
397         retval = aarch64_prepare_halt_smp(target, exc_target, &next);
398
399         if (exc_target && next == target)
400                 return retval;
401
402         /* halt the target PE */
403         if (retval == ERROR_OK)
404                 retval = aarch64_halt_one(next, HALT_LAZY);
405
406         if (retval != ERROR_OK)
407                 return retval;
408
409         /* wait for all PEs to halt */
410         int64_t then = timeval_ms();
411         for (;;) {
412                 bool all_halted = true;
413                 struct target_list *head;
414                 struct target *curr;
415
416                 foreach_smp_target(head, target->head) {
417                         int halted;
418
419                         curr = head->target;
420
421                         if (!target_was_examined(curr))
422                                 continue;
423
424                         retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
425                         if (retval != ERROR_OK || !halted) {
426                                 all_halted = false;
427                                 break;
428                         }
429                 }
430
431                 if (all_halted)
432                         break;
433
434                 if (timeval_ms() > then + 1000) {
435                         retval = ERROR_TARGET_TIMEOUT;
436                         break;
437                 }
438
439                 /*
440                  * HACK: on Hi6220 there are 8 cores organized in 2 clusters
441                  * and it looks like the CTIs are not connected by a common
442                  * trigger matrix. It seems that we need to halt one core in each
443                  * cluster explicitly. So if we find that a core has not halted
444                  * yet, we trigger an explicit halt for the second cluster.
445                  */
446                 retval = aarch64_halt_one(curr, HALT_LAZY);
447                 if (retval != ERROR_OK)
448                         break;
449         }
450
451         return retval;
452 }
453
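/*
 * Bring the remaining targets of the SMP group up to date after a halt and
 * poll them, leaving the target that serves GDB for last so that GDB gets a
 * consistent view of the whole group.
 */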
454 static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
455 {
456         struct target *gdb_target = NULL;
457         struct target_list *head;
458         struct target *curr;
459
460         if (debug_reason == DBG_REASON_NOTHALTED) {
461                 LOG_DEBUG("Halting remaining targets in SMP group");
462                 aarch64_halt_smp(target, true);
463         }
464
465         /* poll all targets in the group, but skip the target that serves GDB */
466         foreach_smp_target(head, target->head) {
467                 curr = head->target;
468                 /* skip calling context */
469                 if (curr == target)
470                         continue;
471                 if (!target_was_examined(curr))
472                         continue;
473                 /* skip targets that were already halted */
474                 if (curr->state == TARGET_HALTED)
475                         continue;
476                 /* remember the gdb_service->target */
477                 if (curr->gdb_service != NULL)
478                         gdb_target = curr->gdb_service->target;
479                 /* skip it */
480                 if (curr == gdb_target)
481                         continue;
482
483                 /* avoid recursion in aarch64_poll() */
484                 curr->smp = 0;
485                 aarch64_poll(curr);
486                 curr->smp = 1;
487         }
488
489         /* after all targets were updated, poll the gdb serving target */
490         if (gdb_target != NULL && gdb_target != target)
491                 aarch64_poll(gdb_target);
492
493         return ERROR_OK;
494 }
495
496 /*
497  * AArch64 Run control
498  */
499
500 static int aarch64_poll(struct target *target)
501 {
502         enum target_state prev_target_state;
503         int retval = ERROR_OK;
504         int halted;
505
506         retval = aarch64_check_state_one(target,
507                                 PRSR_HALT, PRSR_HALT, &halted, NULL);
508         if (retval != ERROR_OK)
509                 return retval;
510
511         if (halted) {
512                 prev_target_state = target->state;
513                 if (prev_target_state != TARGET_HALTED) {
514                         enum target_debug_reason debug_reason = target->debug_reason;
515
516                         /* We have a halting debug event */
517                         target->state = TARGET_HALTED;
518                         LOG_DEBUG("Target %s halted", target_name(target));
519                         retval = aarch64_debug_entry(target);
520                         if (retval != ERROR_OK)
521                                 return retval;
522
523                         if (target->smp)
524                                 update_halt_gdb(target, debug_reason);
525
526                         if (arm_semihosting(target, &retval) != 0)
527                                 return retval;
528
529                         switch (prev_target_state) {
530                         case TARGET_RUNNING:
531                         case TARGET_UNKNOWN:
532                         case TARGET_RESET:
533                                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
534                                 break;
535                         case TARGET_DEBUG_RUNNING:
536                                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
537                                 break;
538                         default:
539                                 break;
540                         }
541                 }
542         } else
543                 target->state = TARGET_RUNNING;
544
545         return retval;
546 }
547
548 static int aarch64_halt(struct target *target)
549 {
550         struct armv8_common *armv8 = target_to_armv8(target);
551         armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
552
553         if (target->smp)
554                 return aarch64_halt_smp(target, false);
555
556         return aarch64_halt_one(target, HALT_SYNC);
557 }
558
559 static int aarch64_restore_one(struct target *target, int current,
560         uint64_t *address, int handle_breakpoints, int debug_execution)
561 {
562         struct armv8_common *armv8 = target_to_armv8(target);
563         struct arm *arm = &armv8->arm;
564         int retval;
565         uint64_t resume_pc;
566
567         LOG_DEBUG("%s", target_name(target));
568
569         if (!debug_execution)
570                 target_free_all_working_areas(target);
571
572         /* current = 1: continue on current pc, otherwise continue at <address> */
573         resume_pc = buf_get_u64(arm->pc->value, 0, 64);
574         if (!current)
575                 resume_pc = *address;
576         else
577                 *address = resume_pc;
578
579         /* Make sure that the ARMv7 gdb thumb fixups do not
580          * kill the return address
581          */
582         switch (arm->core_state) {
583                 case ARM_STATE_ARM:
584                         resume_pc &= 0xFFFFFFFC;
585                         break;
586                 case ARM_STATE_AARCH64:
587                         resume_pc &= 0xFFFFFFFFFFFFFFFC;
588                         break;
589                 case ARM_STATE_THUMB:
590                 case ARM_STATE_THUMB_EE:
591                         /* When the return address is loaded into PC
592                          * bit 0 must be 1 to stay in Thumb state
593                          */
594                         resume_pc |= 0x1;
595                         break;
596                 case ARM_STATE_JAZELLE:
597                         LOG_ERROR("How do I resume into Jazelle state??");
598                         return ERROR_FAIL;
599         }
600         LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
601         buf_set_u64(arm->pc->value, 0, 64, resume_pc);
602         arm->pc->dirty = 1;
603         arm->pc->valid = 1;
604
605         /* call it now, before restoring the context, because it uses cpu
606          * register r0 to restore the system control register */
607         retval = aarch64_restore_system_control_reg(target);
608         if (retval == ERROR_OK)
609                 retval = aarch64_restore_context(target, handle_breakpoints);
610
611         return retval;
612 }
613
614 /**
615  * Prepare a single target for restart: check that DSCR is sane, acknowledge
616  * any pending CTI halt event, route restart events (channel 1) to this PE
617  * while gating halt events (channel 0), and make sure DSCR.HDE is set.
618  */
619 static int aarch64_prepare_restart_one(struct target *target)
620 {
621         struct armv8_common *armv8 = target_to_armv8(target);
622         int retval;
623         uint32_t dscr;
624         uint32_t tmp;
625
626         LOG_DEBUG("%s", target_name(target));
627
628         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
629                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
630         if (retval != ERROR_OK)
631                 return retval;
632
633         if ((dscr & DSCR_ITE) == 0)
634                 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
635         if ((dscr & DSCR_ERR) != 0)
636                 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
637
638         /* acknowledge a pending CTI halt event */
639         retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
640         /*
641          * open the CTI gate for channel 1 so that the restart events
642          * get passed along to all PEs. Also close gate for channel 0
643          * to isolate the PE from halt events.
644          */
645         if (retval == ERROR_OK)
646                 retval = arm_cti_ungate_channel(armv8->cti, 1);
647         if (retval == ERROR_OK)
648                 retval = arm_cti_gate_channel(armv8->cti, 0);
649
650         /* make sure that DSCR.HDE is set */
651         if (retval == ERROR_OK) {
652                 dscr |= DSCR_HDE;
653                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
654                                 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
655         }
656
657         if (retval == ERROR_OK) {
658                 /* clear sticky bits in PRSR, SDR is now 0 */
659                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
660                                 armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
661         }
662
663         return retval;
664 }
665
666 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
667 {
668         struct armv8_common *armv8 = target_to_armv8(target);
669         int retval;
670
671         LOG_DEBUG("%s", target_name(target));
672
673         /* trigger an event on channel 1, generates a restart request to the PE */
674         retval = arm_cti_pulse_channel(armv8->cti, 1);
675         if (retval != ERROR_OK)
676                 return retval;
677
678         if (mode == RESTART_SYNC) {
679                 int64_t then = timeval_ms();
680                 for (;;) {
681                         int resumed;
682                         /*
683                          * if PRSR.SDR is set now, the target did restart, even
684                          * if it's now already halted again (e.g. due to breakpoint)
685                          */
686                         retval = aarch64_check_state_one(target,
687                                                 PRSR_SDR, PRSR_SDR, &resumed, NULL);
688                         if (retval != ERROR_OK || resumed)
689                                 break;
690
691                         if (timeval_ms() > then + 1000) {
692                                 LOG_ERROR("%s: Timeout waiting for resume", target_name(target));
693                                 retval = ERROR_TARGET_TIMEOUT;
694                                 break;
695                         }
696                 }
697         }
698
699         if (retval != ERROR_OK)
700                 return retval;
701
702         target->debug_reason = DBG_REASON_NOTHALTED;
703         target->state = TARGET_RUNNING;
704
705         return ERROR_OK;
706 }
707
708 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
709 {
710         int retval;
711
712         LOG_DEBUG("%s", target_name(target));
713
714         retval = aarch64_prepare_restart_one(target);
715         if (retval == ERROR_OK)
716                 retval = aarch64_do_restart_one(target, mode);
717
718         return retval;
719 }
720
721 /*
722  * prepare all but the current target for restart
723  */
724 static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
725 {
726         int retval = ERROR_OK;
727         struct target_list *head;
728         struct target *first = NULL;
729         uint64_t address;
730
731         foreach_smp_target(head, target->head) {
732                 struct target *curr = head->target;
733
734                 /* skip calling target */
735                 if (curr == target)
736                         continue;
737                 if (!target_was_examined(curr))
738                         continue;
739                 if (curr->state != TARGET_HALTED)
740                         continue;
741
742                 /*  resume at current address, not in step mode */
743                 retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
744                 if (retval == ERROR_OK)
745                         retval = aarch64_prepare_restart_one(curr);
746                 if (retval != ERROR_OK) {
747                         LOG_ERROR("failed to restore target %s", target_name(curr));
748                         break;
749                 }
750                 /* remember the first valid target in the group */
751                 if (first == NULL)
752                         first = curr;
753         }
754
755         if (p_first)
756                 *p_first = first;
757
758         return retval;
759 }
760
761
762 static int aarch64_step_restart_smp(struct target *target)
763 {
764         int retval = ERROR_OK;
765         struct target_list *head;
766         struct target *first = NULL;
767
768         LOG_DEBUG("%s", target_name(target));
769
770         retval = aarch64_prep_restart_smp(target, 0, &first);
771         if (retval != ERROR_OK)
772                 return retval;
773
774         if (first != NULL)
775                 retval = aarch64_do_restart_one(first, RESTART_LAZY);
776         if (retval != ERROR_OK) {
777                 LOG_DEBUG("error restarting target %s", target_name(first));
778                 return retval;
779         }
780
781         int64_t then = timeval_ms();
782         for (;;) {
783                 struct target *curr = target;
784                 bool all_resumed = true;
785
786                 foreach_smp_target(head, target->head) {
787                         uint32_t prsr;
788                         int resumed;
789
790                         curr = head->target;
791
792                         if (curr == target)
793                                 continue;
794
795                         if (!target_was_examined(curr))
796                                 continue;
797
798                         retval = aarch64_check_state_one(curr,
799                                         PRSR_SDR, PRSR_SDR, &resumed, &prsr);
800                         if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
801                                 all_resumed = false;
802                                 break;
803                         }
804
805                         if (curr->state != TARGET_RUNNING) {
806                                 curr->state = TARGET_RUNNING;
807                                 curr->debug_reason = DBG_REASON_NOTHALTED;
808                                 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
809                         }
810                 }
811
812                 if (all_resumed)
813                         break;
814
815                 if (timeval_ms() > then + 1000) {
816                         LOG_ERROR("%s: timeout waiting for target resume", __func__);
817                         retval = ERROR_TARGET_TIMEOUT;
818                         break;
819                 }
820                 /*
821                  * HACK: on Hi6220 there are 8 cores organized in 2 clusters
822                  * and it looks like the CTIs are not connected by a common
823                  * trigger matrix. It seems that we need to restart one core in
824                  * each cluster explicitly. So if we find that a core has not
825                  * resumed yet, we trigger an explicit resume for the second cluster.
826                  */
827                 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
828                 if (retval != ERROR_OK)
829                         break;
830         }
831
832         return retval;
833 }
834
835 static int aarch64_resume(struct target *target, int current,
836         target_addr_t address, int handle_breakpoints, int debug_execution)
837 {
838         int retval = 0;
839         uint64_t addr = address;
840
841         struct armv8_common *armv8 = target_to_armv8(target);
842         armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;
843
844         if (target->state != TARGET_HALTED)
845                 return ERROR_TARGET_NOT_HALTED;
846
847         /*
848          * If this target is part of an SMP group, prepare the other
849          * targets for resuming. This involves restoring the complete
850          * target register context and setting up CTI gates to accept
851          * resume events from the trigger matrix.
852          */
853         if (target->smp) {
854                 retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
855                 if (retval != ERROR_OK)
856                         return retval;
857         }
858
859         /* all targets prepared, restore and restart the current target */
860         retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
861                                  debug_execution);
862         if (retval == ERROR_OK)
863                 retval = aarch64_restart_one(target, RESTART_SYNC);
864         if (retval != ERROR_OK)
865                 return retval;
866
867         if (target->smp) {
868                 int64_t then = timeval_ms();
869                 for (;;) {
870                         struct target *curr = target;
871                         struct target_list *head;
872                         bool all_resumed = true;
873
874                         foreach_smp_target(head, target->head) {
875                                 uint32_t prsr;
876                                 int resumed;
877
878                                 curr = head->target;
879                                 if (curr == target)
880                                         continue;
881                                 if (!target_was_examined(curr))
882                                         continue;
883
884                                 retval = aarch64_check_state_one(curr,
885                                                 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
886                                 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
887                                         all_resumed = false;
888                                         break;
889                                 }
890
891                                 if (curr->state != TARGET_RUNNING) {
892                                         curr->state = TARGET_RUNNING;
893                                         curr->debug_reason = DBG_REASON_NOTHALTED;
894                                         target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
895                                 }
896                         }
897
898                         if (all_resumed)
899                                 break;
900
901                         if (timeval_ms() > then + 1000) {
902                                 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
903                                 retval = ERROR_TARGET_TIMEOUT;
904                                 break;
905                         }
906
907                         /*
908                          * HACK: on Hi6220 there are 8 cores organized in 2 clusters
909                          * and it looks like the CTIs are not connected by a common
910                          * trigger matrix. It seems that we need to restart one core in
911                          * each cluster explicitly. So if we find that a core has not
912                          * resumed yet, we trigger an explicit resume for the second cluster.
913                          */
914                         retval = aarch64_do_restart_one(curr, RESTART_LAZY);
915                         if (retval != ERROR_OK)
916                                 break;
917                 }
918         }
919
920         if (retval != ERROR_OK)
921                 return retval;
922
923         target->debug_reason = DBG_REASON_NOTHALTED;
924
925         if (!debug_execution) {
926                 target->state = TARGET_RUNNING;
927                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
928                 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
929         } else {
930                 target->state = TARGET_DEBUG_RUNNING;
931                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
932                 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
933         }
934
935         return ERROR_OK;
936 }
937
938 static int aarch64_debug_entry(struct target *target)
939 {
940         int retval = ERROR_OK;
941         struct armv8_common *armv8 = target_to_armv8(target);
942         struct arm_dpm *dpm = &armv8->dpm;
943         enum arm_state core_state;
944         uint32_t dscr;
945
946         /* make sure to clear all sticky errors */
947         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
948                         armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
949         if (retval == ERROR_OK)
950                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
951                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
952         if (retval == ERROR_OK)
953                 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
954
955         if (retval != ERROR_OK)
956                 return retval;
957
958         LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);
959
960         dpm->dscr = dscr;
961         core_state = armv8_dpm_get_core_state(dpm);
962         armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
963         armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
964
965         /* close the CTI gate for all events */
966         if (retval == ERROR_OK)
967                 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
968         /* discard async exceptions */
969         if (retval == ERROR_OK)
970                 retval = dpm->instr_cpsr_sync(dpm);
971         if (retval != ERROR_OK)
972                 return retval;
973
974         /* Examine debug reason */
975         armv8_dpm_report_dscr(dpm, dscr);
976
977         /* save address of instruction that triggered the watchpoint? */
978         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
979                 uint32_t tmp;
980                 uint64_t wfar = 0;
981
982                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
983                                 armv8->debug_base + CPUV8_DBG_WFAR1,
984                                 &tmp);
985                 if (retval != ERROR_OK)
986                         return retval;
987                 wfar = tmp;
988                 wfar = (wfar << 32);
989                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
990                                 armv8->debug_base + CPUV8_DBG_WFAR0,
991                                 &tmp);
992                 if (retval != ERROR_OK)
993                         return retval;
994                 wfar |= tmp;
995                 armv8_dpm_report_wfar(&armv8->dpm, wfar);
996         }
997
998         retval = armv8_dpm_read_current_registers(&armv8->dpm);
999
1000         if (retval == ERROR_OK && armv8->post_debug_entry)
1001                 retval = armv8->post_debug_entry(target);
1002
1003         return retval;
1004 }
1005
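/*
 * Runs after each debug entry: read SCTLR for the current exception level and
 * cache the MMU/cache enable state, so that subsequent memory accesses and
 * virt2phys translations can take it into account.
 */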
1006 static int aarch64_post_debug_entry(struct target *target)
1007 {
1008         struct aarch64_common *aarch64 = target_to_aarch64(target);
1009         struct armv8_common *armv8 = &aarch64->armv8_common;
1010         int retval;
1011         enum arm_mode target_mode = ARM_MODE_ANY;
1012         uint32_t instr;
1013
1014         switch (armv8->arm.core_mode) {
1015         case ARMV8_64_EL0T:
1016                 target_mode = ARMV8_64_EL1H;
1017                 /* fall through */
1018         case ARMV8_64_EL1T:
1019         case ARMV8_64_EL1H:
1020                 instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
1021                 break;
1022         case ARMV8_64_EL2T:
1023         case ARMV8_64_EL2H:
1024                 instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
1025                 break;
1026         case ARMV8_64_EL3H:
1027         case ARMV8_64_EL3T:
1028                 instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
1029                 break;
1030
1031         case ARM_MODE_SVC:
1032         case ARM_MODE_ABT:
1033         case ARM_MODE_FIQ:
1034         case ARM_MODE_IRQ:
1035                 instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1036                 break;
1037
1038         default:
1039                 LOG_INFO("cannot read system control register in this mode");
1040                 return ERROR_FAIL;
1041         }
1042
1043         if (target_mode != ARM_MODE_ANY)
1044                 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
1045
1046         retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
1047         if (retval != ERROR_OK)
1048                 return retval;
1049
1050         if (target_mode != ARM_MODE_ANY)
1051                 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
1052
1053         LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1054         aarch64->system_control_reg_curr = aarch64->system_control_reg;
1055
1056         if (armv8->armv8_mmu.armv8_cache.info == -1) {
1057                 armv8_identify_cache(armv8);
1058                 armv8_read_mpidr(armv8);
1059         }
1060
1061         armv8->armv8_mmu.mmu_enabled =
1062                         (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1063         armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1064                 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1065         armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1066                 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1067         return ERROR_OK;
1068 }
1069
1070 /*
1071  * single-step a target
1072  */
1073 static int aarch64_step(struct target *target, int current, target_addr_t address,
1074         int handle_breakpoints)
1075 {
1076         struct armv8_common *armv8 = target_to_armv8(target);
1077         struct aarch64_common *aarch64 = target_to_aarch64(target);
1078         int saved_retval = ERROR_OK;
1079         int retval;
1080         uint32_t edecr;
1081
1082         armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
1083
1084         if (target->state != TARGET_HALTED) {
1085                 LOG_WARNING("target not halted");
1086                 return ERROR_TARGET_NOT_HALTED;
1087         }
1088
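        /* EDECR.SS (bit 2) enables the Halting Step debug event: after restart
         * the PE executes a single instruction and then re-enters debug state. */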
1089         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1090                         armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1091         /* make sure EDECR.SS is not set when restoring the register */
1092
1093         if (retval == ERROR_OK) {
1094                 edecr &= ~0x4;
1095                 /* set EDECR.SS to enter hardware step mode */
1096                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1097                                 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1098         }
1099         /* disable interrupts while stepping (EDSCR.INTdis, bits [23:22]) */
1100         if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1101                 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1102         /* bail out if stepping setup has failed */
1103         if (retval != ERROR_OK)
1104                 return retval;
1105
1106         if (target->smp && (current == 1)) {
1107                 /*
1108                  * isolate current target so that it doesn't get resumed
1109                  * together with the others
1110                  */
1111                 retval = arm_cti_gate_channel(armv8->cti, 1);
1112                 /* resume all other targets in the group */
1113                 if (retval == ERROR_OK)
1114                         retval = aarch64_step_restart_smp(target);
1115                 if (retval != ERROR_OK) {
1116                         LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1117                         return retval;
1118                 }
1119                 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1120         }
1121
1122         /* all other targets running, restore and restart the current target */
1123         retval = aarch64_restore_one(target, current, &address, 0, 0);
1124         if (retval == ERROR_OK)
1125                 retval = aarch64_restart_one(target, RESTART_LAZY);
1126
1127         if (retval != ERROR_OK)
1128                 return retval;
1129
1130         LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1131         if (!handle_breakpoints)
1132                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1133
1134         int64_t then = timeval_ms();
1135         for (;;) {
1136                 int stepped;
1137                 uint32_t prsr;
1138
1139                 retval = aarch64_check_state_one(target,
1140                                         PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1141                 if (retval != ERROR_OK || stepped)
1142                         break;
1143
1144                 if (timeval_ms() > then + 100) {
1145                         LOG_ERROR("timeout waiting for target %s halt after step",
1146                                         target_name(target));
1147                         retval = ERROR_TARGET_TIMEOUT;
1148                         break;
1149                 }
1150         }
1151
1152         /*
1153          * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1154          * causes a timeout. The core takes the step but doesn't complete it and so
1155          * debug state is never entered. However, you can manually halt the core
1156          * as an external debug event is also a WFI wakeup event.
1157          */
1158         if (retval == ERROR_TARGET_TIMEOUT)
1159                 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1160
1161         /* restore EDECR */
1162         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1163                         armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1164         if (retval != ERROR_OK)
1165                 return retval;
1166
1167         /* restore interrupts */
1168         if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1169                 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1170                 if (retval != ERROR_OK)
1171                         return ERROR_OK;
1172         }
1173
1174         if (saved_retval != ERROR_OK)
1175                 return saved_retval;
1176
1177         return aarch64_poll(target);
1178 }
1179
1180 static int aarch64_restore_context(struct target *target, bool bpwp)
1181 {
1182         struct armv8_common *armv8 = target_to_armv8(target);
1183         struct arm *arm = &armv8->arm;
1184
1185         int retval;
1186
1187         LOG_DEBUG("%s", target_name(target));
1188
1189         if (armv8->pre_restore_context)
1190                 armv8->pre_restore_context(target);
1191
1192         retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1193         if (retval == ERROR_OK) {
1194                 /* registers are now invalid */
1195                 register_cache_invalidate(arm->core_cache);
1196                 register_cache_invalidate(arm->core_cache->next);
1197         }
1198
1199         return retval;
1200 }
1201
1202 /*
1203  * AArch64 Breakpoint and watchpoint functions
1204  */
1205
1206 /* Setup hardware Breakpoint Register Pair */
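/*
 * DBGBCR encoding used here: BT (bits 23:20) comes from 'matchmode', HMC=1
 * (bit 13), BAS (bits 8:5) from byte_addr_select, PMC=0b11 (bits 2:1) and
 * E=1 (bit 0); this HMC/SSC/PMC combination makes the breakpoint match at
 * any exception level and security state.
 */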
1207 static int aarch64_set_breakpoint(struct target *target,
1208         struct breakpoint *breakpoint, uint8_t matchmode)
1209 {
1210         int retval;
1211         int brp_i = 0;
1212         uint32_t control;
1213         uint8_t byte_addr_select = 0x0F;
1214         struct aarch64_common *aarch64 = target_to_aarch64(target);
1215         struct armv8_common *armv8 = &aarch64->armv8_common;
1216         struct aarch64_brp *brp_list = aarch64->brp_list;
1217
1218         if (breakpoint->set) {
1219                 LOG_WARNING("breakpoint already set");
1220                 return ERROR_OK;
1221         }
1222
1223         if (breakpoint->type == BKPT_HARD) {
1224                 int64_t bpt_value;
1225                 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1226                         brp_i++;
1227                 if (brp_i >= aarch64->brp_num) {
1228                         LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1229                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1230                 }
1231                 breakpoint->set = brp_i + 1;
1232                 if (breakpoint->length == 2)
1233                         byte_addr_select = (3 << (breakpoint->address & 0x02));
1234                 control = ((matchmode & 0x7) << 20)
1235                         | (1 << 13)
1236                         | (byte_addr_select << 5)
1237                         | (3 << 1) | 1;
1238                 brp_list[brp_i].used = 1;
1239                 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1240                 brp_list[brp_i].control = control;
1241                 bpt_value = brp_list[brp_i].value;
1242
1243                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1244                                 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1245                                 (uint32_t)(bpt_value & 0xFFFFFFFF));
1246                 if (retval != ERROR_OK)
1247                         return retval;
1248                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1249                                 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1250                                 (uint32_t)(bpt_value >> 32));
1251                 if (retval != ERROR_OK)
1252                         return retval;
1253
1254                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1255                                 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1256                                 brp_list[brp_i].control);
1257                 if (retval != ERROR_OK)
1258                         return retval;
1259                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1260                         brp_list[brp_i].control,
1261                         brp_list[brp_i].value);
1262
1263         } else if (breakpoint->type == BKPT_SOFT) {
1264                 uint8_t code[4];
1265
1266                 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
1267                 retval = target_read_memory(target,
1268                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1269                                 breakpoint->length, 1,
1270                                 breakpoint->orig_instr);
1271                 if (retval != ERROR_OK)
1272                         return retval;
1273
1274                 armv8_cache_d_inner_flush_virt(armv8,
1275                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1276                                 breakpoint->length);
1277
1278                 retval = target_write_memory(target,
1279                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1280                                 breakpoint->length, 1, code);
1281                 if (retval != ERROR_OK)
1282                         return retval;
1283
1284                 armv8_cache_d_inner_flush_virt(armv8,
1285                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1286                                 breakpoint->length);
1287
1288                 armv8_cache_i_inner_inval_virt(armv8,
1289                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1290                                 breakpoint->length);
1291
1292                 breakpoint->set = 0x11; /* Any nice value but 0 */
1293         }
1294
1295         /* Ensure that halting debug mode is enabled */
1296         retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1297         if (retval != ERROR_OK) {
1298                 LOG_DEBUG("Failed to set DSCR.HDE");
1299                 return retval;
1300         }
1301
1302         return ERROR_OK;
1303 }
1304
1305 static int aarch64_set_context_breakpoint(struct target *target,
1306         struct breakpoint *breakpoint, uint8_t matchmode)
1307 {
1308         int retval = ERROR_FAIL;
1309         int brp_i = 0;
1310         uint32_t control;
1311         uint8_t byte_addr_select = 0x0F;
1312         struct aarch64_common *aarch64 = target_to_aarch64(target);
1313         struct armv8_common *armv8 = &aarch64->armv8_common;
1314         struct aarch64_brp *brp_list = aarch64->brp_list;
1315
1316         if (breakpoint->set) {
1317                 LOG_WARNING("breakpoint already set");
1318                 return retval;
1319         }
1320         /* check for an available context BRP */
1321         while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
1322                 (brp_list[brp_i].type != BRP_CONTEXT)))
1323                 brp_i++;
1324
1325         if (brp_i >= aarch64->brp_num) {
1326                 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1327                 return ERROR_FAIL;
1328         }
1329
1330         breakpoint->set = brp_i + 1;
1331         control = ((matchmode & 0x7) << 20)
1332                 | (1 << 13)
1333                 | (byte_addr_select << 5)
1334                 | (3 << 1) | 1;
1335         brp_list[brp_i].used = 1;
1336         brp_list[brp_i].value = (breakpoint->asid);
1337         brp_list[brp_i].control = control;
1338         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1339                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1340                         brp_list[brp_i].value);
1341         if (retval != ERROR_OK)
1342                 return retval;
1343         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1344                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1345                         brp_list[brp_i].control);
1346         if (retval != ERROR_OK)
1347                 return retval;
1348         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1349                 brp_list[brp_i].control,
1350                 brp_list[brp_i].value);
1351         return ERROR_OK;
1352
1353 }
1354
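/*
 * A hybrid breakpoint links an address-match BRP (BT=0x1, linked) to a
 * context-ID-match BRP (BT=0x3) through the LBN field (bits 19:16), so the
 * address breakpoint only fires when the linked BRP matches the current
 * context ID.
 */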
1355 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1356 {
1357         int retval = ERROR_FAIL;
1358         int brp_1 = 0;  /* holds the contextID pair */
1359         int brp_2 = 0;  /* holds the IVA pair */
1360         uint32_t control_CTX, control_IVA;
1361         uint8_t CTX_byte_addr_select = 0x0F;
1362         uint8_t IVA_byte_addr_select = 0x0F;
1363         uint8_t CTX_machmode = 0x03;
1364         uint8_t IVA_machmode = 0x01;
1365         struct aarch64_common *aarch64 = target_to_aarch64(target);
1366         struct armv8_common *armv8 = &aarch64->armv8_common;
1367         struct aarch64_brp *brp_list = aarch64->brp_list;
1368
1369         if (breakpoint->set) {
1370                 LOG_WARNING("breakpoint already set");
1371                 return retval;
1372         }
1373         /* check for an available context BRP */
1374         while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
1375                 (brp_list[brp_1].type != BRP_CONTEXT)))
1376                 brp_1++;
1377
1378         LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1379         if (brp_1 >= aarch64->brp_num) {
1380                 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1381                 return ERROR_FAIL;
1382         }
1383
1384         while ((brp_2 < aarch64->brp_num) &&
1385                 (brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
1386                 brp_2++;
1387
1388         LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1389         if (brp_2 >= aarch64->brp_num) {
1390                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1391                 return ERROR_FAIL;
1392         }
1393
1394         breakpoint->set = brp_1 + 1;
1395         breakpoint->linked_BRP = brp_2;
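        /* Context-ID half of the linked pair: BT field = 0x3 (linked
         * context-ID match) and LBN[19:16] points at the address BRP (brp_2).
         */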
1396         control_CTX = ((CTX_machmode & 0x7) << 20)
1397                 | (brp_2 << 16)
1398                 | (0 << 14)
1399                 | (CTX_byte_addr_select << 5)
1400                 | (3 << 1) | 1;
1401         brp_list[brp_1].used = 1;
1402         brp_list[brp_1].value = (breakpoint->asid);
1403         brp_list[brp_1].control = control_CTX;
1404         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1405                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1406                         brp_list[brp_1].value);
1407         if (retval != ERROR_OK)
1408                 return retval;
1409         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1410                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1411                         brp_list[brp_1].control);
1412         if (retval != ERROR_OK)
1413                 return retval;
1414
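        /* Address half of the linked pair: BT field = 0x1 (linked address
         * match) with LBN[19:16] linking back to the context BRP (brp_1).
         */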
1415         control_IVA = ((IVA_machmode & 0x7) << 20)
1416                 | (brp_1 << 16)
1417                 | (1 << 13)
1418                 | (IVA_byte_addr_select << 5)
1419                 | (3 << 1) | 1;
1420         brp_list[brp_2].used = 1;
1421         brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1422         brp_list[brp_2].control = control_IVA;
1423         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1424                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1425                         brp_list[brp_2].value & 0xFFFFFFFF);
1426         if (retval != ERROR_OK)
1427                 return retval;
1428         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1429                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1430                         brp_list[brp_2].value >> 32);
1431         if (retval != ERROR_OK)
1432                 return retval;
1433         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1434                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1435                         brp_list[brp_2].control);
1436         if (retval != ERROR_OK)
1437                 return retval;
1438
1439         return ERROR_OK;
1440 }
1441
1442 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1443 {
1444         int retval;
1445         struct aarch64_common *aarch64 = target_to_aarch64(target);
1446         struct armv8_common *armv8 = &aarch64->armv8_common;
1447         struct aarch64_brp *brp_list = aarch64->brp_list;
1448
1449         if (!breakpoint->set) {
1450                 LOG_WARNING("breakpoint not set");
1451                 return ERROR_OK;
1452         }
1453
1454         if (breakpoint->type == BKPT_HARD) {
1455                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1456                         int brp_i = breakpoint->set - 1;
1457                         int brp_j = breakpoint->linked_BRP;
1458                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1459                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1460                                 return ERROR_OK;
1461                         }
1462                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1463                                 brp_list[brp_i].control, brp_list[brp_i].value);
1464                         brp_list[brp_i].used = 0;
1465                         brp_list[brp_i].value = 0;
1466                         brp_list[brp_i].control = 0;
1467                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1468                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1469                                         brp_list[brp_i].control);
1470                         if (retval != ERROR_OK)
1471                                 return retval;
1472                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1473                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1474                                         (uint32_t)brp_list[brp_i].value);
1475                         if (retval != ERROR_OK)
1476                                 return retval;
1477                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1478                                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1479                                         (uint32_t)brp_list[brp_i].value);
1480                         if (retval != ERROR_OK)
1481                                 return retval;
1482                         if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1483                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1484                                 return ERROR_OK;
1485                         }
1486                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1487                                 brp_list[brp_j].control, brp_list[brp_j].value);
1488                         brp_list[brp_j].used = 0;
1489                         brp_list[brp_j].value = 0;
1490                         brp_list[brp_j].control = 0;
1491                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1492                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1493                                         brp_list[brp_j].control);
1494                         if (retval != ERROR_OK)
1495                                 return retval;
1496                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1497                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
1498                                         (uint32_t)brp_list[brp_j].value);
1499                         if (retval != ERROR_OK)
1500                                 return retval;
1501                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1502                                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
1503                                         (uint32_t)brp_list[brp_j].value);
1504                         if (retval != ERROR_OK)
1505                                 return retval;
1506
1507                         breakpoint->linked_BRP = 0;
1508                         breakpoint->set = 0;
1509                         return ERROR_OK;
1510
1511                 } else {
1512                         int brp_i = breakpoint->set - 1;
1513                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1514                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1515                                 return ERROR_OK;
1516                         }
1517                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1518                                 brp_list[brp_i].control, brp_list[brp_i].value);
1519                         brp_list[brp_i].used = 0;
1520                         brp_list[brp_i].value = 0;
1521                         brp_list[brp_i].control = 0;
1522                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1523                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1524                                         brp_list[brp_i].control);
1525                         if (retval != ERROR_OK)
1526                                 return retval;
1527                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1528                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1529                                         brp_list[brp_i].value);
1530                         if (retval != ERROR_OK)
1531                                 return retval;
1532
1533                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1534                                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1535                                         (uint32_t)brp_list[brp_i].value);
1536                         if (retval != ERROR_OK)
1537                                 return retval;
1538                         breakpoint->set = 0;
1539                         return ERROR_OK;
1540                 }
1541         } else {
1542                 /* restore original instruction (kept in target endianness) */
1543
1544                 armv8_cache_d_inner_flush_virt(armv8,
1545                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1546                                 breakpoint->length);
1547
1548                 if (breakpoint->length == 4) {
1549                         retval = target_write_memory(target,
1550                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1551                                         4, 1, breakpoint->orig_instr);
1552                         if (retval != ERROR_OK)
1553                                 return retval;
1554                 } else {
1555                         retval = target_write_memory(target,
1556                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1557                                         2, 1, breakpoint->orig_instr);
1558                         if (retval != ERROR_OK)
1559                                 return retval;
1560                 }
1561
1562                 armv8_cache_d_inner_flush_virt(armv8,
1563                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1564                                 breakpoint->length);
1565
1566                 armv8_cache_i_inner_inval_virt(armv8,
1567                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1568                                 breakpoint->length);
1569         }
1570         breakpoint->set = 0;
1571
1572         return ERROR_OK;
1573 }
1574
1575 static int aarch64_add_breakpoint(struct target *target,
1576         struct breakpoint *breakpoint)
1577 {
1578         struct aarch64_common *aarch64 = target_to_aarch64(target);
1579
1580         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1581                 LOG_INFO("no hardware breakpoint available");
1582                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1583         }
1584
1585         if (breakpoint->type == BKPT_HARD)
1586                 aarch64->brp_num_available--;
1587
1588         return aarch64_set_breakpoint(target, breakpoint, 0x00);        /* Exact match */
1589 }
1590
1591 static int aarch64_add_context_breakpoint(struct target *target,
1592         struct breakpoint *breakpoint)
1593 {
1594         struct aarch64_common *aarch64 = target_to_aarch64(target);
1595
1596         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1597                 LOG_INFO("no hardware breakpoint available");
1598                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1599         }
1600
1601         if (breakpoint->type == BKPT_HARD)
1602                 aarch64->brp_num_available--;
1603
1604         return aarch64_set_context_breakpoint(target, breakpoint, 0x02);        /* asid match */
1605 }
1606
1607 static int aarch64_add_hybrid_breakpoint(struct target *target,
1608         struct breakpoint *breakpoint)
1609 {
1610         struct aarch64_common *aarch64 = target_to_aarch64(target);
1611
1612         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1613                 LOG_INFO("no hardware breakpoint available");
1614                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1615         }
1616
1617         if (breakpoint->type == BKPT_HARD)
1618                 aarch64->brp_num_available--;
1619
1620         return aarch64_set_hybrid_breakpoint(target, breakpoint);       /* IVA + context match */
1621 }
1622
1623
1624 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1625 {
1626         struct aarch64_common *aarch64 = target_to_aarch64(target);
1627
1628 #if 0
1629 /* It is perfectly possible to remove breakpoints while the target is running */
1630         if (target->state != TARGET_HALTED) {
1631                 LOG_WARNING("target not halted");
1632                 return ERROR_TARGET_NOT_HALTED;
1633         }
1634 #endif
1635
1636         if (breakpoint->set) {
1637                 aarch64_unset_breakpoint(target, breakpoint);
1638                 if (breakpoint->type == BKPT_HARD)
1639                         aarch64->brp_num_available++;
1640         }
1641
1642         return ERROR_OK;
1643 }
1644
1645 /*
1646  * AArch64 Reset functions
1647  */
1648
1649 static int aarch64_assert_reset(struct target *target)
1650 {
1651         struct armv8_common *armv8 = target_to_armv8(target);
1652
1653         LOG_DEBUG(" ");
1654
1655         /* FIXME when halt is requested, make it work somehow... */
1656
1657         /* Issue some kind of warm reset. */
1658         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1659                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1660         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1661                 /* REVISIT handle "pulls" cases, if there's
1662                  * hardware that needs them to work.
1663                  */
1664                 jtag_add_reset(0, 1);
1665         } else {
1666                 LOG_ERROR("%s: how to reset?", target_name(target));
1667                 return ERROR_FAIL;
1668         }
1669
1670         /* registers are now invalid */
1671         if (target_was_examined(target)) {
1672                 register_cache_invalidate(armv8->arm.core_cache);
1673                 register_cache_invalidate(armv8->arm.core_cache->next);
1674         }
1675
1676         target->state = TARGET_RESET;
1677
1678         return ERROR_OK;
1679 }
1680
1681 static int aarch64_deassert_reset(struct target *target)
1682 {
1683         int retval;
1684
1685         LOG_DEBUG(" ");
1686
1687         /* be certain SRST is off */
1688         jtag_add_reset(0, 0);
1689
1690         if (!target_was_examined(target))
1691                 return ERROR_OK;
1692
1693         retval = aarch64_poll(target);
1694         if (retval != ERROR_OK)
1695                 return retval;
1696
1697         retval = aarch64_init_debug_access(target);
1698         if (retval != ERROR_OK)
1699                 return retval;
1700
1701         if (target->reset_halt) {
1702                 if (target->state != TARGET_HALTED) {
1703                         LOG_WARNING("%s: ran after reset and before halt ...",
1704                                 target_name(target));
1705                         retval = target_halt(target);
1706                 }
1707         }
1708
1709         return retval;
1710 }
1711
1712 static int aarch64_write_cpu_memory_slow(struct target *target,
1713         uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1714 {
1715         struct armv8_common *armv8 = target_to_armv8(target);
1716         struct arm_dpm *dpm = &armv8->dpm;
1717         struct arm *arm = &armv8->arm;
1718         int retval;
1719
1720         armv8_reg_current(arm, 1)->dirty = true;
1721
1722         /* change DCC to normal mode if necessary */
1723         if (*dscr & DSCR_MA) {
1724                 *dscr &= ~DSCR_MA;
1725                 retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1726                                 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1727                 if (retval != ERROR_OK)
1728                         return retval;
1729         }
1730
1731         while (count) {
1732                 uint32_t data, opcode;
1733
1734                 /* write the data to store into DTRRX */
1735                 if (size == 1)
1736                         data = *buffer;
1737                 else if (size == 2)
1738                         data = target_buffer_get_u16(target, buffer);
1739                 else
1740                         data = target_buffer_get_u32(target, buffer);
1741                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1742                                 armv8->debug_base + CPUV8_DBG_DTRRX, data);
1743                 if (retval != ERROR_OK)
1744                         return retval;
1745
1746                 if (arm->core_state == ARM_STATE_AARCH64)
1747                         retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
1748                 else
1749                         retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1750                 if (retval != ERROR_OK)
1751                         return retval;
1752
1753                 if (size == 1)
1754                         opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
1755                 else if (size == 2)
1756                         opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
1757                 else
1758                         opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
1759                 retval = dpm->instr_execute(dpm, opcode);
1760                 if (retval != ERROR_OK)
1761                         return retval;
1762
1763                 /* Advance */
1764                 buffer += size;
1765                 --count;
1766         }
1767
1768         return ERROR_OK;
1769 }
1770
1771 static int aarch64_write_cpu_memory_fast(struct target *target,
1772         uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1773 {
1774         struct armv8_common *armv8 = target_to_armv8(target);
1775         struct arm *arm = &armv8->arm;
1776         int retval;
1777
1778         armv8_reg_current(arm, 1)->dirty = true;
1779
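        /* In memory access mode (DSCR.MA set) each write to DBGDTRRX_EL0
         * makes the core store that word to [X0] and advance X0 by 4, so the
         * whole buffer can be pushed as one non-incrementing AP burst.
         */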
1780         /* Step 1.d   - Change DCC to memory mode */
1781         *dscr |= DSCR_MA;
1782         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1783                         armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1784         if (retval != ERROR_OK)
1785                 return retval;
1786
1787
1788         /* Step 2.a   - Do the write */
1789         retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1790                                         buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
1791         if (retval != ERROR_OK)
1792                 return retval;
1793
1794         /* Step 3.a   - Switch DTR mode back to Normal mode */
1795         *dscr &= ~DSCR_MA;
1796         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1797                                 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1798         if (retval != ERROR_OK)
1799                 return retval;
1800
1801         return ERROR_OK;
1802 }
1803
1804 static int aarch64_write_cpu_memory(struct target *target,
1805         uint64_t address, uint32_t size,
1806         uint32_t count, const uint8_t *buffer)
1807 {
1808         /* write memory through APB-AP */
1809         int retval = ERROR_COMMAND_SYNTAX_ERROR;
1810         struct armv8_common *armv8 = target_to_armv8(target);
1811         struct arm_dpm *dpm = &armv8->dpm;
1812         struct arm *arm = &armv8->arm;
1813         uint32_t dscr;
1814
1815         if (target->state != TARGET_HALTED) {
1816                 LOG_WARNING("target not halted");
1817                 return ERROR_TARGET_NOT_HALTED;
1818         }
1819
1820         /* Mark register X0 as dirty, as it will be used
1821          * for transferring the data.
1822          * It will be restored automatically when exiting
1823          * debug mode
1824          */
1825         armv8_reg_current(arm, 0)->dirty = true;
1826
1827         /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1828
1829         /* Read DSCR */
1830         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1831                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1832         if (retval != ERROR_OK)
1833                 return retval;
1834
1835         /* Set Normal access mode  */
1836         dscr = (dscr & ~DSCR_MA);
1837         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1838                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1839         if (retval != ERROR_OK)
1840                 return retval;
1841
1842         if (arm->core_state == ARM_STATE_AARCH64) {
1843                 /* Write X0 with value 'address' using write procedure */
1844                 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1845                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1846                 retval = dpm->instr_write_data_dcc_64(dpm,
1847                                 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
1848         } else {
1849                 /* Write R0 with value 'address' using write procedure */
1850                 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1851                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1852                 retval = dpm->instr_write_data_dcc(dpm,
1853                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
1854         }
1855
1856         if (retval != ERROR_OK)
1857                 return retval;
1858
1859         if (size == 4 && (address % 4) == 0)
1860                 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
1861         else
1862                 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
1863
1864         if (retval != ERROR_OK) {
1865                 /* Unset DTR mode */
1866                 mem_ap_read_atomic_u32(armv8->debug_ap,
1867                                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1868                 dscr &= ~DSCR_MA;
1869                 mem_ap_write_atomic_u32(armv8->debug_ap,
1870                                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1871         }
1872
1873         /* Check for sticky abort flags in the DSCR */
1874         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1875                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1876         if (retval != ERROR_OK)
1877                 return retval;
1878
1879         dpm->dscr = dscr;
1880         if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1881                 /* Abort occurred - clear it and exit */
1882                 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1883                 armv8_dpm_handle_exception(dpm, true);
1884                 return ERROR_FAIL;
1885         }
1886
1887         /* Done */
1888         return ERROR_OK;
1889 }
1890
1891 static int aarch64_read_cpu_memory_slow(struct target *target,
1892         uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1893 {
1894         struct armv8_common *armv8 = target_to_armv8(target);
1895         struct arm_dpm *dpm = &armv8->dpm;
1896         struct arm *arm = &armv8->arm;
1897         int retval;
1898
1899         armv8_reg_current(arm, 1)->dirty = true;
1900
1901         /* change DCC to normal mode (if necessary) */
1902         if (*dscr & DSCR_MA) {
1903                 *dscr &= ~DSCR_MA;
1904                 retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1905                                 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1906                 if (retval != ERROR_OK)
1907                         return retval;
1908         }
1909
1910         while (count) {
1911                 uint32_t opcode, data;
1912
1913                 if (size == 1)
1914                         opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1915                 else if (size == 2)
1916                         opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1917                 else
1918                         opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1919                 retval = dpm->instr_execute(dpm, opcode);
1920                 if (retval != ERROR_OK)
1921                         return retval;
1922
1923                 if (arm->core_state == ARM_STATE_AARCH64)
1924                         retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1925                 else
1926                         retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1927                 if (retval != ERROR_OK)
1928                         return retval;
1929
1930                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1931                                 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1932                 if (retval != ERROR_OK)
1933                         return retval;
1934
1935                 if (size == 1)
1936                         *buffer = (uint8_t)data;
1937                 else if (size == 2)
1938                         target_buffer_set_u16(target, buffer, (uint16_t)data);
1939                 else
1940                         target_buffer_set_u32(target, buffer, data);
1941
1942                 /* Advance */
1943                 buffer += size;
1944                 --count;
1945         }
1946
1947         return ERROR_OK;
1948 }
1949
1950 static int aarch64_read_cpu_memory_fast(struct target *target,
1951         uint32_t count, uint8_t *buffer, uint32_t *dscr)
1952 {
1953         struct armv8_common *armv8 = target_to_armv8(target);
1954         struct arm_dpm *dpm = &armv8->dpm;
1955         struct arm *arm = &armv8->arm;
1956         int retval;
1957         uint32_t value;
1958
1959         /* Mark X1 as dirty */
1960         armv8_reg_current(arm, 1)->dirty = true;
1961
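        /* In memory access mode (DSCR.MA set) each read of DBGDTRTX_EL0
         * makes the core load a word from [X0] and advance X0 by 4; the first
         * read after enabling the mode is discarded and the last word is
         * collected after leaving it, which is what the steps below implement.
         */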
1962         if (arm->core_state == ARM_STATE_AARCH64) {
1963                 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1964                 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1965         } else {
1966                 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1967                 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1968         }
1969
1970         if (retval != ERROR_OK)
1971                 return retval;
1972
1973         /* Step 1.e - Change DCC to memory mode */
1974         *dscr |= DSCR_MA;
1975         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1976                         armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1977         if (retval != ERROR_OK)
1978                 return retval;
1979
1980         /* Step 1.f - read DBGDTRTX and discard the value */
1981         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1982                         armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1983         if (retval != ERROR_OK)
1984                 return retval;
1985
1986         count--;
1987         /* Read the data - each read of the DTRTX register causes the instruction to be reissued.
1988          * Abort flags are sticky, so they can be checked at the end of the transaction.
1989          *
1990          * The data is read in 32-bit aligned chunks.
1991          */
1992
1993         if (count) {
1994                 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1995                  * increments X0 by 4. */
1996                 retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
1997                                                                         armv8->debug_base + CPUV8_DBG_DTRTX);
1998                 if (retval != ERROR_OK)
1999                         return retval;
2000         }
2001
2002         /* Step 3.a - set DTR access mode back to Normal mode   */
2003         *dscr &= ~DSCR_MA;
2004         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
2005                                         armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2006         if (retval != ERROR_OK)
2007                 return retval;
2008
2009         /* Step 3.b - read DBGDTRTX for the final value */
2010         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2011                         armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2012         if (retval != ERROR_OK)
2013                 return retval;
2014
2015         target_buffer_set_u32(target, buffer + count * 4, value);
2016         return retval;
2017 }
2018
2019 static int aarch64_read_cpu_memory(struct target *target,
2020         target_addr_t address, uint32_t size,
2021         uint32_t count, uint8_t *buffer)
2022 {
2023         /* read memory through APB-AP */
2024         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2025         struct armv8_common *armv8 = target_to_armv8(target);
2026         struct arm_dpm *dpm = &armv8->dpm;
2027         struct arm *arm = &armv8->arm;
2028         uint32_t dscr;
2029
2030         LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
2031                         address, size, count);
2032
2033         if (target->state != TARGET_HALTED) {
2034                 LOG_WARNING("target not halted");
2035                 return ERROR_TARGET_NOT_HALTED;
2036         }
2037
2038         /* Mark register X0 as dirty, as it will be used
2039          * for transferring the data.
2040          * It will be restored automatically when exiting
2041          * debug mode
2042          */
2043         armv8_reg_current(arm, 0)->dirty = true;
2044
2045         /* Read DSCR */
2046         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2047                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2048         if (retval != ERROR_OK)
2049                 return retval;
2050
2051         /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2052
2053         /* Set Normal access mode  */
2054         dscr &= ~DSCR_MA;
2055         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
2056                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2057         if (retval != ERROR_OK)
2058                 return retval;
2059
2060         if (arm->core_state == ARM_STATE_AARCH64) {
2061                 /* Write X0 with value 'address' using write procedure */
2062                 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2063                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2064                 retval = dpm->instr_write_data_dcc_64(dpm,
2065                                 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2066         } else {
2067                 /* Write R0 with value 'address' using write procedure */
2068                 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2069                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2070                 retval = dpm->instr_write_data_dcc(dpm,
2071                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2072         }
2073
2074         if (retval != ERROR_OK)
2075                 return retval;
2076
2077         if (size == 4 && (address % 4) == 0)
2078                 retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
2079         else
2080                 retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2081
2082         if (dscr & DSCR_MA) {
2083                 dscr &= ~DSCR_MA;
2084                 mem_ap_write_atomic_u32(armv8->debug_ap,
2085                                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2086         }
2087
2088         if (retval != ERROR_OK)
2089                 return retval;
2090
2091         /* Check for sticky abort flags in the DSCR */
2092         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2093                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2094         if (retval != ERROR_OK)
2095                 return retval;
2096
2097         dpm->dscr = dscr;
2098
2099         if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2100                 /* Abort occurred - clear it and exit */
2101                 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2102                 armv8_dpm_handle_exception(dpm, true);
2103                 return ERROR_FAIL;
2104         }
2105
2106         /* Done */
2107         return ERROR_OK;
2108 }
2109
2110 static int aarch64_read_phys_memory(struct target *target,
2111         target_addr_t address, uint32_t size,
2112         uint32_t count, uint8_t *buffer)
2113 {
2114         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2115
2116         if (count && buffer) {
2117                 /* read memory through APB-AP */
2118                 retval = aarch64_mmu_modify(target, 0);
2119                 if (retval != ERROR_OK)
2120                         return retval;
2121                 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2122         }
2123         return retval;
2124 }
2125
2126 static int aarch64_read_memory(struct target *target, target_addr_t address,
2127         uint32_t size, uint32_t count, uint8_t *buffer)
2128 {
2129         int mmu_enabled = 0;
2130         int retval;
2131
2132         /* determine if MMU was enabled on target stop */
2133         retval = aarch64_mmu(target, &mmu_enabled);
2134         if (retval != ERROR_OK)
2135                 return retval;
2136
2137         if (mmu_enabled) {
2138                 /* enable MMU as we could have disabled it for phys access */
2139                 retval = aarch64_mmu_modify(target, 1);
2140                 if (retval != ERROR_OK)
2141                         return retval;
2142         }
2143         return aarch64_read_cpu_memory(target, address, size, count, buffer);
2144 }
2145
2146 static int aarch64_write_phys_memory(struct target *target,
2147         target_addr_t address, uint32_t size,
2148         uint32_t count, const uint8_t *buffer)
2149 {
2150         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2151
2152         if (count && buffer) {
2153                 /* write memory through APB-AP */
2154                 retval = aarch64_mmu_modify(target, 0);
2155                 if (retval != ERROR_OK)
2156                         return retval;
2157                 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2158         }
2159
2160         return retval;
2161 }
2162
2163 static int aarch64_write_memory(struct target *target, target_addr_t address,
2164         uint32_t size, uint32_t count, const uint8_t *buffer)
2165 {
2166         int mmu_enabled = 0;
2167         int retval;
2168
2169         /* determine if MMU was enabled on target stop */
2170         retval = aarch64_mmu(target, &mmu_enabled);
2171         if (retval != ERROR_OK)
2172                 return retval;
2173
2174         if (mmu_enabled) {
2175                 /* enable MMU as we could have disabled it for phys access */
2176                 retval = aarch64_mmu_modify(target, 1);
2177                 if (retval != ERROR_OK)
2178                         return retval;
2179         }
2180         return aarch64_write_cpu_memory(target, address, size, count, buffer);
2181 }
2182
2183 static int aarch64_handle_target_request(void *priv)
2184 {
2185         struct target *target = priv;
2186         struct armv8_common *armv8 = target_to_armv8(target);
2187         int retval;
2188
2189         if (!target_was_examined(target))
2190                 return ERROR_OK;
2191         if (!target->dbg_msg_enabled)
2192                 return ERROR_OK;
2193
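        /* Debug messages from the target arrive over the DCC: while the core
         * is running and DSCR reports DTRTX full, drain one request word at a
         * time and hand it to the generic target_request layer.
         */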
2194         if (target->state == TARGET_RUNNING) {
2195                 uint32_t request;
2196                 uint32_t dscr;
2197                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2198                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2199
2200                 /* check if we have data */
2201                 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2202                         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2203                                         armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2204                         if (retval == ERROR_OK) {
2205                                 target_request(target, request);
2206                                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2207                                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2208                         }
2209                 }
2210         }
2211
2212         return ERROR_OK;
2213 }
2214
2215 static int aarch64_examine_first(struct target *target)
2216 {
2217         struct aarch64_common *aarch64 = target_to_aarch64(target);
2218         struct armv8_common *armv8 = &aarch64->armv8_common;
2219         struct adiv5_dap *swjdp = armv8->arm.dap;
2220         struct aarch64_private_config *pc;
2221         int i;
2222         int retval = ERROR_OK;
2223         uint64_t debug, ttypr;
2224         uint32_t cpuid;
2225         uint32_t tmp0, tmp1, tmp2, tmp3;
2226         debug = ttypr = cpuid = 0;
2227
2228         /* Search for the APB-AP - it is needed for access to debug registers */
2229         retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2230         if (retval != ERROR_OK) {
2231                 LOG_ERROR("Could not find APB-AP for debug access");
2232                 return retval;
2233         }
2234
2235         retval = mem_ap_init(armv8->debug_ap);
2236         if (retval != ERROR_OK) {
2237                 LOG_ERROR("Could not initialize the APB-AP");
2238                 return retval;
2239         }
2240
2241         armv8->debug_ap->memaccess_tck = 10;
2242
2243         if (!target->dbgbase_set) {
2244                 uint32_t dbgbase;
2245                 /* Get ROM Table base */
2246                 uint32_t apid;
2247                 int32_t coreidx = target->coreid;
2248                 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2249                 if (retval != ERROR_OK)
2250                         return retval;
2251                 /* Lookup 0x15 -- Processor DAP */
2252                 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2253                                 &armv8->debug_base, &coreidx);
2254                 if (retval != ERROR_OK)
2255                         return retval;
2256                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2257                                 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2258         } else
2259                 armv8->debug_base = target->dbgbase;
2260
2261         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2262                         armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2263         if (retval != ERROR_OK) {
2264                 LOG_DEBUG("Examine %s failed", "oslock");
2265                 return retval;
2266         }
2267
2268         retval = mem_ap_read_u32(armv8->debug_ap,
2269                         armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2270         if (retval != ERROR_OK) {
2271                 LOG_DEBUG("Examine %s failed", "CPUID");
2272                 return retval;
2273         }
2274
2275         retval = mem_ap_read_u32(armv8->debug_ap,
2276                         armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2277         retval += mem_ap_read_u32(armv8->debug_ap,
2278                         armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2279         if (retval != ERROR_OK) {
2280                 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2281                 return retval;
2282         }
2283         retval = mem_ap_read_u32(armv8->debug_ap,
2284                         armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2285         retval += mem_ap_read_u32(armv8->debug_ap,
2286                         armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2287         if (retval != ERROR_OK) {
2288                 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2289                 return retval;
2290         }
2291
2292         retval = dap_run(armv8->debug_ap->dap);
2293         if (retval != ERROR_OK) {
2294                 LOG_ERROR("%s: examination failed", target_name(target));
2295                 return retval;
2296         }
2297
2298         ttypr |= tmp1;
2299         ttypr = (ttypr << 32) | tmp0;
2300         debug |= tmp3;
2301         debug = (debug << 32) | tmp2;
2302
2303         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2304         LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2305         LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2306
2307         if (target->private_config == NULL)
2308                 return ERROR_FAIL;
2309
2310         pc = (struct aarch64_private_config *)target->private_config;
2311         if (pc->cti == NULL)
2312                 return ERROR_FAIL;
2313
2314         armv8->cti = pc->cti;
2315
2316         retval = aarch64_dpm_setup(aarch64, debug);
2317         if (retval != ERROR_OK)
2318                 return retval;
2319
2320         /* Setup Breakpoint Register Pairs */
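        /* ID_AA64DFR0_EL1 encodes the counts minus one: BRPs in bits [15:12],
         * CTX_CMPs in bits [31:28]. */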
2321         aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2322         aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2323         aarch64->brp_num_available = aarch64->brp_num;
2324         aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2325         for (i = 0; i < aarch64->brp_num; i++) {
2326                 aarch64->brp_list[i].used = 0;
2327                 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2328                         aarch64->brp_list[i].type = BRP_NORMAL;
2329                 else
2330                         aarch64->brp_list[i].type = BRP_CONTEXT;
2331                 aarch64->brp_list[i].value = 0;
2332                 aarch64->brp_list[i].control = 0;
2333                 aarch64->brp_list[i].BRPn = i;
2334         }
2335
2336         LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2337
2338         target->state = TARGET_UNKNOWN;
2339         target->debug_reason = DBG_REASON_NOTHALTED;
2340         aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2341         target_set_examined(target);
2342         return ERROR_OK;
2343 }
2344
2345 static int aarch64_examine(struct target *target)
2346 {
2347         int retval = ERROR_OK;
2348
2349         /* don't re-probe hardware after each reset */
2350         if (!target_was_examined(target))
2351                 retval = aarch64_examine_first(target);
2352
2353         /* Configure core debug access */
2354         if (retval == ERROR_OK)
2355                 retval = aarch64_init_debug_access(target);
2356
2357         return retval;
2358 }
2359
2360 /*
2361  *      AArch64 target creation and initialization
2362  */
2363
2364 static int aarch64_init_target(struct command_context *cmd_ctx,
2365         struct target *target)
2366 {
2367         /* examine_first() does a bunch of this */
2368         arm_semihosting_init(target);
2369         return ERROR_OK;
2370 }
2371
2372 static int aarch64_init_arch_info(struct target *target,
2373         struct aarch64_common *aarch64, struct adiv5_dap *dap)
2374 {
2375         struct armv8_common *armv8 = &aarch64->armv8_common;
2376
2377         /* Setup struct aarch64_common */
2378         aarch64->common_magic = AARCH64_COMMON_MAGIC;
2379         armv8->arm.dap = dap;
2380
2381         /* register arch-specific functions */
2382         armv8->examine_debug_reason = NULL;
2383         armv8->post_debug_entry = aarch64_post_debug_entry;
2384         armv8->pre_restore_context = NULL;
2385         armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2386
2387         armv8_init_arch_info(target, armv8);
2388         target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2389
2390         return ERROR_OK;
2391 }
2392
2393 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2394 {
2395         struct aarch64_private_config *pc = target->private_config;
2396         struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2397
2398         if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2399                 return ERROR_FAIL;
2400
2401         return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2402 }
2403
2404 static void aarch64_deinit_target(struct target *target)
2405 {
2406         struct aarch64_common *aarch64 = target_to_aarch64(target);
2407         struct armv8_common *armv8 = &aarch64->armv8_common;
2408         struct arm_dpm *dpm = &armv8->dpm;
2409
2410         armv8_free_reg_cache(target);
2411         free(aarch64->brp_list);
2412         free(dpm->dbp);
2413         free(dpm->dwp);
2414         free(target->private_config);
2415         free(aarch64);
2416 }
2417
2418 static int aarch64_mmu(struct target *target, int *enabled)
2419 {
2420         if (target->state != TARGET_HALTED) {
2421                 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2422                 return ERROR_TARGET_INVALID;
2423         }
2424
2425         *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2426         return ERROR_OK;
2427 }
2428
2429 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2430                              target_addr_t *phys)
2431 {
2432         return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2433 }
2434
2435 /*
2436  * private target configuration items
2437  */
2438 enum aarch64_cfg_param {
2439         CFG_CTI,
2440 };
2441
2442 static const Jim_Nvp nvp_config_opts[] = {
2443         { .name = "-cti", .value = CFG_CTI },
2444         { .name = NULL, .value = -1 }
2445 };
2446
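/* The only option handled here is "-cti", which binds a previously created
 * CTI instance to this core.  A typical board configuration looks roughly
 * like the sketch below; the DAP/CTI names, AP number and CTI base address
 * are purely illustrative:
 *
 *   cti create $_CHIPNAME.cti0 -dap $_CHIPNAME.dap -ap-num 1 -ctibase 0x80820000
 *   target create $_CHIPNAME.a53.0 aarch64 -dap $_CHIPNAME.dap \
 *           -coreid 0 -cti $_CHIPNAME.cti0
 */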
2447 static int aarch64_jim_configure(struct target *target, Jim_GetOptInfo *goi)
2448 {
2449         struct aarch64_private_config *pc;
2450         Jim_Nvp *n;
2451         int e;
2452
2453         pc = (struct aarch64_private_config *)target->private_config;
2454         if (pc == NULL) {
2455                 pc = calloc(1, sizeof(struct aarch64_private_config));
2456                 target->private_config = pc;
2457         }
2458
2459         /*
2460          * Call adiv5_jim_configure() to parse the common DAP options
2461          * It will return JIM_CONTINUE if it didn't find any known
2462          * options, JIM_OK if it correctly parsed the topmost option
2463          * and JIM_ERR if an error occurred during parameter evaluation.
2464          * For JIM_CONTINUE, we check our own params.
2465          */
2466         e = adiv5_jim_configure(target, goi);
2467         if (e != JIM_CONTINUE)
2468                 return e;
2469
2470         /* parse config or cget options ... */
2471         if (goi->argc > 0) {
2472                 Jim_SetEmptyResult(goi->interp);
2473
2474                 /* check first if topmost item is for us */
2475                 e = Jim_Nvp_name2value_obj(goi->interp, nvp_config_opts,
2476                                 goi->argv[0], &n);
2477                 if (e != JIM_OK)
2478                         return JIM_CONTINUE;
2479
2480                 e = Jim_GetOpt_Obj(goi, NULL);
2481                 if (e != JIM_OK)
2482                         return e;
2483
2484                 switch (n->value) {
2485                 case CFG_CTI: {
2486                         if (goi->isconfigure) {
2487                                 Jim_Obj *o_cti;
2488                                 struct arm_cti *cti;
2489                                 e = Jim_GetOpt_Obj(goi, &o_cti);
2490                                 if (e != JIM_OK)
2491                                         return e;
2492                                 cti = cti_instance_by_jim_obj(goi->interp, o_cti);
2493                                 if (cti == NULL) {
2494                                         Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
2495                                         return JIM_ERR;
2496                                 }
2497                                 pc->cti = cti;
2498                         } else {
2499                                 if (goi->argc != 0) {
2500                                         Jim_WrongNumArgs(goi->interp,
2501                                                         goi->argc, goi->argv,
2502                                                         "NO PARAMS");
2503                                         return JIM_ERR;
2504                                 }
2505
2506                                 if (pc == NULL || pc->cti == NULL) {
2507                                         Jim_SetResultString(goi->interp, "CTI not configured", -1);
2508                                         return JIM_ERR;
2509                                 }
2510                                 Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
2511                         }
2512                         break;
2513                 }
2514
2515                 default:
2516                         return JIM_CONTINUE;
2517                 }
2518         }
2519
2520         return JIM_OK;
2521 }
2522
2523 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2524 {
2525         struct target *target = get_current_target(CMD_CTX);
2526         struct armv8_common *armv8 = target_to_armv8(target);
2527
2528         return armv8_handle_cache_info_command(CMD_CTX,
2529                         &armv8->armv8_mmu.armv8_cache);
2530 }
2531
2532
2533 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2534 {
2535         struct target *target = get_current_target(CMD_CTX);
2536         if (!target_was_examined(target)) {
2537                 LOG_ERROR("target not examined yet");
2538                 return ERROR_FAIL;
2539         }
2540
2541         return aarch64_init_debug_access(target);
2542 }
2543 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2544 {
2545         struct target *target = get_current_target(CMD_CTX);
2546         /* check target is an smp target */
2547         struct target_list *head;
2548         struct target *curr;
2549         head = target->head;
2550         target->smp = 0;
2551         if (head != NULL) {
2552                 while (head != NULL) {
2553                         curr = head->target;
2554                         curr->smp = 0;
2555                         head = head->next;
2556                 }
2557                 /* fix the target displayed to the debugger */
2558                 target->gdb_service->target = target;
2559         }
2560         return ERROR_OK;
2561 }
2562
2563 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2564 {
2565         struct target *target = get_current_target(CMD_CTX);
2566         struct target_list *head;
2567         struct target *curr;
2568         head = target->head;
2569         if (head != NULL) {
2570                 target->smp = 1;
2571                 while (head != NULL) {
2572                         curr = head->target;
2573                         curr->smp = 1;
2574                         head = head->next;
2575                 }
2576         }
2577         return ERROR_OK;
2578 }
2579
2580 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2581 {
2582         struct target *target = get_current_target(CMD_CTX);
2583         struct aarch64_common *aarch64 = target_to_aarch64(target);
2584
2585         static const Jim_Nvp nvp_maskisr_modes[] = {
2586                 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2587                 { .name = "on", .value = AARCH64_ISRMASK_ON },
2588                 { .name = NULL, .value = -1 },
2589         };
2590         const Jim_Nvp *n;
2591
2592         if (CMD_ARGC > 0) {
2593                 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2594                 if (n->name == NULL) {
2595                         LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2596                         return ERROR_COMMAND_SYNTAX_ERROR;
2597                 }
2598
2599                 aarch64->isrmasking_mode = n->value;
2600         }
2601
2602         n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2603         command_print(CMD_CTX, "aarch64 interrupt mask %s", n->name);
2604
2605         return ERROR_OK;
2606 }
2607
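/* Tcl access to 32-bit coprocessor registers for cores halted in AArch32
 * state.  Parameters follow the order given in the command usage strings
 * below; for example (values illustrative only), SCTLR could be read and
 * written via CP15 with:
 *
 *   aarch64 mrc 15 0 1 0 0
 *   aarch64 mcr 15 0 1 0 0 0x00c50078
 */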
2608 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2609 {
2610         struct command_context *context;
2611         struct target *target;
2612         struct arm *arm;
2613         int retval;
2614         bool is_mcr = false;
2615         int arg_cnt = 0;
2616
2617         if (Jim_CompareStringImmediate(interp, argv[0], "mcr")) {
2618                 is_mcr = true;
2619                 arg_cnt = 7;
2620         } else {
2621                 arg_cnt = 6;
2622         }
2623
2624         context = current_command_context(interp);
2625         assert(context != NULL);
2626
2627         target = get_current_target(context);
2628         if (target == NULL) {
2629                 LOG_ERROR("%s: no current target", __func__);
2630                 return JIM_ERR;
2631         }
2632         if (!target_was_examined(target)) {
2633                 LOG_ERROR("%s: not yet examined", target_name(target));
2634                 return JIM_ERR;
2635         }
2636
2637         arm = target_to_arm(target);
2638         if (!is_arm(arm)) {
2639                 LOG_ERROR("%s: not an ARM", target_name(target));
2640                 return JIM_ERR;
2641         }
2642
2643         if (target->state != TARGET_HALTED) {
2644                 LOG_ERROR("%s: target not halted", target_name(target));
2645                 return JIM_ERR;
2646         }
2647
2648         if (arm->core_state == ARM_STATE_AARCH64) {
2649                 LOG_ERROR("%s: not a 32-bit ARM core", target_name(target));
2650                 return JIM_ERR;
2651         }
2650
2651         if (argc != arg_cnt) {
2652                 LOG_ERROR("%s: wrong number of arguments", __func__);
2653                 return JIM_ERR;
2654         }
2655
2656         int cpnum;
2657         uint32_t op1;
2658         uint32_t op2;
2659         uint32_t CRn;
2660         uint32_t CRm;
2661         uint32_t value;
2662         long l;
2663
2664         /* NOTE:  parameter sequence matches ARM instruction set usage:
2665          *      MCR     pNUM, op1, rX, CRn, CRm, op2    ; write CP from rX
2666          *      MRC     pNUM, op1, rX, CRn, CRm, op2    ; read CP into rX
2667          * The "rX" is necessarily omitted; it uses Tcl mechanisms.
2668          */
	retval = Jim_GetLong(interp, argv[1], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {
		LOG_ERROR("%s: %s %d out of range", __func__,
			"coprocessor", (int) l);
		return JIM_ERR;
	}
	cpnum = l;

	retval = Jim_GetLong(interp, argv[2], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0x7) {
		LOG_ERROR("%s: %s %d out of range", __func__,
			"op1", (int) l);
		return JIM_ERR;
	}
	op1 = l;

	retval = Jim_GetLong(interp, argv[3], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {
		LOG_ERROR("%s: %s %d out of range", __func__,
			"CRn", (int) l);
		return JIM_ERR;
	}
	CRn = l;

	retval = Jim_GetLong(interp, argv[4], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0xf) {
		LOG_ERROR("%s: %s %d out of range", __func__,
			"CRm", (int) l);
		return JIM_ERR;
	}
	CRm = l;

	retval = Jim_GetLong(interp, argv[5], &l);
	if (retval != JIM_OK)
		return retval;
	if (l & ~0x7) {
		LOG_ERROR("%s: %s %d out of range", __func__,
			"op2", (int) l);
		return JIM_ERR;
	}
	op2 = l;
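	/* The masks above mirror the field widths in the MCR/MRC encoding:
	 * the coprocessor number, CRn and CRm are 4-bit fields (0..15),
	 * while op1 and op2 are 3-bit fields (0..7), so any value with
	 * extra bits set is rejected before issuing the access.
	 */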

	value = 0;

	if (is_mcr) {
		retval = Jim_GetLong(interp, argv[6], &l);
		if (retval != JIM_OK)
			return retval;
		value = l;

		/* NOTE: parameters reordered! */
		/* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
		retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
		if (retval != ERROR_OK)
			return JIM_ERR;
	} else {
		/* NOTE: parameters reordered! */
		/* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
		retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
		if (retval != ERROR_OK)
			return JIM_ERR;

		Jim_SetResult(interp, Jim_NewIntObj(interp, value));
	}

	return JIM_OK;
}

static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "smp_off",
		.handler = aarch64_handle_smp_off_command,
		.mode = COMMAND_EXEC,
		.help = "Stop smp handling",
		.usage = "",
	},
	{
		.name = "smp_on",
		.handler = aarch64_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
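	/*
	 * Both commands above share jim_mcrmrc() as their handler; the
	 * handler inspects argv[0] ("mcr" vs "mrc") to decide whether to
	 * write the extra value argument or to return the value read as
	 * the Tcl result.
	 */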
	COMMAND_REGISTRATION_DONE
};

static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "AArch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
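/*
 * The table above first pulls in the generic ARMv8 command handlers and
 * then adds the "aarch64" group, so the handlers registered earlier in
 * this file are reached as e.g. "aarch64 cache_info" or "aarch64 mrc ...".
 */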

struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,
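	/* Watchpoint support is not implemented for this target yet, so the
	 * hooks are deliberately left NULL. */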

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};