[openocd] / src / target / aarch64.c
1 /***************************************************************************
2  *   Copyright (C) 2015 by David Ung                                       *
3  *                                                                         *
4  *   This program is free software; you can redistribute it and/or modify  *
5  *   it under the terms of the GNU General Public License as published by  *
6  *   the Free Software Foundation; either version 2 of the License, or     *
7  *   (at your option) any later version.                                   *
8  *                                                                         *
9  *   This program is distributed in the hope that it will be useful,       *
10  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
11  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
12  *   GNU General Public License for more details.                          *
13  *                                                                         *
14  *   You should have received a copy of the GNU General Public License     *
15  *   along with this program; if not, write to the                         *
16  *   Free Software Foundation, Inc.,                                       *
17  *                                                                         *
18  ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include "arm_semihosting.h"
32 #include <helper/time_support.h>
33
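/*
 * How a restart request is issued: RESTART_SYNC waits for PRSR.SDR to confirm
 * that the PE actually left debug state, RESTART_LAZY only pulses the CTI
 * restart channel and returns immediately (see aarch64_do_restart_one()).
 */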
34 enum restart_mode {
35         RESTART_LAZY,
36         RESTART_SYNC,
37 };
38
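/*
 * How a halt request is issued: HALT_SYNC polls PRSR until the PE reports the
 * halted state, HALT_LAZY only triggers the CTI halt channel and returns
 * (see aarch64_halt_one()).
 */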
39 enum halt_mode {
40         HALT_LAZY,
41         HALT_SYNC,
42 };
43
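/*
 * Per-target configuration: the common ADIv5 settings plus the CTI instance
 * used for halt/restart requests (typically assigned with the target's
 * '-cti' configure option).
 */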
44 struct aarch64_private_config {
45         struct adiv5_private_config adiv5_config;
46         struct arm_cti *cti;
47 };
48
49 static int aarch64_poll(struct target *target);
50 static int aarch64_debug_entry(struct target *target);
51 static int aarch64_restore_context(struct target *target, bool bpwp);
52 static int aarch64_set_breakpoint(struct target *target,
53         struct breakpoint *breakpoint, uint8_t matchmode);
54 static int aarch64_set_context_breakpoint(struct target *target,
55         struct breakpoint *breakpoint, uint8_t matchmode);
56 static int aarch64_set_hybrid_breakpoint(struct target *target,
57         struct breakpoint *breakpoint);
58 static int aarch64_unset_breakpoint(struct target *target,
59         struct breakpoint *breakpoint);
60 static int aarch64_mmu(struct target *target, int *enabled);
61 static int aarch64_virt2phys(struct target *target,
62         target_addr_t virt, target_addr_t *phys);
63 static int aarch64_read_cpu_memory(struct target *target,
64         uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
65
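/* iterate over all members of an SMP group; 'head' is the first target_list
 * node of the group and 'pos' is the loop cursor */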
66 #define foreach_smp_target(pos, head) \
67         for (pos = head; (pos != NULL); pos = pos->next)
68
69 static int aarch64_restore_system_control_reg(struct target *target)
70 {
71         enum arm_mode target_mode = ARM_MODE_ANY;
72         int retval = ERROR_OK;
73         uint32_t instr;
74
75         struct aarch64_common *aarch64 = target_to_aarch64(target);
76         struct armv8_common *armv8 = target_to_armv8(target);
77
78         if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
79                 aarch64->system_control_reg_curr = aarch64->system_control_reg;
80                 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
81
82                 switch (armv8->arm.core_mode) {
83                 case ARMV8_64_EL0T:
84                         target_mode = ARMV8_64_EL1H;
85                         /* fall through */
86                 case ARMV8_64_EL1T:
87                 case ARMV8_64_EL1H:
88                         instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
89                         break;
90                 case ARMV8_64_EL2T:
91                 case ARMV8_64_EL2H:
92                         instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
93                         break;
94                 case ARMV8_64_EL3H:
95                 case ARMV8_64_EL3T:
96                         instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
97                         break;
98
99                 case ARM_MODE_SVC:
100                 case ARM_MODE_ABT:
101                 case ARM_MODE_FIQ:
102                 case ARM_MODE_IRQ:
103                         instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
104                         break;
105
106                 default:
107                         LOG_INFO("cannot read system control register in this mode");
108                         return ERROR_FAIL;
109                 }
110
111                 if (target_mode != ARM_MODE_ANY)
112                         armv8_dpm_modeswitch(&armv8->dpm, target_mode);
113
114                 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
115                 if (retval != ERROR_OK)
116                         return retval;
117
118                 if (target_mode != ARM_MODE_ANY)
119                         armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
120         }
121
122         return retval;
123 }
124
125 /*  modify system_control_reg in order to enable or disable the MMU for:
126  *  - virt2phys address conversion
127  *  - reading or writing memory at physical or virtual addresses */
128 static int aarch64_mmu_modify(struct target *target, int enable)
129 {
130         struct aarch64_common *aarch64 = target_to_aarch64(target);
131         struct armv8_common *armv8 = &aarch64->armv8_common;
132         int retval = ERROR_OK;
133         uint32_t instr = 0;
134
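        /* SCTLR bit 0 is M (MMU enable), bit 2 is C (data/unified cache enable);
         * keep the cached copy in system_control_reg_curr consistent with what
         * is written to the PE below */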
135         if (enable) {
136                 /* refuse to enable the MMU if it was disabled when the target stopped */
137                 if (!(aarch64->system_control_reg & 0x1U)) {
138                         LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
139                         return ERROR_FAIL;
140                 }
141                 if (!(aarch64->system_control_reg_curr & 0x1U))
142                         aarch64->system_control_reg_curr |= 0x1U;
143         } else {
144                 if (aarch64->system_control_reg_curr & 0x4U) {
145                         /*  data cache is active */
146                         aarch64->system_control_reg_curr &= ~0x4U;
147                         /* flush the data cache before it is disabled */
148                         if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
149                                 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
150                 }
151                 if ((aarch64->system_control_reg_curr & 0x1U)) {
152                         aarch64->system_control_reg_curr &= ~0x1U;
153                 }
154         }
155
156         switch (armv8->arm.core_mode) {
157         case ARMV8_64_EL0T:
158         case ARMV8_64_EL1T:
159         case ARMV8_64_EL1H:
160                 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
161                 break;
162         case ARMV8_64_EL2T:
163         case ARMV8_64_EL2H:
164                 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
165                 break;
166         case ARMV8_64_EL3H:
167         case ARMV8_64_EL3T:
168                 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
169                 break;
170
171         case ARM_MODE_SVC:
172         case ARM_MODE_ABT:
173         case ARM_MODE_FIQ:
174         case ARM_MODE_IRQ:
175                 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
176                 break;
177
178         default:
179                 LOG_DEBUG("unknown cpu state 0x%" PRIx32, armv8->arm.core_mode);
180                 break;
181         }
182
183         retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
184                                 aarch64->system_control_reg_curr);
185         return retval;
186 }
187
188 /*
189  * Basic debug access; very low level, assumes the target state has been saved
190  */
191 static int aarch64_init_debug_access(struct target *target)
192 {
193         struct armv8_common *armv8 = target_to_armv8(target);
194         int retval;
195         uint32_t dummy;
196
197         LOG_DEBUG("%s", target_name(target));
198
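        /* writing 0 to OSLAR clears the OS Lock so the external debugger can
         * access the core's debug registers */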
199         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
200                         armv8->debug_base + CPUV8_DBG_OSLAR, 0);
201         if (retval != ERROR_OK) {
202                 LOG_DEBUG("Examine %s failed", "oslock");
203                 return retval;
204         }
205
206         /* Clear Sticky Power Down status Bit in PRSR to enable access to
207            the registers in the Core Power Domain */
208         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
209                         armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
210         if (retval != ERROR_OK)
211                 return retval;
212
213         /*
214          * Static CTI configuration:
215          * Channel 0 -> trigger outputs HALT request to PE
216          * Channel 1 -> trigger outputs Resume request to PE
217          * Gate all channel trigger events from entering the CTM
218          */
219
220         /* Enable CTI */
221         retval = arm_cti_enable(armv8->cti, true);
222         /* By default, gate all channel events to and from the CTM */
223         if (retval == ERROR_OK)
224                 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
225         /* output halt requests to PE on channel 0 event */
226         if (retval == ERROR_OK)
227                 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
228         /* output restart requests to PE on channel 1 event */
229         if (retval == ERROR_OK)
230                 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
231         if (retval != ERROR_OK)
232                 return retval;
233
234         /* Resync breakpoint registers */
235
236         return ERROR_OK;
237 }
238
239 /* Write to memory mapped registers directly with no cache or mmu handling */
240 static int aarch64_dap_write_memap_register_u32(struct target *target,
241         uint32_t address,
242         uint32_t value)
243 {
244         int retval;
245         struct armv8_common *armv8 = target_to_armv8(target);
246
247         retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
248
249         return retval;
250 }
251
252 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
253 {
254         struct arm_dpm *dpm = &a8->armv8_common.dpm;
255         int retval;
256
257         dpm->arm = &a8->armv8_common.arm;
258         dpm->didr = debug;
259
260         retval = armv8_dpm_setup(dpm);
261         if (retval == ERROR_OK)
262                 retval = armv8_dpm_initialize(dpm);
263
264         return retval;
265 }
266
267 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
268 {
269         struct armv8_common *armv8 = target_to_armv8(target);
270         return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
271 }
272
273 static int aarch64_check_state_one(struct target *target,
274                 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
275 {
276         struct armv8_common *armv8 = target_to_armv8(target);
277         uint32_t prsr;
278         int retval;
279
280         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
281                         armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
282         if (retval != ERROR_OK)
283                 return retval;
284
285         if (p_prsr)
286                 *p_prsr = prsr;
287
288         if (p_result)
289                 *p_result = (prsr & mask) == (val & mask);
290
291         return ERROR_OK;
292 }
293
294 static int aarch64_wait_halt_one(struct target *target)
295 {
296         int retval = ERROR_OK;
297         uint32_t prsr;
298
299         int64_t then = timeval_ms();
300         for (;;) {
301                 int halted;
302
303                 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
304                 if (retval != ERROR_OK || halted)
305                         break;
306
307                 if (timeval_ms() > then + 1000) {
308                         retval = ERROR_TARGET_TIMEOUT;
309                         LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
310                         break;
311                 }
312         }
313         return retval;
314 }
315
316 static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
317 {
318         int retval = ERROR_OK;
319         struct target_list *head = target->head;
320         struct target *first = NULL;
321
322         LOG_DEBUG("target %s exc %i", target_name(target), exc_target);
323
324         while (head != NULL) {
325                 struct target *curr = head->target;
326                 struct armv8_common *armv8 = target_to_armv8(curr);
327                 head = head->next;
328
329                 if (exc_target && curr == target)
330                         continue;
331                 if (!target_was_examined(curr))
332                         continue;
333                 if (curr->state != TARGET_RUNNING)
334                         continue;
335
336                 /* HACK: mark this target as prepared for halting */
337                 curr->debug_reason = DBG_REASON_DBGRQ;
338
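                /* halting debug (DSCR.HDE) must be enabled, otherwise the PE
                 * ignores the halt request generated via the CTI */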
339                 /* open the gate for channel 0 to let HALT requests pass to the CTM */
340                 retval = arm_cti_ungate_channel(armv8->cti, 0);
341                 if (retval == ERROR_OK)
342                         retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
343                 if (retval != ERROR_OK)
344                         break;
345
346                 LOG_DEBUG("target %s prepared", target_name(curr));
347
348                 if (first == NULL)
349                         first = curr;
350         }
351
352         if (p_first) {
353                 if (exc_target && first)
354                         *p_first = first;
355                 else
356                         *p_first = target;
357         }
358
359         return retval;
360 }
361
362 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
363 {
364         int retval = ERROR_OK;
365         struct armv8_common *armv8 = target_to_armv8(target);
366
367         LOG_DEBUG("%s", target_name(target));
368
369         /* allow Halting Debug Mode */
370         retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
371         if (retval != ERROR_OK)
372                 return retval;
373
374         /* trigger an event on channel 0, this outputs a halt request to the PE */
375         retval = arm_cti_pulse_channel(armv8->cti, 0);
376         if (retval != ERROR_OK)
377                 return retval;
378
379         if (mode == HALT_SYNC) {
380                 retval = aarch64_wait_halt_one(target);
381                 if (retval != ERROR_OK) {
382                         if (retval == ERROR_TARGET_TIMEOUT)
383                                 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
384                         return retval;
385                 }
386         }
387
388         return ERROR_OK;
389 }
390
391 static int aarch64_halt_smp(struct target *target, bool exc_target)
392 {
393         struct target *next = target;
394         int retval;
395
396         /* prepare halt on all PEs of the group */
397         retval = aarch64_prepare_halt_smp(target, exc_target, &next);
398
399         if (exc_target && next == target)
400                 return retval;
401
402         /* halt the target PE */
403         if (retval == ERROR_OK)
404                 retval = aarch64_halt_one(next, HALT_LAZY);
405
406         if (retval != ERROR_OK)
407                 return retval;
408
409         /* wait for all PEs to halt */
410         int64_t then = timeval_ms();
411         for (;;) {
412                 bool all_halted = true;
413                 struct target_list *head;
414                 struct target *curr;
415
416                 foreach_smp_target(head, target->head) {
417                         int halted;
418
419                         curr = head->target;
420
421                         if (!target_was_examined(curr))
422                                 continue;
423
424                         retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
425                         if (retval != ERROR_OK || !halted) {
426                                 all_halted = false;
427                                 break;
428                         }
429                 }
430
431                 if (all_halted)
432                         break;
433
434                 if (timeval_ms() > then + 1000) {
435                         retval = ERROR_TARGET_TIMEOUT;
436                         break;
437                 }
438
439                 /*
440                  * HACK: on Hi6220 there are 8 cores organized in 2 clusters
441                  * and it looks like the CTIs are not connected by a common
442                  * trigger matrix. It seems that we need to halt one core in each
443                  * cluster explicitly. So if we find that a core has not halted
444                  * yet, we trigger an explicit halt for the second cluster.
445                  */
446                 retval = aarch64_halt_one(curr, HALT_LAZY);
447                 if (retval != ERROR_OK)
448                         break;
449         }
450
451         return retval;
452 }
453
454 static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
455 {
456         struct target *gdb_target = NULL;
457         struct target_list *head;
458         struct target *curr;
459
460         if (debug_reason == DBG_REASON_NOTHALTED) {
461                 LOG_DEBUG("Halting remaining targets in SMP group");
462                 aarch64_halt_smp(target, true);
463         }
464
465         /* poll all targets in the group, but skip the target that serves GDB */
466         foreach_smp_target(head, target->head) {
467                 curr = head->target;
468                 /* skip calling context */
469                 if (curr == target)
470                         continue;
471                 if (!target_was_examined(curr))
472                         continue;
473                 /* skip targets that were already halted */
474                 if (curr->state == TARGET_HALTED)
475                         continue;
476                 /* remember the gdb_service->target */
477                 if (curr->gdb_service != NULL)
478                         gdb_target = curr->gdb_service->target;
479                 /* skip it */
480                 if (curr == gdb_target)
481                         continue;
482
483                 /* avoid recursion in aarch64_poll() */
484                 curr->smp = 0;
485                 aarch64_poll(curr);
486                 curr->smp = 1;
487         }
488
489         /* after all targets were updated, poll the gdb serving target */
490         if (gdb_target != NULL && gdb_target != target)
491                 aarch64_poll(gdb_target);
492
493         return ERROR_OK;
494 }
495
496 /*
497  * AArch64 Run control
498  */
499
500 static int aarch64_poll(struct target *target)
501 {
502         enum target_state prev_target_state;
503         int retval = ERROR_OK;
504         int halted;
505
506         retval = aarch64_check_state_one(target,
507                                 PRSR_HALT, PRSR_HALT, &halted, NULL);
508         if (retval != ERROR_OK)
509                 return retval;
510
511         if (halted) {
512                 prev_target_state = target->state;
513                 if (prev_target_state != TARGET_HALTED) {
514                         enum target_debug_reason debug_reason = target->debug_reason;
515
516                         /* We have a halting debug event */
517                         target->state = TARGET_HALTED;
518                         LOG_DEBUG("Target %s halted", target_name(target));
519                         retval = aarch64_debug_entry(target);
520                         if (retval != ERROR_OK)
521                                 return retval;
522
523                         if (target->smp)
524                                 update_halt_gdb(target, debug_reason);
525
526                         if (arm_semihosting(target, &retval) != 0)
527                                 return retval;
528
529                         switch (prev_target_state) {
530                         case TARGET_RUNNING:
531                         case TARGET_UNKNOWN:
532                         case TARGET_RESET:
533                                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
534                                 break;
535                         case TARGET_DEBUG_RUNNING:
536                                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
537                                 break;
538                         default:
539                                 break;
540                         }
541                 }
542         } else
543                 target->state = TARGET_RUNNING;
544
545         return retval;
546 }
547
548 static int aarch64_halt(struct target *target)
549 {
550         struct armv8_common *armv8 = target_to_armv8(target);
551         armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
552
553         if (target->smp)
554                 return aarch64_halt_smp(target, false);
555
556         return aarch64_halt_one(target, HALT_SYNC);
557 }
558
559 static int aarch64_restore_one(struct target *target, int current,
560         uint64_t *address, int handle_breakpoints, int debug_execution)
561 {
562         struct armv8_common *armv8 = target_to_armv8(target);
563         struct arm *arm = &armv8->arm;
564         int retval;
565         uint64_t resume_pc;
566
567         LOG_DEBUG("%s", target_name(target));
568
569         if (!debug_execution)
570                 target_free_all_working_areas(target);
571
572         /* current = 1: continue on current pc, otherwise continue at <address> */
573         resume_pc = buf_get_u64(arm->pc->value, 0, 64);
574         if (!current)
575                 resume_pc = *address;
576         else
577                 *address = resume_pc;
578
579         /* Make sure that the ARMv7 gdb Thumb fixups do not
580          * kill the return address
581          */
582         switch (arm->core_state) {
583                 case ARM_STATE_ARM:
584                         resume_pc &= 0xFFFFFFFC;
585                         break;
586                 case ARM_STATE_AARCH64:
587                         resume_pc &= 0xFFFFFFFFFFFFFFFC;
588                         break;
589                 case ARM_STATE_THUMB:
590                 case ARM_STATE_THUMB_EE:
591                         /* When the return address is loaded into PC
592                          * bit 0 must be 1 to stay in Thumb state
593                          */
594                         resume_pc |= 0x1;
595                         break;
596                 case ARM_STATE_JAZELLE:
597                         LOG_ERROR("How do I resume into Jazelle state??");
598                         return ERROR_FAIL;
599         }
600         LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
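        /* write the (possibly adjusted) PC back to the register cache and mark
         * it dirty so it gets restored to the PE on resume */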
601         buf_set_u64(arm->pc->value, 0, 64, resume_pc);
602         arm->pc->dirty = 1;
603         arm->pc->valid = 1;
604
605         /* call this now, before restoring the context, because it uses CPU
606          * register r0 to restore the system control register */
607         retval = aarch64_restore_system_control_reg(target);
608         if (retval == ERROR_OK)
609                 retval = aarch64_restore_context(target, handle_breakpoints);
610
611         return retval;
612 }
613
614 /**
615  * prepare a single target for restart: acknowledge any pending CTI halt
616  * event and set up the CTI gates so the PE accepts restart requests on
617  * channel 1 while being isolated from halt requests on channel 0
618  */
619 static int aarch64_prepare_restart_one(struct target *target)
620 {
621         struct armv8_common *armv8 = target_to_armv8(target);
622         int retval;
623         uint32_t dscr;
624         uint32_t tmp;
625
626         LOG_DEBUG("%s", target_name(target));
627
628         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
629                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
630         if (retval != ERROR_OK)
631                 return retval;
632
633         if ((dscr & DSCR_ITE) == 0)
634                 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
635         if ((dscr & DSCR_ERR) != 0)
636                 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
637
638         /* acknowledge a pending CTI halt event */
639         retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
640         /*
641          * open the CTI gate for channel 1 so that the restart events
642          * get passed along to all PEs. Also close gate for channel 0
643          * to isolate the PE from halt events.
644          */
645         if (retval == ERROR_OK)
646                 retval = arm_cti_ungate_channel(armv8->cti, 1);
647         if (retval == ERROR_OK)
648                 retval = arm_cti_gate_channel(armv8->cti, 0);
649
650         /* make sure that DSCR.HDE is set */
651         if (retval == ERROR_OK) {
652                 dscr |= DSCR_HDE;
653                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
654                                 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
655         }
656
657         if (retval == ERROR_OK) {
658                 /* clear sticky bits in PRSR, SDR is now 0 */
659                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
660                                 armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
661         }
662
663         return retval;
664 }
665
666 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
667 {
668         struct armv8_common *armv8 = target_to_armv8(target);
669         int retval;
670
671         LOG_DEBUG("%s", target_name(target));
672
673         /* trigger an event on channel 1, generates a restart request to the PE */
674         retval = arm_cti_pulse_channel(armv8->cti, 1);
675         if (retval != ERROR_OK)
676                 return retval;
677
678         if (mode == RESTART_SYNC) {
679                 int64_t then = timeval_ms();
680                 for (;;) {
681                         int resumed;
682                         /*
683                          * if PRSR.SDR is set now, the target did restart, even
684                          * if it's now already halted again (e.g. due to breakpoint)
685                          */
686                         retval = aarch64_check_state_one(target,
687                                                 PRSR_SDR, PRSR_SDR, &resumed, NULL);
688                         if (retval != ERROR_OK || resumed)
689                                 break;
690
691                         if (timeval_ms() > then + 1000) {
692                                 LOG_ERROR("%s: Timeout waiting for resume", target_name(target));
693                                 retval = ERROR_TARGET_TIMEOUT;
694                                 break;
695                         }
696                 }
697         }
698
699         if (retval != ERROR_OK)
700                 return retval;
701
702         target->debug_reason = DBG_REASON_NOTHALTED;
703         target->state = TARGET_RUNNING;
704
705         return ERROR_OK;
706 }
707
708 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
709 {
710         int retval;
711
712         LOG_DEBUG("%s", target_name(target));
713
714         retval = aarch64_prepare_restart_one(target);
715         if (retval == ERROR_OK)
716                 retval = aarch64_do_restart_one(target, mode);
717
718         return retval;
719 }
720
721 /*
722  * prepare all but the current target for restart
723  */
724 static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
725 {
726         int retval = ERROR_OK;
727         struct target_list *head;
728         struct target *first = NULL;
729         uint64_t address;
730
731         foreach_smp_target(head, target->head) {
732                 struct target *curr = head->target;
733
734                 /* skip calling target */
735                 if (curr == target)
736                         continue;
737                 if (!target_was_examined(curr))
738                         continue;
739                 if (curr->state != TARGET_HALTED)
740                         continue;
741
742                 /*  resume at current address, not in step mode */
743                 retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
744                 if (retval == ERROR_OK)
745                         retval = aarch64_prepare_restart_one(curr);
746                 if (retval != ERROR_OK) {
747                         LOG_ERROR("failed to restore target %s", target_name(curr));
748                         break;
749                 }
750                 /* remember the first valid target in the group */
751                 if (first == NULL)
752                         first = curr;
753         }
754
755         if (p_first)
756                 *p_first = first;
757
758         return retval;
759 }
760
761
762 static int aarch64_step_restart_smp(struct target *target)
763 {
764         int retval = ERROR_OK;
765         struct target_list *head;
766         struct target *first = NULL;
767
768         LOG_DEBUG("%s", target_name(target));
769
770         retval = aarch64_prep_restart_smp(target, 0, &first);
771         if (retval != ERROR_OK)
772                 return retval;
773
774         if (first != NULL)
775                 retval = aarch64_do_restart_one(first, RESTART_LAZY);
776         if (retval != ERROR_OK) {
777                 LOG_DEBUG("error restarting target %s", target_name(first));
778                 return retval;
779         }
780
781         int64_t then = timeval_ms();
782         for (;;) {
783                 struct target *curr = target;
784                 bool all_resumed = true;
785
786                 foreach_smp_target(head, target->head) {
787                         uint32_t prsr;
788                         int resumed;
789
790                         curr = head->target;
791
792                         if (curr == target)
793                                 continue;
794
795                         if (!target_was_examined(curr))
796                                 continue;
797
798                         retval = aarch64_check_state_one(curr,
799                                         PRSR_SDR, PRSR_SDR, &resumed, &prsr);
800                         if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
801                                 all_resumed = false;
802                                 break;
803                         }
804
805                         if (curr->state != TARGET_RUNNING) {
806                                 curr->state = TARGET_RUNNING;
807                                 curr->debug_reason = DBG_REASON_NOTHALTED;
808                                 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
809                         }
810                 }
811
812                 if (all_resumed)
813                         break;
814
815                 if (timeval_ms() > then + 1000) {
816                         LOG_ERROR("%s: timeout waiting for target resume", __func__);
817                         retval = ERROR_TARGET_TIMEOUT;
818                         break;
819                 }
820                 /*
821                  * HACK: on Hi6220 there are 8 cores organized in 2 clusters
822                  * and it looks like the CTIs are not connected by a common
823                  * trigger matrix. It seems that we need to resume one core in each
824                  * cluster explicitly. So if we find that a core has not resumed
825                  * yet, we trigger an explicit resume for the second cluster.
826                  */
827                 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
828                 if (retval != ERROR_OK)
829                         break;
830         }
831
832         return retval;
833 }
834
835 static int aarch64_resume(struct target *target, int current,
836         target_addr_t address, int handle_breakpoints, int debug_execution)
837 {
838         int retval = 0;
839         uint64_t addr = address;
840
841         struct armv8_common *armv8 = target_to_armv8(target);
842         armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;
843
844         if (target->state != TARGET_HALTED)
845                 return ERROR_TARGET_NOT_HALTED;
846
847         /*
848          * If this target is part of an SMP group, prepare the other
849          * targets for resuming. This involves restoring the complete
850          * target register context and setting up CTI gates to accept
851          * resume events from the trigger matrix.
852          */
853         if (target->smp) {
854                 retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
855                 if (retval != ERROR_OK)
856                         return retval;
857         }
858
859         /* all targets prepared, restore and restart the current target */
860         retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
861                                  debug_execution);
862         if (retval == ERROR_OK)
863                 retval = aarch64_restart_one(target, RESTART_SYNC);
864         if (retval != ERROR_OK)
865                 return retval;
866
867         if (target->smp) {
868                 int64_t then = timeval_ms();
869                 for (;;) {
870                         struct target *curr = target;
871                         struct target_list *head;
872                         bool all_resumed = true;
873
874                         foreach_smp_target(head, target->head) {
875                                 uint32_t prsr;
876                                 int resumed;
877
878                                 curr = head->target;
879                                 if (curr == target)
880                                         continue;
881                                 if (!target_was_examined(curr))
882                                         continue;
883
884                                 retval = aarch64_check_state_one(curr,
885                                                 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
886                                 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
887                                         all_resumed = false;
888                                         break;
889                                 }
890
891                                 if (curr->state != TARGET_RUNNING) {
892                                         curr->state = TARGET_RUNNING;
893                                         curr->debug_reason = DBG_REASON_NOTHALTED;
894                                         target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
895                                 }
896                         }
897
898                         if (all_resumed)
899                                 break;
900
901                         if (timeval_ms() > then + 1000) {
902                                 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
903                                 retval = ERROR_TARGET_TIMEOUT;
904                                 break;
905                         }
906
907                         /*
908                          * HACK: on Hi6220 there are 8 cores organized in 2 clusters
909                          * and it looks like the CTIs are not connected by a common
910                          * trigger matrix. It seems that we need to resume one core in each
911                          * cluster explicitly. So if we find that a core has not resumed
912                          * yet, we trigger an explicit resume for the second cluster.
913                          */
914                         retval = aarch64_do_restart_one(curr, RESTART_LAZY);
915                         if (retval != ERROR_OK)
916                                 break;
917                 }
918         }
919
920         if (retval != ERROR_OK)
921                 return retval;
922
923         target->debug_reason = DBG_REASON_NOTHALTED;
924
925         if (!debug_execution) {
926                 target->state = TARGET_RUNNING;
927                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
928                 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
929         } else {
930                 target->state = TARGET_DEBUG_RUNNING;
931                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
932                 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
933         }
934
935         return ERROR_OK;
936 }
937
938 static int aarch64_debug_entry(struct target *target)
939 {
940         int retval = ERROR_OK;
941         struct armv8_common *armv8 = target_to_armv8(target);
942         struct arm_dpm *dpm = &armv8->dpm;
943         enum arm_state core_state;
944         uint32_t dscr;
945
946         /* make sure to clear all sticky errors */
947         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
948                         armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
949         if (retval == ERROR_OK)
950                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
951                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
952         if (retval == ERROR_OK)
953                 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
954
955         if (retval != ERROR_OK)
956                 return retval;
957
958         LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);
959
960         dpm->dscr = dscr;
961         core_state = armv8_dpm_get_core_state(dpm);
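        /* select AArch64 or AArch32 opcode tables and register access
         * functions depending on the state the PE halted in */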
962         armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
963         armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
964
965         /* close the CTI gate for all events */
966         if (retval == ERROR_OK)
967                 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
968         /* discard async exceptions */
969         if (retval == ERROR_OK)
970                 retval = dpm->instr_cpsr_sync(dpm);
971         if (retval != ERROR_OK)
972                 return retval;
973
974         /* Examine debug reason */
975         armv8_dpm_report_dscr(dpm, dscr);
976
977         /* save address of instruction that triggered the watchpoint? */
978         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
979                 uint32_t tmp;
980                 uint64_t wfar = 0;
981
982                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
983                                 armv8->debug_base + CPUV8_DBG_WFAR1,
984                                 &tmp);
985                 if (retval != ERROR_OK)
986                         return retval;
987                 wfar = tmp;
988                 wfar = (wfar << 32);
989                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
990                                 armv8->debug_base + CPUV8_DBG_WFAR0,
991                                 &tmp);
992                 if (retval != ERROR_OK)
993                         return retval;
994                 wfar |= tmp;
995                 armv8_dpm_report_wfar(&armv8->dpm, wfar);
996         }
997
998         retval = armv8_dpm_read_current_registers(&armv8->dpm);
999
1000         if (retval == ERROR_OK && armv8->post_debug_entry)
1001                 retval = armv8->post_debug_entry(target);
1002
1003         return retval;
1004 }
1005
1006 static int aarch64_post_debug_entry(struct target *target)
1007 {
1008         struct aarch64_common *aarch64 = target_to_aarch64(target);
1009         struct armv8_common *armv8 = &aarch64->armv8_common;
1010         int retval;
1011         enum arm_mode target_mode = ARM_MODE_ANY;
1012         uint32_t instr;
1013
1014         switch (armv8->arm.core_mode) {
1015         case ARMV8_64_EL0T:
1016                 target_mode = ARMV8_64_EL1H;
1017                 /* fall through */
1018         case ARMV8_64_EL1T:
1019         case ARMV8_64_EL1H:
1020                 instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
1021                 break;
1022         case ARMV8_64_EL2T:
1023         case ARMV8_64_EL2H:
1024                 instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
1025                 break;
1026         case ARMV8_64_EL3H:
1027         case ARMV8_64_EL3T:
1028                 instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
1029                 break;
1030
1031         case ARM_MODE_SVC:
1032         case ARM_MODE_ABT:
1033         case ARM_MODE_FIQ:
1034         case ARM_MODE_IRQ:
1035                 instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1036                 break;
1037
1038         default:
1039                 LOG_INFO("cannot read system control register in this mode");
1040                 return ERROR_FAIL;
1041         }
1042
1043         if (target_mode != ARM_MODE_ANY)
1044                 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
1045
1046         retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
1047         if (retval != ERROR_OK)
1048                 return retval;
1049
1050         if (target_mode != ARM_MODE_ANY)
1051                 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
1052
1053         LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1054         aarch64->system_control_reg_curr = aarch64->system_control_reg;
1055
1056         if (armv8->armv8_mmu.armv8_cache.info == -1) {
1057                 armv8_identify_cache(armv8);
1058                 armv8_read_mpidr(armv8);
1059         }
1060
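        /* decode SCTLR: M (bit 0) = MMU, C (bit 2) = data/unified cache,
         * I (bit 12) = instruction cache */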
1061         armv8->armv8_mmu.mmu_enabled =
1062                         (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1063         armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1064                 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1065         armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1066                 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1067         return ERROR_OK;
1068 }
1069
1070 /*
1071  * single-step a target
1072  */
1073 static int aarch64_step(struct target *target, int current, target_addr_t address,
1074         int handle_breakpoints)
1075 {
1076         struct armv8_common *armv8 = target_to_armv8(target);
1077         struct aarch64_common *aarch64 = target_to_aarch64(target);
1078         int saved_retval = ERROR_OK;
1079         int retval;
1080         uint32_t edecr;
1081
1082         armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
1083
1084         if (target->state != TARGET_HALTED) {
1085                 LOG_WARNING("target not halted");
1086                 return ERROR_TARGET_NOT_HALTED;
1087         }
1088
1089         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1090                         armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1091         /* make sure EDECR.SS is not set when restoring the register */
1092
1093         if (retval == ERROR_OK) {
1094                 edecr &= ~0x4;
1095                 /* set EDECR.SS to enter hardware step mode */
1096                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1097                                 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1098         }
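        /* DSCR/EDSCR bits [23:22] are the INTdis field used below to mask
         * interrupts for the duration of the step */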
1099         /* disable interrupts while stepping */
1100         if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1101                 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1102         /* bail out if stepping setup has failed */
1103         if (retval != ERROR_OK)
1104                 return retval;
1105
1106         if (target->smp && (current == 1)) {
1107                 /*
1108                  * isolate current target so that it doesn't get resumed
1109                  * together with the others
1110                  */
1111                 retval = arm_cti_gate_channel(armv8->cti, 1);
1112                 /* resume all other targets in the group */
1113                 if (retval == ERROR_OK)
1114                         retval = aarch64_step_restart_smp(target);
1115                 if (retval != ERROR_OK) {
1116                         LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1117                         return retval;
1118                 }
1119                 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1120         }
1121
1122         /* all other targets running, restore and restart the current target */
1123         retval = aarch64_restore_one(target, current, &address, 0, 0);
1124         if (retval == ERROR_OK)
1125                 retval = aarch64_restart_one(target, RESTART_LAZY);
1126
1127         if (retval != ERROR_OK)
1128                 return retval;
1129
1130         LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1131         if (!handle_breakpoints)
1132                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1133
1134         int64_t then = timeval_ms();
1135         for (;;) {
1136                 int stepped;
1137                 uint32_t prsr;
1138
1139                 retval = aarch64_check_state_one(target,
1140                                         PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1141                 if (retval != ERROR_OK || stepped)
1142                         break;
1143
1144                 if (timeval_ms() > then + 100) {
1145                         LOG_ERROR("timeout waiting for target %s halt after step",
1146                                         target_name(target));
1147                         retval = ERROR_TARGET_TIMEOUT;
1148                         break;
1149                 }
1150         }
1151
1152         /*
1153          * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1154          * causes a timeout. The core takes the step but doesn't complete it and so
1155          * debug state is never entered. However, you can manually halt the core
1156          * as an external debug event is also a WFI wakeup event.
1157          */
1158         if (retval == ERROR_TARGET_TIMEOUT)
1159                 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1160
1161         /* restore EDECR */
1162         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1163                         armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1164         if (retval != ERROR_OK)
1165                 return retval;
1166
1167         /* restore interrupts */
1168         if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1169                 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1170                 if (retval != ERROR_OK)
1171                         return retval;
1172         }
1173
1174         if (saved_retval != ERROR_OK)
1175                 return saved_retval;
1176
1177         return aarch64_poll(target);
1178 }
1179
1180 static int aarch64_restore_context(struct target *target, bool bpwp)
1181 {
1182         struct armv8_common *armv8 = target_to_armv8(target);
1183         struct arm *arm = &armv8->arm;
1184
1185         int retval;
1186
1187         LOG_DEBUG("%s", target_name(target));
1188
1189         if (armv8->pre_restore_context)
1190                 armv8->pre_restore_context(target);
1191
1192         retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1193         if (retval == ERROR_OK) {
1194                 /* registers are now invalid */
1195                 register_cache_invalidate(arm->core_cache);
1196                 register_cache_invalidate(arm->core_cache->next);
1197         }
1198
1199         return retval;
1200 }
1201
1202 /*
1203  * AArch64 breakpoint and watchpoint functions
1204  */
1205
1206 /* Setup hardware Breakpoint Register Pair */
1207 static int aarch64_set_breakpoint(struct target *target,
1208         struct breakpoint *breakpoint, uint8_t matchmode)
1209 {
1210         int retval;
1211         int brp_i = 0;
1212         uint32_t control;
1213         uint8_t byte_addr_select = 0x0F;
1214         struct aarch64_common *aarch64 = target_to_aarch64(target);
1215         struct armv8_common *armv8 = &aarch64->armv8_common;
1216         struct aarch64_brp *brp_list = aarch64->brp_list;
1217
1218         if (breakpoint->set) {
1219                 LOG_WARNING("breakpoint already set");
1220                 return ERROR_OK;
1221         }
1222
1223         if (breakpoint->type == BKPT_HARD) {
1224                 int64_t bpt_value;
1225                 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1226                         brp_i++;
1227                 if (brp_i >= aarch64->brp_num) {
1228                         LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1229                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1230                 }
1231                 breakpoint->set = brp_i + 1;
1232                 if (breakpoint->length == 2)
1233                         byte_addr_select = (3 << (breakpoint->address & 0x02));
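                /* assemble DBGBCR: BT[23:20] = matchmode, HMC[13] = 1,
                 * BAS[8:5] = byte address select, PMC[2:1] = 0b11, E[0] = 1 */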
1234                 control = ((matchmode & 0x7) << 20)
1235                         | (1 << 13)
1236                         | (byte_addr_select << 5)
1237                         | (3 << 1) | 1;
1238                 brp_list[brp_i].used = 1;
1239                 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1240                 brp_list[brp_i].control = control;
1241                 bpt_value = brp_list[brp_i].value;
1242
1243                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1244                                 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1245                                 (uint32_t)(bpt_value & 0xFFFFFFFF));
1246                 if (retval != ERROR_OK)
1247                         return retval;
1248                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1249                                 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1250                                 (uint32_t)(bpt_value >> 32));
1251                 if (retval != ERROR_OK)
1252                         return retval;
1253
1254                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1255                                 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1256                                 brp_list[brp_i].control);
1257                 if (retval != ERROR_OK)
1258                         return retval;
1259                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1260                         brp_list[brp_i].control,
1261                         brp_list[brp_i].value);
1262
1263         } else if (breakpoint->type == BKPT_SOFT) {
1264                 uint8_t code[4];
1265
1266                 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
1267                 retval = target_read_memory(target,
1268                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1269                                 breakpoint->length, 1,
1270                                 breakpoint->orig_instr);
1271                 if (retval != ERROR_OK)
1272                         return retval;
1273
1274                 armv8_cache_d_inner_flush_virt(armv8,
1275                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1276                                 breakpoint->length);
1277
1278                 retval = target_write_memory(target,
1279                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1280                                 breakpoint->length, 1, code);
1281                 if (retval != ERROR_OK)
1282                         return retval;
1283
1284                 armv8_cache_d_inner_flush_virt(armv8,
1285                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1286                                 breakpoint->length);
1287
1288                 armv8_cache_i_inner_inval_virt(armv8,
1289                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1290                                 breakpoint->length);
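                /* cleaning the D-cache and invalidating the I-cache for the
                 * patched address makes the HLT opcode visible to instruction
                 * fetches */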
1291
1292                 breakpoint->set = 0x11; /* any non-zero value will do */
1293         }
1294
1295         /* Ensure that halting debug mode is enabled */
1296         retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1297         if (retval != ERROR_OK) {
1298                 LOG_DEBUG("Failed to set DSCR.HDE");
1299                 return retval;
1300         }
1301
1302         return ERROR_OK;
1303 }
1304
1305 static int aarch64_set_context_breakpoint(struct target *target,
1306         struct breakpoint *breakpoint, uint8_t matchmode)
1307 {
1308         int retval = ERROR_FAIL;
1309         int brp_i = 0;
1310         uint32_t control;
1311         uint8_t byte_addr_select = 0x0F;
1312         struct aarch64_common *aarch64 = target_to_aarch64(target);
1313         struct armv8_common *armv8 = &aarch64->armv8_common;
1314         struct aarch64_brp *brp_list = aarch64->brp_list;
1315
1316         if (breakpoint->set) {
1317                 LOG_WARNING("breakpoint already set");
1318                 return retval;
1319         }
1320         /*check available context BRPs*/
1321         /* check for an available context-matching BRP */
1322         while ((brp_i < aarch64->brp_num) &&
1323                 (brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
1324
1325         if (brp_i >= aarch64->brp_num) {
1326                 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1327                 return ERROR_FAIL;
1328         }
1329
1330         breakpoint->set = brp_i + 1;
1331         control = ((matchmode & 0x7) << 20)
1332                 | (1 << 13)
1333                 | (byte_addr_select << 5)
1334                 | (3 << 1) | 1;
1335         brp_list[brp_i].used = 1;
1336         brp_list[brp_i].value = (breakpoint->asid);
1337         brp_list[brp_i].control = control;
1338         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1339                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1340                         brp_list[brp_i].value);
1341         if (retval != ERROR_OK)
1342                 return retval;
1343         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1344                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1345                         brp_list[brp_i].control);
1346         if (retval != ERROR_OK)
1347                 return retval;
1348         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1349                 brp_list[brp_i].control,
1350                 brp_list[brp_i].value);
1351         return ERROR_OK;
1352
1353 }
1354
1355 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1356 {
1357         int retval = ERROR_FAIL;
1358         int brp_1 = 0;  /* holds the contextID pair */
1359         int brp_2 = 0;  /* holds the IVA pair */
1360         uint32_t control_CTX, control_IVA;
1361         uint8_t CTX_byte_addr_select = 0x0F;
1362         uint8_t IVA_byte_addr_select = 0x0F;
1363         uint8_t CTX_machmode = 0x03;
1364         uint8_t IVA_machmode = 0x01;
1365         struct aarch64_common *aarch64 = target_to_aarch64(target);
1366         struct armv8_common *armv8 = &aarch64->armv8_common;
1367         struct aarch64_brp *brp_list = aarch64->brp_list;
1368
1369         if (breakpoint->set) {
1370                 LOG_WARNING("breakpoint already set");
1371                 return retval;
1372         }
1373         /* check for an available context-matching BRP */
1374         while ((brp_1 < aarch64->brp_num) &&
1375                 (brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
1376                 brp_1++;
1377
1378         LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1379         if (brp_1 >= aarch64->brp_num) {
1380                 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1381                 return ERROR_FAIL;
1382         }
1383
1384         while ((brp_list[brp_2].used ||
1385                 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1386                 brp_2++;
1387
1388         LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1389         if (brp_2 >= aarch64->brp_num) {
1390                 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1391                 return ERROR_FAIL;
1392         }
1393
1394         breakpoint->set = brp_1 + 1;
1395         breakpoint->linked_BRP = brp_2;
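             /* Program the two BRPs as a linked pair: brp_1 performs a
              * linked context ID compare (BT = 0b0011) and brp_2 a linked
              * address compare (BT = 0b0001); the LBN fields tie them
              * together, so the address match only triggers when the
              * context ID matches as well. */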
1396         control_CTX = ((CTX_matchmode & 0x7) << 20)
1397                 | (brp_2 << 16)
1398                 | (0 << 14)
1399                 | (CTX_byte_addr_select << 5)
1400                 | (3 << 1) | 1;
1401         brp_list[brp_1].used = 1;
1402         brp_list[brp_1].value = (breakpoint->asid);
1403         brp_list[brp_1].control = control_CTX;
1404         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1405                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1406                         brp_list[brp_1].value);
1407         if (retval != ERROR_OK)
1408                 return retval;
1409         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1410                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1411                         brp_list[brp_1].control);
1412         if (retval != ERROR_OK)
1413                 return retval;
1414
1415         control_IVA = ((IVA_matchmode & 0x7) << 20)
1416                 | (brp_1 << 16)
1417                 | (1 << 13)
1418                 | (IVA_byte_addr_select << 5)
1419                 | (3 << 1) | 1;
1420         brp_list[brp_2].used = 1;
1421         brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1422         brp_list[brp_2].control = control_IVA;
1423         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1424                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1425                         brp_list[brp_2].value & 0xFFFFFFFF);
1426         if (retval != ERROR_OK)
1427                 return retval;
1428         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1429                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1430                         brp_list[brp_2].value >> 32);
1431         if (retval != ERROR_OK)
1432                 return retval;
1433         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1434                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1435                         brp_list[brp_2].control);
1436         if (retval != ERROR_OK)
1437                 return retval;
1438
1439         return ERROR_OK;
1440 }
1441
1442 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1443 {
1444         int retval;
1445         struct aarch64_common *aarch64 = target_to_aarch64(target);
1446         struct armv8_common *armv8 = &aarch64->armv8_common;
1447         struct aarch64_brp *brp_list = aarch64->brp_list;
1448
1449         if (!breakpoint->set) {
1450                 LOG_WARNING("breakpoint not set");
1451                 return ERROR_OK;
1452         }
1453
1454         if (breakpoint->type == BKPT_HARD) {
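                     /* A breakpoint with both an address and an ASID is a
                      * hybrid breakpoint and occupies two linked BRPs;
                      * clear and release both of them. */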
1455                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1456                         int brp_i = breakpoint->set - 1;
1457                         int brp_j = breakpoint->linked_BRP;
1458                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1459                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1460                                 return ERROR_OK;
1461                         }
1462                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1463                                 brp_list[brp_i].control, brp_list[brp_i].value);
1464                         brp_list[brp_i].used = 0;
1465                         brp_list[brp_i].value = 0;
1466                         brp_list[brp_i].control = 0;
1467                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1468                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1469                                         brp_list[brp_i].control);
1470                         if (retval != ERROR_OK)
1471                                 return retval;
1472                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1473                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1474                                         (uint32_t)brp_list[brp_i].value);
1475                         if (retval != ERROR_OK)
1476                                 return retval;
1477                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1478                                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1479                                         (uint32_t)brp_list[brp_i].value);
1480                         if (retval != ERROR_OK)
1481                                 return retval;
1482                         if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1483                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1484                                 return ERROR_OK;
1485                         }
1486                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1487                                 brp_list[brp_j].control, brp_list[brp_j].value);
1488                         brp_list[brp_j].used = 0;
1489                         brp_list[brp_j].value = 0;
1490                         brp_list[brp_j].control = 0;
1491                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1492                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1493                                         brp_list[brp_j].control);
1494                         if (retval != ERROR_OK)
1495                                 return retval;
1496                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1497                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
1498                                         (uint32_t)brp_list[brp_j].value);
1499                         if (retval != ERROR_OK)
1500                                 return retval;
1501                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1502                                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
1503                                         (uint32_t)brp_list[brp_j].value);
1504                         if (retval != ERROR_OK)
1505                                 return retval;
1506
1507                         breakpoint->linked_BRP = 0;
1508                         breakpoint->set = 0;
1509                         return ERROR_OK;
1510
1511                 } else {
1512                         int brp_i = breakpoint->set - 1;
1513                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1514                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1515                                 return ERROR_OK;
1516                         }
1517                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1518                                 brp_list[brp_i].control, brp_list[brp_i].value);
1519                         brp_list[brp_i].used = 0;
1520                         brp_list[brp_i].value = 0;
1521                         brp_list[brp_i].control = 0;
1522                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1523                                         + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1524                                         brp_list[brp_i].control);
1525                         if (retval != ERROR_OK)
1526                                 return retval;
1527                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1528                                         + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1529                                         brp_list[brp_i].value);
1530                         if (retval != ERROR_OK)
1531                                 return retval;
1532
1533                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1534                                         + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1535                                         (uint32_t)brp_list[brp_i].value);
1536                         if (retval != ERROR_OK)
1537                                 return retval;
1538                         breakpoint->set = 0;
1539                         return ERROR_OK;
1540                 }
1541         } else {
1542                 /* restore original instruction (kept in target endianness) */
1543
1544                 armv8_cache_d_inner_flush_virt(armv8,
1545                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1546                                 breakpoint->length);
1547
1548                 if (breakpoint->length == 4) {
1549                         retval = target_write_memory(target,
1550                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1551                                         4, 1, breakpoint->orig_instr);
1552                         if (retval != ERROR_OK)
1553                                 return retval;
1554                 } else {
1555                         retval = target_write_memory(target,
1556                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1557                                         2, 1, breakpoint->orig_instr);
1558                         if (retval != ERROR_OK)
1559                                 return retval;
1560                 }
1561
1562                 armv8_cache_d_inner_flush_virt(armv8,
1563                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1564                                 breakpoint->length);
1565
1566                 armv8_cache_i_inner_inval_virt(armv8,
1567                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1568                                 breakpoint->length);
1569         }
1570         breakpoint->set = 0;
1571
1572         return ERROR_OK;
1573 }
1574
1575 static int aarch64_add_breakpoint(struct target *target,
1576         struct breakpoint *breakpoint)
1577 {
1578         struct aarch64_common *aarch64 = target_to_aarch64(target);
1579
1580         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1581                 LOG_INFO("no hardware breakpoint available");
1582                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1583         }
1584
1585         if (breakpoint->type == BKPT_HARD)
1586                 aarch64->brp_num_available--;
1587
1588         return aarch64_set_breakpoint(target, breakpoint, 0x00);        /* Exact match */
1589 }
1590
1591 static int aarch64_add_context_breakpoint(struct target *target,
1592         struct breakpoint *breakpoint)
1593 {
1594         struct aarch64_common *aarch64 = target_to_aarch64(target);
1595
1596         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1597                 LOG_INFO("no hardware breakpoint available");
1598                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1599         }
1600
1601         if (breakpoint->type == BKPT_HARD)
1602                 aarch64->brp_num_available--;
1603
1604         return aarch64_set_context_breakpoint(target, breakpoint, 0x02);        /* asid match */
1605 }
1606
1607 static int aarch64_add_hybrid_breakpoint(struct target *target,
1608         struct breakpoint *breakpoint)
1609 {
1610         struct aarch64_common *aarch64 = target_to_aarch64(target);
1611
1612         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1613                 LOG_INFO("no hardware breakpoint available");
1614                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1615         }
1616
1617         if (breakpoint->type == BKPT_HARD)
1618                 aarch64->brp_num_available--;
1619
1620         return aarch64_set_hybrid_breakpoint(target, breakpoint);       /* context ID + address match */
1621 }
1622
1623
1624 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1625 {
1626         struct aarch64_common *aarch64 = target_to_aarch64(target);
1627
1628 #if 0
1629 /* It is perfectly possible to remove breakpoints while the target is running */
1630         if (target->state != TARGET_HALTED) {
1631                 LOG_WARNING("target not halted");
1632                 return ERROR_TARGET_NOT_HALTED;
1633         }
1634 #endif
1635
1636         if (breakpoint->set) {
1637                 aarch64_unset_breakpoint(target, breakpoint);
1638                 if (breakpoint->type == BKPT_HARD)
1639                         aarch64->brp_num_available++;
1640         }
1641
1642         return ERROR_OK;
1643 }
1644
1645 /*
1646  * AArch64 Reset functions
1647  */
1648
1649 static int aarch64_assert_reset(struct target *target)
1650 {
1651         struct armv8_common *armv8 = target_to_armv8(target);
1652
1653         LOG_DEBUG(" ");
1654
1655         /* FIXME when halt is requested, make it work somehow... */
1656
1657         /* Issue some kind of warm reset. */
1658         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1659                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1660         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1661                 /* REVISIT handle "pulls" cases, if there's
1662                  * hardware that needs them to work.
1663                  */
1664                 jtag_add_reset(0, 1);
1665         } else {
1666                 LOG_ERROR("%s: how to reset?", target_name(target));
1667                 return ERROR_FAIL;
1668         }
1669
1670         /* registers are now invalid */
1671         if (target_was_examined(target)) {
1672                 register_cache_invalidate(armv8->arm.core_cache);
1673                 register_cache_invalidate(armv8->arm.core_cache->next);
1674         }
1675
1676         target->state = TARGET_RESET;
1677
1678         return ERROR_OK;
1679 }
1680
1681 static int aarch64_deassert_reset(struct target *target)
1682 {
1683         int retval;
1684
1685         LOG_DEBUG(" ");
1686
1687         /* be certain SRST is off */
1688         jtag_add_reset(0, 0);
1689
1690         if (!target_was_examined(target))
1691                 return ERROR_OK;
1692
1693         retval = aarch64_poll(target);
1694         if (retval != ERROR_OK)
1695                 return retval;
1696
1697         if (target->reset_halt) {
1698                 if (target->state != TARGET_HALTED) {
1699                         LOG_WARNING("%s: ran after reset and before halt ...",
1700                                 target_name(target));
1701                         retval = target_halt(target);
1702                         if (retval != ERROR_OK)
1703                                 return retval;
1704                 }
1705         }
1706
1707         return aarch64_init_debug_access(target);
1708 }
1709
1710 static int aarch64_write_cpu_memory_slow(struct target *target,
1711         uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1712 {
1713         struct armv8_common *armv8 = target_to_armv8(target);
1714         struct arm_dpm *dpm = &armv8->dpm;
1715         struct arm *arm = &armv8->arm;
1716         int retval;
1717
1718         armv8_reg_current(arm, 1)->dirty = true;
1719
1720         /* change DCC to normal mode if necessary */
1721         if (*dscr & DSCR_MA) {
1722                 *dscr &= ~DSCR_MA;
1723                 retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1724                                 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1725                 if (retval != ERROR_OK)
1726                         return retval;
1727         }
1728
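             /* Slow path: push each element through DTRRX into W1/R1 on
              * the core and let a post-indexed STRB/STRH/STR write it to
              * the address held in X0/R0. */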
1729         while (count) {
1730                 uint32_t data, opcode;
1731
1732                 /* write the data to store into DTRRX */
1733                 if (size == 1)
1734                         data = *buffer;
1735                 else if (size == 2)
1736                         data = target_buffer_get_u16(target, buffer);
1737                 else
1738                         data = target_buffer_get_u32(target, buffer);
1739                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1740                                 armv8->debug_base + CPUV8_DBG_DTRRX, data);
1741                 if (retval != ERROR_OK)
1742                         return retval;
1743
1744                 if (arm->core_state == ARM_STATE_AARCH64)
1745                         retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
1746                 else
1747                         retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1748                 if (retval != ERROR_OK)
1749                         return retval;
1750
1751                 if (size == 1)
1752                         opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
1753                 else if (size == 2)
1754                         opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
1755                 else
1756                         opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
1757                 retval = dpm->instr_execute(dpm, opcode);
1758                 if (retval != ERROR_OK)
1759                         return retval;
1760
1761                 /* Advance */
1762                 buffer += size;
1763                 --count;
1764         }
1765
1766         return ERROR_OK;
1767 }
1768
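     /* Fast path for word-aligned 32-bit writes: with the DCC in memory
      * access mode (EDSCR.MA = 1) every write to DBGDTRRX is turned by the
      * core into a store to [X0] with X0 auto-incremented by 4, so the
      * whole buffer can be streamed with one non-incrementing AP transfer. */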
1769 static int aarch64_write_cpu_memory_fast(struct target *target,
1770         uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1771 {
1772         struct armv8_common *armv8 = target_to_armv8(target);
1773         struct arm *arm = &armv8->arm;
1774         int retval;
1775
1776         armv8_reg_current(arm, 1)->dirty = true;
1777
1778         /* Step 1.d   - Change DCC to memory mode */
1779         *dscr |= DSCR_MA;
1780         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1781                         armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1782         if (retval != ERROR_OK)
1783                 return retval;
1784
1785
1786         /* Step 2.a   - Do the write */
1787         retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1788                                         buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
1789         if (retval != ERROR_OK)
1790                 return retval;
1791
1792         /* Step 3.a   - Switch DTR mode back to Normal mode */
1793         *dscr &= ~DSCR_MA;
1794         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1795                                 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1796         if (retval != ERROR_OK)
1797                 return retval;
1798
1799         return ERROR_OK;
1800 }
1801
1802 static int aarch64_write_cpu_memory(struct target *target,
1803         uint64_t address, uint32_t size,
1804         uint32_t count, const uint8_t *buffer)
1805 {
1806         /* write memory through APB-AP */
1807         int retval = ERROR_COMMAND_SYNTAX_ERROR;
1808         struct armv8_common *armv8 = target_to_armv8(target);
1809         struct arm_dpm *dpm = &armv8->dpm;
1810         struct arm *arm = &armv8->arm;
1811         uint32_t dscr;
1812
1813         if (target->state != TARGET_HALTED) {
1814                 LOG_WARNING("target not halted");
1815                 return ERROR_TARGET_NOT_HALTED;
1816         }
1817
1818         /* Mark register X0 as dirty, as it will be used
1819          * for transferring the data.
1820          * It will be restored automatically when exiting
1821          * debug mode
1822          */
1823         armv8_reg_current(arm, 0)->dirty = true;
1824
1825         /* This algorithm comes from DDI0487A.g, chapter J9.1 */
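             /* In outline: place the target address in X0/R0 via the DTR,
              * stream the data through DTRRX either in memory access mode
              * (fast, word-aligned case) or one element at a time (slow
              * case), then check DSCR for sticky abort flags. */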
1826
1827         /* Read DSCR */
1828         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1829                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1830         if (retval != ERROR_OK)
1831                 return retval;
1832
1833         /* Set Normal access mode  */
1834         dscr = (dscr & ~DSCR_MA);
1835         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1836                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1837         if (retval != ERROR_OK)
1838                 return retval;
1839
1840         if (arm->core_state == ARM_STATE_AARCH64) {
1841                 /* Write X0 with value 'address' using write procedure */
1842                 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1843                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1844                 retval = dpm->instr_write_data_dcc_64(dpm,
1845                                 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
1846         } else {
1847                 /* Write R0 with value 'address' using write procedure */
1848                 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1849                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1850                 retval = dpm->instr_write_data_dcc(dpm,
1851                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
1852         }
1853
1854         if (retval != ERROR_OK)
1855                 return retval;
1856
1857         if (size == 4 && (address % 4) == 0)
1858                 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
1859         else
1860                 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
1861
1862         if (retval != ERROR_OK) {
1863                 /* Unset DTR mode */
1864                 mem_ap_read_atomic_u32(armv8->debug_ap,
1865                                         armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1866                 dscr &= ~DSCR_MA;
1867                 mem_ap_write_atomic_u32(armv8->debug_ap,
1868                                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1869         }
1870
1871         /* Check for sticky abort flags in the DSCR */
1872         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1873                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1874         if (retval != ERROR_OK)
1875                 return retval;
1876
1877         dpm->dscr = dscr;
1878         if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1879                 /* Abort occurred - clear it and exit */
1880                 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1881                 armv8_dpm_handle_exception(dpm, true);
1882                 return ERROR_FAIL;
1883         }
1884
1885         /* Done */
1886         return ERROR_OK;
1887 }
1888
1889 static int aarch64_read_cpu_memory_slow(struct target *target,
1890         uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1891 {
1892         struct armv8_common *armv8 = target_to_armv8(target);
1893         struct arm_dpm *dpm = &armv8->dpm;
1894         struct arm *arm = &armv8->arm;
1895         int retval;
1896
1897         armv8_reg_current(arm, 1)->dirty = true;
1898
1899         /* change DCC to normal mode (if necessary) */
1900         if (*dscr & DSCR_MA) {
1901                 *dscr &= ~DSCR_MA;
1902                 retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1903                                 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1904                 if (retval != ERROR_OK)
1905                         return retval;
1906         }
1907
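             /* Slow path: have the core load each element from [X0]/[R0]
              * into W1/R1 with a post-indexed LDRB/LDRH/LDR, move it to
              * DTRTX, and read it back through the debug AP. */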
1908         while (count) {
1909                 uint32_t opcode, data;
1910
1911                 if (size == 1)
1912                         opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1913                 else if (size == 2)
1914                         opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1915                 else
1916                         opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1917                 retval = dpm->instr_execute(dpm, opcode);
1918                 if (retval != ERROR_OK)
1919                         return retval;
1920
1921                 if (arm->core_state == ARM_STATE_AARCH64)
1922                         retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1923                 else
1924                         retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1925                 if (retval != ERROR_OK)
1926                         return retval;
1927
1928                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1929                                 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1930                 if (retval != ERROR_OK)
1931                         return retval;
1932
1933                 if (size == 1)
1934                         *buffer = (uint8_t)data;
1935                 else if (size == 2)
1936                         target_buffer_set_u16(target, buffer, (uint16_t)data);
1937                 else
1938                         target_buffer_set_u32(target, buffer, data);
1939
1940                 /* Advance */
1941                 buffer += size;
1942                 --count;
1943         }
1944
1945         return ERROR_OK;
1946 }
1947
1948 static int aarch64_read_cpu_memory_fast(struct target *target,
1949         uint32_t count, uint8_t *buffer, uint32_t *dscr)
1950 {
1951         struct armv8_common *armv8 = target_to_armv8(target);
1952         struct arm_dpm *dpm = &armv8->dpm;
1953         struct arm *arm = &armv8->arm;
1954         int retval;
1955         uint32_t value;
1956
1957         /* Mark X1 as dirty */
1958         armv8_reg_current(arm, 1)->dirty = true;
1959
1960         if (arm->core_state == ARM_STATE_AARCH64) {
1961                 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1962                 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1963         } else {
1964                 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1965                 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1966         }
1967
1968         if (retval != ERROR_OK)
1969                 return retval;
1970
1971         /* Step 1.e - Change DCC to memory mode */
1972         *dscr |= DSCR_MA;
1973         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
1974                         armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1975         if (retval != ERROR_OK)
1976                 return retval;
1977
1978         /* Step 1.f - read DBGDTRTX and discard the value */
1979         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1980                         armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1981         if (retval != ERROR_OK)
1982                 return retval;
1983
1984         count--;
1985         /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1986          * Abort flags are sticky, so can be read at end of transactions
1987          *
1988          * This data is read in aligned to 32 bit boundary.
1989          */
1990
1991         if (count) {
1992                 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1993                  * increments X0 by 4. */
1994                 retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
1995                                                                         armv8->debug_base + CPUV8_DBG_DTRTX);
1996                 if (retval != ERROR_OK)
1997                         return retval;
1998         }
1999
2000         /* Step 3.a - set DTR access mode back to Normal mode   */
2001         *dscr &= ~DSCR_MA;
2002         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
2003                                         armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2004         if (retval != ERROR_OK)
2005                 return retval;
2006
2007         /* Step 3.b - read DBGDTRTX for the final value */
2008         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2009                         armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2010         if (retval != ERROR_OK)
2011                 return retval;
2012
2013         target_buffer_set_u32(target, buffer + count * 4, value);
2014         return retval;
2015 }
2016
2017 static int aarch64_read_cpu_memory(struct target *target,
2018         target_addr_t address, uint32_t size,
2019         uint32_t count, uint8_t *buffer)
2020 {
2021         /* read memory through APB-AP */
2022         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2023         struct armv8_common *armv8 = target_to_armv8(target);
2024         struct arm_dpm *dpm = &armv8->dpm;
2025         struct arm *arm = &armv8->arm;
2026         uint32_t dscr;
2027
2028         LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
2029                         address, size, count);
2030
2031         if (target->state != TARGET_HALTED) {
2032                 LOG_WARNING("target not halted");
2033                 return ERROR_TARGET_NOT_HALTED;
2034         }
2035
2036         /* Mark register X0 as dirty, as it will be used
2037          * for transferring the data.
2038          * It will be restored automatically when exiting
2039          * debug mode
2040          */
2041         armv8_reg_current(arm, 0)->dirty = true;
2042
2043         /* Read DSCR */
2044         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2045                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2046         if (retval != ERROR_OK)
2047                 return retval;
2048
2049         /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2050
2051         /* Set Normal access mode  */
2052         dscr &= ~DSCR_MA;
2053         retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
2054                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2055         if (retval != ERROR_OK)
2056                 return retval;
2057
2058         if (arm->core_state == ARM_STATE_AARCH64) {
2059                 /* Write X0 with value 'address' using write procedure */
2060                 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2061                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2062                 retval = dpm->instr_write_data_dcc_64(dpm,
2063                                 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2064         } else {
2065                 /* Write R0 with value 'address' using write procedure */
2066                 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2067                 /* Step 1.c   - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2068                 retval = dpm->instr_write_data_dcc(dpm,
2069                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2070         }
2071
2072         if (retval != ERROR_OK)
2073                 return retval;
2074
2075         if (size == 4 && (address % 4) == 0)
2076                 retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
2077         else
2078                 retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2079
2080         if (dscr & DSCR_MA) {
2081                 dscr &= ~DSCR_MA;
2082                 mem_ap_write_atomic_u32(armv8->debug_ap,
2083                                         armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2084         }
2085
2086         if (retval != ERROR_OK)
2087                 return retval;
2088
2089         /* Check for sticky abort flags in the DSCR */
2090         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2091                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2092         if (retval != ERROR_OK)
2093                 return retval;
2094
2095         dpm->dscr = dscr;
2096
2097         if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2098                 /* Abort occurred - clear it and exit */
2099                 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2100                 armv8_dpm_handle_exception(dpm, true);
2101                 return ERROR_FAIL;
2102         }
2103
2104         /* Done */
2105         return ERROR_OK;
2106 }
2107
2108 static int aarch64_read_phys_memory(struct target *target,
2109         target_addr_t address, uint32_t size,
2110         uint32_t count, uint8_t *buffer)
2111 {
2112         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2113
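             /* Physical access: temporarily switch the MMU off so the
              * CPU-side accesses issued below use physical addresses; the
              * virtual-address paths below turn it back on when needed. */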
2114         if (count && buffer) {
2115                 /* read memory through APB-AP */
2116                 retval = aarch64_mmu_modify(target, 0);
2117                 if (retval != ERROR_OK)
2118                         return retval;
2119                 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2120         }
2121         return retval;
2122 }
2123
2124 static int aarch64_read_memory(struct target *target, target_addr_t address,
2125         uint32_t size, uint32_t count, uint8_t *buffer)
2126 {
2127         int mmu_enabled = 0;
2128         int retval;
2129
2130         /* determine if MMU was enabled on target stop */
2131         retval = aarch64_mmu(target, &mmu_enabled);
2132         if (retval != ERROR_OK)
2133                 return retval;
2134
2135         if (mmu_enabled) {
2136                 /* enable MMU as we could have disabled it for phys access */
2137                 retval = aarch64_mmu_modify(target, 1);
2138                 if (retval != ERROR_OK)
2139                         return retval;
2140         }
2141         return aarch64_read_cpu_memory(target, address, size, count, buffer);
2142 }
2143
2144 static int aarch64_write_phys_memory(struct target *target,
2145         target_addr_t address, uint32_t size,
2146         uint32_t count, const uint8_t *buffer)
2147 {
2148         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2149
2150         if (count && buffer) {
2151                 /* write memory through APB-AP */
2152                 retval = aarch64_mmu_modify(target, 0);
2153                 if (retval != ERROR_OK)
2154                         return retval;
2155                 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2156         }
2157
2158         return retval;
2159 }
2160
2161 static int aarch64_write_memory(struct target *target, target_addr_t address,
2162         uint32_t size, uint32_t count, const uint8_t *buffer)
2163 {
2164         int mmu_enabled = 0;
2165         int retval;
2166
2167         /* determine if MMU was enabled on target stop */
2168         retval = aarch64_mmu(target, &mmu_enabled);
2169         if (retval != ERROR_OK)
2170                 return retval;
2171
2172         if (mmu_enabled) {
2173                 /* enable MMU as we could have disabled it for phys access */
2174                 retval = aarch64_mmu_modify(target, 1);
2175                 if (retval != ERROR_OK)
2176                         return retval;
2177         }
2178         return aarch64_write_cpu_memory(target, address, size, count, buffer);
2179 }
2180
2181 static int aarch64_handle_target_request(void *priv)
2182 {
2183         struct target *target = priv;
2184         struct armv8_common *armv8 = target_to_armv8(target);
2185         int retval;
2186
2187         if (!target_was_examined(target))
2188                 return ERROR_OK;
2189         if (!target->dbg_msg_enabled)
2190                 return ERROR_OK;
2191
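             /* While the core is running, poll the DCC: every word the
              * target application writes to the DTR shows up as DTRTX full
              * and is handed to the generic target_request() dispatcher. */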
2192         if (target->state == TARGET_RUNNING) {
2193                 uint32_t request;
2194                 uint32_t dscr;
2195                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2196                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2197
2198                 /* check if we have data */
2199                 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2200                         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2201                                         armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2202                         if (retval == ERROR_OK) {
2203                                 target_request(target, request);
2204                                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2205                                                 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2206                         }
2207                 }
2208         }
2209
2210         return ERROR_OK;
2211 }
2212
2213 static int aarch64_examine_first(struct target *target)
2214 {
2215         struct aarch64_common *aarch64 = target_to_aarch64(target);
2216         struct armv8_common *armv8 = &aarch64->armv8_common;
2217         struct adiv5_dap *swjdp = armv8->arm.dap;
2218         struct aarch64_private_config *pc;
2219         int i;
2220         int retval = ERROR_OK;
2221         uint64_t debug, ttypr;
2222         uint32_t cpuid;
2223         uint32_t tmp0, tmp1, tmp2, tmp3;
2224         debug = ttypr = cpuid = 0;
2225
2226         /* Search for the APB-AP - it is needed for access to debug registers */
2227         retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2228         if (retval != ERROR_OK) {
2229                 LOG_ERROR("Could not find APB-AP for debug access");
2230                 return retval;
2231         }
2232
2233         retval = mem_ap_init(armv8->debug_ap);
2234         if (retval != ERROR_OK) {
2235                 LOG_ERROR("Could not initialize the APB-AP");
2236                 return retval;
2237         }
2238
2239         armv8->debug_ap->memaccess_tck = 10;
2240
2241         if (!target->dbgbase_set) {
2242                 uint32_t dbgbase;
2243                 /* Get ROM Table base */
2244                 uint32_t apid;
2245                 int32_t coreidx = target->coreid;
2246                 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2247                 if (retval != ERROR_OK)
2248                         return retval;
2249                 /* Lookup 0x15 -- Processor DAP */
2250                 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2251                                 &armv8->debug_base, &coreidx);
2252                 if (retval != ERROR_OK)
2253                         return retval;
2254                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2255                                 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2256         } else
2257                 armv8->debug_base = target->dbgbase;
2258
2259         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2260                         armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2261         if (retval != ERROR_OK) {
2262                 LOG_DEBUG("Examine %s failed", "oslock");
2263                 return retval;
2264         }
2265
2266         retval = mem_ap_read_u32(armv8->debug_ap,
2267                         armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2268         if (retval != ERROR_OK) {
2269                 LOG_DEBUG("Examine %s failed", "CPUID");
2270                 return retval;
2271         }
2272
2273         retval = mem_ap_read_u32(armv8->debug_ap,
2274                         armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2275         retval += mem_ap_read_u32(armv8->debug_ap,
2276                         armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2277         if (retval != ERROR_OK) {
2278                 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2279                 return retval;
2280         }
2281         retval = mem_ap_read_u32(armv8->debug_ap,
2282                         armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2283         retval += mem_ap_read_u32(armv8->debug_ap,
2284                         armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2285         if (retval != ERROR_OK) {
2286                 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2287                 return retval;
2288         }
2289
2290         retval = dap_run(armv8->debug_ap->dap);
2291         if (retval != ERROR_OK) {
2292                 LOG_ERROR("%s: examination failed", target_name(target));
2293                 return retval;
2294         }
2295
2296         ttypr |= tmp1;
2297         ttypr = (ttypr << 32) | tmp0;
2298         debug |= tmp3;
2299         debug = (debug << 32) | tmp2;
2300
2301         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2302         LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2303         LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2304
2305         if (target->private_config == NULL)
2306                 return ERROR_FAIL;
2307
2308         pc = (struct aarch64_private_config *)target->private_config;
2309         if (pc->cti == NULL)
2310                 return ERROR_FAIL;
2311
2312         armv8->cti = pc->cti;
2313
2314         retval = aarch64_dpm_setup(aarch64, debug);
2315         if (retval != ERROR_OK)
2316                 return retval;
2317
2318         /* Setup Breakpoint Register Pairs */
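             /* ID_AA64DFR0_EL1.BRPs[15:12] holds the number of breakpoints
              * minus one, CTX_CMPs[31:28] the number of context-aware ones
              * minus one; the context-capable pairs are the highest
              * numbered BRPs. */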
2319         aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2320         aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2321         aarch64->brp_num_available = aarch64->brp_num;
2322         aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2323         for (i = 0; i < aarch64->brp_num; i++) {
2324                 aarch64->brp_list[i].used = 0;
2325                 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2326                         aarch64->brp_list[i].type = BRP_NORMAL;
2327                 else
2328                         aarch64->brp_list[i].type = BRP_CONTEXT;
2329                 aarch64->brp_list[i].value = 0;
2330                 aarch64->brp_list[i].control = 0;
2331                 aarch64->brp_list[i].BRPn = i;
2332         }
2333
2334         LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2335
2336         target->state = TARGET_UNKNOWN;
2337         target->debug_reason = DBG_REASON_NOTHALTED;
2338         aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2339         target_set_examined(target);
2340         return ERROR_OK;
2341 }
2342
2343 static int aarch64_examine(struct target *target)
2344 {
2345         int retval = ERROR_OK;
2346
2347         /* don't re-probe hardware after each reset */
2348         if (!target_was_examined(target))
2349                 retval = aarch64_examine_first(target);
2350
2351         /* Configure core debug access */
2352         if (retval == ERROR_OK)
2353                 retval = aarch64_init_debug_access(target);
2354
2355         return retval;
2356 }
2357
2358 /*
2359  *      AArch64 target creation and initialization
2360  */
2361
2362 static int aarch64_init_target(struct command_context *cmd_ctx,
2363         struct target *target)
2364 {
2365         /* examine_first() does a bunch of this */
2366         arm_semihosting_init(target);
2367         return ERROR_OK;
2368 }
2369
2370 static int aarch64_init_arch_info(struct target *target,
2371         struct aarch64_common *aarch64, struct adiv5_dap *dap)
2372 {
2373         struct armv8_common *armv8 = &aarch64->armv8_common;
2374
2375         /* Setup struct aarch64_common */
2376         aarch64->common_magic = AARCH64_COMMON_MAGIC;
2377         armv8->arm.dap = dap;
2378
2379         /* register arch-specific functions */
2380         armv8->examine_debug_reason = NULL;
2381         armv8->post_debug_entry = aarch64_post_debug_entry;
2382         armv8->pre_restore_context = NULL;
2383         armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2384
2385         armv8_init_arch_info(target, armv8);
2386         target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2387
2388         return ERROR_OK;
2389 }
2390
2391 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2392 {
2393         struct aarch64_private_config *pc = target->private_config;
2394         struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2395
2396         if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2397                 return ERROR_FAIL;
2398
2399         return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2400 }
2401
2402 static void aarch64_deinit_target(struct target *target)
2403 {
2404         struct aarch64_common *aarch64 = target_to_aarch64(target);
2405         struct armv8_common *armv8 = &aarch64->armv8_common;
2406         struct arm_dpm *dpm = &armv8->dpm;
2407
2408         armv8_free_reg_cache(target);
2409         free(aarch64->brp_list);
2410         free(dpm->dbp);
2411         free(dpm->dwp);
2412         free(target->private_config);
2413         free(aarch64);
2414 }
2415
2416 static int aarch64_mmu(struct target *target, int *enabled)
2417 {
2418         if (target->state != TARGET_HALTED) {
2419                 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2420                 return ERROR_TARGET_INVALID;
2421         }
2422
2423         *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2424         return ERROR_OK;
2425 }
2426
2427 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2428                              target_addr_t *phys)
2429 {
2430         return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2431 }
2432
2433 /*
2434  * private target configuration items
2435  */
2436 enum aarch64_cfg_param {
2437         CFG_CTI,
2438 };
2439
2440 static const Jim_Nvp nvp_config_opts[] = {
2441         { .name = "-cti", .value = CFG_CTI },
2442         { .name = NULL, .value = -1 }
2443 };
2444
2445 static int aarch64_jim_configure(struct target *target, Jim_GetOptInfo *goi)
2446 {
2447         struct aarch64_private_config *pc;
2448         Jim_Nvp *n;
2449         int e;
2450
2451         pc = (struct aarch64_private_config *)target->private_config;
2452         if (pc == NULL) {
2453                         pc = calloc(1, sizeof(struct aarch64_private_config));
2454                         target->private_config = pc;
2455         }
2456
2457         /*
2458          * Call adiv5_jim_configure() to parse the common DAP options
2459          * It will return JIM_CONTINUE if it didn't find any known
2460          * options, JIM_OK if it correctly parsed the topmost option
2461          * and JIM_ERR if an error occurred during parameter evaluation.
2462          * For JIM_CONTINUE, we check our own params.
2463          */
2464         e = adiv5_jim_configure(target, goi);
2465         if (e != JIM_CONTINUE)
2466                 return e;
2467
2468         /* parse config or cget options ... */
2469         if (goi->argc > 0) {
2470                 Jim_SetEmptyResult(goi->interp);
2471
2472                 /* check first if topmost item is for us */
2473                 e = Jim_Nvp_name2value_obj(goi->interp, nvp_config_opts,
2474                                 goi->argv[0], &n);
2475                 if (e != JIM_OK)
2476                         return JIM_CONTINUE;
2477
2478                 e = Jim_GetOpt_Obj(goi, NULL);
2479                 if (e != JIM_OK)
2480                         return e;
2481
2482                 switch (n->value) {
2483                 case CFG_CTI: {
2484                         if (goi->isconfigure) {
2485                                 Jim_Obj *o_cti;
2486                                 struct arm_cti *cti;
2487                                 e = Jim_GetOpt_Obj(goi, &o_cti);
2488                                 if (e != JIM_OK)
2489                                         return e;
2490                                 cti = cti_instance_by_jim_obj(goi->interp, o_cti);
2491                                 if (cti == NULL) {
2492                                         Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
2493                                         return JIM_ERR;
2494                                 }
2495                                 pc->cti = cti;
2496                         } else {
2497                                 if (goi->argc != 0) {
2498                                         Jim_WrongNumArgs(goi->interp,
2499                                                         goi->argc, goi->argv,
2500                                                         "NO PARAMS");
2501                                         return JIM_ERR;
2502                                 }
2503
2504                                 if (pc == NULL || pc->cti == NULL) {
2505                                         Jim_SetResultString(goi->interp, "CTI not configured", -1);
2506                                         return JIM_ERR;
2507                                 }
2508                                 Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
2509                         }
2510                         break;
2511                 }
2512
2513                 default:
2514                         return JIM_CONTINUE;
2515                 }
2516         }
2517
2518         return JIM_OK;
2519 }
2520
2521 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2522 {
2523         struct target *target = get_current_target(CMD_CTX);
2524         struct armv8_common *armv8 = target_to_armv8(target);
2525
2526         return armv8_handle_cache_info_command(CMD_CTX,
2527                         &armv8->armv8_mmu.armv8_cache);
2528 }
2529
2530
2531 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2532 {
2533         struct target *target = get_current_target(CMD_CTX);
2534         if (!target_was_examined(target)) {
2535                 LOG_ERROR("target not examined yet");
2536                 return ERROR_FAIL;
2537         }
2538
2539         return aarch64_init_debug_access(target);
2540 }
2541 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2542 {
2543         struct target *target = get_current_target(CMD_CTX);
2544         /* check target is an smp target */
2545         struct target_list *head;
2546         struct target *curr;
2547         head = target->head;
2548         target->smp = 0;
2549         if (head != (struct target_list *)NULL) {
2550                 while (head != (struct target_list *)NULL) {
2551                         curr = head->target;
2552                         curr->smp = 0;
2553                         head = head->next;
2554                 }
2555                 /*  fixes the target display to the debugger */
2556                 target->gdb_service->target = target;
2557         }
2558         return ERROR_OK;
2559 }
2560
2561 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2562 {
2563         struct target *target = get_current_target(CMD_CTX);
2564         struct target_list *head;
2565         struct target *curr;
2566         head = target->head;
2567         if (head != (struct target_list *)NULL) {
2568                 target->smp = 1;
2569                 while (head != (struct target_list *)NULL) {
2570                         curr = head->target;
2571                         curr->smp = 1;
2572                         head = head->next;
2573                 }
2574         }
2575         return ERROR_OK;
2576 }
2577
2578 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2579 {
2580         struct target *target = get_current_target(CMD_CTX);
2581         struct aarch64_common *aarch64 = target_to_aarch64(target);
2582
2583         static const Jim_Nvp nvp_maskisr_modes[] = {
2584                 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2585                 { .name = "on", .value = AARCH64_ISRMASK_ON },
2586                 { .name = NULL, .value = -1 },
2587         };
2588         const Jim_Nvp *n;
2589
2590         if (CMD_ARGC > 0) {
2591                 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2592                 if (n->name == NULL) {
2593                         LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2594                         return ERROR_COMMAND_SYNTAX_ERROR;
2595                 }
2596
2597                 aarch64->isrmasking_mode = n->value;
2598         }
2599
2600         n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2601         command_print(CMD_CTX, "aarch64 interrupt mask %s", n->name);
2602
2603         return ERROR_OK;
2604 }
2605
2606 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2607 {
2608         struct command_context *context;
2609         struct target *target;
2610         struct arm *arm;
2611         int retval;
2612         bool is_mcr = false;
2613         int arg_cnt = 0;
2614
2615         if (Jim_CompareStringImmediate(interp, argv[0], "mcr")) {
2616                 is_mcr = true;
2617                 arg_cnt = 7;
2618         } else {
2619                 arg_cnt = 6;
2620         }
2621
2622         context = current_command_context(interp);
2623         assert(context != NULL);
2624
2625         target = get_current_target(context);
2626         if (target == NULL) {
2627                 LOG_ERROR("%s: no current target", __func__);
2628                 return JIM_ERR;
2629         }
2630         if (!target_was_examined(target)) {
2631                 LOG_ERROR("%s: not yet examined", target_name(target));
2632                 return JIM_ERR;
2633         }
2634
2635         arm = target_to_arm(target);
2636         if (!is_arm(arm)) {
2637                 LOG_ERROR("%s: not an ARM", target_name(target));
2638                 return JIM_ERR;
2639         }
2640
2641         if (target->state != TARGET_HALTED)
2642                 return ERROR_TARGET_NOT_HALTED;
2643
2644         if (arm->core_state == ARM_STATE_AARCH64) {
2645                 LOG_ERROR("%s: not 32-bit arm target", target_name(target));
2646                 return JIM_ERR;
2647         }
2648
2649         if (argc != arg_cnt) {
2650                 LOG_ERROR("%s: wrong number of arguments", __func__);
2651                 return JIM_ERR;
2652         }
2653
2654         int cpnum;
2655         uint32_t op1;
2656         uint32_t op2;
2657         uint32_t CRn;
2658         uint32_t CRm;
2659         uint32_t value;
2660         long l;
2661
2662         /* NOTE:  parameter sequence matches ARM instruction set usage:
2663          *      MCR     pNUM, op1, rX, CRn, CRm, op2    ; write CP from rX
2664          *      MRC     pNUM, op1, rX, CRn, CRm, op2    ; read CP into rX
2665          * The "rX" is necessarily omitted; it uses Tcl mechanisms.
2666          */
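             /* Example (writing SCTLR on an AArch32 core):
              *      aarch64 mcr 15 0 1 0 0 <value>
              * corresponds to MCR p15, 0, <Rt>, c1, c0, 0. */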
2667         retval = Jim_GetLong(interp, argv[1], &l);
2668         if (retval != JIM_OK)
2669                 return retval;
2670         if (l & ~0xf) {
2671                 LOG_ERROR("%s: %s %d out of range", __func__,
2672                         "coprocessor", (int) l);
2673                 return JIM_ERR;
2674         }
2675         cpnum = l;
2676
2677         retval = Jim_GetLong(interp, argv[2], &l);
2678         if (retval != JIM_OK)
2679                 return retval;
2680         if (l & ~0x7) {
2681                 LOG_ERROR("%s: %s %d out of range", __func__,
2682                         "op1", (int) l);
2683                 return JIM_ERR;
2684         }
2685         op1 = l;
2686
2687         retval = Jim_GetLong(interp, argv[3], &l);
2688         if (retval != JIM_OK)
2689                 return retval;
2690         if (l & ~0xf) {
2691                 LOG_ERROR("%s: %s %d out of range", __func__,
2692                         "CRn", (int) l);
2693                 return JIM_ERR;
2694         }
2695         CRn = l;
2696
2697         retval = Jim_GetLong(interp, argv[4], &l);
2698         if (retval != JIM_OK)
2699                 return retval;
2700         if (l & ~0xf) {
2701                 LOG_ERROR("%s: %s %d out of range", __func__,
2702                         "CRm", (int) l);
2703                 return JIM_ERR;
2704         }
2705         CRm = l;
2706
2707         retval = Jim_GetLong(interp, argv[5], &l);
2708         if (retval != JIM_OK)
2709                 return retval;
2710         if (l & ~0x7) {
2711                 LOG_ERROR("%s: %s %d out of range", __func__,
2712                         "op2", (int) l);
2713                 return JIM_ERR;
2714         }
2715         op2 = l;
2716
2717         value = 0;
2718
2719         if (is_mcr) {
2720                 retval = Jim_GetLong(interp, argv[6], &l);
2721                 if (retval != JIM_OK)
2722                         return retval;
2723                 value = l;
2724
2725                 /* NOTE: parameters reordered! */
2726                 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
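                     /* i.e. arm->mcr() takes the fields as (op1, op2, CRn, CRm):
                      * op2 is passed ahead of CRn/CRm relative to the mnemonic
                      * order parsed above. */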
2727                 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
2728                 if (retval != ERROR_OK)
2729                         return JIM_ERR;
2730         } else {
2731                 /* NOTE: parameters reordered! */
2732                 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
2733                 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
2734                 if (retval != ERROR_OK)
2735                         return JIM_ERR;
2736
2737                 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
2738         }
2739
2740         return JIM_OK;
2741 }
2742
2743 static const struct command_registration aarch64_exec_command_handlers[] = {
2744         {
2745                 .name = "cache_info",
2746                 .handler = aarch64_handle_cache_info_command,
2747                 .mode = COMMAND_EXEC,
2748                 .help = "display information about target caches",
2749                 .usage = "",
2750         },
2751         {
2752                 .name = "dbginit",
2753                 .handler = aarch64_handle_dbginit_command,
2754                 .mode = COMMAND_EXEC,
2755                 .help = "Initialize core debug",
2756                 .usage = "",
2757         },
2758         {
                     .name = "smp_off",
2759                 .handler = aarch64_handle_smp_off_command,
2760                 .mode = COMMAND_EXEC,
2761                 .help = "Stop smp handling",
2762                 .usage = "",
2763         },
2764         {
2765                 .name = "smp_on",
2766                 .handler = aarch64_handle_smp_on_command,
2767                 .mode = COMMAND_EXEC,
2768                 .help = "Restart smp handling",
2769                 .usage = "",
2770         },
2771         {
2772                 .name = "maskisr",
2773                 .handler = aarch64_mask_interrupts_command,
2774                 .mode = COMMAND_ANY,
2775                 .help = "mask aarch64 interrupts during single-step",
2776                 .usage = "['on'|'off']",
2777         },
2778         {
2779                 .name = "mcr",
2780                 .mode = COMMAND_EXEC,
2781                 .jim_handler = jim_mcrmrc,
2782                 .help = "write coprocessor register",
2783                 .usage = "cpnum op1 CRn CRm op2 value",
2784         },
2785         {
2786                 .name = "mrc",
2787                 .mode = COMMAND_EXEC,
2788                 .jim_handler = jim_mcrmrc,
2789                 .help = "read coprocessor register",
2790                 .usage = "cpnum op1 CRn CRm op2",
2791         },
2792
2794         COMMAND_REGISTRATION_DONE
2795 };
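
/* For illustration only (example invocations, not part of this file): once a
 * target of this type exists, the handlers above are reached through the
 * "aarch64" command group, e.g.
 *
 *      aarch64 cache_info
 *      aarch64 maskisr on
 *      aarch64 mrc 15 0 1 0 0
 */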
2796
2797 static const struct command_registration aarch64_command_handlers[] = {
2798         {
2799                 .chain = armv8_command_handlers,
2800         },
2801         {
2802                 .name = "aarch64",
2803                 .mode = COMMAND_ANY,
2804                 .help = "AArch64 command group",
2805                 .usage = "",
2806                 .chain = aarch64_exec_command_handlers,
2807         },
2808         COMMAND_REGISTRATION_DONE
2809 };
2810
2811 struct target_type aarch64_target = {
2812         .name = "aarch64",
2813
2814         .poll = aarch64_poll,
2815         .arch_state = armv8_arch_state,
2816
2817         .halt = aarch64_halt,
2818         .resume = aarch64_resume,
2819         .step = aarch64_step,
2820
2821         .assert_reset = aarch64_assert_reset,
2822         .deassert_reset = aarch64_deassert_reset,
2823
2824         /* REVISIT allow exporting VFP3 registers ... */
2825         .get_gdb_reg_list = armv8_get_gdb_reg_list,
2826
2827         .read_memory = aarch64_read_memory,
2828         .write_memory = aarch64_write_memory,
2829
2830         .add_breakpoint = aarch64_add_breakpoint,
2831         .add_context_breakpoint = aarch64_add_context_breakpoint,
2832         .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2833         .remove_breakpoint = aarch64_remove_breakpoint,
2834         .add_watchpoint = NULL,
2835         .remove_watchpoint = NULL,
2836
2837         .commands = aarch64_command_handlers,
2838         .target_create = aarch64_target_create,
2839         .target_jim_configure = aarch64_jim_configure,
2840         .init_target = aarch64_init_target,
2841         .deinit_target = aarch64_deinit_target,
2842         .examine = aarch64_examine,
2843
2844         .read_phys_memory = aarch64_read_phys_memory,
2845         .write_phys_memory = aarch64_write_phys_memory,
2846         .mmu = aarch64_mmu,
2847         .virt2phys = aarch64_virt2phys,
2848 };
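
/* For illustration only: a board configuration that instantiates this target
 * type might look roughly like the snippet below.  Every name, base address
 * and AP number is a made-up placeholder (real values are SoC-specific), and
 * option spellings may differ between OpenOCD releases.
 *
 *      cti create $_CHIPNAME.cti0 -dap $_CHIPNAME.dap -ap-num 0 \
 *              -ctibase 0x80018000
 *      target create $_CHIPNAME.cpu0 aarch64 -dap $_CHIPNAME.dap \
 *              -dbgbase 0x80010000 -cti $_CHIPNAME.cti0
 */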