1 /***************************************************************************
2 * Copyright (C) 2006, 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
5 * Copyright (C) 2007,2008 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
8 * Copyright (C) 2009 Michael Schwingen *
9 * michael@schwingen.org *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
25 ***************************************************************************/
31 #include "breakpoints.h"
33 #include "target_type.h"
35 #include "arm_simulator.h"
36 #include "arm_disassembler.h"
37 #include <helper/time_support.h>
40 #include "arm_opcodes.h"
44 * Important XScale documents available as of October 2009 include:
46 * Intel XScale® Core Developer’s Manual, January 2004
47 * Order Number: 273473-002
48 * This has a chapter detailing debug facilities, and punts some
49 * details to chip-specific microarchitecture documents.
51 * Hot-Debug for Intel XScale® Core Debug White Paper, May 2005
52 * Document Number: 273539-005
53 * Less detailed than the developer's manual, but summarizes those
54 * missing details (for most XScales) and gives LOTS of notes about
55 * debugger/handler interaction issues. Presents a simpler reset
56 * and load-handler sequence than the arch doc. (Note, OpenOCD
57 * doesn't currently support "Hot-Debug" as defined there.)
59 * Chip-specific microarchitecture documents may also be useful.
62 /* forward declarations */
63 static int xscale_resume(struct target *, int current,
64 uint32_t address, int handle_breakpoints, int debug_execution);
65 static int xscale_debug_entry(struct target *);
66 static int xscale_restore_banked(struct target *);
67 static int xscale_get_reg(struct reg *reg);
68 static int xscale_set_reg(struct reg *reg, uint8_t *buf);
69 static int xscale_set_breakpoint(struct target *, struct breakpoint *);
70 static int xscale_set_watchpoint(struct target *, struct watchpoint *);
71 static int xscale_unset_breakpoint(struct target *, struct breakpoint *);
72 static int xscale_read_trace(struct target *);
74 /* This XScale "debug handler" is loaded into the processor's
75 * mini-ICache, which is 2K of code writable only via JTAG.
77 * FIXME the OpenOCD "bin2char" utility currently doesn't handle
78 * binary files cleanly. It's string oriented, and terminates them
79 * with a NUL character. Better would be to generate the constants
80 * and let other code decide names, scoping, and other housekeeping.
82 static /* unsigned const char xscale_debug_handler[] = ... */
83 #include "xscale_debug.h"
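/* Quick reference for the commands this file sends to the debug handler
 * (summarized from the call sites below):
 *   0x00 / 0x01       read / write banked registers for a given mode
 *   0x1n / 0x2n       read / write memory, n = access size in bytes
 *   0x30 / 0x31       resume (0x31 when the trace buffer is enabled)
 *   0x50 ... 0x53     clean dcache / invalidate dcache / invalidate icache / cpwait
 *   0x60              clear the sticky abort (SA) bit in the DCSR
 *   0x62              clean the trace buffer before resuming
 */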
85 static char *const xscale_reg_list[] = {
86 "XSCALE_MAINID", /* 0 */
96 "XSCALE_IBCR0", /* 10 */
106 "XSCALE_RX", /* 20 */
110 static const struct xscale_reg xscale_reg_arch_info[] = {
111 {XSCALE_MAINID, NULL},
112 {XSCALE_CACHETYPE, NULL},
114 {XSCALE_AUXCTRL, NULL},
120 {XSCALE_CPACCESS, NULL},
121 {XSCALE_IBCR0, NULL},
122 {XSCALE_IBCR1, NULL},
125 {XSCALE_DBCON, NULL},
126 {XSCALE_TBREG, NULL},
127 {XSCALE_CHKPT0, NULL},
128 {XSCALE_CHKPT1, NULL},
129 {XSCALE_DCSR, NULL}, /* DCSR accessed via JTAG or SW */
130 {-1, NULL}, /* TX accessed via JTAG */
131 {-1, NULL}, /* RX accessed via JTAG */
132 {-1, NULL}, /* TXRXCTRL implicit access via JTAG */
135 /* convenience wrapper to access XScale specific registers */
136 static int xscale_set_reg_u32(struct reg *reg, uint32_t value)
140 buf_set_u32(buf, 0, 32, value);
142 return xscale_set_reg(reg, buf);
145 static const char xscale_not[] = "target is not an XScale";
147 static int xscale_verify_pointer(struct command_context *cmd_ctx,
148 struct xscale_common *xscale)
150 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
151 command_print(cmd_ctx, xscale_not);
152 return ERROR_TARGET_INVALID;
157 static int xscale_jtag_set_instr(struct jtag_tap *tap, uint32_t new_instr, tap_state_t end_state)
161 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
162 struct scan_field field;
165 memset(&field, 0, sizeof field);
166 field.num_bits = tap->ir_length;
167 field.out_value = scratch;
168 buf_set_u32(scratch, 0, field.num_bits, new_instr);
170 jtag_add_ir_scan(tap, &field, end_state);
176 static int xscale_read_dcsr(struct target *target)
178 struct xscale_common *xscale = target_to_xscale(target);
180 struct scan_field fields[3];
181 uint8_t field0 = 0x0;
182 uint8_t field0_check_value = 0x2;
183 uint8_t field0_check_mask = 0x7;
184 uint8_t field2 = 0x0;
185 uint8_t field2_check_value = 0x0;
186 uint8_t field2_check_mask = 0x1;
188 xscale_jtag_set_instr(target->tap,
189 XSCALE_SELDCSR << xscale->xscale_variant,
192 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
193 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
195 memset(&fields, 0, sizeof fields);
197 fields[0].num_bits = 3;
198 fields[0].out_value = &field0;
200 fields[0].in_value = &tmp;
202 fields[1].num_bits = 32;
203 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
205 fields[2].num_bits = 1;
206 fields[2].out_value = &field2;
208 fields[2].in_value = &tmp2;
210 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
212 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
213 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
215 retval = jtag_execute_queue();
216 if (retval != ERROR_OK) {
217 LOG_ERROR("JTAG error while reading DCSR");
221 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
222 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
224 /* write the register with the value we just read
225 * on this second pass, only the first bit of field0 is guaranteed to be 0 */
227 field0_check_mask = 0x1;
228 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
229 fields[1].in_value = NULL;
231 jtag_add_dr_scan(target->tap, 3, fields, TAP_DRPAUSE);
233 /* DANGER!!! this must be here. It will make sure that the arguments
234 * to jtag_check_value_mask() do not go out of scope! */
235 return jtag_execute_queue();
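/* JTAG callback used by xscale_receive(): converts the raw 32-bit scan capture
 * stored at arg (a slot in the field1 array) into a host-endian uint32_t, in place. */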
239 static void xscale_getbuf(jtag_callback_data_t arg)
241 uint8_t *in = (uint8_t *)arg;
242 *((uint32_t *)arg) = buf_get_u32(in, 0, 32);
245 static int xscale_receive(struct target *target, uint32_t *buffer, int num_words)
248 return ERROR_COMMAND_SYNTAX_ERROR;
250 struct xscale_common *xscale = target_to_xscale(target);
251 int retval = ERROR_OK;
253 struct scan_field fields[3];
254 uint8_t *field0 = malloc(num_words * 1);
255 uint8_t field0_check_value = 0x2;
256 uint8_t field0_check_mask = 0x6;
257 uint32_t *field1 = malloc(num_words * 4);
258 uint8_t field2_check_value = 0x0;
259 uint8_t field2_check_mask = 0x1;
261 int words_scheduled = 0;
264 path[0] = TAP_DRSELECT;
265 path[1] = TAP_DRCAPTURE;
266 path[2] = TAP_DRSHIFT;
268 memset(&fields, 0, sizeof fields);
270 fields[0].num_bits = 3;
272 fields[0].in_value = &tmp;
273 fields[0].check_value = &field0_check_value;
274 fields[0].check_mask = &field0_check_mask;
276 fields[1].num_bits = 32;
278 fields[2].num_bits = 1;
280 fields[2].in_value = &tmp2;
281 fields[2].check_value = &field2_check_value;
282 fields[2].check_mask = &field2_check_mask;
284 xscale_jtag_set_instr(target->tap,
285 XSCALE_DBGTX << xscale->xscale_variant,
287 jtag_add_runtest(1, TAP_IDLE); /* ensures that we're in the TAP_IDLE state, as the above could be a no-op */
290 /* repeat until all words have been collected */
292 while (words_done < num_words) {
295 for (i = words_done; i < num_words; i++) {
296 fields[0].in_value = &field0[i];
298 jtag_add_pathmove(3, path);
300 fields[1].in_value = (uint8_t *)(field1 + i);
302 jtag_add_dr_scan_check(target->tap, 3, fields, TAP_IDLE);
304 jtag_add_callback(xscale_getbuf, (jtag_callback_data_t)(field1 + i));
309 retval = jtag_execute_queue();
310 if (retval != ERROR_OK) {
311 LOG_ERROR("JTAG error while receiving data from debug handler");
315 /* examine results */
316 for (i = words_done; i < num_words; i++) {
317 if (!(field0[i] & 1)) {
318 /* move backwards if necessary */
320 for (j = i; j < num_words - 1; j++) {
321 field0[j] = field0[j + 1];
322 field1[j] = field1[j + 1];
327 if (words_scheduled == 0) {
328 if (attempts++ == 1000) {
330 "Failed to receiving data from debug handler after 1000 attempts");
331 retval = ERROR_TARGET_TIMEOUT;
336 words_done += words_scheduled;
339 for (i = 0; i < num_words; i++)
340 *(buffer++) = buf_get_u32((uint8_t *)&field1[i], 0, 32);
347 static int xscale_read_tx(struct target *target, int consume)
349 struct xscale_common *xscale = target_to_xscale(target);
351 tap_state_t noconsume_path[6];
353 struct timeval timeout, now;
354 struct scan_field fields[3];
355 uint8_t field0_in = 0x0;
356 uint8_t field0_check_value = 0x2;
357 uint8_t field0_check_mask = 0x6;
358 uint8_t field2_check_value = 0x0;
359 uint8_t field2_check_mask = 0x1;
361 xscale_jtag_set_instr(target->tap,
362 XSCALE_DBGTX << xscale->xscale_variant,
365 path[0] = TAP_DRSELECT;
366 path[1] = TAP_DRCAPTURE;
367 path[2] = TAP_DRSHIFT;
369 noconsume_path[0] = TAP_DRSELECT;
370 noconsume_path[1] = TAP_DRCAPTURE;
371 noconsume_path[2] = TAP_DREXIT1;
372 noconsume_path[3] = TAP_DRPAUSE;
373 noconsume_path[4] = TAP_DREXIT2;
374 noconsume_path[5] = TAP_DRSHIFT;
376 memset(&fields, 0, sizeof fields);
378 fields[0].num_bits = 3;
379 fields[0].in_value = &field0_in;
381 fields[1].num_bits = 32;
382 fields[1].in_value = xscale->reg_cache->reg_list[XSCALE_TX].value;
384 fields[2].num_bits = 1;
386 fields[2].in_value = &tmp;
388 gettimeofday(&timeout, NULL);
389 timeval_add_time(&timeout, 1, 0);
392 /* if we want to consume the register content (i.e. clear TX_READY),
393 * we have to go straight from Capture-DR to Shift-DR
394 * otherwise, we go from Capture-DR to Exit1-DR to Pause-DR
397 jtag_add_pathmove(3, path);
399 jtag_add_pathmove(ARRAY_SIZE(noconsume_path), noconsume_path);
401 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
403 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
404 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
406 retval = jtag_execute_queue();
407 if (retval != ERROR_OK) {
408 LOG_ERROR("JTAG error while reading TX");
409 return ERROR_TARGET_TIMEOUT;
412 gettimeofday(&now, NULL);
413 if ((now.tv_sec > timeout.tv_sec) ||
414 ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
415 LOG_ERROR("time out reading TX register");
416 return ERROR_TARGET_TIMEOUT;
418 if (!((!(field0_in & 1)) && consume))
420 if (debug_level >= 3) {
421 LOG_DEBUG("waiting 100ms");
422 alive_sleep(100); /* avoid flooding the logs */
428 if (!(field0_in & 1))
429 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
434 static int xscale_write_rx(struct target *target)
436 struct xscale_common *xscale = target_to_xscale(target);
438 struct timeval timeout, now;
439 struct scan_field fields[3];
440 uint8_t field0_out = 0x0;
441 uint8_t field0_in = 0x0;
442 uint8_t field0_check_value = 0x2;
443 uint8_t field0_check_mask = 0x6;
444 uint8_t field2 = 0x0;
445 uint8_t field2_check_value = 0x0;
446 uint8_t field2_check_mask = 0x1;
448 xscale_jtag_set_instr(target->tap,
449 XSCALE_DBGRX << xscale->xscale_variant,
452 memset(&fields, 0, sizeof fields);
454 fields[0].num_bits = 3;
455 fields[0].out_value = &field0_out;
456 fields[0].in_value = &field0_in;
458 fields[1].num_bits = 32;
459 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_RX].value;
461 fields[2].num_bits = 1;
462 fields[2].out_value = &field2;
464 fields[2].in_value = &tmp;
466 gettimeofday(&timeout, NULL);
467 timeval_add_time(&timeout, 1, 0);
469 /* poll until rx_read is low */
470 LOG_DEBUG("polling RX");
472 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
474 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
475 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
477 retval = jtag_execute_queue();
478 if (retval != ERROR_OK) {
479 LOG_ERROR("JTAG error while writing RX");
483 gettimeofday(&now, NULL);
484 if ((now.tv_sec > timeout.tv_sec) ||
485 ((now.tv_sec == timeout.tv_sec) && (now.tv_usec > timeout.tv_usec))) {
486 LOG_ERROR("time out writing RX register");
487 return ERROR_TARGET_TIMEOUT;
489 if (!(field0_in & 1))
491 if (debug_level >= 3) {
492 LOG_DEBUG("waiting 100ms");
493 alive_sleep(100); /* avoid flooding the logs */
501 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
503 retval = jtag_execute_queue();
504 if (retval != ERROR_OK) {
505 LOG_ERROR("JTAG error while writing RX");
512 /* send count elements, each of size bytes, to the debug handler */
513 static int xscale_send(struct target *target, const uint8_t *buffer, int count, int size)
515 struct xscale_common *xscale = target_to_xscale(target);
521 xscale_jtag_set_instr(target->tap,
522 XSCALE_DBGRX << xscale->xscale_variant,
530 int endianness = target->endianness;
531 while (done_count++ < count) {
534 if (endianness == TARGET_LITTLE_ENDIAN)
535 t[1] = le_to_h_u32(buffer);
537 t[1] = be_to_h_u32(buffer);
540 if (endianness == TARGET_LITTLE_ENDIAN)
541 t[1] = le_to_h_u16(buffer);
543 t[1] = be_to_h_u16(buffer);
549 LOG_ERROR("BUG: size neither 4, 2 nor 1");
550 return ERROR_COMMAND_SYNTAX_ERROR;
552 jtag_add_dr_out(target->tap,
560 retval = jtag_execute_queue();
561 if (retval != ERROR_OK) {
562 LOG_ERROR("JTAG error while sending data to debug handler");
569 static int xscale_send_u32(struct target *target, uint32_t value)
571 struct xscale_common *xscale = target_to_xscale(target);
573 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
574 return xscale_write_rx(target);
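/* Update the cached hold_rst / external debug break flags and write the DCSR
 * to the target; passing -1 for an argument keeps the currently cached flag
 * (see the ext_dbg_brk check below). */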
577 static int xscale_write_dcsr(struct target *target, int hold_rst, int ext_dbg_brk)
579 struct xscale_common *xscale = target_to_xscale(target);
581 struct scan_field fields[3];
582 uint8_t field0 = 0x0;
583 uint8_t field0_check_value = 0x2;
584 uint8_t field0_check_mask = 0x7;
585 uint8_t field2 = 0x0;
586 uint8_t field2_check_value = 0x0;
587 uint8_t field2_check_mask = 0x1;
590 xscale->hold_rst = hold_rst;
592 if (ext_dbg_brk != -1)
593 xscale->external_debug_break = ext_dbg_brk;
595 xscale_jtag_set_instr(target->tap,
596 XSCALE_SELDCSR << xscale->xscale_variant,
599 buf_set_u32(&field0, 1, 1, xscale->hold_rst);
600 buf_set_u32(&field0, 2, 1, xscale->external_debug_break);
602 memset(&fields, 0, sizeof fields);
604 fields[0].num_bits = 3;
605 fields[0].out_value = &field0;
607 fields[0].in_value = &tmp;
609 fields[1].num_bits = 32;
610 fields[1].out_value = xscale->reg_cache->reg_list[XSCALE_DCSR].value;
612 fields[2].num_bits = 1;
613 fields[2].out_value = &field2;
615 fields[2].in_value = &tmp2;
617 jtag_add_dr_scan(target->tap, 3, fields, TAP_IDLE);
619 jtag_check_value_mask(fields + 0, &field0_check_value, &field0_check_mask);
620 jtag_check_value_mask(fields + 2, &field2_check_value, &field2_check_mask);
622 retval = jtag_execute_queue();
623 if (retval != ERROR_OK) {
624 LOG_ERROR("JTAG error while writing DCSR");
628 xscale->reg_cache->reg_list[XSCALE_DCSR].dirty = 0;
629 xscale->reg_cache->reg_list[XSCALE_DCSR].valid = 1;
634 /* parity of the number of set bits: 0 if even, 1 if odd; for 32-bit words */
635 static unsigned int parity(unsigned int v)
636 {
637 /* unsigned int ov = v; */
638 v ^= v >> 16;
639 v ^= v >> 8;
640 v ^= v >> 4;
641 v &= 0xf;
642 /* LOG_DEBUG("parity of 0x%x is %i", ov, (0x6996 >> v) & 1); */
643 return (0x6996 >> v) & 1;
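/* Example: parity(0x7): 0x7 has three set bits; it folds to 0x7, and bit 7 of
 * 0x6996 (0110 1001 1001 0110b) is 1, so the result is 1 (odd). */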
646 static int xscale_load_ic(struct target *target, uint32_t va, uint32_t buffer[8])
648 struct xscale_common *xscale = target_to_xscale(target);
652 struct scan_field fields[2];
654 LOG_DEBUG("loading miniIC at 0x%8.8" PRIx32 "", va);
657 xscale_jtag_set_instr(target->tap,
658 XSCALE_LDIC << xscale->xscale_variant,
661 /* CMD is b011 to load a cacheline into the Mini ICache.
662 * Loading into the main ICache is deprecated, and unused.
663 * It's followed by three zero bits, and 27 address bits.
665 buf_set_u32(&cmd, 0, 6, 0x3);
667 /* virtual address of desired cache line */
668 buf_set_u32(packet, 0, 27, va >> 5);
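/* e.g. va 0x00000020 yields line index 1, since each cache line covers 32 bytes */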
670 memset(&fields, 0, sizeof fields);
672 fields[0].num_bits = 6;
673 fields[0].out_value = &cmd;
675 fields[1].num_bits = 27;
676 fields[1].out_value = packet;
678 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
680 /* rest of packet is a cacheline: 8 instructions, with parity */
681 fields[0].num_bits = 32;
682 fields[0].out_value = packet;
684 fields[1].num_bits = 1;
685 fields[1].out_value = &cmd;
687 for (word = 0; word < 8; word++) {
688 buf_set_u32(packet, 0, 32, buffer[word]);
691 memcpy(&value, packet, sizeof(uint32_t));
692 cmd = parity(value);
694 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
697 return jtag_execute_queue();
700 static int xscale_invalidate_ic_line(struct target *target, uint32_t va)
702 struct xscale_common *xscale = target_to_xscale(target);
705 struct scan_field fields[2];
707 xscale_jtag_set_instr(target->tap,
708 XSCALE_LDIC << xscale->xscale_variant,
711 /* CMD for invalidate IC line b000, bits [6:4] b000 */
712 buf_set_u32(&cmd, 0, 6, 0x0);
714 /* virtual address of desired cache line */
715 buf_set_u32(packet, 0, 27, va >> 5);
717 memset(&fields, 0, sizeof fields);
719 fields[0].num_bits = 6;
720 fields[0].out_value = &cmd;
722 fields[1].num_bits = 27;
723 fields[1].out_value = packet;
725 jtag_add_dr_scan(target->tap, 2, fields, TAP_IDLE);
730 static int xscale_update_vectors(struct target *target)
732 struct xscale_common *xscale = target_to_xscale(target);
736 uint32_t low_reset_branch, high_reset_branch;
738 for (i = 1; i < 8; i++) {
739 /* if there's a static vector specified for this exception, override */
740 if (xscale->static_high_vectors_set & (1 << i))
741 xscale->high_vectors[i] = xscale->static_high_vectors[i];
743 retval = target_read_u32(target, 0xffff0000 + 4*i, &xscale->high_vectors[i]);
744 if (retval == ERROR_TARGET_TIMEOUT)
746 if (retval != ERROR_OK) {
747 /* Some of these reads will fail as part of normal execution */
748 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
753 for (i = 1; i < 8; i++) {
754 if (xscale->static_low_vectors_set & (1 << i))
755 xscale->low_vectors[i] = xscale->static_low_vectors[i];
757 retval = target_read_u32(target, 0x0 + 4*i, &xscale->low_vectors[i]);
758 if (retval == ERROR_TARGET_TIMEOUT)
760 if (retval != ERROR_OK) {
761 /* Some of these reads will fail as part of normal execution */
762 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
767 /* calculate branches to debug handler */
768 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
769 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
771 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
772 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
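/* ARM B encoding: offset field = (destination - vector_address - 8) >> 2, so
 * both reset vectors branch to the handler's reset entry at handler_address + 0x20 */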
774 /* invalidate and load exception vectors in mini i-cache */
775 xscale_invalidate_ic_line(target, 0x0);
776 xscale_invalidate_ic_line(target, 0xffff0000);
778 xscale_load_ic(target, 0x0, xscale->low_vectors);
779 xscale_load_ic(target, 0xffff0000, xscale->high_vectors);
784 static int xscale_arch_state(struct target *target)
786 struct xscale_common *xscale = target_to_xscale(target);
787 struct arm *arm = &xscale->arm;
789 static const char *state[] = {
790 "disabled", "enabled"
793 static const char *arch_dbg_reason[] = {
794 "", "\n(processor reset)", "\n(trace buffer full)"
797 if (arm->common_magic != ARM_COMMON_MAGIC) {
798 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
799 return ERROR_COMMAND_SYNTAX_ERROR;
802 arm_arch_state(target);
803 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s%s",
804 state[xscale->armv4_5_mmu.mmu_enabled],
805 state[xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled],
806 state[xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled],
807 arch_dbg_reason[xscale->arch_debug_reason]);
812 static int xscale_poll(struct target *target)
814 int retval = ERROR_OK;
816 if ((target->state == TARGET_RUNNING) || (target->state == TARGET_DEBUG_RUNNING)) {
817 enum target_state previous_state = target->state;
818 retval = xscale_read_tx(target, 0);
819 if (retval == ERROR_OK) {
821 /* there's data to read from the tx register, we entered debug state */
822 target->state = TARGET_HALTED;
824 /* process debug entry, fetching current mode regs */
825 retval = xscale_debug_entry(target);
826 } else if (retval != ERROR_TARGET_RESOURCE_NOT_AVAILABLE) {
827 LOG_USER("error while polling TX register, reset CPU");
828 /* here we "lie" so GDB won't get stuck and a reset can be performed */
829 target->state = TARGET_HALTED;
832 /* debug_entry could have overwritten target state (i.e. immediate resume)
833 * don't signal event handlers in that case
835 if (target->state != TARGET_HALTED)
838 /* if target was running, signal that we halted
839 * otherwise we reentered from debug execution */
840 if (previous_state == TARGET_RUNNING)
841 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
843 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
849 static int xscale_debug_entry(struct target *target)
851 struct xscale_common *xscale = target_to_xscale(target);
852 struct arm *arm = &xscale->arm;
859 /* clear external dbg break (will be written on next DCSR read) */
860 xscale->external_debug_break = 0;
861 retval = xscale_read_dcsr(target);
862 if (retval != ERROR_OK)
865 /* get r0, pc, r1 to r7 and cpsr */
866 retval = xscale_receive(target, buffer, 10);
867 if (retval != ERROR_OK)
870 /* move r0 from buffer to register cache */
871 buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32, buffer[0]);
872 arm->core_cache->reg_list[0].dirty = 1;
873 arm->core_cache->reg_list[0].valid = 1;
874 LOG_DEBUG("r0: 0x%8.8" PRIx32 "", buffer[0]);
876 /* move pc from buffer to register cache */
877 buf_set_u32(arm->pc->value, 0, 32, buffer[1]);
880 LOG_DEBUG("pc: 0x%8.8" PRIx32 "", buffer[1]);
882 /* move data from buffer to register cache */
883 for (i = 1; i <= 7; i++) {
884 buf_set_u32(arm->core_cache->reg_list[i].value, 0, 32, buffer[1 + i]);
885 arm->core_cache->reg_list[i].dirty = 1;
886 arm->core_cache->reg_list[i].valid = 1;
887 LOG_DEBUG("r%i: 0x%8.8" PRIx32 "", i, buffer[i + 1]);
890 arm_set_cpsr(arm, buffer[9]);
891 LOG_DEBUG("cpsr: 0x%8.8" PRIx32 "", buffer[9]);
893 if (!is_arm_mode(arm->core_mode)) {
894 target->state = TARGET_UNKNOWN;
895 LOG_ERROR("cpsr contains invalid mode value - communication failure");
896 return ERROR_TARGET_FAILURE;
898 LOG_DEBUG("target entered debug state in %s mode",
899 arm_mode_name(arm->core_mode));
901 /* get banked registers, r8 to r14, and spsr if not in USR/SYS mode */
903 xscale_receive(target, buffer, 8);
904 buf_set_u32(arm->spsr->value, 0, 32, buffer[7]);
905 arm->spsr->dirty = false;
906 arm->spsr->valid = true;
908 /* r8 to r14, but no spsr */
909 xscale_receive(target, buffer, 7);
912 /* move data from buffer to right banked register in cache */
913 for (i = 8; i <= 14; i++) {
914 struct reg *r = arm_reg_current(arm, i);
916 buf_set_u32(r->value, 0, 32, buffer[i - 8]);
921 /* mark xscale regs invalid to ensure they are retrieved from the
922 * debug handler if requested */
923 for (i = 0; i < xscale->reg_cache->num_regs; i++)
924 xscale->reg_cache->reg_list[i].valid = 0;
926 /* examine debug reason */
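/* DCSR bits [4:2] hold the method-of-entry (MOE) code for this debug exception */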
927 xscale_read_dcsr(target);
928 moe = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 2, 3);
930 /* stored PC (for calculating fixup) */
931 pc = buf_get_u32(arm->pc->value, 0, 32);
934 case 0x0: /* Processor reset */
935 target->debug_reason = DBG_REASON_DBGRQ;
936 xscale->arch_debug_reason = XSCALE_DBG_REASON_RESET;
939 case 0x1: /* Instruction breakpoint hit */
940 target->debug_reason = DBG_REASON_BREAKPOINT;
941 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
944 case 0x2: /* Data breakpoint hit */
945 target->debug_reason = DBG_REASON_WATCHPOINT;
946 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
949 case 0x3: /* BKPT instruction executed */
950 target->debug_reason = DBG_REASON_BREAKPOINT;
951 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
954 case 0x4: /* Ext. debug event */
955 target->debug_reason = DBG_REASON_DBGRQ;
956 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
959 case 0x5: /* Vector trap occurred */
960 target->debug_reason = DBG_REASON_BREAKPOINT;
961 xscale->arch_debug_reason = XSCALE_DBG_REASON_GENERIC;
964 case 0x6: /* Trace buffer full break */
965 target->debug_reason = DBG_REASON_DBGRQ;
966 xscale->arch_debug_reason = XSCALE_DBG_REASON_TB_FULL;
969 case 0x7: /* Reserved (may flag Hot-Debug support) */
971 LOG_ERROR("Method of Entry is 'Reserved'");
977 buf_set_u32(arm->pc->value, 0, 32, pc);
979 /* on the first debug entry, identify cache type */
980 if (xscale->armv4_5_mmu.armv4_5_cache.ctype == -1) {
981 uint32_t cache_type_reg;
983 /* read cp15 cache type register */
984 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CACHETYPE]);
985 cache_type_reg = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CACHETYPE].value,
989 armv4_5_identify_cache(cache_type_reg, &xscale->armv4_5_mmu.armv4_5_cache);
992 /* examine MMU and Cache settings
993 * read cp15 control register */
994 xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
995 xscale->cp15_control_reg =
996 buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
997 xscale->armv4_5_mmu.mmu_enabled = (xscale->cp15_control_reg & 0x1U) ? 1 : 0;
998 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
999 (xscale->cp15_control_reg & 0x4U) ? 1 : 0;
1000 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1001 (xscale->cp15_control_reg & 0x1000U) ? 1 : 0;
1003 /* tracing enabled, read collected trace data */
1004 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
1005 xscale_read_trace(target);
1007 /* Resume if entered debug due to buffer fill and we're still collecting
1008 * trace data. Note that a debug exception due to trace buffer full
1009 * can only happen in fill mode. */
1010 if (xscale->arch_debug_reason == XSCALE_DBG_REASON_TB_FULL) {
1011 if (--xscale->trace.fill_counter > 0)
1012 xscale_resume(target, 1, 0x0, 1, 0);
1013 } else /* entered debug for other reason; reset counter */
1014 xscale->trace.fill_counter = 0;
1020 static int xscale_halt(struct target *target)
1022 struct xscale_common *xscale = target_to_xscale(target);
1024 LOG_DEBUG("target->state: %s",
1025 target_state_name(target));
1027 if (target->state == TARGET_HALTED) {
1028 LOG_DEBUG("target was already halted");
1030 } else if (target->state == TARGET_UNKNOWN) {
1031 /* this must not happen for an XScale target */
1032 LOG_ERROR("target was in unknown state when halt was requested");
1033 return ERROR_TARGET_INVALID;
1034 } else if (target->state == TARGET_RESET)
1035 LOG_DEBUG("target->state == TARGET_RESET");
1037 /* assert external dbg break */
1038 xscale->external_debug_break = 1;
1039 xscale_read_dcsr(target);
1041 target->debug_reason = DBG_REASON_DBGRQ;
1047 static int xscale_enable_single_step(struct target *target, uint32_t next_pc)
1049 struct xscale_common *xscale = target_to_xscale(target);
1050 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1053 if (xscale->ibcr0_used) {
1054 struct breakpoint *ibcr0_bp =
1055 breakpoint_find(target, buf_get_u32(ibcr0->value, 0, 32) & 0xfffffffe);
1058 xscale_unset_breakpoint(target, ibcr0_bp);
1061 "BUG: xscale->ibcr0_used is set, but no breakpoint with that address found");
1066 retval = xscale_set_reg_u32(ibcr0, next_pc | 0x1);
1067 if (retval != ERROR_OK)
1073 static int xscale_disable_single_step(struct target *target)
1075 struct xscale_common *xscale = target_to_xscale(target);
1076 struct reg *ibcr0 = &xscale->reg_cache->reg_list[XSCALE_IBCR0];
1079 retval = xscale_set_reg_u32(ibcr0, 0x0);
1080 if (retval != ERROR_OK)
1086 static void xscale_enable_watchpoints(struct target *target)
1088 struct watchpoint *watchpoint = target->watchpoints;
1090 while (watchpoint) {
1091 if (watchpoint->set == 0)
1092 xscale_set_watchpoint(target, watchpoint);
1093 watchpoint = watchpoint->next;
1097 static void xscale_enable_breakpoints(struct target *target)
1099 struct breakpoint *breakpoint = target->breakpoints;
1101 /* set any pending breakpoints */
1102 while (breakpoint) {
1103 if (breakpoint->set == 0)
1104 xscale_set_breakpoint(target, breakpoint);
1105 breakpoint = breakpoint->next;
1109 static void xscale_free_trace_data(struct xscale_common *xscale)
1111 struct xscale_trace_data *td = xscale->trace.data;
1113 struct xscale_trace_data *next_td = td->next;
1119 xscale->trace.data = NULL;
1122 static int xscale_resume(struct target *target, int current,
1123 uint32_t address, int handle_breakpoints, int debug_execution)
1125 struct xscale_common *xscale = target_to_xscale(target);
1126 struct arm *arm = &xscale->arm;
1127 uint32_t current_pc;
1133 if (target->state != TARGET_HALTED) {
1134 LOG_WARNING("target not halted");
1135 return ERROR_TARGET_NOT_HALTED;
1138 if (!debug_execution)
1139 target_free_all_working_areas(target);
1141 /* update vector tables */
1142 retval = xscale_update_vectors(target);
1143 if (retval != ERROR_OK)
1146 /* current = 1: continue on current pc, otherwise continue at <address> */
1148 buf_set_u32(arm->pc->value, 0, 32, address);
1150 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1152 /* if we're at the reset vector, we have to simulate the branch */
1153 if (current_pc == 0x0) {
1154 arm_simulate_step(target, NULL);
1155 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1158 /* the front-end may request us not to handle breakpoints */
1159 if (handle_breakpoints) {
1160 struct breakpoint *breakpoint;
1161 breakpoint = breakpoint_find(target,
1162 buf_get_u32(arm->pc->value, 0, 32));
1163 if (breakpoint != NULL) {
1165 enum trace_mode saved_trace_mode;
1167 /* there's a breakpoint at the current PC, we have to step over it */
1168 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1169 xscale_unset_breakpoint(target, breakpoint);
1171 /* calculate PC of next instruction */
1172 retval = arm_simulate_step(target, &next_pc);
1173 if (retval != ERROR_OK) {
1174 uint32_t current_opcode;
1175 target_read_u32(target, current_pc, ¤t_opcode);
1177 "BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
1181 LOG_DEBUG("enable single-step");
1182 xscale_enable_single_step(target, next_pc);
1184 /* restore banked registers */
1185 retval = xscale_restore_banked(target);
1186 if (retval != ERROR_OK)
1189 /* send resume request */
1190 xscale_send_u32(target, 0x30);
1193 xscale_send_u32(target,
1194 buf_get_u32(arm->cpsr->value, 0, 32));
1195 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1196 buf_get_u32(arm->cpsr->value, 0, 32));
1198 for (i = 7; i >= 0; i--) {
1200 xscale_send_u32(target,
1201 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1202 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1203 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1207 xscale_send_u32(target,
1208 buf_get_u32(arm->pc->value, 0, 32));
1209 LOG_DEBUG("writing PC with value 0x%8.8" PRIx32,
1210 buf_get_u32(arm->pc->value, 0, 32));
1212 /* disable trace data collection in xscale_debug_entry() */
1213 saved_trace_mode = xscale->trace.mode;
1214 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1216 /* wait for and process debug entry */
1217 xscale_debug_entry(target);
1219 /* re-enable trace buffer, if enabled previously */
1220 xscale->trace.mode = saved_trace_mode;
1222 LOG_DEBUG("disable single-step");
1223 xscale_disable_single_step(target);
1225 LOG_DEBUG("set breakpoint at 0x%8.8" PRIx32 "", breakpoint->address);
1226 xscale_set_breakpoint(target, breakpoint);
1230 /* enable any pending breakpoints and watchpoints */
1231 xscale_enable_breakpoints(target);
1232 xscale_enable_watchpoints(target);
1234 /* restore banked registers */
1235 retval = xscale_restore_banked(target);
1236 if (retval != ERROR_OK)
1239 /* send resume request (command 0x30 or 0x31)
1240 * clean the trace buffer if it is to be enabled (0x62) */
1241 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
1242 if (xscale->trace.mode == XSCALE_TRACE_FILL) {
1243 /* If trace enabled in fill mode and starting collection of new set
1244 * of buffers, initialize buffer counter and free previous buffers */
1245 if (xscale->trace.fill_counter == 0) {
1246 xscale->trace.fill_counter = xscale->trace.buffer_fill;
1247 xscale_free_trace_data(xscale);
1249 } else /* wrap mode; free previous buffer */
1250 xscale_free_trace_data(xscale);
1252 xscale_send_u32(target, 0x62);
1253 xscale_send_u32(target, 0x31);
1255 xscale_send_u32(target, 0x30);
1258 xscale_send_u32(target, buf_get_u32(arm->cpsr->value, 0, 32));
1259 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1260 buf_get_u32(arm->cpsr->value, 0, 32));
1262 for (i = 7; i >= 0; i--) {
1264 xscale_send_u32(target, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1265 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "",
1266 i, buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1270 xscale_send_u32(target, buf_get_u32(arm->pc->value, 0, 32));
1271 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1272 buf_get_u32(arm->pc->value, 0, 32));
1274 target->debug_reason = DBG_REASON_NOTHALTED;
1276 if (!debug_execution) {
1277 /* registers are now invalid */
1278 register_cache_invalidate(arm->core_cache);
1279 target->state = TARGET_RUNNING;
1280 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1282 target->state = TARGET_DEBUG_RUNNING;
1283 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1286 LOG_DEBUG("target resumed");
1291 static int xscale_step_inner(struct target *target, int current,
1292 uint32_t address, int handle_breakpoints)
1294 struct xscale_common *xscale = target_to_xscale(target);
1295 struct arm *arm = &xscale->arm;
1300 target->debug_reason = DBG_REASON_SINGLESTEP;
1302 /* calculate PC of next instruction */
1303 retval = arm_simulate_step(target, &next_pc);
1304 if (retval != ERROR_OK) {
1305 uint32_t current_opcode, current_pc;
1306 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1308 target_read_u32(target, current_pc, ¤t_opcode);
1310 "BUG: couldn't calculate PC of next instruction, current opcode was 0x%8.8" PRIx32 "",
1315 LOG_DEBUG("enable single-step");
1316 retval = xscale_enable_single_step(target, next_pc);
1317 if (retval != ERROR_OK)
1320 /* restore banked registers */
1321 retval = xscale_restore_banked(target);
1322 if (retval != ERROR_OK)
1325 /* send resume request (command 0x30 or 0x31)
1326 * clean the trace buffer if it is to be enabled (0x62) */
1327 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
1328 retval = xscale_send_u32(target, 0x62);
1329 if (retval != ERROR_OK)
1331 retval = xscale_send_u32(target, 0x31);
1332 if (retval != ERROR_OK)
1335 retval = xscale_send_u32(target, 0x30);
1336 if (retval != ERROR_OK)
1341 retval = xscale_send_u32(target,
1342 buf_get_u32(arm->cpsr->value, 0, 32));
1343 if (retval != ERROR_OK)
1345 LOG_DEBUG("writing cpsr with value 0x%8.8" PRIx32,
1346 buf_get_u32(arm->cpsr->value, 0, 32));
1348 for (i = 7; i >= 0; i--) {
1350 retval = xscale_send_u32(target,
1351 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1352 if (retval != ERROR_OK)
1354 LOG_DEBUG("writing r%i with value 0x%8.8" PRIx32 "", i,
1355 buf_get_u32(arm->core_cache->reg_list[i].value, 0, 32));
1359 retval = xscale_send_u32(target,
1360 buf_get_u32(arm->pc->value, 0, 32));
1361 if (retval != ERROR_OK)
1363 LOG_DEBUG("wrote PC with value 0x%8.8" PRIx32,
1364 buf_get_u32(arm->pc->value, 0, 32));
1366 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1368 /* registers are now invalid */
1369 register_cache_invalidate(arm->core_cache);
1371 /* wait for and process debug entry */
1372 retval = xscale_debug_entry(target);
1373 if (retval != ERROR_OK)
1376 LOG_DEBUG("disable single-step");
1377 retval = xscale_disable_single_step(target);
1378 if (retval != ERROR_OK)
1381 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1386 static int xscale_step(struct target *target, int current,
1387 uint32_t address, int handle_breakpoints)
1389 struct arm *arm = target_to_arm(target);
1390 struct breakpoint *breakpoint = NULL;
1392 uint32_t current_pc;
1395 if (target->state != TARGET_HALTED) {
1396 LOG_WARNING("target not halted");
1397 return ERROR_TARGET_NOT_HALTED;
1400 /* current = 1: continue on current pc, otherwise continue at <address> */
1402 buf_set_u32(arm->pc->value, 0, 32, address);
1404 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1406 /* if we're at the reset vector, we have to simulate the step */
1407 if (current_pc == 0x0) {
1408 retval = arm_simulate_step(target, NULL);
1409 if (retval != ERROR_OK)
1411 current_pc = buf_get_u32(arm->pc->value, 0, 32);
1412 LOG_DEBUG("current pc %" PRIx32, current_pc);
1414 target->debug_reason = DBG_REASON_SINGLESTEP;
1415 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1420 /* the front-end may request us not to handle breakpoints */
1421 if (handle_breakpoints)
1422 breakpoint = breakpoint_find(target,
1423 buf_get_u32(arm->pc->value, 0, 32));
1424 if (breakpoint != NULL) {
1425 retval = xscale_unset_breakpoint(target, breakpoint);
1426 if (retval != ERROR_OK)
1430 retval = xscale_step_inner(target, current, address, handle_breakpoints);
1431 if (retval != ERROR_OK)
1435 xscale_set_breakpoint(target, breakpoint);
1437 LOG_DEBUG("target stepped");
1443 static int xscale_assert_reset(struct target *target)
1445 struct xscale_common *xscale = target_to_xscale(target);
1447 LOG_DEBUG("target->state: %s",
1448 target_state_name(target));
1450 /* select DCSR instruction (set endstate to R-T-I to ensure we don't
1451 * end up in T-L-R, which would reset JTAG)
1453 xscale_jtag_set_instr(target->tap,
1454 XSCALE_SELDCSR << xscale->xscale_variant,
1457 /* set Hold reset, Halt mode and Trap Reset */
1458 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1459 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1460 xscale_write_dcsr(target, 1, 0);
1462 /* select BYPASS, because having DCSR selected caused problems on the PXA27x */
1463 xscale_jtag_set_instr(target->tap, ~0, TAP_IDLE);
1464 jtag_execute_queue();
1467 jtag_add_reset(0, 1);
1469 /* sleep 1ms, to be sure we fulfill any requirements */
1470 jtag_add_sleep(1000);
1471 jtag_execute_queue();
1473 target->state = TARGET_RESET;
1475 if (target->reset_halt) {
1476 int retval = target_halt(target);
1477 if (retval != ERROR_OK)
1484 static int xscale_deassert_reset(struct target *target)
1486 struct xscale_common *xscale = target_to_xscale(target);
1487 struct breakpoint *breakpoint = target->breakpoints;
1491 xscale->ibcr_available = 2;
1492 xscale->ibcr0_used = 0;
1493 xscale->ibcr1_used = 0;
1495 xscale->dbr_available = 2;
1496 xscale->dbr0_used = 0;
1497 xscale->dbr1_used = 0;
1499 /* mark all hardware breakpoints as unset */
1500 while (breakpoint) {
1501 if (breakpoint->type == BKPT_HARD)
1502 breakpoint->set = 0;
1503 breakpoint = breakpoint->next;
1506 xscale->trace.mode = XSCALE_TRACE_DISABLED;
1507 xscale_free_trace_data(xscale);
1509 register_cache_invalidate(xscale->arm.core_cache);
1511 /* FIXME mark hardware watchpoints as unset too. Also,
1512 * at least some of the XScale registers are invalid...
1516 * REVISIT: *assumes* we had a SRST+TRST reset so the mini-icache
1517 * contents got invalidated. Safer to force that, so writing new
1518 * contents can't ever fail.
1523 const uint8_t *buffer = xscale_debug_handler;
1527 jtag_add_reset(0, 0);
1529 /* wait 300ms; 150 and 100ms were not enough */
1530 jtag_add_sleep(300*1000);
1532 jtag_add_runtest(2030, TAP_IDLE);
1533 jtag_execute_queue();
1535 /* set Hold reset, Halt mode and Trap Reset */
1536 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1537 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1538 xscale_write_dcsr(target, 1, 0);
1540 /* Load the debug handler into the mini-icache. Since
1541 * it's using halt mode (not monitor mode), it runs in
1542 * "Special Debug State" for access to registers, memory,
1543 * coprocessors, trace data, etc.
1545 address = xscale->handler_address;
1546 for (unsigned binary_size = sizeof xscale_debug_handler - 1;
1548 binary_size -= buf_cnt, buffer += buf_cnt) {
1549 uint32_t cache_line[8];
1552 buf_cnt = binary_size;
1556 for (i = 0; i < buf_cnt; i += 4) {
1557 /* convert LE buffer to host-endian uint32_t */
1558 cache_line[i / 4] = le_to_h_u32(&buffer[i]);
1561 for (; i < 32; i += 4)
1562 cache_line[i / 4] = 0xe1a08008;
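/* 0xe1a08008 is "mov r8, r8", i.e. a no-op used to pad partial cache lines */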
1564 /* only load addresses other than the reset vectors */
1565 if ((address % 0x400) != 0x0) {
1566 retval = xscale_load_ic(target, address,
1568 if (retval != ERROR_OK)
1576 retval = xscale_load_ic(target, 0x0,
1577 xscale->low_vectors);
1578 if (retval != ERROR_OK)
1580 retval = xscale_load_ic(target, 0xffff0000,
1581 xscale->high_vectors);
1582 if (retval != ERROR_OK)
1585 jtag_add_runtest(30, TAP_IDLE);
1587 jtag_add_sleep(100000);
1589 /* set Hold reset, Halt mode and Trap Reset */
1590 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 30, 1, 0x1);
1591 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 16, 1, 0x1);
1592 xscale_write_dcsr(target, 1, 0);
1594 /* clear Hold reset to let the target run (should enter debug handler) */
1595 xscale_write_dcsr(target, 0, 1);
1596 target->state = TARGET_RUNNING;
1598 if (!target->reset_halt) {
1599 jtag_add_sleep(10000);
1601 /* we should have entered debug now */
1602 xscale_debug_entry(target);
1603 target->state = TARGET_HALTED;
1605 /* resume the target */
1606 xscale_resume(target, 1, 0x0, 1, 0);
1613 static int xscale_read_core_reg(struct target *target, struct reg *r,
1614 int num, enum arm_mode mode)
1616 /** \todo add debug handler support for core register reads */
1617 LOG_ERROR("not implemented");
1621 static int xscale_write_core_reg(struct target *target, struct reg *r,
1622 int num, enum arm_mode mode, uint32_t value)
1624 /** \todo add debug handler support for core register writes */
1625 LOG_ERROR("not implemented");
1629 static int xscale_full_context(struct target *target)
1631 struct arm *arm = target_to_arm(target);
1639 if (target->state != TARGET_HALTED) {
1640 LOG_WARNING("target not halted");
1641 return ERROR_TARGET_NOT_HALTED;
1644 buffer = malloc(4 * 8);
1646 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1647 * we can't enter User mode on an XScale (unpredictable),
1648 * but User shares registers with SYS
1650 for (i = 1; i < 7; i++) {
1651 enum arm_mode mode = armv4_5_number_to_mode(i);
1655 if (mode == ARM_MODE_USR)
1658 /* check if there are invalid registers in the current mode
1660 for (j = 0; valid && j <= 16; j++) {
1661 if (!ARMV4_5_CORE_REG_MODE(arm->core_cache,
1668 /* request banked registers */
1669 xscale_send_u32(target, 0x0);
1671 /* send CPSR for desired bank mode */
1672 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
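/* e.g. fetching the FIQ bank sends 0x0, then 0x11 | 0xc0 = 0xd1 (FIQ mode, IRQ/FIQ masked) */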
1674 /* get banked registers: r8 to r14; and SPSR
1675 * except in USR/SYS mode
1677 if (mode != ARM_MODE_SYS) {
1679 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1682 xscale_receive(target, buffer, 8);
1684 buf_set_u32(r->value, 0, 32, buffer[7]);
1688 xscale_receive(target, buffer, 7);
1690 /* move data from buffer to register cache */
1691 for (j = 8; j <= 14; j++) {
1692 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1695 buf_set_u32(r->value, 0, 32, buffer[j - 8]);
1706 static int xscale_restore_banked(struct target *target)
1708 struct arm *arm = target_to_arm(target);
1712 if (target->state != TARGET_HALTED) {
1713 LOG_WARNING("target not halted");
1714 return ERROR_TARGET_NOT_HALTED;
1717 /* iterate through processor modes (FIQ, IRQ, SVC, ABT, UND and SYS)
1718 * and check if any banked registers need to be written. Ignore
1719 * USR mode (number 0) in favor of SYS; we can't enter User mode on
1720 * an XScale (unpredictable), but they share all registers.
1722 for (i = 1; i < 7; i++) {
1723 enum arm_mode mode = armv4_5_number_to_mode(i);
1726 if (mode == ARM_MODE_USR)
1729 /* check if there are dirty registers in this mode */
1730 for (j = 8; j <= 14; j++) {
1731 if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
1736 /* if not USR/SYS, check if the SPSR needs to be written */
1737 if (mode != ARM_MODE_SYS) {
1738 if (ARMV4_5_CORE_REG_MODE(arm->core_cache,
1743 /* there's nothing to flush for this mode */
1747 /* command 0x1: "send banked registers" */
1748 xscale_send_u32(target, 0x1);
1750 /* send CPSR for desired mode */
1751 xscale_send_u32(target, mode | 0xc0 /* I/F bits */);
1753 /* send r8 to r14/lr ... only FIQ needs more than r13..r14,
1754 * but this protocol doesn't understand that nuance.
1756 for (j = 8; j <= 14; j++) {
1757 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1759 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1763 /* send spsr if not in USR/SYS mode */
1764 if (mode != ARM_MODE_SYS) {
1765 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1767 xscale_send_u32(target, buf_get_u32(r->value, 0, 32));
1775 static int xscale_read_memory(struct target *target, uint32_t address,
1776 uint32_t size, uint32_t count, uint8_t *buffer)
1778 struct xscale_common *xscale = target_to_xscale(target);
1783 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1788 if (target->state != TARGET_HALTED) {
1789 LOG_WARNING("target not halted");
1790 return ERROR_TARGET_NOT_HALTED;
1793 /* sanitize arguments */
1794 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1795 return ERROR_COMMAND_SYNTAX_ERROR;
1797 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1798 return ERROR_TARGET_UNALIGNED_ACCESS;
1800 /* send memory read request (command 0x1n, n: access size) */
1801 retval = xscale_send_u32(target, 0x10 | size);
1802 if (retval != ERROR_OK)
1805 /* send base address for read request */
1806 retval = xscale_send_u32(target, address);
1807 if (retval != ERROR_OK)
1810 /* send number of requested data words */
1811 retval = xscale_send_u32(target, count);
1812 if (retval != ERROR_OK)
1815 /* receive data from target (count times 32-bit words in host endianness) */
1816 buf32 = malloc(4 * count);
1817 retval = xscale_receive(target, buf32, count);
1818 if (retval != ERROR_OK)
1821 /* extract data from host-endian buffer into byte stream */
1822 for (i = 0; i < count; i++) {
1825 target_buffer_set_u32(target, buffer, buf32[i]);
1829 target_buffer_set_u16(target, buffer, buf32[i] & 0xffff);
1833 *buffer++ = buf32[i] & 0xff;
1836 LOG_ERROR("invalid read size");
1837 return ERROR_COMMAND_SYNTAX_ERROR;
1843 /* examine DCSR, to see if Sticky Abort (SA) got set */
1844 retval = xscale_read_dcsr(target);
1845 if (retval != ERROR_OK)
1847 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1849 retval = xscale_send_u32(target, 0x60);
1850 if (retval != ERROR_OK)
1853 return ERROR_TARGET_DATA_ABORT;
1859 static int xscale_read_phys_memory(struct target *target, uint32_t address,
1860 uint32_t size, uint32_t count, uint8_t *buffer)
1862 struct xscale_common *xscale = target_to_xscale(target);
1864 /* with MMU inactive, there are only physical addresses */
1865 if (!xscale->armv4_5_mmu.mmu_enabled)
1866 return xscale_read_memory(target, address, size, count, buffer);
1868 /** \todo: provide a non-stub implementation of this routine. */
1869 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1870 target_name(target), __func__);
1874 static int xscale_write_memory(struct target *target, uint32_t address,
1875 uint32_t size, uint32_t count, const uint8_t *buffer)
1877 struct xscale_common *xscale = target_to_xscale(target);
1880 LOG_DEBUG("address: 0x%8.8" PRIx32 ", size: 0x%8.8" PRIx32 ", count: 0x%8.8" PRIx32,
1885 if (target->state != TARGET_HALTED) {
1886 LOG_WARNING("target not halted");
1887 return ERROR_TARGET_NOT_HALTED;
1890 /* sanitize arguments */
1891 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1892 return ERROR_COMMAND_SYNTAX_ERROR;
1894 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1895 return ERROR_TARGET_UNALIGNED_ACCESS;
1897 /* send memory write request (command 0x2n, n: access size) */
1898 retval = xscale_send_u32(target, 0x20 | size);
1899 if (retval != ERROR_OK)
1902 /* send base address for write request */
1903 retval = xscale_send_u32(target, address);
1904 if (retval != ERROR_OK)
1907 /* send number of requested data words to be written */
1908 retval = xscale_send_u32(target, count);
1909 if (retval != ERROR_OK)
1912 /* extract data from host-endian buffer into byte stream */
1914 for (i = 0; i < count; i++) {
1917 value = target_buffer_get_u32(target, buffer);
1918 xscale_send_u32(target, value);
1922 value = target_buffer_get_u16(target, buffer);
1923 xscale_send_u32(target, value);
1928 xscale_send_u32(target, value);
1932 LOG_ERROR("should never get here");
1937 retval = xscale_send(target, buffer, count, size);
1938 if (retval != ERROR_OK)
1941 /* examine DCSR, to see if Sticky Abort (SA) got set */
1942 retval = xscale_read_dcsr(target);
1943 if (retval != ERROR_OK)
1945 if (buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 5, 1) == 1) {
1947 retval = xscale_send_u32(target, 0x60);
1948 if (retval != ERROR_OK)
1951 LOG_ERROR("data abort writing memory");
1952 return ERROR_TARGET_DATA_ABORT;
1958 static int xscale_write_phys_memory(struct target *target, uint32_t address,
1959 uint32_t size, uint32_t count, const uint8_t *buffer)
1961 struct xscale_common *xscale = target_to_xscale(target);
1963 /* with MMU inactive, there are only physical addresses */
1964 if (!xscale->armv4_5_mmu.mmu_enabled)
1965 return xscale_write_memory(target, address, size, count, buffer);
1967 /** \todo: provide a non-stub implementation of this routine. */
1968 LOG_ERROR("%s: %s is not implemented. Disable MMU?",
1969 target_name(target), __func__);
1973 static int xscale_get_ttb(struct target *target, uint32_t *result)
1975 struct xscale_common *xscale = target_to_xscale(target);
1979 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_TTB]);
1980 if (retval != ERROR_OK)
1982 ttb = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_TTB].value, 0, 32);
1989 static int xscale_disable_mmu_caches(struct target *target, int mmu,
1990 int d_u_cache, int i_cache)
1992 struct xscale_common *xscale = target_to_xscale(target);
1993 uint32_t cp15_control;
1996 /* read cp15 control register */
1997 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
1998 if (retval != ERROR_OK)
2000 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2003 cp15_control &= ~0x1U;
2007 retval = xscale_send_u32(target, 0x50);
2008 if (retval != ERROR_OK)
2010 retval = xscale_send_u32(target, xscale->cache_clean_address);
2011 if (retval != ERROR_OK)
2014 /* invalidate DCache */
2015 retval = xscale_send_u32(target, 0x51);
2016 if (retval != ERROR_OK)
2019 cp15_control &= ~0x4U;
2023 /* invalidate ICache */
2024 retval = xscale_send_u32(target, 0x52);
2025 if (retval != ERROR_OK)
2027 cp15_control &= ~0x1000U;
2030 /* write new cp15 control register */
2031 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2032 if (retval != ERROR_OK)
2035 /* execute cpwait to ensure outstanding operations complete */
2036 retval = xscale_send_u32(target, 0x53);
2040 static int xscale_enable_mmu_caches(struct target *target, int mmu,
2041 int d_u_cache, int i_cache)
2043 struct xscale_common *xscale = target_to_xscale(target);
2044 uint32_t cp15_control;
2047 /* read cp15 control register */
2048 retval = xscale_get_reg(&xscale->reg_cache->reg_list[XSCALE_CTRL]);
2049 if (retval != ERROR_OK)
2051 cp15_control = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_CTRL].value, 0, 32);
2054 cp15_control |= 0x1U;
2057 cp15_control |= 0x4U;
2060 cp15_control |= 0x1000U;
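/* CP15 control register: bit 0 enables the MMU, bit 2 the D/unified cache,
 * bit 12 the I-cache (same masks as in xscale_disable_mmu_caches above) */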
2062 /* write new cp15 control register */
2063 retval = xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_CTRL], cp15_control);
2064 if (retval != ERROR_OK)
2067 /* execute cpwait to ensure outstanding operations complete */
2068 retval = xscale_send_u32(target, 0x53);
2072 static int xscale_set_breakpoint(struct target *target,
2073 struct breakpoint *breakpoint)
2076 struct xscale_common *xscale = target_to_xscale(target);
2078 if (target->state != TARGET_HALTED) {
2079 LOG_WARNING("target not halted");
2080 return ERROR_TARGET_NOT_HALTED;
2083 if (breakpoint->set) {
2084 LOG_WARNING("breakpoint already set");
2088 if (breakpoint->type == BKPT_HARD) {
2089 uint32_t value = breakpoint->address | 1;
2090 if (!xscale->ibcr0_used) {
2091 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], value);
2092 xscale->ibcr0_used = 1;
2093 breakpoint->set = 1; /* breakpoint set on first breakpoint register */
2094 } else if (!xscale->ibcr1_used) {
2095 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], value);
2096 xscale->ibcr1_used = 1;
2097 breakpoint->set = 2; /* breakpoint set on second breakpoint register */
2098 } else {/* bug: availability previously verified in xscale_add_breakpoint() */
2099 LOG_ERROR("BUG: no hardware comparator available");
2100 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2102 } else if (breakpoint->type == BKPT_SOFT) {
2103 if (breakpoint->length == 4) {
2104 /* keep the original instruction in target endianness */
2105 retval = target_read_memory(target, breakpoint->address, 4, 1,
2106 breakpoint->orig_instr);
2107 if (retval != ERROR_OK)
2109 /* write the bkpt instruction in target endianness
2110 * (xscale->arm_bkpt is host endian) */
2111 retval = target_write_u32(target, breakpoint->address,
2113 if (retval != ERROR_OK)
2116 /* keep the original instruction in target endianness */
2117 retval = target_read_memory(target, breakpoint->address, 2, 1,
2118 breakpoint->orig_instr);
2119 if (retval != ERROR_OK)
2121 /* write the bkpt instruction in target endianness
2122 * (xscale->thumb_bkpt is host endian) */
2123 retval = target_write_u16(target, breakpoint->address,
2124 xscale->thumb_bkpt);
2125 if (retval != ERROR_OK)
2128 breakpoint->set = 1;
2130 xscale_send_u32(target, 0x50); /* clean dcache */
2131 xscale_send_u32(target, xscale->cache_clean_address);
2132 xscale_send_u32(target, 0x51); /* invalidate dcache */
2133 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2139 static int xscale_add_breakpoint(struct target *target,
2140 struct breakpoint *breakpoint)
2142 struct xscale_common *xscale = target_to_xscale(target);
2144 if ((breakpoint->type == BKPT_HARD) && (xscale->ibcr_available < 1)) {
2145 LOG_ERROR("no breakpoint unit available for hardware breakpoint");
2146 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2149 if ((breakpoint->length != 2) && (breakpoint->length != 4)) {
2150 LOG_ERROR("only breakpoints of two (Thumb) or four (ARM) bytes length supported");
2151 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2154 if (breakpoint->type == BKPT_HARD)
2155 xscale->ibcr_available--;
2157 return xscale_set_breakpoint(target, breakpoint);
2160 static int xscale_unset_breakpoint(struct target *target,
2161 struct breakpoint *breakpoint)
2164 struct xscale_common *xscale = target_to_xscale(target);
2166 if (target->state != TARGET_HALTED) {
2167 LOG_WARNING("target not halted");
2168 return ERROR_TARGET_NOT_HALTED;
2171 if (!breakpoint->set) {
2172 LOG_WARNING("breakpoint not set");
2176 if (breakpoint->type == BKPT_HARD) {
2177 if (breakpoint->set == 1) {
2178 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR0], 0x0);
2179 xscale->ibcr0_used = 0;
2180 } else if (breakpoint->set == 2) {
2181 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_IBCR1], 0x0);
2182 xscale->ibcr1_used = 0;
2184 breakpoint->set = 0;
2186 /* restore original instruction (kept in target endianness) */
2187 if (breakpoint->length == 4) {
2188 retval = target_write_memory(target, breakpoint->address, 4, 1,
2189 breakpoint->orig_instr);
2190 if (retval != ERROR_OK)
2193 retval = target_write_memory(target, breakpoint->address, 2, 1,
2194 breakpoint->orig_instr);
2195 if (retval != ERROR_OK)
2198 breakpoint->set = 0;
2200 xscale_send_u32(target, 0x50); /* clean dcache */
2201 xscale_send_u32(target, xscale->cache_clean_address);
2202 xscale_send_u32(target, 0x51); /* invalidate dcache */
2203 xscale_send_u32(target, 0x52); /* invalidate icache and flush fetch buffers */
2209 static int xscale_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
2211 struct xscale_common *xscale = target_to_xscale(target);
2213 if (target->state != TARGET_HALTED) {
2214 LOG_ERROR("target not halted");
2215 return ERROR_TARGET_NOT_HALTED;
2218 if (breakpoint->set)
2219 xscale_unset_breakpoint(target, breakpoint);
2221 if (breakpoint->type == BKPT_HARD)
2222 xscale->ibcr_available++;
2227 static int xscale_set_watchpoint(struct target *target,
2228 struct watchpoint *watchpoint)
2230 struct xscale_common *xscale = target_to_xscale(target);
2231 uint32_t enable = 0;
2232 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2233 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2235 if (target->state != TARGET_HALTED) {
2236 LOG_ERROR("target not halted");
2237 return ERROR_TARGET_NOT_HALTED;
2240 switch (watchpoint->rw) {
2251 LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
2254 /* For a watchpoint spanning more than one word, both DBR registers must
2255 * be enlisted, with the second one used as a mask. */
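/* Illustrative example: a 32-byte watchpoint programs DBR1 below with
 * length - 1 = 0x1f, so address bits [4:0] are ignored when the comparator
 * matches an access against DBR0. */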
2256 if (watchpoint->length > 4) {
2257 if (xscale->dbr0_used || xscale->dbr1_used) {
2258 LOG_ERROR("BUG: sufficient hardware comparators unavailable");
2259 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2262 /* Write mask value to DBR1, based on the length argument.
2263 * Address bits ignored by the comparator are those set in mask. */
2264 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1],
2265 watchpoint->length - 1);
2266 xscale->dbr1_used = 1;
2267 enable |= 0x100; /* DBCON[M] */
2270 if (!xscale->dbr0_used) {
2271 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR0], watchpoint->address);
2272 dbcon_value |= enable;
2273 xscale_set_reg_u32(dbcon, dbcon_value);
2274 watchpoint->set = 1;
2275 xscale->dbr0_used = 1;
2276 } else if (!xscale->dbr1_used) {
2277 xscale_set_reg_u32(&xscale->reg_cache->reg_list[XSCALE_DBR1], watchpoint->address);
2278 dbcon_value |= enable << 2;
2279 xscale_set_reg_u32(dbcon, dbcon_value);
2280 watchpoint->set = 2;
2281 xscale->dbr1_used = 1;
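/* As used above: DBCON[1:0] condition DBR0, DBCON[3:2] do the same for DBR1
 * (hence "enable << 2"), and DBCON[8] ("M") turns DBR1 into an address mask
 * instead of a second, independent watchpoint. */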
2283 LOG_ERROR("BUG: no hardware comparator available");
2284 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2290 static int xscale_add_watchpoint(struct target *target,
2291 struct watchpoint *watchpoint)
2293 struct xscale_common *xscale = target_to_xscale(target);
2295 if (xscale->dbr_available < 1) {
2296 LOG_ERROR("no more watchpoint registers available");
2297 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2300 if (watchpoint->value)
2301 LOG_WARNING("xscale does not support value, mask arguments; ignoring");
2303 /* check that length is a power of two */
2304 for (uint32_t len = watchpoint->length; len != 1; len /= 2) {
2305 if (len % 2) {
2306 LOG_ERROR("xscale requires that watchpoint length is a power of two");
2307 return ERROR_COMMAND_ARGUMENT_INVALID;
2308 }
2309 }
2311 if (watchpoint->length == 4) { /* single word watchpoint */
2312 xscale->dbr_available--;/* one DBR reg used */
2316 /* watchpoints across multiple words require both DBR registers */
2317 if (xscale->dbr_available < 2) {
2318 LOG_ERROR("insufficient watchpoint registers available");
2319 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2322 if (watchpoint->length > watchpoint->address) {
2323 LOG_ERROR("xscale does not support watchpoints with length "
2324 "greater than address");
2325 return ERROR_COMMAND_ARGUMENT_INVALID;
2328 xscale->dbr_available = 0;
2332 static int xscale_unset_watchpoint(struct target *target,
2333 struct watchpoint *watchpoint)
2335 struct xscale_common *xscale = target_to_xscale(target);
2336 struct reg *dbcon = &xscale->reg_cache->reg_list[XSCALE_DBCON];
2337 uint32_t dbcon_value = buf_get_u32(dbcon->value, 0, 32);
2339 if (target->state != TARGET_HALTED) {
2340 LOG_WARNING("target not halted");
2341 return ERROR_TARGET_NOT_HALTED;
2344 if (!watchpoint->set) {
2345 LOG_WARNING("watchpoint not set");
2346 return ERROR_OK;
2347 }
2349 if (watchpoint->set == 1) {
2350 if (watchpoint->length > 4) {
2351 dbcon_value &= ~0x103; /* clear DBCON[M] as well */
2352 xscale->dbr1_used = 0; /* DBR1 was used for mask */
2354 dbcon_value &= ~0x3;
2356 xscale_set_reg_u32(dbcon, dbcon_value);
2357 xscale->dbr0_used = 0;
2358 } else if (watchpoint->set == 2) {
2359 dbcon_value &= ~0xc;
2360 xscale_set_reg_u32(dbcon, dbcon_value);
2361 xscale->dbr1_used = 0;
2363 watchpoint->set = 0;
2368 static int xscale_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2370 struct xscale_common *xscale = target_to_xscale(target);
2372 if (target->state != TARGET_HALTED) {
2373 LOG_ERROR("target not halted");
2374 return ERROR_TARGET_NOT_HALTED;
2377 if (watchpoint->set)
2378 xscale_unset_watchpoint(target, watchpoint);
2380 if (watchpoint->length > 4)
2381 xscale->dbr_available++;/* both DBR regs now available */
2383 xscale->dbr_available++;
2388 static int xscale_get_reg(struct reg *reg)
2390 struct xscale_reg *arch_info = reg->arch_info;
2391 struct target *target = arch_info->target;
2392 struct xscale_common *xscale = target_to_xscale(target);
2394 /* DCSR, TX and RX are accessible via JTAG */
2395 if (strcmp(reg->name, "XSCALE_DCSR") == 0)
2396 return xscale_read_dcsr(arch_info->target);
2397 else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2398 /* 1 = consume register content */
2399 return xscale_read_tx(arch_info->target, 1);
2400 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2401 /* can't read from RX register (host -> debug handler) */
2403 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2404 /* can't (explicitly) read from TXRXCTRL register */
2406 } else {/* Other DBG registers have to be transferred by the debug handler;
2407 * send CP read request (command 0x40) */
2408 xscale_send_u32(target, 0x40);
2410 /* send CP register number */
2411 xscale_send_u32(target, arch_info->dbg_handler_number);
2413 /* read register value */
2414 xscale_read_tx(target, 1);
2415 buf_cpy(xscale->reg_cache->reg_list[XSCALE_TX].value, reg->value, 32);
2424 static int xscale_set_reg(struct reg *reg, uint8_t *buf)
2426 struct xscale_reg *arch_info = reg->arch_info;
2427 struct target *target = arch_info->target;
2428 struct xscale_common *xscale = target_to_xscale(target);
2429 uint32_t value = buf_get_u32(buf, 0, 32);
2431 /* DCSR, TX and RX are accessible via JTAG */
2432 if (strcmp(reg->name, "XSCALE_DCSR") == 0) {
2433 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32, value);
2434 return xscale_write_dcsr(arch_info->target, -1, -1);
2435 } else if (strcmp(reg->name, "XSCALE_RX") == 0) {
2436 buf_set_u32(xscale->reg_cache->reg_list[XSCALE_RX].value, 0, 32, value);
2437 return xscale_write_rx(arch_info->target);
2438 } else if (strcmp(reg->name, "XSCALE_TX") == 0) {
2439 /* can't write to TX register (debug-handler -> host) */
2441 } else if (strcmp(reg->name, "XSCALE_TXRXCTRL") == 0) {
2442 /* can't (explicitly) write to TXRXCTRL register */
2444 } else {/* Other DBG registers have to be transferred by the debug handler;
2445 * send CP write request (command 0x41) */
2446 xscale_send_u32(target, 0x41);
2448 /* send CP register number */
2449 xscale_send_u32(target, arch_info->dbg_handler_number);
2451 /* send CP register value */
2452 xscale_send_u32(target, value);
2453 buf_set_u32(reg->value, 0, 32, value);
2459 static int xscale_write_dcsr_sw(struct target *target, uint32_t value)
2461 struct xscale_common *xscale = target_to_xscale(target);
2462 struct reg *dcsr = &xscale->reg_cache->reg_list[XSCALE_DCSR];
2463 struct xscale_reg *dcsr_arch_info = dcsr->arch_info;
2465 /* send CP write request (command 0x41) */
2466 xscale_send_u32(target, 0x41);
2468 /* send CP register number */
2469 xscale_send_u32(target, dcsr_arch_info->dbg_handler_number);
2471 /* send CP register value */
2472 xscale_send_u32(target, value);
2473 buf_set_u32(dcsr->value, 0, 32, value);
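/* Software-path DCSR update: rather than going through the cached struct reg,
 * this asks the debug handler to do the coprocessor write (CP write request
 * 0x41) and then keeps the cached DCSR value in sync with what was sent. */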
2478 static int xscale_read_trace(struct target *target)
2480 struct xscale_common *xscale = target_to_xscale(target);
2481 struct arm *arm = &xscale->arm;
2482 struct xscale_trace_data **trace_data_p;
2484 /* 258 words from debug handler
2485 * 256 trace buffer entries
2486 * 2 checkpoint addresses
2487 */
2488 uint32_t trace_buffer[258];
2489 int is_address[256];
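/* is_address[] marks which raw trace_buffer entries the parser below
 * identifies as branch-address bytes rather than trace message bytes. */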
2491 unsigned int num_checkpoints = 0;
2493 if (target->state != TARGET_HALTED) {
2494 LOG_WARNING("target must be stopped to read trace data");
2495 return ERROR_TARGET_NOT_HALTED;
2498 /* send read trace buffer command (command 0x61) */
2499 xscale_send_u32(target, 0x61);
2501 /* receive trace buffer content */
2502 xscale_receive(target, trace_buffer, 258);
2504 /* parse buffer backwards to identify address entries */
2505 for (i = 255; i >= 0; i--) {
2506 /* also count number of checkpointed entries */
2507 if ((trace_buffer[i] & 0xe0) == 0xc0)
2508 num_checkpoints++;
2509 is_address[i] = 0;
2511 if (((trace_buffer[i] & 0xf0) == 0x90) ||
2512 ((trace_buffer[i] & 0xf0) == 0xd0)) {
2513 if (i > 0)
2514 is_address[--i] = 1;
2515 if (i > 0)
2516 is_address[--i] = 1;
2517 if (i > 0)
2518 is_address[--i] = 1;
2519 if (i > 0)
2520 is_address[--i] = 1;
2521 }
2522 }
2525 /* search first non-zero entry that is not part of an address */
2526 for (j = 0; (j < 256) && (trace_buffer[j] == 0) && (!is_address[j]); j++)
2527 ;
2529 if (j == 256) {
2530 LOG_DEBUG("no trace data collected");
2531 return ERROR_XSCALE_NO_TRACE_DATA;
2532 }
2534 /* account for possible partial address at buffer start (wrap mode only) */
2535 if (is_address[0]) { /* first entry is address; complete set of 4? */
2536 i = 1;
2537 while (i < 4)
2538 if (!is_address[i++])
2539 break;
2540 if (i < 4)
2541 j += i; /* partial address; can't use it */
2542 }
2544 /* if the first valid entry is an indirect branch, we can't use it either (no address) */
2545 if (((trace_buffer[j] & 0xf0) == 0x90) || ((trace_buffer[j] & 0xf0) == 0xd0))
2546 j++;
2548 /* walk linked list to terminating entry */
2549 for (trace_data_p = &xscale->trace.data; *trace_data_p;
2550 trace_data_p = &(*trace_data_p)->next)
2553 *trace_data_p = malloc(sizeof(struct xscale_trace_data));
2554 (*trace_data_p)->next = NULL;
2555 (*trace_data_p)->chkpt0 = trace_buffer[256];
2556 (*trace_data_p)->chkpt1 = trace_buffer[257];
2557 (*trace_data_p)->last_instruction = buf_get_u32(arm->pc->value, 0, 32);
2558 (*trace_data_p)->entries = malloc(sizeof(struct xscale_trace_entry) * (256 - j));
2559 (*trace_data_p)->depth = 256 - j;
2560 (*trace_data_p)->num_checkpoints = num_checkpoints;
2562 for (i = j; i < 256; i++) {
2563 (*trace_data_p)->entries[i - j].data = trace_buffer[i];
2564 if (is_address[i])
2565 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_ADDRESS;
2566 else
2567 (*trace_data_p)->entries[i - j].type = XSCALE_TRACE_MESSAGE;
2573 static int xscale_read_instruction(struct target *target, uint32_t pc,
2574 struct arm_instruction *instruction)
2576 struct xscale_common *const xscale = target_to_xscale(target);
2583 if (!xscale->trace.image)
2584 return ERROR_TRACE_IMAGE_UNAVAILABLE;
2586 /* search for the section the current instruction belongs to */
2587 for (i = 0; i < xscale->trace.image->num_sections; i++) {
2588 if ((xscale->trace.image->sections[i].base_address <= pc) &&
2589 (xscale->trace.image->sections[i].base_address +
2590 xscale->trace.image->sections[i].size > pc)) {
2591 section = i;
2592 break;
2593 }
2594 }
2596 if (section == -1) {
2597 /* current instruction couldn't be found in the image */
2598 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2601 if (xscale->trace.core_state == ARM_STATE_ARM) {
2603 retval = image_read_section(xscale->trace.image, section,
2604 pc - xscale->trace.image->sections[section].base_address,
2605 4, buf, &size_read);
2606 if (retval != ERROR_OK) {
2607 LOG_ERROR("error while reading instruction");
2608 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2610 opcode = target_buffer_get_u32(target, buf);
2611 arm_evaluate_opcode(opcode, pc, instruction);
2612 } else if (xscale->trace.core_state == ARM_STATE_THUMB) {
2614 retval = image_read_section(xscale->trace.image, section,
2615 pc - xscale->trace.image->sections[section].base_address,
2616 2, buf, &size_read);
2617 if (retval != ERROR_OK) {
2618 LOG_ERROR("error while reading instruction");
2619 return ERROR_TRACE_INSTRUCTION_UNAVAILABLE;
2621 opcode = target_buffer_get_u16(target, buf);
2622 thumb_evaluate_opcode(opcode, pc, instruction);
2624 LOG_ERROR("BUG: unknown core state encountered");
2631 /* Extract address encoded into trace data.
2632 * Write result to address referenced by argument 'target', or 0 if incomplete. */
2633 static inline void xscale_branch_address(struct xscale_trace_data *trace_data,
2634 int i, uint32_t *target)
2636 /* if there are less than four entries prior to the indirect branch message
2637 * we can't extract the address */
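/* Illustrative example: with at least four prior entries, say
 * entries[i-4..i-1] = 0xa0, 0x00, 0x10, 0x2c, the reconstruction below yields
 * 0xa000102c; the entry closest to the message byte supplies the least
 * significant address byte. */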
2638 if (i < 4)
2639 *target = 0;
2640 else
2641 *target = (trace_data->entries[i-1].data) | (trace_data->entries[i-2].data << 8) |
2642 (trace_data->entries[i-3].data << 16) | (trace_data->entries[i-4].data << 24);
2646 static inline void xscale_display_instruction(struct target *target, uint32_t pc,
2647 struct arm_instruction *instruction,
2648 struct command_context *cmd_ctx)
2650 int retval = xscale_read_instruction(target, pc, instruction);
2651 if (retval == ERROR_OK)
2652 command_print(cmd_ctx, "%s", instruction->text);
2654 command_print(cmd_ctx, "0x%8.8" PRIx32 "\t<not found in image>", pc);
2657 static int xscale_analyze_trace(struct target *target, struct command_context *cmd_ctx)
2659 struct xscale_common *xscale = target_to_xscale(target);
2660 struct xscale_trace_data *trace_data = xscale->trace.data;
2662 uint32_t breakpoint_pc;
2663 struct arm_instruction instruction;
2664 uint32_t current_pc = 0;/* initialized when address determined */
2666 if (!xscale->trace.image)
2667 LOG_WARNING("No trace image loaded; use 'xscale trace_image'");
2669 /* loop for each trace buffer that was loaded from target */
2670 while (trace_data) {
2671 int chkpt = 0; /* incremented as checkpointed entries found */
2674 /* FIXME: set this to correct mode when trace buffer is first enabled */
2675 xscale->trace.core_state = ARM_STATE_ARM;
2677 /* loop for each entry in this trace buffer */
2678 for (i = 0; i < trace_data->depth; i++) {
2680 uint32_t chkpt_reg = 0x0;
2681 uint32_t branch_target = 0;
2684 /* trace entry type is upper nybble of 'message byte' */
2685 int trace_msg_type = (trace_data->entries[i].data & 0xf0) >> 4;
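/* Message types handled by the switch below: 0-7 = exception (the vector
 * number comes from bits [6:4]), 8 = direct branch, 9 = indirect branch,
 * 12 = checkpointed direct branch, 13 = checkpointed indirect branch,
 * 15 = roll-over; anything else is treated as reserved. */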
2687 /* Target addresses of indirect branches are written into buffer
2688 * before the message byte representing the branch. Skip past it */
2689 if (trace_data->entries[i].type == XSCALE_TRACE_ADDRESS)
2692 switch (trace_msg_type) {
2693 case 0: /* Exceptions */
2701 exception = (trace_data->entries[i].data & 0x70) >> 4;
2703 /* FIXME: vector table may be at ffff0000 */
2704 branch_target = (trace_data->entries[i].data & 0xf0) >> 2;
2707 case 8: /* Direct Branch */
2710 case 9: /* Indirect Branch */
2711 xscale_branch_address(trace_data, i, &branch_target);
2714 case 13: /* Checkpointed Indirect Branch */
2715 xscale_branch_address(trace_data, i, &branch_target);
2716 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2717 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is
2718 * oldest */
2719 else
2720 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and
2721 * this is newest */
2723 chkpt++;
2724 break;
2726 case 12: /* Checkpointed Direct Branch */
2727 if ((trace_data->num_checkpoints == 2) && (chkpt == 0))
2728 chkpt_reg = trace_data->chkpt1; /* 2 chkpts, this is
2729 * oldest */
2730 else
2731 chkpt_reg = trace_data->chkpt0; /* 1 chkpt, or 2 and
2732 * this is newest */
2733 chkpt++;
2734 /* if no current_pc, checkpoint will be starting point */
2735 if (current_pc == 0)
2736 branch_target = chkpt_reg;
2741 case 15:/* Roll-over */
2744 default:/* Reserved */
2745 LOG_WARNING("trace is suspect: invalid trace message byte");
2750 /* If we don't have the current_pc yet, but we did get the branch target
2751 * (either from the trace buffer on indirect branch, or from a checkpoint reg),
2752 * then we can start displaying instructions at the next iteration, with
2753 * branch_target as the starting point.
2755 if (current_pc == 0) {
2756 current_pc = branch_target; /* remains 0 unless branch_target was obtained */
2757 continue;
2758 }
2760 /* We have current_pc. Read and display the instructions from the image.
2761 * First, display count instructions (lower nybble of message byte). */
2762 count = trace_data->entries[i].data & 0x0f;
2763 for (j = 0; j < count; j++) {
2764 xscale_display_instruction(target, current_pc, &instruction,
2765 cmd_ctx);
2766 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2769 /* An additional instruction is implicitly added to count for
2770 * rollover and some exceptions: undef, swi, prefetch abort. */
2771 if ((trace_msg_type == 15) || (exception > 0 && exception < 4)) {
2772 xscale_display_instruction(target, current_pc, &instruction,
2773 cmd_ctx);
2774 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2777 if (trace_msg_type == 15) /* rollover */
2781 command_print(cmd_ctx, "--- exception %i ---", exception);
2785 /* not exception or rollover; next instruction is a branch and is
2786 * not included in the count */
2787 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2789 /* for direct branches, extract branch destination from instruction */
2790 if ((trace_msg_type == 8) || (trace_msg_type == 12)) {
2791 retval = xscale_read_instruction(target, current_pc, &instruction);
2792 if (retval == ERROR_OK)
2793 current_pc = instruction.info.b_bl_bx_blx.target_address;
2795 current_pc = 0; /* branch destination unknown */
2797 /* direct branch w/ checkpoint; can also get from checkpoint reg */
2798 if (trace_msg_type == 12) {
2799 if (current_pc == 0)
2800 current_pc = chkpt_reg;
2801 else if (current_pc != chkpt_reg) /* sanity check */
2802 LOG_WARNING("trace is suspect: checkpoint register "
2803 "inconsistent with address from image");
2806 if (current_pc == 0)
2807 command_print(cmd_ctx, "address unknown");
2812 /* indirect branch; the branch destination was read from trace buffer */
2813 if ((trace_msg_type == 9) || (trace_msg_type == 13)) {
2814 current_pc = branch_target;
2816 /* sanity check (checkpoint reg is redundant) */
2817 if ((trace_msg_type == 13) && (chkpt_reg != branch_target))
2818 LOG_WARNING("trace is suspect: checkpoint register "
2819 "inconsistent with address from trace buffer");
2822 } /* END: for (i = 0; i < trace_data->depth; i++) */
2824 breakpoint_pc = trace_data->last_instruction; /* used below */
2825 trace_data = trace_data->next;
2827 } /* END: while (trace_data) */
2829 /* Finally... display all instructions up to the value of the pc when the
2830 * debug break occurred (saved when trace data was collected from target).
2831 * This is necessary because the trace only records execution branches and 16
2832 * consecutive instructions (rollovers), so the last few are typically missed.
2833 */
2834 if (current_pc == 0)
2835 return ERROR_OK;/* current_pc was never found */
2837 /* how many instructions remaining? */
2838 int gap_count = (breakpoint_pc - current_pc) /
2839 (xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2);
2841 /* should never be negative or over 16, but verify */
2842 if (gap_count < 0 || gap_count > 16) {
2843 LOG_WARNING("trace is suspect: excessive gap at end of trace");
2844 return ERROR_OK;/* bail; large number or negative value no good */
2847 /* display remaining instructions */
2848 for (i = 0; i < gap_count; i++) {
2849 xscale_display_instruction(target, current_pc, &instruction, cmd_ctx);
2850 current_pc += xscale->trace.core_state == ARM_STATE_ARM ? 4 : 2;
2856 static const struct reg_arch_type xscale_reg_type = {
2857 .get = xscale_get_reg,
2858 .set = xscale_set_reg,
2861 static void xscale_build_reg_cache(struct target *target)
2863 struct xscale_common *xscale = target_to_xscale(target);
2864 struct arm *arm = &xscale->arm;
2865 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2866 struct xscale_reg *arch_info = malloc(sizeof(xscale_reg_arch_info));
2868 int num_regs = ARRAY_SIZE(xscale_reg_arch_info);
2870 (*cache_p) = arm_build_reg_cache(target, arm);
2872 (*cache_p)->next = malloc(sizeof(struct reg_cache));
2873 cache_p = &(*cache_p)->next;
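/* The standard ARM core registers live in the cache built above; the
 * XScale-specific debug registers below go into a second cache chained after
 * it, accessed through xscale_get_reg()/xscale_set_reg(). */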
2875 /* fill in values for the xscale reg cache */
2876 (*cache_p)->name = "XScale registers";
2877 (*cache_p)->next = NULL;
2878 (*cache_p)->reg_list = malloc(num_regs * sizeof(struct reg));
2879 (*cache_p)->num_regs = num_regs;
2881 for (i = 0; i < num_regs; i++) {
2882 (*cache_p)->reg_list[i].name = xscale_reg_list[i];
2883 (*cache_p)->reg_list[i].value = calloc(4, 1);
2884 (*cache_p)->reg_list[i].dirty = 0;
2885 (*cache_p)->reg_list[i].valid = 0;
2886 (*cache_p)->reg_list[i].size = 32;
2887 (*cache_p)->reg_list[i].arch_info = &arch_info[i];
2888 (*cache_p)->reg_list[i].type = &xscale_reg_type;
2889 arch_info[i] = xscale_reg_arch_info[i];
2890 arch_info[i].target = target;
2893 xscale->reg_cache = (*cache_p);
2896 static int xscale_init_target(struct command_context *cmd_ctx,
2897 struct target *target)
2899 xscale_build_reg_cache(target);
2903 static int xscale_init_arch_info(struct target *target,
2904 struct xscale_common *xscale, struct jtag_tap *tap, const char *variant)
2907 uint32_t high_reset_branch, low_reset_branch;
2912 /* store architecture specific data */
2913 xscale->common_magic = XSCALE_COMMON_MAGIC;
2915 /* we don't really *need* a variant param ... */
2919 if (strcmp(variant, "pxa250") == 0
2920 || strcmp(variant, "pxa255") == 0
2921 || strcmp(variant, "pxa26x") == 0)
2922 ir_length = 5;
2923 else if (strcmp(variant, "pxa27x") == 0
2924 || strcmp(variant, "ixp42x") == 0
2925 || strcmp(variant, "ixp45x") == 0
2926 || strcmp(variant, "ixp46x") == 0)
2927 ir_length = 7;
2928 else if (strcmp(variant, "pxa3xx") == 0)
2929 ir_length = 11;
2930 else
2931 LOG_WARNING("%s: unrecognized variant %s",
2932 tap->dotted_name, variant);
2934 if (ir_length && ir_length != tap->ir_length) {
2935 LOG_WARNING("%s: IR length for %s is %d; fixing",
2936 tap->dotted_name, variant, ir_length);
2937 tap->ir_length = ir_length;
2941 /* PXA3xx shifts the JTAG instructions */
2942 if (tap->ir_length == 11)
2943 xscale->xscale_variant = XSCALE_PXA3XX;
2945 xscale->xscale_variant = XSCALE_IXP4XX_PXA2XX;
2947 /* the debug handler isn't installed (and thus not running) at this time */
2948 xscale->handler_address = 0xfe000800;
2950 /* clear the vectors we keep locally for reference */
2951 memset(xscale->low_vectors, 0, sizeof(xscale->low_vectors));
2952 memset(xscale->high_vectors, 0, sizeof(xscale->high_vectors));
2954 /* no user-specified vectors have been configured yet */
2955 xscale->static_low_vectors_set = 0x0;
2956 xscale->static_high_vectors_set = 0x0;
2958 /* calculate branches to debug handler */
2959 low_reset_branch = (xscale->handler_address + 0x20 - 0x0 - 0x8) >> 2;
2960 high_reset_branch = (xscale->handler_address + 0x20 - 0xffff0000 - 0x8) >> 2;
2962 xscale->low_vectors[0] = ARMV4_5_B((low_reset_branch & 0xffffff), 0);
2963 xscale->high_vectors[0] = ARMV4_5_B((high_reset_branch & 0xffffff), 0);
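/* The reset vectors get B instructions that jump into the debug handler: an
 * ARM branch offset is word-sized and relative to the vector address + 8,
 * hence the "- 0x8" and ">> 2" above. The loop below fills the remaining
 * vectors with branch-to-self instructions (offset 0xfffffe, i.e. -2 words)
 * as a safe default. */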
2965 for (i = 1; i <= 7; i++) {
2966 xscale->low_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2967 xscale->high_vectors[i] = ARMV4_5_B(0xfffffe, 0);
2970 /* 64kB aligned region used for DCache cleaning */
2971 xscale->cache_clean_address = 0xfffe0000;
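/* The debug handler presumably cleans the data cache by loading from this
 * otherwise unused 64kB-aligned region, forcing dirty lines to be evicted;
 * see the 0x50 "clean dcache" requests that pass this address to it. */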
2973 xscale->hold_rst = 0;
2974 xscale->external_debug_break = 0;
2976 xscale->ibcr_available = 2;
2977 xscale->ibcr0_used = 0;
2978 xscale->ibcr1_used = 0;
2980 xscale->dbr_available = 2;
2981 xscale->dbr0_used = 0;
2982 xscale->dbr1_used = 0;
2984 LOG_INFO("%s: hardware has 2 breakpoints and 2 watchpoints",
2985 target_name(target));
2987 xscale->arm_bkpt = ARMV5_BKPT(0x0);
2988 xscale->thumb_bkpt = ARMV5_T_BKPT(0x0) & 0xffff;
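/* These are the ARM and Thumb BKPT opcodes (immediate 0) that
 * xscale_set_breakpoint() writes into memory for software breakpoints. */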
2990 xscale->vector_catch = 0x1;
2992 xscale->trace.data = NULL;
2993 xscale->trace.image = NULL;
2994 xscale->trace.mode = XSCALE_TRACE_DISABLED;
2995 xscale->trace.buffer_fill = 0;
2996 xscale->trace.fill_counter = 0;
2998 /* prepare ARMv4/5 specific information */
2999 arm->arch_info = xscale;
3000 arm->core_type = ARM_MODE_ANY;
3001 arm->read_core_reg = xscale_read_core_reg;
3002 arm->write_core_reg = xscale_write_core_reg;
3003 arm->full_context = xscale_full_context;
3005 arm_init_arch_info(target, arm);
3007 xscale->armv4_5_mmu.armv4_5_cache.ctype = -1;
3008 xscale->armv4_5_mmu.get_ttb = xscale_get_ttb;
3009 xscale->armv4_5_mmu.read_memory = xscale_read_memory;
3010 xscale->armv4_5_mmu.write_memory = xscale_write_memory;
3011 xscale->armv4_5_mmu.disable_mmu_caches = xscale_disable_mmu_caches;
3012 xscale->armv4_5_mmu.enable_mmu_caches = xscale_enable_mmu_caches;
3013 xscale->armv4_5_mmu.has_tiny_pages = 1;
3014 xscale->armv4_5_mmu.mmu_enabled = 0;
3019 static int xscale_target_create(struct target *target, Jim_Interp *interp)
3021 struct xscale_common *xscale;
3023 if (sizeof xscale_debug_handler - 1 > 0x800) {
3024 LOG_ERROR("debug_handler.bin: larger than 2kb");
3028 xscale = calloc(1, sizeof(*xscale));
3032 return xscale_init_arch_info(target, xscale, target->tap,
3036 COMMAND_HANDLER(xscale_handle_debug_handler_command)
3038 struct target *target = NULL;
3039 struct xscale_common *xscale;
3041 uint32_t handler_address;
3044 return ERROR_COMMAND_SYNTAX_ERROR;
3046 target = get_target(CMD_ARGV[0]);
3047 if (target == NULL) {
3048 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3052 xscale = target_to_xscale(target);
3053 retval = xscale_verify_pointer(CMD_CTX, xscale);
3054 if (retval != ERROR_OK)
3057 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], handler_address);
3059 if (((handler_address >= 0x800) && (handler_address <= 0x1fef800)) ||
3060 ((handler_address >= 0xfe000800) && (handler_address <= 0xfffff800)))
3061 xscale->handler_address = handler_address;
3062 else {
3063 LOG_ERROR(
3064 "xscale debug_handler <address> must be between 0x800 and 0x1fef800 or between 0xfe000800 and 0xfffff800");
3065 return ERROR_COMMAND_ARGUMENT_INVALID;
3066 }
3071 COMMAND_HANDLER(xscale_handle_cache_clean_address_command)
3073 struct target *target = NULL;
3074 struct xscale_common *xscale;
3076 uint32_t cache_clean_address;
3079 return ERROR_COMMAND_SYNTAX_ERROR;
3081 target = get_target(CMD_ARGV[0]);
3082 if (target == NULL) {
3083 LOG_ERROR("target '%s' not defined", CMD_ARGV[0]);
3086 xscale = target_to_xscale(target);
3087 retval = xscale_verify_pointer(CMD_CTX, xscale);
3088 if (retval != ERROR_OK)
3091 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], cache_clean_address);
3093 if (cache_clean_address & 0xffff)
3094 LOG_ERROR("xscale cache_clean_address <address> must be 64kB aligned");
3095 else
3096 xscale->cache_clean_address = cache_clean_address;
3101 COMMAND_HANDLER(xscale_handle_cache_info_command)
3103 struct target *target = get_current_target(CMD_CTX);
3104 struct xscale_common *xscale = target_to_xscale(target);
3107 retval = xscale_verify_pointer(CMD_CTX, xscale);
3108 if (retval != ERROR_OK)
3111 return armv4_5_handle_cache_info_command(CMD_CTX, &xscale->armv4_5_mmu.armv4_5_cache);
3114 static int xscale_virt2phys(struct target *target,
3115 uint32_t virtual, uint32_t *physical)
3117 struct xscale_common *xscale = target_to_xscale(target);
3120 if (xscale->common_magic != XSCALE_COMMON_MAGIC) {
3121 LOG_ERROR(xscale_not);
3122 return ERROR_TARGET_INVALID;
3126 int retval = armv4_5_mmu_translate_va(target, &xscale->armv4_5_mmu,
3127 virtual, &cb, &ret);
3128 if (retval != ERROR_OK)
3134 static int xscale_mmu(struct target *target, int *enabled)
3136 struct xscale_common *xscale = target_to_xscale(target);
3138 if (target->state != TARGET_HALTED) {
3139 LOG_ERROR("Target not halted");
3140 return ERROR_TARGET_INVALID;
3142 *enabled = xscale->armv4_5_mmu.mmu_enabled;
3146 COMMAND_HANDLER(xscale_handle_mmu_command)
3148 struct target *target = get_current_target(CMD_CTX);
3149 struct xscale_common *xscale = target_to_xscale(target);
3152 retval = xscale_verify_pointer(CMD_CTX, xscale);
3153 if (retval != ERROR_OK)
3156 if (target->state != TARGET_HALTED) {
3157 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3161 if (CMD_ARGC >= 1) {
3162 bool enable;
3163 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3164 if (enable)
3165 xscale_enable_mmu_caches(target, 1, 0, 0);
3166 else
3167 xscale_disable_mmu_caches(target, 1, 0, 0);
3168 xscale->armv4_5_mmu.mmu_enabled = enable;
3169 }
3171 command_print(CMD_CTX, "mmu %s",
3172 (xscale->armv4_5_mmu.mmu_enabled) ? "enabled" : "disabled");
3177 COMMAND_HANDLER(xscale_handle_idcache_command)
3179 struct target *target = get_current_target(CMD_CTX);
3180 struct xscale_common *xscale = target_to_xscale(target);
3182 int retval = xscale_verify_pointer(CMD_CTX, xscale);
3183 if (retval != ERROR_OK)
3186 if (target->state != TARGET_HALTED) {
3187 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3191 bool icache = false;
3192 if (strcmp(CMD_NAME, "icache") == 0)
3193 icache = true;
3194 if (CMD_ARGC >= 1) {
3195 bool enable;
3196 COMMAND_PARSE_ENABLE(CMD_ARGV[0], enable);
3197 if (icache) {
3198 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled = enable;
3199 if (enable)
3200 xscale_enable_mmu_caches(target, 0, 0, 1);
3201 else
3202 xscale_disable_mmu_caches(target, 0, 0, 1);
3203 } else {
3204 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled = enable;
3205 if (enable)
3206 xscale_enable_mmu_caches(target, 0, 1, 0);
3207 else
3208 xscale_disable_mmu_caches(target, 0, 1, 0);
3209 }
3210 }
3212 bool enabled = icache ?
3213 xscale->armv4_5_mmu.armv4_5_cache.i_cache_enabled :
3214 xscale->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled;
3215 const char *msg = enabled ? "enabled" : "disabled";
3216 command_print(CMD_CTX, "%s %s", CMD_NAME, msg);
3221 static const struct {
3225 { "fiq", DCSR_TF, },
3226 { "irq", DCSR_TI, },
3227 { "dabt", DCSR_TD, },
3228 { "pabt", DCSR_TA, },
3229 { "swi", DCSR_TS, },
3230 { "undef", DCSR_TU, },
3231 { "reset", DCSR_TR, },
3234 COMMAND_HANDLER(xscale_handle_vector_catch_command)
3236 struct target *target = get_current_target(CMD_CTX);
3237 struct xscale_common *xscale = target_to_xscale(target);
3239 uint32_t dcsr_value;
3241 struct reg *dcsr_reg = &xscale->reg_cache->reg_list[XSCALE_DCSR];
3243 retval = xscale_verify_pointer(CMD_CTX, xscale);
3244 if (retval != ERROR_OK)
3247 dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
3249 if (CMD_ARGC == 1) {
3250 if (strcmp(CMD_ARGV[0], "all") == 0) {
3251 catch = DCSR_TRAP_MASK;
3253 } else if (strcmp(CMD_ARGV[0], "none") == 0) {
3258 while (CMD_ARGC-- > 0) {
3260 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
3261 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name))
3262 continue;
3263 catch |= vec_ids[i].mask;
3264 break;
3265 }
3266 if (i == ARRAY_SIZE(vec_ids)) {
3267 LOG_ERROR("No vector '%s'", CMD_ARGV[CMD_ARGC]);
3268 return ERROR_COMMAND_SYNTAX_ERROR;
3271 *(uint32_t *)(dcsr_reg->value) &= ~DCSR_TRAP_MASK;
3272 *(uint32_t *)(dcsr_reg->value) |= catch;
3273 xscale_write_dcsr(target, -1, -1);
3276 dcsr_value = buf_get_u32(dcsr_reg->value, 0, 32);
3277 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
3278 command_print(CMD_CTX, "%15s: %s", vec_ids[i].name,
3279 (dcsr_value & vec_ids[i].mask) ? "catch" : "ignore");
3286 COMMAND_HANDLER(xscale_handle_vector_table_command)
3288 struct target *target = get_current_target(CMD_CTX);
3289 struct xscale_common *xscale = target_to_xscale(target);
3293 retval = xscale_verify_pointer(CMD_CTX, xscale);
3294 if (retval != ERROR_OK)
3297 if (CMD_ARGC == 0) { /* print current settings */
3300 command_print(CMD_CTX, "active user-set static vectors:");
3301 for (idx = 1; idx < 8; idx++)
3302 if (xscale->static_low_vectors_set & (1 << idx))
3303 command_print(CMD_CTX,
3304 "low %d: 0x%" PRIx32,
3306 xscale->static_low_vectors[idx]);
3307 for (idx = 1; idx < 8; idx++)
3308 if (xscale->static_high_vectors_set & (1 << idx))
3309 command_print(CMD_CTX,
3310 "high %d: 0x%" PRIx32,
3312 xscale->static_high_vectors[idx]);
3320 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], idx);
3322 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], vec);
3324 if (idx < 1 || idx >= 8)
3327 if (!err && strcmp(CMD_ARGV[0], "low") == 0) {
3328 xscale->static_low_vectors_set |= (1<<idx);
3329 xscale->static_low_vectors[idx] = vec;
3330 } else if (!err && (strcmp(CMD_ARGV[0], "high") == 0)) {
3331 xscale->static_high_vectors_set |= (1<<idx);
3332 xscale->static_high_vectors[idx] = vec;
3338 return ERROR_COMMAND_SYNTAX_ERROR;
3344 COMMAND_HANDLER(xscale_handle_trace_buffer_command)
3346 struct target *target = get_current_target(CMD_CTX);
3347 struct xscale_common *xscale = target_to_xscale(target);
3348 uint32_t dcsr_value;
3351 retval = xscale_verify_pointer(CMD_CTX, xscale);
3352 if (retval != ERROR_OK)
3355 if (target->state != TARGET_HALTED) {
3356 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3360 if (CMD_ARGC >= 1) {
3361 if (strcmp("enable", CMD_ARGV[0]) == 0)
3362 xscale->trace.mode = XSCALE_TRACE_WRAP; /* default */
3363 else if (strcmp("disable", CMD_ARGV[0]) == 0)
3364 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3366 return ERROR_COMMAND_SYNTAX_ERROR;
3369 if (CMD_ARGC >= 2 && xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3370 if (strcmp("fill", CMD_ARGV[1]) == 0) {
3371 int buffcount = 1; /* default */
3372 if (CMD_ARGC >= 3)
3373 COMMAND_PARSE_NUMBER(int, CMD_ARGV[2], buffcount);
3374 if (buffcount < 1) { /* invalid */
3375 command_print(CMD_CTX, "fill buffer count must be > 0");
3376 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3377 return ERROR_COMMAND_SYNTAX_ERROR;
3379 xscale->trace.buffer_fill = buffcount;
3380 xscale->trace.mode = XSCALE_TRACE_FILL;
3381 } else if (strcmp("wrap", CMD_ARGV[1]) == 0)
3382 xscale->trace.mode = XSCALE_TRACE_WRAP;
3384 xscale->trace.mode = XSCALE_TRACE_DISABLED;
3385 return ERROR_COMMAND_SYNTAX_ERROR;
3389 if (xscale->trace.mode != XSCALE_TRACE_DISABLED) {
3390 char fill_string[12];
3391 sprintf(fill_string, "fill %" PRId32, xscale->trace.buffer_fill);
3392 command_print(CMD_CTX, "trace buffer enabled (%s)",
3393 (xscale->trace.mode == XSCALE_TRACE_FILL)
3394 ? fill_string : "wrap");
3396 command_print(CMD_CTX, "trace buffer disabled");
3398 dcsr_value = buf_get_u32(xscale->reg_cache->reg_list[XSCALE_DCSR].value, 0, 32);
3399 if (xscale->trace.mode == XSCALE_TRACE_FILL)
3400 xscale_write_dcsr_sw(target, (dcsr_value & 0xfffffffc) | 2);
3402 xscale_write_dcsr_sw(target, dcsr_value & 0xfffffffc);
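/* Only DCSR bits [1:0] are touched here: bit 1 is set for fill mode and both
 * bits are cleared for wrap or disabled; the remaining DCSR bits are kept. */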
3407 COMMAND_HANDLER(xscale_handle_trace_image_command)
3409 struct target *target = get_current_target(CMD_CTX);
3410 struct xscale_common *xscale = target_to_xscale(target);
3414 return ERROR_COMMAND_SYNTAX_ERROR;
3416 retval = xscale_verify_pointer(CMD_CTX, xscale);
3417 if (retval != ERROR_OK)
3420 if (xscale->trace.image) {
3421 image_close(xscale->trace.image);
3422 free(xscale->trace.image);
3423 command_print(CMD_CTX, "previously loaded image found and closed");
3426 xscale->trace.image = malloc(sizeof(struct image));
3427 xscale->trace.image->base_address_set = 0;
3428 xscale->trace.image->start_address_set = 0;
3430 /* a base address isn't always necessary, default to 0x0 (i.e. don't relocate) */
3431 if (CMD_ARGC >= 2) {
3432 xscale->trace.image->base_address_set = 1;
3433 COMMAND_PARSE_NUMBER(llong, CMD_ARGV[1], xscale->trace.image->base_address);
3434 } else
3435 xscale->trace.image->base_address_set = 0;
3437 if (image_open(xscale->trace.image, CMD_ARGV[0],
3438 (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK) {
3439 free(xscale->trace.image);
3440 xscale->trace.image = NULL;
3447 COMMAND_HANDLER(xscale_handle_dump_trace_command)
3449 struct target *target = get_current_target(CMD_CTX);
3450 struct xscale_common *xscale = target_to_xscale(target);
3451 struct xscale_trace_data *trace_data;
3455 retval = xscale_verify_pointer(CMD_CTX, xscale);
3456 if (retval != ERROR_OK)
3459 if (target->state != TARGET_HALTED) {
3460 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3465 return ERROR_COMMAND_SYNTAX_ERROR;
3467 trace_data = xscale->trace.data;
3470 command_print(CMD_CTX, "no trace data collected");
3474 if (fileio_open(&file, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY) != ERROR_OK)
3477 while (trace_data) {
3480 fileio_write_u32(&file, trace_data->chkpt0);
3481 fileio_write_u32(&file, trace_data->chkpt1);
3482 fileio_write_u32(&file, trace_data->last_instruction);
3483 fileio_write_u32(&file, trace_data->depth);
3485 for (i = 0; i < trace_data->depth; i++)
3486 fileio_write_u32(&file, trace_data->entries[i].data |
3487 ((trace_data->entries[i].type & 0xffff) << 16));
3489 trace_data = trace_data->next;
3492 fileio_close(&file);
3497 COMMAND_HANDLER(xscale_handle_analyze_trace_buffer_command)
3499 struct target *target = get_current_target(CMD_CTX);
3500 struct xscale_common *xscale = target_to_xscale(target);
3503 retval = xscale_verify_pointer(CMD_CTX, xscale);
3504 if (retval != ERROR_OK)
3507 xscale_analyze_trace(target, CMD_CTX);
3512 COMMAND_HANDLER(xscale_handle_cp15)
3514 struct target *target = get_current_target(CMD_CTX);
3515 struct xscale_common *xscale = target_to_xscale(target);
3518 retval = xscale_verify_pointer(CMD_CTX, xscale);
3519 if (retval != ERROR_OK)
3522 if (target->state != TARGET_HALTED) {
3523 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
3526 uint32_t reg_no = 0;
3527 struct reg *reg = NULL;
3529 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg_no);
3530 /*translate from xscale cp15 register no to openocd register*/
3533 reg_no = XSCALE_MAINID;
3536 reg_no = XSCALE_CTRL;
3539 reg_no = XSCALE_TTB;
3542 reg_no = XSCALE_DAC;
3545 reg_no = XSCALE_FSR;
3548 reg_no = XSCALE_FAR;
3551 reg_no = XSCALE_PID;
3554 reg_no = XSCALE_CPACCESS;
3557 command_print(CMD_CTX, "invalid register number");
3558 return ERROR_COMMAND_SYNTAX_ERROR;
3560 reg = &xscale->reg_cache->reg_list[reg_no];
3563 if (CMD_ARGC == 1) {
3566 /* read the selected cp15 register */
3567 xscale_get_reg(reg);
3568 value = buf_get_u32(reg->value, 0, 32);
3569 command_print(CMD_CTX, "%s (/%i): 0x%" PRIx32 "", reg->name, (int)(reg->size),
3570 value);
3571 } else if (CMD_ARGC == 2) {
3573 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
3575 /* send CP write request (command 0x41) */
3576 xscale_send_u32(target, 0x41);
3578 /* send CP register number */
3579 xscale_send_u32(target, reg_no);
3581 /* send CP register value */
3582 xscale_send_u32(target, value);
3584 /* execute cpwait to ensure outstanding operations complete */
3585 xscale_send_u32(target, 0x53);
3587 return ERROR_COMMAND_SYNTAX_ERROR;
3592 static const struct command_registration xscale_exec_command_handlers[] = {
3594 .name = "cache_info",
3595 .handler = xscale_handle_cache_info_command,
3596 .mode = COMMAND_EXEC,
3597 .help = "display information about CPU caches",
3601 .handler = xscale_handle_mmu_command,
3602 .mode = COMMAND_EXEC,
3603 .help = "enable or disable the MMU",
3604 .usage = "['enable'|'disable']",
3608 .handler = xscale_handle_idcache_command,
3609 .mode = COMMAND_EXEC,
3610 .help = "display ICache state, optionally enabling or "
3612 .usage = "['enable'|'disable']",
3616 .handler = xscale_handle_idcache_command,
3617 .mode = COMMAND_EXEC,
3618 .help = "display DCache state, optionally enabling or "
3620 .usage = "['enable'|'disable']",
3623 .name = "vector_catch",
3624 .handler = xscale_handle_vector_catch_command,
3625 .mode = COMMAND_EXEC,
3626 .help = "set or display mask of vectors "
3627 "that should trigger debug entry",
3628 .usage = "['all'|'none'|'fiq'|'irq'|'dabt'|'pabt'|'swi'|'undef'|'reset']",
3631 .name = "vector_table",
3632 .handler = xscale_handle_vector_table_command,
3633 .mode = COMMAND_EXEC,
3634 .help = "set vector table entry in mini-ICache, "
3635 "or display current tables",
3636 .usage = "[('high'|'low') index code]",
3639 .name = "trace_buffer",
3640 .handler = xscale_handle_trace_buffer_command,
3641 .mode = COMMAND_EXEC,
3642 .help = "display trace buffer status, enable or disable "
3643 "tracing, and optionally reconfigure trace mode",
3644 .usage = "['enable'|'disable' ['fill' [number]|'wrap']]",
3647 .name = "dump_trace",
3648 .handler = xscale_handle_dump_trace_command,
3649 .mode = COMMAND_EXEC,
3650 .help = "dump content of trace buffer to file",
3651 .usage = "filename",
3654 .name = "analyze_trace",
3655 .handler = xscale_handle_analyze_trace_buffer_command,
3656 .mode = COMMAND_EXEC,
3657 .help = "analyze content of trace buffer",
3661 .name = "trace_image",
3662 .handler = xscale_handle_trace_image_command,
3663 .mode = COMMAND_EXEC,
3664 .help = "load image from file to address (default 0)",
3665 .usage = "filename [offset [filetype]]",
3669 .handler = xscale_handle_cp15,
3670 .mode = COMMAND_EXEC,
3671 .help = "Read or write coprocessor 15 register.",
3672 .usage = "register [value]",
3674 COMMAND_REGISTRATION_DONE
3676 static const struct command_registration xscale_any_command_handlers[] = {
3678 .name = "debug_handler",
3679 .handler = xscale_handle_debug_handler_command,
3680 .mode = COMMAND_ANY,
3681 .help = "Change address used for debug handler.",
3682 .usage = "<target> <address>",
3685 .name = "cache_clean_address",
3686 .handler = xscale_handle_cache_clean_address_command,
3687 .mode = COMMAND_ANY,
3688 .help = "Change address used for cleaning data cache.",
3692 .chain = xscale_exec_command_handlers,
3694 COMMAND_REGISTRATION_DONE
3696 static const struct command_registration xscale_command_handlers[] = {
3698 .chain = arm_command_handlers,
3702 .mode = COMMAND_ANY,
3703 .help = "xscale command group",
3705 .chain = xscale_any_command_handlers,
3707 COMMAND_REGISTRATION_DONE
3710 struct target_type xscale_target = {
3713 .poll = xscale_poll,
3714 .arch_state = xscale_arch_state,
3716 .target_request_data = NULL,
3718 .halt = xscale_halt,
3719 .resume = xscale_resume,
3720 .step = xscale_step,
3722 .assert_reset = xscale_assert_reset,
3723 .deassert_reset = xscale_deassert_reset,
3724 .soft_reset_halt = NULL,
3726 /* REVISIT on some cores, allow exporting iwmmxt registers ... */
3727 .get_gdb_reg_list = arm_get_gdb_reg_list,
3729 .read_memory = xscale_read_memory,
3730 .read_phys_memory = xscale_read_phys_memory,
3731 .write_memory = xscale_write_memory,
3732 .write_phys_memory = xscale_write_phys_memory,
3734 .checksum_memory = arm_checksum_memory,
3735 .blank_check_memory = arm_blank_check_memory,
3737 .run_algorithm = armv4_5_run_algorithm,
3739 .add_breakpoint = xscale_add_breakpoint,
3740 .remove_breakpoint = xscale_remove_breakpoint,
3741 .add_watchpoint = xscale_add_watchpoint,
3742 .remove_watchpoint = xscale_remove_watchpoint,
3744 .commands = xscale_command_handlers,
3745 .target_create = xscale_target_create,
3746 .init_target = xscale_init_target,
3748 .virt2phys = xscale_virt2phys,