This supports both 0.11 and 0.13 versions of the debug spec.
Support for `-rtos riscv` will come in a separate commit since it was
easy to separate out, and is likely to be more controversial.
Flash support for the SiFive boards will also come in a later commit.
Change-Id: I1d38fe669c2041b4e21a5c54a091594aac3e2190
Signed-off-by: Tim Newsome <tim@sifive.com>
Reviewed-on: http://openocd.zylin.com/4578
Tested-by: jenkins
Reviewed-by: Liviu Ionescu <ilg@livius.net>
Reviewed-by: Matthias Welwarsky <matthias@welwarsky.de>
"timer" or any new group created with addreg command.
@end deffn
+@section RISC-V Architecture
+
+@uref{http://riscv.org/, RISC-V} is a free and open ISA. OpenOCD supports JTAG
+debug of targets that implement version 0.11 and 0.13 of the RISC-V Debug
+Specification.
+
+@subsection RISC-V Terminology
+
+A @emph{hart} is a hardware thread. A hart may share resources (e.g. FPU) with
+another hart, or may be a separate core. RISC-V treats those the same, and
+OpenOCD exposes each hart as a separate core.
+
+@subsection RISC-V Debug Configuration Commands
+
+@deffn Command {riscv expose_csrs} n0[-m0][,n1[-m1]]...
+Configure a list of inclusive ranges for CSRs to expose in addition to the
+standard ones. This must be executed before @command{init}.
+
+By default OpenOCD attempts to expose only CSRs that are mentioned in a spec,
+and then only if the corresponding extension appears to be implemented. This
+command can be used if OpenOCD gets this wrong, or a target implements custom
+CSRs.
+@end deffn
+
+@deffn Command {riscv set_command_timeout_sec} [seconds]
+Set the wall-clock timeout (in seconds) for individual commands. The default
+should work fine for all but the slowest targets (e.g. simulators).
+@end deffn
+
+@deffn Command {riscv set_reset_timeout_sec} [seconds]
+Set the maximum time to wait for a hart to come out of reset after reset is
+deasserted.
+@end deffn
+
+@deffn Command {riscv set_scratch_ram} none|[address]
+Set the address of 16 bytes of scratch RAM the debugger can use, or 'none'.
+This is used to access 64-bit floating point registers on 32-bit targets.
+@end deffn
+
+@deffn Command {riscv set_prefer_sba} on|off
+When on, prefer to use System Bus Access to access memory. When off, prefer to
+use the Program Buffer to access memory.
+@end deffn
+
+@subsection RISC-V Authentication Commands
+
+The following commands can be used to authenticate to a RISC-V system. E.g. a
+trivial challenge-response protocol could be implemented as follows in a
+configuration file, immediately following @command{init}:
+@example
+set challenge [ocd_riscv authdata_read]
+riscv authdata_write [expr $challenge + 1]
+@end example
+
+@deffn Command {riscv authdata_read}
+Return the 32-bit value read from authdata. Note that to get read value back in
+a TCL script, it needs to be invoked as @command{ocd_riscv authdata_read}.
+@end deffn
+
+@deffn Command {riscv authdata_write} value
+Write the 32-bit value to authdata.
+@end deffn
+
+@subsection RISC-V DMI Commands
+
+The following commands allow direct access to the Debug Module Interface, which
+can be used to interact with custom debug features.
+
+@deffn Command {riscv dmi_read} address
+Perform a 32-bit DMI read at address, returning the value. Note that to get
+read value back in a TCL script, it needs to be invoked as @command{ocd_riscv
+dmi_read}.
+@end deffn
+
+@deffn Command {riscv dmi_write} address value
+Perform a 32-bit DMI write of value at address.
+@end deffn
+
@anchor{softwaredebugmessagesandtracing}
@section Software Debug Messages and Tracing
@cindex Linux-ARM DCC support
*/
#define ERROR_FAIL (-4)
#define ERROR_WAIT (-5)
+/* ERROR_TIMEOUT is already taken by winerror.h. */
+#define ERROR_TIMEOUT_REACHED (-6)
#endif /* OPENOCD_HELPER_LOG_H */
OOCD_TRACE_FILES =
endif
-%C%_libtarget_la_LIBADD = %D%/openrisc/libopenrisc.la
+%C%_libtarget_la_LIBADD = %D%/openrisc/libopenrisc.la \
+ %D%/riscv/libriscv.la
+
STARTUP_TCL_SRCS += %D%/startup.tcl
%D%/arm_cti.h
include %D%/openrisc/Makefile.am
+include %D%/riscv/Makefile.am
--- /dev/null
+noinst_LTLIBRARIES += %D%/libriscv.la
+%C%_libriscv_la_SOURCES = \
+ %D%/asm.h \
+ %D%/batch.h \
+ %D%/debug_defines.h \
+ %D%/encoding.h \
+ %D%/gdb_regs.h \
+ %D%/opcodes.h \
+ %D%/program.h \
+ %D%/riscv.h \
+ %D%/batch.c \
+ %D%/program.c \
+ %D%/riscv-011.c \
+ %D%/riscv-013.c \
+ %D%/riscv.c \
+ %D%/riscv_semihosting.c
--- /dev/null
+#ifndef TARGET__RISCV__ASM_H
+#define TARGET__RISCV__ASM_H
+
+#include "riscv.h"
+
+/*** Version-independent functions that we don't want in the main address space. ***/
+
+static uint32_t load(const struct target *target, unsigned int rd,
+		unsigned int base, uint16_t offset) __attribute__ ((unused));
+/* Build the instruction word for an XLEN-wide load into register rd from
+ * offset(base): lw on RV32 harts, ld on RV64 harts (encoders come from
+ * opcodes.h via riscv.h). */
+static uint32_t load(const struct target *target, unsigned int rd,
+		unsigned int base, uint16_t offset)
+{
+	unsigned int xlen = riscv_xlen(target);
+	if (xlen == 32)
+		return lw(rd, base, offset);
+	if (xlen == 64)
+		return ld(rd, base, offset);
+	assert(0);
+	return 0; /* Silence -Werror=return-type */
+}
+
+static uint32_t store(const struct target *target, unsigned int src,
+		unsigned int base, uint16_t offset) __attribute__ ((unused));
+/* Build the instruction word for an XLEN-wide store of register src to
+ * offset(base): sw on RV32 harts, sd on RV64 harts (encoders come from
+ * opcodes.h via riscv.h). */
+static uint32_t store(const struct target *target, unsigned int src,
+		unsigned int base, uint16_t offset)
+{
+	unsigned int xlen = riscv_xlen(target);
+	if (xlen == 32)
+		return sw(src, base, offset);
+	if (xlen == 64)
+		return sd(src, base, offset);
+	assert(0);
+	return 0; /* Silence -Werror=return-type */
+}
+
+#endif
--- /dev/null
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "batch.h"
+#include "debug_defines.h"
+#include "riscv.h"
+
+#define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
+#define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
+
+static void dump_field(const struct scan_field *field);
+
+/* Allocate a batch able to hold `scans` caller scans, with `idle`
+ * Run-Test/Idle cycles inserted after each scan.  Returns NULL if any
+ * allocation fails.  Free with riscv_batch_free(). */
+struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_t idle)
+{
+	/* Reserve headroom for the NOP scans that riscv_batch_add_dmi_read()
+	 * and riscv_batch_run() append internally. */
+	scans += 4;
+	struct riscv_batch *out = calloc(1, sizeof(*out));
+	if (!out)
+		return NULL;
+	out->target = target;
+	out->allocated_scans = scans;
+	out->used_scans = 0;
+	out->idle_count = idle;
+	/* Each scan shifts up to 64 bits (8 bytes) in each direction. */
+	out->data_out = malloc(sizeof(*out->data_out) * scans * sizeof(uint64_t));
+	out->data_in = malloc(sizeof(*out->data_in) * scans * sizeof(uint64_t));
+	out->fields = malloc(sizeof(*out->fields) * scans);
+	out->read_keys = malloc(sizeof(*out->read_keys) * scans);
+	out->read_keys_used = 0;
+	out->last_scan = RISCV_SCAN_TYPE_INVALID;
+	if (!out->data_out || !out->data_in || !out->fields || !out->read_keys) {
+		/* Unchecked malloc would crash later; fail cleanly instead. */
+		free(out->data_out);
+		free(out->data_in);
+		free(out->fields);
+		free(out->read_keys);
+		free(out);
+		return NULL;
+	}
+	return out;
+}
+
+/* Release all memory owned by a batch created with riscv_batch_alloc(). */
+void riscv_batch_free(struct riscv_batch *batch)
+{
+	free(batch->data_in);
+	free(batch->data_out);
+	free(batch->fields);
+	/* read_keys is allocated in riscv_batch_alloc() too; not freeing it
+	 * here leaked one array per batch. */
+	free(batch->read_keys);
+	free(batch);
+}
+
+/* True when the batch cannot take another caller scan while still leaving
+ * room for the NOPs added internally (riscv_batch_alloc() reserved 4 extra
+ * slots for exactly this reason). */
+bool riscv_batch_full(struct riscv_batch *batch)
+{
+	return batch->used_scans + 4 > batch->allocated_scans;
+}
+
+/* Execute every queued scan on the JTAG adapter.  Returns ERROR_OK on
+ * success (including for an empty batch) or ERROR_FAIL if the JTAG queue
+ * could not be executed.  Captured in-values stay valid until the batch
+ * is freed, so read results can be fetched afterwards with
+ * riscv_batch_get_dmi_read(). */
+int riscv_batch_run(struct riscv_batch *batch)
+{
+	if (batch->used_scans == 0) {
+		LOG_DEBUG("Ignoring empty batch.");
+		return ERROR_OK;
+	}
+
+	/* Tell the server we're still alive; a big batch on a slow adapter
+	 * can take a while. */
+	keep_alive();
+
+	LOG_DEBUG("running a batch of %ld scans", (long)batch->used_scans);
+	/* A trailing NOP shifts out the response to the last real operation. */
+	riscv_batch_add_nop(batch);
+
+	for (size_t i = 0; i < batch->used_scans; ++i) {
+		jtag_add_dr_scan(batch->target->tap, 1, batch->fields + i, TAP_IDLE);
+		/* Extra Run-Test/Idle cycles give the DMI transaction time to
+		 * complete before the next scan starts. */
+		if (batch->idle_count > 0)
+			jtag_add_runtest(batch->idle_count, TAP_IDLE);
+	}
+
+	LOG_DEBUG("executing queue");
+	if (jtag_execute_queue() != ERROR_OK) {
+		LOG_ERROR("Unable to execute JTAG queue");
+		return ERROR_FAIL;
+	}
+
+	for (size_t i = 0; i < batch->used_scans; ++i)
+		dump_field(batch->fields + i);
+
+	return ERROR_OK;
+}
+
+/* Queue a DMI write of `data` to DMI register `address`.  The scan's
+ * capture buffer is pre-filled with a NOP pattern. */
+void riscv_batch_add_dmi_write(struct riscv_batch *batch, unsigned address, uint64_t data)
+{
+	assert(batch->used_scans < batch->allocated_scans);
+	size_t index = batch->used_scans;
+	struct scan_field *field = &batch->fields[index];
+	field->num_bits = riscv_dmi_write_u64_bits(batch->target);
+	field->out_value = (void *)(batch->data_out + index * sizeof(uint64_t));
+	field->in_value = (void *)(batch->data_in + index * sizeof(uint64_t));
+	riscv_fill_dmi_write_u64(batch->target, (char *)field->out_value, address, data);
+	riscv_fill_dmi_nop_u64(batch->target, (char *)field->in_value);
+	batch->last_scan = RISCV_SCAN_TYPE_WRITE;
+	batch->used_scans = index + 1;
+}
+
+/* Queue a DMI read of DMI register `address`.  Returns a key that,
+ * after riscv_batch_run(), can be passed to riscv_batch_get_dmi_read()
+ * to fetch the value.  Consumes two scan slots: the read itself plus a
+ * NOP that shifts out its response. */
+size_t riscv_batch_add_dmi_read(struct riscv_batch *batch, unsigned address)
+{
+	assert(batch->used_scans < batch->allocated_scans);
+	struct scan_field *field = batch->fields + batch->used_scans;
+	field->num_bits = riscv_dmi_write_u64_bits(batch->target);
+	field->out_value = (void *)(batch->data_out + batch->used_scans * sizeof(uint64_t));
+	field->in_value = (void *)(batch->data_in + batch->used_scans * sizeof(uint64_t));
+	riscv_fill_dmi_read_u64(batch->target, (char *)field->out_value, address);
+	riscv_fill_dmi_nop_u64(batch->target, (char *)field->in_value);
+	batch->last_scan = RISCV_SCAN_TYPE_READ;
+	batch->used_scans++;
+
+	/* FIXME We get the read response back on the next scan. For now I'm
+	 * just sticking a NOP in there, but this should be coalesced away. */
+	riscv_batch_add_nop(batch);
+
+	/* The key records the index of the NOP scan, because that is the scan
+	 * whose in-buffer captures the response to this read. */
+	batch->read_keys[batch->read_keys_used] = batch->used_scans - 1;
+	/* Log the buffer the data will actually land in (index used_scans-1,
+	 * matching riscv_batch_get_dmi_read(); the old "+1" printed the wrong
+	 * address).  %p requires void *. */
+	LOG_DEBUG("read key %u for batch 0x%p is %u (0x%p)",
+			(unsigned) batch->read_keys_used, (void *) batch,
+			(unsigned) (batch->used_scans - 1),
+			(void *) (batch->data_in + sizeof(uint64_t) * (batch->used_scans - 1)));
+	return batch->read_keys_used++;
+}
+
+/* Return the 64-bit value captured for a previously scheduled DMI read,
+ * identified by the key returned from riscv_batch_add_dmi_read().  The
+ * bytes in data_in are assembled little-endian. */
+uint64_t riscv_batch_get_dmi_read(struct riscv_batch *batch, size_t key)
+{
+	assert(key < batch->read_keys_used);
+	size_t index = batch->read_keys[key];
+	assert(index <= batch->used_scans);
+	const uint8_t *base = batch->data_in + 8 * index;
+	uint64_t value = 0;
+	for (unsigned int byte = 0; byte < 8; byte++)
+		value |= ((uint64_t) base[byte]) << (8 * byte);
+	return value;
+}
+
+/* Append a scan that performs no DMI operation.  Used as padding so the
+ * response to the preceding operation can be shifted out. */
+void riscv_batch_add_nop(struct riscv_batch *batch)
+{
+	assert(batch->used_scans < batch->allocated_scans);
+	size_t index = batch->used_scans;
+	struct scan_field *field = &batch->fields[index];
+	field->num_bits = riscv_dmi_write_u64_bits(batch->target);
+	field->out_value = (void *)(batch->data_out + index * sizeof(uint64_t));
+	field->in_value = (void *)(batch->data_in + index * sizeof(uint64_t));
+	riscv_fill_dmi_nop_u64(batch->target, (char *)field->out_value);
+	riscv_fill_dmi_nop_u64(batch->target, (char *)field->in_value);
+	batch->last_scan = RISCV_SCAN_TYPE_NOP;
+	batch->used_scans = index + 1;
+	LOG_DEBUG("  added NOP with in_value=0x%p", field->in_value);
+}
+
+/* Pretty-print one DMI scan at debug log level: the operation shifted out,
+ * and (when captured) the status and data that came back.  `static` to
+ * match the forward declaration above; without it the symbol's linkage
+ * would depend on that earlier declaration being present. */
+static void dump_field(const struct scan_field *field)
+{
+	/* Indexed by the 2-bit DMI op field: nop/read/write/reserved on the
+	 * way out; success/reserved/failed/busy on the way back. */
+	static const char * const op_string[] = {"-", "r", "w", "?"};
+	static const char * const status_string[] = {"+", "?", "F", "b"};
+
+	if (debug_level < LOG_LVL_DEBUG)
+		return;
+
+	assert(field->out_value != NULL);
+	uint64_t out = buf_get_u64(field->out_value, 0, field->num_bits);
+	unsigned int out_op = get_field(out, DTM_DMI_OP);
+	unsigned int out_data = get_field(out, DTM_DMI_DATA);
+	unsigned int out_address = out >> DTM_DMI_ADDRESS_OFFSET;
+
+	if (field->in_value) {
+		uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
+		unsigned int in_op = get_field(in, DTM_DMI_OP);
+		unsigned int in_data = get_field(in, DTM_DMI_DATA);
+		unsigned int in_address = in >> DTM_DMI_ADDRESS_OFFSET;
+
+		log_printf_lf(LOG_LVL_DEBUG,
+				__FILE__, __LINE__, __PRETTY_FUNCTION__,
+				"%db %s %08x @%02x -> %s %08x @%02x",
+				field->num_bits,
+				op_string[out_op], out_data, out_address,
+				status_string[in_op], in_data, in_address);
+	} else {
+		log_printf_lf(LOG_LVL_DEBUG,
+				__FILE__, __LINE__, __PRETTY_FUNCTION__, "%db %s %08x @%02x -> ?",
+				field->num_bits, op_string[out_op], out_data, out_address);
+	}
+}
--- /dev/null
+#ifndef TARGET__RISCV__SCANS_H
+#define TARGET__RISCV__SCANS_H
+
+#include "target/target.h"
+#include "jtag/jtag.h"
+
+enum riscv_scan_type {
+ RISCV_SCAN_TYPE_INVALID,
+ RISCV_SCAN_TYPE_NOP,
+ RISCV_SCAN_TYPE_READ,
+ RISCV_SCAN_TYPE_WRITE,
+};
+
+/* A batch of multiple JTAG scans, which are grouped together to avoid the
+ * overhead of some JTAG adapters when sending single commands. This is
+ * designed to support block copies, as that's what we actually need to go
+ * fast. */
+struct riscv_batch {
+	struct target *target;
+
+	/* Capacity (in scans) and number of scans queued so far. */
+	size_t allocated_scans;
+	size_t used_scans;
+
+	/* Run-Test/Idle cycles inserted after every scan. */
+	size_t idle_count;
+
+	/* Shift buffers: 8 bytes (one uint64_t) per scan, each direction. */
+	uint8_t *data_out;
+	uint8_t *data_in;
+	struct scan_field *fields;
+
+	/* In JTAG we scan out the previous value's output when performing a
+	 * scan. This is a pain for users, so we just provide them the
+	 * illusion of not having to do this by eliding all but the last NOP.
+	 * */
+	enum riscv_scan_type last_scan;
+
+	/* The read keys: read_keys[key] holds the scan index whose in-buffer
+	 * captures the data for that scheduled read. */
+	size_t *read_keys;
+	size_t read_keys_used;
+};
+
+/* Allocates (or frees) a new scan set. "scans" is the maximum number of JTAG
+ * scans that can be issued to this object, and idle is the number of JTAG idle
+ * cycles between every real scan. */
+struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_t idle);
+void riscv_batch_free(struct riscv_batch *batch);
+
+/* Checks to see if this batch is full. */
+bool riscv_batch_full(struct riscv_batch *batch);
+
+/* Executes this scan batch. */
+int riscv_batch_run(struct riscv_batch *batch);
+
+/* Adds a DMI write to this batch. */
+void riscv_batch_add_dmi_write(struct riscv_batch *batch, unsigned address, uint64_t data);
+
+/* DMI reads must be handled in two parts: the first one schedules a read and
+ * provides a key, the second one actually obtains the value of that read. */
+size_t riscv_batch_add_dmi_read(struct riscv_batch *batch, unsigned address);
+uint64_t riscv_batch_get_dmi_read(struct riscv_batch *batch, size_t key);
+
+/* Scans in a NOP. */
+void riscv_batch_add_nop(struct riscv_batch *batch);
+
+#endif
--- /dev/null
+#define DTM_IDCODE 0x01
+/*
+* Identifies the release version of this part.
+ */
+#define DTM_IDCODE_VERSION_OFFSET 28
+#define DTM_IDCODE_VERSION_LENGTH 4
+#define DTM_IDCODE_VERSION (0xfU << DTM_IDCODE_VERSION_OFFSET)
+/*
+* Identifies the designer's part number of this part.
+ */
+#define DTM_IDCODE_PARTNUMBER_OFFSET 12
+#define DTM_IDCODE_PARTNUMBER_LENGTH 16
+#define DTM_IDCODE_PARTNUMBER (0xffffU << DTM_IDCODE_PARTNUMBER_OFFSET)
+/*
+* Identifies the designer/manufacturer of this part. Bits 6:0 must be
+* bits 6:0 of the designer/manufacturer's Identification Code as
+* assigned by JEDEC Standard JEP106. Bits 10:7 contain the modulo-16
+* count of the number of continuation characters (0x7f) in that same
+* Identification Code.
+ */
+#define DTM_IDCODE_MANUFID_OFFSET 1
+#define DTM_IDCODE_MANUFID_LENGTH 11
+#define DTM_IDCODE_MANUFID (0x7ffU << DTM_IDCODE_MANUFID_OFFSET)
+#define DTM_IDCODE_1_OFFSET 0
+#define DTM_IDCODE_1_LENGTH 1
+#define DTM_IDCODE_1 (0x1U << DTM_IDCODE_1_OFFSET)
+#define DTM_DTMCS 0x10
+/*
+* Writing 1 to this bit does a hard reset of the DTM,
+* causing the DTM to forget about any outstanding DMI transactions.
+* In general this should only be used when the Debugger has
+* reason to expect that the outstanding DMI transaction will never
+* complete (e.g. a reset condition caused an inflight DMI transaction to
+* be cancelled).
+ */
+#define DTM_DTMCS_DMIHARDRESET_OFFSET 17
+#define DTM_DTMCS_DMIHARDRESET_LENGTH 1
+#define DTM_DTMCS_DMIHARDRESET (0x1U << DTM_DTMCS_DMIHARDRESET_OFFSET)
+/*
+* Writing 1 to this bit clears the sticky error state
+* and allows the DTM to retry or complete the previous
+* transaction.
+ */
+#define DTM_DTMCS_DMIRESET_OFFSET 16
+#define DTM_DTMCS_DMIRESET_LENGTH 1
+#define DTM_DTMCS_DMIRESET (0x1U << DTM_DTMCS_DMIRESET_OFFSET)
+/*
+* This is a hint to the debugger of the minimum number of
+* cycles a debugger should spend in
+* Run-Test/Idle after every DMI scan to avoid a `busy'
+* return code (\Fdmistat of 3). A debugger must still
+* check \Fdmistat when necessary.
+*
+* 0: It is not necessary to enter Run-Test/Idle at all.
+*
+* 1: Enter Run-Test/Idle and leave it immediately.
+*
+* 2: Enter Run-Test/Idle and stay there for 1 cycle before leaving.
+*
+* And so on.
+ */
+#define DTM_DTMCS_IDLE_OFFSET 12
+#define DTM_DTMCS_IDLE_LENGTH 3
+#define DTM_DTMCS_IDLE (0x7U << DTM_DTMCS_IDLE_OFFSET)
+/*
+* 0: No error.
+*
+* 1: Reserved. Interpret the same as 2.
+*
+* 2: An operation failed (resulted in \Fop of 2).
+*
+* 3: An operation was attempted while a DMI access was still in
+* progress (resulted in \Fop of 3).
+ */
+#define DTM_DTMCS_DMISTAT_OFFSET 10
+#define DTM_DTMCS_DMISTAT_LENGTH 2
+#define DTM_DTMCS_DMISTAT (0x3U << DTM_DTMCS_DMISTAT_OFFSET)
+/*
+* The size of \Faddress in \Rdmi.
+ */
+#define DTM_DTMCS_ABITS_OFFSET 4
+#define DTM_DTMCS_ABITS_LENGTH 6
+#define DTM_DTMCS_ABITS (0x3fU << DTM_DTMCS_ABITS_OFFSET)
+/*
+* 0: Version described in spec version 0.11.
+*
+* 1: Version described in spec version 0.13 (and later?), which
+* reduces the DMI data width to 32 bits.
+*
+* 15: Version not described in any available version of this spec.
+ */
+#define DTM_DTMCS_VERSION_OFFSET 0
+#define DTM_DTMCS_VERSION_LENGTH 4
+#define DTM_DTMCS_VERSION (0xfU << DTM_DTMCS_VERSION_OFFSET)
+#define DTM_DMI 0x11
+/*
+* Address used for DMI access. In Update-DR this value is used
+* to access the DM over the DMI.
+ */
+#define DTM_DMI_ADDRESS_OFFSET 34
+#define DTM_DMI_ADDRESS_LENGTH abits
+#define DTM_DMI_ADDRESS (((1L<<abits)-1) << DTM_DMI_ADDRESS_OFFSET)
+/*
+* The data to send to the DM over the DMI during Update-DR, and
+* the data returned from the DM as a result of the previous operation.
+ */
+#define DTM_DMI_DATA_OFFSET 2
+#define DTM_DMI_DATA_LENGTH 32
+#define DTM_DMI_DATA (0xffffffffULL << DTM_DMI_DATA_OFFSET)
+/*
+* When the debugger writes this field, it has the following meaning:
+*
+* 0: Ignore \Fdata and \Faddress. (nop)
+*
+* Don't send anything over the DMI during Update-DR.
+* This operation should never result in a busy or error response.
+* The address and data reported in the following Capture-DR
+* are undefined.
+*
+* 1: Read from \Faddress. (read)
+*
+* 2: Write \Fdata to \Faddress. (write)
+*
+* 3: Reserved.
+*
+* When the debugger reads this field, it means the following:
+*
+* 0: The previous operation completed successfully.
+*
+* 1: Reserved.
+*
+* 2: A previous operation failed. The data scanned into \Rdmi in
+* this access will be ignored. This status is sticky and can be
+* cleared by writing \Fdmireset in \Rdtmcs.
+*
+* This indicates that the DM itself responded with an error.
+* Note: there are no specified cases in which the DM would
+* respond with an error, and DMI is not required to support
+* returning errors.
+*
+* 3: An operation was attempted while a DMI request is still in
+* progress. The data scanned into \Rdmi in this access will be
+* ignored. This status is sticky and can be cleared by writing
+* \Fdmireset in \Rdtmcs. If a debugger sees this status, it
+* needs to give the target more TCK edges between Update-DR and
+* Capture-DR. The simplest way to do that is to add extra transitions
+* in Run-Test/Idle.
+*
+* (The DTM, DM, and/or component may be in different clock domains,
+* so synchronization may be required. Some relatively fixed number of
+* TCK ticks may be needed for the request to reach the DM, complete,
+* and for the response to be synchronized back into the TCK domain.)
+ */
+#define DTM_DMI_OP_OFFSET 0
+#define DTM_DMI_OP_LENGTH 2
+#define DTM_DMI_OP (0x3ULL << DTM_DMI_OP_OFFSET)
+#define CSR_DCSR 0x7b0
+/*
+* 0: There is no external debug support.
+*
+* 4: External debug support exists as it is described in this document.
+*
+* 15: There is external debug support, but it does not conform to any
+* available version of this spec.
+ */
+#define CSR_DCSR_XDEBUGVER_OFFSET 28
+#define CSR_DCSR_XDEBUGVER_LENGTH 4
+#define CSR_DCSR_XDEBUGVER (0xfU << CSR_DCSR_XDEBUGVER_OFFSET)
+/*
+* When 1, {\tt ebreak} instructions in Machine Mode enter Debug Mode.
+ */
+#define CSR_DCSR_EBREAKM_OFFSET 15
+#define CSR_DCSR_EBREAKM_LENGTH 1
+#define CSR_DCSR_EBREAKM (0x1U << CSR_DCSR_EBREAKM_OFFSET)
+/*
+* When 1, {\tt ebreak} instructions in Supervisor Mode enter Debug Mode.
+ */
+#define CSR_DCSR_EBREAKS_OFFSET 13
+#define CSR_DCSR_EBREAKS_LENGTH 1
+#define CSR_DCSR_EBREAKS (0x1U << CSR_DCSR_EBREAKS_OFFSET)
+/*
+* When 1, {\tt ebreak} instructions in User/Application Mode enter
+* Debug Mode.
+ */
+#define CSR_DCSR_EBREAKU_OFFSET 12
+#define CSR_DCSR_EBREAKU_LENGTH 1
+#define CSR_DCSR_EBREAKU (0x1U << CSR_DCSR_EBREAKU_OFFSET)
+/*
+* 0: Interrupts are disabled during single stepping.
+*
+* 1: Interrupts are enabled during single stepping.
+*
+* Implementations may hard wire this bit to 0.
+* The debugger must read back the value it
+* writes to check whether the feature is supported. If not
+* supported, interrupt behavior can be emulated by the debugger.
+ */
+#define CSR_DCSR_STEPIE_OFFSET 11
+#define CSR_DCSR_STEPIE_LENGTH 1
+#define CSR_DCSR_STEPIE (0x1U << CSR_DCSR_STEPIE_OFFSET)
+/*
+* 0: Increment counters as usual.
+*
+* 1: Don't increment any counters while in Debug Mode or on {\tt
+* ebreak} instructions that cause entry into Debug Mode. These
+* counters include the {\tt cycle} and {\tt instret} CSRs. This is
+* preferred for most debugging scenarios.
+*
+* An implementation may choose not to support writing to this bit.
+* The debugger must read back the value it writes to check whether
+* the feature is supported.
+ */
+#define CSR_DCSR_STOPCOUNT_OFFSET 10
+#define CSR_DCSR_STOPCOUNT_LENGTH 1
+#define CSR_DCSR_STOPCOUNT (0x1U << CSR_DCSR_STOPCOUNT_OFFSET)
+/*
+* 0: Increment timers as usual.
+*
+* 1: Don't increment any hart-local timers while in Debug Mode.
+*
+* An implementation may choose not to support writing to this bit.
+* The debugger must read back the value it writes to check whether
+* the feature is supported.
+ */
+#define CSR_DCSR_STOPTIME_OFFSET 9
+#define CSR_DCSR_STOPTIME_LENGTH 1
+#define CSR_DCSR_STOPTIME (0x1U << CSR_DCSR_STOPTIME_OFFSET)
+/*
+* Explains why Debug Mode was entered.
+*
+* When there are multiple reasons to enter Debug Mode in a single
+* cycle, hardware should set \Fcause to the cause with the highest
+* priority.
+*
+* 1: An {\tt ebreak} instruction was executed. (priority 3)
+*
+* 2: The Trigger Module caused a breakpoint exception. (priority 4)
+*
+* 3: The debugger requested entry to Debug Mode. (priority 2)
+*
+* 4: The hart single stepped because \Fstep was set. (priority 1)
+*
+* Other values are reserved for future use.
+ */
+#define CSR_DCSR_CAUSE_OFFSET 6
+#define CSR_DCSR_CAUSE_LENGTH 3
+#define CSR_DCSR_CAUSE (0x7U << CSR_DCSR_CAUSE_OFFSET)
+/*
+* When 1, \Fmprv in \Rmstatus takes effect during debug mode.
+* When 0, it is ignored during debug mode.
+* Implementing this bit is optional.
+* If not implemented it should be tied to 0.
+ */
+#define CSR_DCSR_MPRVEN_OFFSET 4
+#define CSR_DCSR_MPRVEN_LENGTH 1
+#define CSR_DCSR_MPRVEN (0x1U << CSR_DCSR_MPRVEN_OFFSET)
+/*
+* When set, there is a Non-Maskable-Interrupt (NMI) pending for the hart.
+*
+* Since an NMI can indicate a hardware error condition,
+* reliable debugging may no longer be possible once this bit becomes set.
+* This is implementation-dependent.
+ */
+#define CSR_DCSR_NMIP_OFFSET 3
+#define CSR_DCSR_NMIP_LENGTH 1
+#define CSR_DCSR_NMIP (0x1U << CSR_DCSR_NMIP_OFFSET)
+/*
+* When set and not in Debug Mode, the hart will only execute a single
+* instruction and then enter Debug Mode.
+* If the instruction does not complete due to an exception,
+* the hart will immediately enter Debug Mode before executing
+* the trap handler, with appropriate exception registers set.
+ */
+#define CSR_DCSR_STEP_OFFSET 2
+#define CSR_DCSR_STEP_LENGTH 1
+#define CSR_DCSR_STEP (0x1U << CSR_DCSR_STEP_OFFSET)
+/*
+* Contains the privilege level the hart was operating in when Debug
+* Mode was entered. The encoding is described in Table
+* \ref{tab:privlevel}. A debugger can change this value to change
+* the hart's privilege level when exiting Debug Mode.
+*
+* Not all privilege levels are supported on all harts. If the
+* encoding written is not supported or the debugger is not allowed to
+* change to it, the hart may change to any supported privilege level.
+ */
+#define CSR_DCSR_PRV_OFFSET 0
+#define CSR_DCSR_PRV_LENGTH 2
+#define CSR_DCSR_PRV (0x3U << CSR_DCSR_PRV_OFFSET)
+#define CSR_DPC 0x7b1
+#define CSR_DPC_DPC_OFFSET 0
+#define CSR_DPC_DPC_LENGTH MXLEN
+#define CSR_DPC_DPC (((1L<<MXLEN)-1) << CSR_DPC_DPC_OFFSET)
+#define CSR_DSCRATCH0 0x7b2
+#define CSR_DSCRATCH1 0x7b3
+#define CSR_TSELECT 0x7a0
+#define CSR_TSELECT_INDEX_OFFSET 0
+#define CSR_TSELECT_INDEX_LENGTH MXLEN
+#define CSR_TSELECT_INDEX (((1L<<MXLEN)-1) << CSR_TSELECT_INDEX_OFFSET)
+#define CSR_TDATA1 0x7a1
+/*
+* 0: There is no trigger at this \Rtselect.
+*
+* 1: The trigger is a legacy SiFive address match trigger. These
+* should not be implemented and aren't further documented here.
+*
+* 2: The trigger is an address/data match trigger. The remaining bits
+* in this register act as described in \Rmcontrol.
+*
+* 3: The trigger is an instruction count trigger. The remaining bits
+* in this register act as described in \Ricount.
+*
+* 4: The trigger is an interrupt trigger. The remaining bits
+* in this register act as described in \Ritrigger.
+*
+* 5: The trigger is an exception trigger. The remaining bits
+* in this register act as described in \Retrigger.
+*
+* 15: This trigger exists (so enumeration shouldn't terminate), but
+* is not currently available.
+*
+* Other values are reserved for future use.
+*
+* When this field is written to an unsupported value, it takes on its
+* reset value instead. The reset value is any one of the types
+* supported by the trigger selected by \Rtselect.
+ */
+#define CSR_TDATA1_TYPE_OFFSET (MXLEN-4)
+#define CSR_TDATA1_TYPE_LENGTH 4
+#define CSR_TDATA1_TYPE (0xfULL << CSR_TDATA1_TYPE_OFFSET)
+/*
+* 0: Both Debug and M Mode can write the {\tt tdata} registers at the
+* selected \Rtselect.
+*
+* 1: Only Debug Mode can write the {\tt tdata} registers at the
+* selected \Rtselect. Writes from other modes are ignored.
+*
+* This bit is only writable from Debug Mode.
+ */
+#define CSR_TDATA1_DMODE_OFFSET (MXLEN-5)
+#define CSR_TDATA1_DMODE_LENGTH 1
+#define CSR_TDATA1_DMODE (0x1ULL << CSR_TDATA1_DMODE_OFFSET)
+/*
+* Trigger-specific data.
+ */
+#define CSR_TDATA1_DATA_OFFSET 0
+#define CSR_TDATA1_DATA_LENGTH (MXLEN - 5)
+#define CSR_TDATA1_DATA (((1L<<MXLEN - 5)-1) << CSR_TDATA1_DATA_OFFSET)
+#define CSR_TDATA2 0x7a2
+#define CSR_TDATA2_DATA_OFFSET 0
+#define CSR_TDATA2_DATA_LENGTH MXLEN
+#define CSR_TDATA2_DATA (((1L<<MXLEN)-1) << CSR_TDATA2_DATA_OFFSET)
+#define CSR_TDATA3 0x7a3
+#define CSR_TDATA3_DATA_OFFSET 0
+#define CSR_TDATA3_DATA_LENGTH MXLEN
+#define CSR_TDATA3_DATA (((1L<<MXLEN)-1) << CSR_TDATA3_DATA_OFFSET)
+#define CSR_TINFO 0x7a4
+/*
+* One bit for each possible \Ftype enumerated in \Rtdataone. Bit N
+* corresponds to type N. If the bit is set, then that type is
+* supported by the currently selected trigger.
+*
+* If the currently selected trigger doesn't exist, this field
+* contains 1.
+*
+* If \Ftype is not writable, this register may be unimplemented, in
+* which case reading it causes an illegal instruction exception. In
+* this case the debugger can read the only supported type from
+* \Rtdataone.
+ */
+#define CSR_TINFO_INFO_OFFSET 0
+#define CSR_TINFO_INFO_LENGTH 16
+#define CSR_TINFO_INFO (0xffffULL << CSR_TINFO_INFO_OFFSET)
+#define CSR_MCONTROL 0x7a1
+#define CSR_MCONTROL_TYPE_OFFSET (MXLEN-4)
+#define CSR_MCONTROL_TYPE_LENGTH 4
+#define CSR_MCONTROL_TYPE (0xfULL << CSR_MCONTROL_TYPE_OFFSET)
+#define CSR_MCONTROL_DMODE_OFFSET (MXLEN-5)
+#define CSR_MCONTROL_DMODE_LENGTH 1
+#define CSR_MCONTROL_DMODE (0x1ULL << CSR_MCONTROL_DMODE_OFFSET)
+/*
+* Specifies the largest naturally aligned powers-of-two (NAPOT) range
+* supported by the hardware when \Fmatch is 1. The value is the
+* logarithm base 2 of the
+* number of bytes in that range. A value of 0 indicates that only
+* exact value matches are supported (one byte range). A value of 63
+* corresponds to the maximum NAPOT range, which is $2^{63}$ bytes in
+* size.
+ */
+#define CSR_MCONTROL_MASKMAX_OFFSET (MXLEN-11)
+#define CSR_MCONTROL_MASKMAX_LENGTH 6
+#define CSR_MCONTROL_MASKMAX (0x3fULL << CSR_MCONTROL_MASKMAX_OFFSET)
+/*
+* If this optional bit is implemented, the hardware sets it when this
+* trigger matches. The trigger's user can set or clear it at any
+* time. The trigger's user can use this bit to determine which
+* trigger(s) matched. If the bit is not implemented, it is always 0
+* and writing it has no effect.
+ */
+#define CSR_MCONTROL_HIT_OFFSET 20
+#define CSR_MCONTROL_HIT_LENGTH 1
+#define CSR_MCONTROL_HIT (0x1ULL << CSR_MCONTROL_HIT_OFFSET)
+/*
+* 0: Perform a match on the virtual address.
+*
+* 1: Perform a match on the data value loaded/stored, or the
+* instruction executed.
+ */
+#define CSR_MCONTROL_SELECT_OFFSET 19
+#define CSR_MCONTROL_SELECT_LENGTH 1
+#define CSR_MCONTROL_SELECT (0x1ULL << CSR_MCONTROL_SELECT_OFFSET)
+/*
+* 0: The action for this trigger will be taken just before the
+* instruction that triggered it is executed, but after all preceding
+* instructions are committed.
+*
+* 1: The action for this trigger will be taken after the instruction
+* that triggered it is executed. It should be taken before the next
+* instruction is executed, but it is better to implement triggers and
+* not implement that suggestion than to not implement them at all.
+*
+* Most hardware will only implement one timing or the other, possibly
+* dependent on \Fselect, \Fexecute, \Fload, and \Fstore. This bit
+* primarily exists for the hardware to communicate to the debugger
+* what will happen. Hardware may implement the bit fully writable, in
+* which case the debugger has a little more control.
+*
+* Data load triggers with \Ftiming of 0 will result in the same load
+* happening again when the debugger lets the hart run. For data load
+* triggers, debuggers must first attempt to set the breakpoint with
+* \Ftiming of 1.
+*
+* A chain of triggers that don't all have the same \Ftiming value
+* will never fire (unless consecutive instructions match the
+* appropriate triggers).
+ */
+/* --- Fields of the mcontrol (match control) view of the trigger register. --- */
+#define CSR_MCONTROL_TIMING_OFFSET 18
+#define CSR_MCONTROL_TIMING_LENGTH 1
+#define CSR_MCONTROL_TIMING (0x1ULL << CSR_MCONTROL_TIMING_OFFSET)
+/*
+* The action to take when the trigger fires. The values are explained
+* in Table~\ref{tab:action}.
+ */
+#define CSR_MCONTROL_ACTION_OFFSET 12
+#define CSR_MCONTROL_ACTION_LENGTH 6
+#define CSR_MCONTROL_ACTION (0x3fULL << CSR_MCONTROL_ACTION_OFFSET)
+/*
+* 0: When this trigger matches, the configured action is taken.
+*
+* 1: While this trigger does not match, it prevents the trigger with
+* the next index from matching.
+*
+* Because \Fchain affects the next trigger, hardware must zero it in
+* writes to \Rmcontrol that set \Fdmode to 0 if the next trigger has
+* \Fdmode of 1.
+* In addition hardware should ignore writes to \Rmcontrol that set
+* \Fdmode to 1 if the previous trigger has both \Fdmode of 0 and
+* \Fchain of 1. Debuggers must avoid the latter case by checking
+* \Fchain on the previous trigger if they're writing \Rmcontrol.
+*
+* Implementations that wish to limit the maximum length of a trigger
+* chain (eg. to meet timing requirements) may do so by zeroing
+* \Fchain in writes to \Rmcontrol that would make the chain too long.
+ */
+#define CSR_MCONTROL_CHAIN_OFFSET 11
+#define CSR_MCONTROL_CHAIN_LENGTH 1
+#define CSR_MCONTROL_CHAIN (0x1ULL << CSR_MCONTROL_CHAIN_OFFSET)
+/*
+* 0: Matches when the value equals \Rtdatatwo.
+*
+* 1: Matches when the top M bits of the value match the top M bits of
+* \Rtdatatwo. M is MXLEN-1 minus the index of the least-significant
+* bit containing 0 in \Rtdatatwo.
+*
+* 2: Matches when the value is greater than (unsigned) or equal to
+* \Rtdatatwo.
+*
+* 3: Matches when the value is less than (unsigned) \Rtdatatwo.
+*
+* 4: Matches when the lower half of the value equals the lower half
+* of \Rtdatatwo after the lower half of the value is ANDed with the
+* upper half of \Rtdatatwo.
+*
+* 5: Matches when the upper half of the value equals the lower half
+* of \Rtdatatwo after the upper half of the value is ANDed with the
+* upper half of \Rtdatatwo.
+*
+* Other values are reserved for future use.
+ */
+#define CSR_MCONTROL_MATCH_OFFSET 7
+#define CSR_MCONTROL_MATCH_LENGTH 4
+#define CSR_MCONTROL_MATCH (0xfULL << CSR_MCONTROL_MATCH_OFFSET)
+/*
+* When set, enable this trigger in M mode.
+ */
+#define CSR_MCONTROL_M_OFFSET 6
+#define CSR_MCONTROL_M_LENGTH 1
+#define CSR_MCONTROL_M (0x1ULL << CSR_MCONTROL_M_OFFSET)
+/*
+* When set, enable this trigger in S mode.
+ */
+#define CSR_MCONTROL_S_OFFSET 4
+#define CSR_MCONTROL_S_LENGTH 1
+#define CSR_MCONTROL_S (0x1ULL << CSR_MCONTROL_S_OFFSET)
+/*
+* When set, enable this trigger in U mode.
+ */
+#define CSR_MCONTROL_U_OFFSET 3
+#define CSR_MCONTROL_U_LENGTH 1
+#define CSR_MCONTROL_U (0x1ULL << CSR_MCONTROL_U_OFFSET)
+/*
+* When set, the trigger fires on the virtual address or opcode of an
+* instruction that is executed.
+ */
+#define CSR_MCONTROL_EXECUTE_OFFSET 2
+#define CSR_MCONTROL_EXECUTE_LENGTH 1
+#define CSR_MCONTROL_EXECUTE (0x1ULL << CSR_MCONTROL_EXECUTE_OFFSET)
+/*
+* When set, the trigger fires on the virtual address or data of a store.
+ */
+#define CSR_MCONTROL_STORE_OFFSET 1
+#define CSR_MCONTROL_STORE_LENGTH 1
+#define CSR_MCONTROL_STORE (0x1ULL << CSR_MCONTROL_STORE_OFFSET)
+/*
+* When set, the trigger fires on the virtual address or data of a load.
+ */
+#define CSR_MCONTROL_LOAD_OFFSET 0
+#define CSR_MCONTROL_LOAD_LENGTH 1
+#define CSR_MCONTROL_LOAD (0x1ULL << CSR_MCONTROL_LOAD_OFFSET)
+/*
+* icount (instruction count) trigger. Note that icount, itrigger and
+* etrigger below all share CSR address 0x7a1: they are different views
+* of the same trigger data register, distinguished by the type field.
+ */
+#define CSR_ICOUNT 0x7a1
+#define CSR_ICOUNT_TYPE_OFFSET (MXLEN-4)
+#define CSR_ICOUNT_TYPE_LENGTH 4
+#define CSR_ICOUNT_TYPE (0xfULL << CSR_ICOUNT_TYPE_OFFSET)
+#define CSR_ICOUNT_DMODE_OFFSET (MXLEN-5)
+#define CSR_ICOUNT_DMODE_LENGTH 1
+#define CSR_ICOUNT_DMODE (0x1ULL << CSR_ICOUNT_DMODE_OFFSET)
+/*
+* If this optional bit is implemented, the hardware sets it when this
+* trigger matches. The trigger's user can set or clear it at any
+* time. The trigger's user can use this bit to determine which
+* trigger(s) matched. If the bit is not implemented, it is always 0
+* and writing it has no effect.
+ */
+#define CSR_ICOUNT_HIT_OFFSET 24
+#define CSR_ICOUNT_HIT_LENGTH 1
+#define CSR_ICOUNT_HIT (0x1ULL << CSR_ICOUNT_HIT_OFFSET)
+/*
+* When count is decremented to 0, the trigger fires. Instead of
+* changing \Fcount from 1 to 0, it is also acceptable for hardware to
+* clear \Fm, \Fs, and \Fu. This allows \Fcount to be hard-wired
+* to 1 if this register just exists for single step.
+ */
+#define CSR_ICOUNT_COUNT_OFFSET 10
+#define CSR_ICOUNT_COUNT_LENGTH 14
+#define CSR_ICOUNT_COUNT (0x3fffULL << CSR_ICOUNT_COUNT_OFFSET)
+/*
+* When set, every instruction completed or exception taken in M mode decrements \Fcount
+* by 1.
+ */
+#define CSR_ICOUNT_M_OFFSET 9
+#define CSR_ICOUNT_M_LENGTH 1
+#define CSR_ICOUNT_M (0x1ULL << CSR_ICOUNT_M_OFFSET)
+/*
+* When set, every instruction completed or exception taken in S mode decrements \Fcount
+* by 1.
+ */
+#define CSR_ICOUNT_S_OFFSET 7
+#define CSR_ICOUNT_S_LENGTH 1
+#define CSR_ICOUNT_S (0x1ULL << CSR_ICOUNT_S_OFFSET)
+/*
+* When set, every instruction completed or exception taken in U mode decrements \Fcount
+* by 1.
+ */
+#define CSR_ICOUNT_U_OFFSET 6
+#define CSR_ICOUNT_U_LENGTH 1
+#define CSR_ICOUNT_U (0x1ULL << CSR_ICOUNT_U_OFFSET)
+/*
+* The action to take when the trigger fires. The values are explained
+* in Table~\ref{tab:action}.
+ */
+#define CSR_ICOUNT_ACTION_OFFSET 0
+#define CSR_ICOUNT_ACTION_LENGTH 6
+#define CSR_ICOUNT_ACTION (0x3fULL << CSR_ICOUNT_ACTION_OFFSET)
+/* itrigger (interrupt trigger) view of the trigger register at 0x7a1. */
+#define CSR_ITRIGGER 0x7a1
+#define CSR_ITRIGGER_TYPE_OFFSET (MXLEN-4)
+#define CSR_ITRIGGER_TYPE_LENGTH 4
+#define CSR_ITRIGGER_TYPE (0xfULL << CSR_ITRIGGER_TYPE_OFFSET)
+#define CSR_ITRIGGER_DMODE_OFFSET (MXLEN-5)
+#define CSR_ITRIGGER_DMODE_LENGTH 1
+#define CSR_ITRIGGER_DMODE (0x1ULL << CSR_ITRIGGER_DMODE_OFFSET)
+/*
+* If this optional bit is implemented, the hardware sets it when this
+* trigger matches. The trigger's user can set or clear it at any
+* time. The trigger's user can use this bit to determine which
+* trigger(s) matched. If the bit is not implemented, it is always 0
+* and writing it has no effect.
+ */
+#define CSR_ITRIGGER_HIT_OFFSET (MXLEN-6)
+#define CSR_ITRIGGER_HIT_LENGTH 1
+#define CSR_ITRIGGER_HIT (0x1ULL << CSR_ITRIGGER_HIT_OFFSET)
+/*
+* When set, enable this trigger for interrupts that are taken from M
+* mode.
+ */
+#define CSR_ITRIGGER_M_OFFSET 9
+#define CSR_ITRIGGER_M_LENGTH 1
+#define CSR_ITRIGGER_M (0x1ULL << CSR_ITRIGGER_M_OFFSET)
+/*
+* When set, enable this trigger for interrupts that are taken from S
+* mode.
+ */
+#define CSR_ITRIGGER_S_OFFSET 7
+#define CSR_ITRIGGER_S_LENGTH 1
+#define CSR_ITRIGGER_S (0x1ULL << CSR_ITRIGGER_S_OFFSET)
+/*
+* When set, enable this trigger for interrupts that are taken from U
+* mode.
+ */
+#define CSR_ITRIGGER_U_OFFSET 6
+#define CSR_ITRIGGER_U_LENGTH 1
+#define CSR_ITRIGGER_U (0x1ULL << CSR_ITRIGGER_U_OFFSET)
+/*
+* The action to take when the trigger fires. The values are explained
+* in Table~\ref{tab:action}.
+ */
+#define CSR_ITRIGGER_ACTION_OFFSET 0
+#define CSR_ITRIGGER_ACTION_LENGTH 6
+#define CSR_ITRIGGER_ACTION (0x3fULL << CSR_ITRIGGER_ACTION_OFFSET)
+/* etrigger (exception trigger) view of the trigger register at 0x7a1. */
+#define CSR_ETRIGGER 0x7a1
+#define CSR_ETRIGGER_TYPE_OFFSET (MXLEN-4)
+#define CSR_ETRIGGER_TYPE_LENGTH 4
+#define CSR_ETRIGGER_TYPE (0xfULL << CSR_ETRIGGER_TYPE_OFFSET)
+#define CSR_ETRIGGER_DMODE_OFFSET (MXLEN-5)
+#define CSR_ETRIGGER_DMODE_LENGTH 1
+#define CSR_ETRIGGER_DMODE (0x1ULL << CSR_ETRIGGER_DMODE_OFFSET)
+/*
+* If this optional bit is implemented, the hardware sets it when this
+* trigger matches. The trigger's user can set or clear it at any
+* time. The trigger's user can use this bit to determine which
+* trigger(s) matched. If the bit is not implemented, it is always 0
+* and writing it has no effect.
+ */
+#define CSR_ETRIGGER_HIT_OFFSET (MXLEN-6)
+#define CSR_ETRIGGER_HIT_LENGTH 1
+#define CSR_ETRIGGER_HIT (0x1ULL << CSR_ETRIGGER_HIT_OFFSET)
+/*
+* When set, enable this trigger for exceptions that are taken from M
+* mode.
+ */
+#define CSR_ETRIGGER_M_OFFSET 9
+#define CSR_ETRIGGER_M_LENGTH 1
+#define CSR_ETRIGGER_M (0x1ULL << CSR_ETRIGGER_M_OFFSET)
+/*
+* When set, enable this trigger for exceptions that are taken from S
+* mode.
+ */
+#define CSR_ETRIGGER_S_OFFSET 7
+#define CSR_ETRIGGER_S_LENGTH 1
+#define CSR_ETRIGGER_S (0x1ULL << CSR_ETRIGGER_S_OFFSET)
+/*
+* When set, enable this trigger for exceptions that are taken from U
+* mode.
+ */
+#define CSR_ETRIGGER_U_OFFSET 6
+#define CSR_ETRIGGER_U_LENGTH 1
+#define CSR_ETRIGGER_U (0x1ULL << CSR_ETRIGGER_U_OFFSET)
+/*
+* The action to take when the trigger fires. The values are explained
+* in Table~\ref{tab:action}.
+ */
+#define CSR_ETRIGGER_ACTION_OFFSET 0
+#define CSR_ETRIGGER_ACTION_LENGTH 6
+#define CSR_ETRIGGER_ACTION (0x3fULL << CSR_ETRIGGER_ACTION_OFFSET)
+/* dmstatus: Debug Module Status (DMI address 0x11). */
+#define DMI_DMSTATUS 0x11
+/*
+* If 1, then there is an implicit {\tt ebreak} instruction at the
+* non-existent word immediately after the Program Buffer. This saves
+* the debugger from having to write the {\tt ebreak} itself, and
+* allows the Program Buffer to be one word smaller.
+*
+* This must be 1 when \Fprogbufsize is 1.
+ */
+#define DMI_DMSTATUS_IMPEBREAK_OFFSET 22
+#define DMI_DMSTATUS_IMPEBREAK_LENGTH 1
+#define DMI_DMSTATUS_IMPEBREAK (0x1U << DMI_DMSTATUS_IMPEBREAK_OFFSET)
+/*
+* This field is 1 when all currently selected harts have been reset but the reset has not been acknowledged.
+ */
+#define DMI_DMSTATUS_ALLHAVERESET_OFFSET 19
+#define DMI_DMSTATUS_ALLHAVERESET_LENGTH 1
+#define DMI_DMSTATUS_ALLHAVERESET (0x1U << DMI_DMSTATUS_ALLHAVERESET_OFFSET)
+/*
+* This field is 1 when any currently selected hart has been reset but the reset has not been acknowledged.
+ */
+#define DMI_DMSTATUS_ANYHAVERESET_OFFSET 18
+#define DMI_DMSTATUS_ANYHAVERESET_LENGTH 1
+#define DMI_DMSTATUS_ANYHAVERESET (0x1U << DMI_DMSTATUS_ANYHAVERESET_OFFSET)
+/*
+* This field is 1 when all currently selected harts have acknowledged
+* the previous resume request.
+ */
+#define DMI_DMSTATUS_ALLRESUMEACK_OFFSET 17
+#define DMI_DMSTATUS_ALLRESUMEACK_LENGTH 1
+#define DMI_DMSTATUS_ALLRESUMEACK (0x1U << DMI_DMSTATUS_ALLRESUMEACK_OFFSET)
+/*
+* This field is 1 when any currently selected hart has acknowledged
+* the previous resume request.
+ */
+#define DMI_DMSTATUS_ANYRESUMEACK_OFFSET 16
+#define DMI_DMSTATUS_ANYRESUMEACK_LENGTH 1
+#define DMI_DMSTATUS_ANYRESUMEACK (0x1U << DMI_DMSTATUS_ANYRESUMEACK_OFFSET)
+/*
+* This field is 1 when all currently selected harts do not exist in this system.
+ */
+#define DMI_DMSTATUS_ALLNONEXISTENT_OFFSET 15
+#define DMI_DMSTATUS_ALLNONEXISTENT_LENGTH 1
+#define DMI_DMSTATUS_ALLNONEXISTENT (0x1U << DMI_DMSTATUS_ALLNONEXISTENT_OFFSET)
+/*
+* This field is 1 when any currently selected hart does not exist in this system.
+ */
+#define DMI_DMSTATUS_ANYNONEXISTENT_OFFSET 14
+#define DMI_DMSTATUS_ANYNONEXISTENT_LENGTH 1
+#define DMI_DMSTATUS_ANYNONEXISTENT (0x1U << DMI_DMSTATUS_ANYNONEXISTENT_OFFSET)
+/*
+* This field is 1 when all currently selected harts are unavailable.
+ */
+#define DMI_DMSTATUS_ALLUNAVAIL_OFFSET 13
+#define DMI_DMSTATUS_ALLUNAVAIL_LENGTH 1
+#define DMI_DMSTATUS_ALLUNAVAIL (0x1U << DMI_DMSTATUS_ALLUNAVAIL_OFFSET)
+/*
+* This field is 1 when any currently selected hart is unavailable.
+ */
+#define DMI_DMSTATUS_ANYUNAVAIL_OFFSET 12
+#define DMI_DMSTATUS_ANYUNAVAIL_LENGTH 1
+#define DMI_DMSTATUS_ANYUNAVAIL (0x1U << DMI_DMSTATUS_ANYUNAVAIL_OFFSET)
+/*
+* This field is 1 when all currently selected harts are running.
+ */
+#define DMI_DMSTATUS_ALLRUNNING_OFFSET 11
+#define DMI_DMSTATUS_ALLRUNNING_LENGTH 1
+#define DMI_DMSTATUS_ALLRUNNING (0x1U << DMI_DMSTATUS_ALLRUNNING_OFFSET)
+/*
+* This field is 1 when any currently selected hart is running.
+ */
+#define DMI_DMSTATUS_ANYRUNNING_OFFSET 10
+#define DMI_DMSTATUS_ANYRUNNING_LENGTH 1
+#define DMI_DMSTATUS_ANYRUNNING (0x1U << DMI_DMSTATUS_ANYRUNNING_OFFSET)
+/*
+* This field is 1 when all currently selected harts are halted.
+ */
+#define DMI_DMSTATUS_ALLHALTED_OFFSET 9
+#define DMI_DMSTATUS_ALLHALTED_LENGTH 1
+#define DMI_DMSTATUS_ALLHALTED (0x1U << DMI_DMSTATUS_ALLHALTED_OFFSET)
+/*
+* This field is 1 when any currently selected hart is halted.
+ */
+#define DMI_DMSTATUS_ANYHALTED_OFFSET 8
+#define DMI_DMSTATUS_ANYHALTED_LENGTH 1
+#define DMI_DMSTATUS_ANYHALTED (0x1U << DMI_DMSTATUS_ANYHALTED_OFFSET)
+/*
+* 0 when authentication is required before using the DM. 1 when the
+* authentication check has passed. On components that don't implement
+* authentication, this bit must be preset as 1.
+ */
+#define DMI_DMSTATUS_AUTHENTICATED_OFFSET 7
+#define DMI_DMSTATUS_AUTHENTICATED_LENGTH 1
+#define DMI_DMSTATUS_AUTHENTICATED (0x1U << DMI_DMSTATUS_AUTHENTICATED_OFFSET)
+/*
+* 0: The authentication module is ready to process the next
+* read/write to \Rauthdata.
+*
+* 1: The authentication module is busy. Accessing \Rauthdata results
+* in unspecified behavior.
+*
+* \Fauthbusy only becomes set in immediate response to an access to
+* \Rauthdata.
+ */
+#define DMI_DMSTATUS_AUTHBUSY_OFFSET 6
+#define DMI_DMSTATUS_AUTHBUSY_LENGTH 1
+#define DMI_DMSTATUS_AUTHBUSY (0x1U << DMI_DMSTATUS_AUTHBUSY_OFFSET)
+/*
+* 1 if this Debug Module supports halt-on-reset functionality
+* controllable by the \Fsetresethaltreq and \Fclrresethaltreq bits.
+* 0 otherwise.
+ */
+#define DMI_DMSTATUS_HASRESETHALTREQ_OFFSET 5
+#define DMI_DMSTATUS_HASRESETHALTREQ_LENGTH 1
+#define DMI_DMSTATUS_HASRESETHALTREQ (0x1U << DMI_DMSTATUS_HASRESETHALTREQ_OFFSET)
+/*
+* 0: \Rdevtreeaddrzero--\Rdevtreeaddrthree hold information which
+* is not relevant to the Device Tree.
+*
+* 1: \Rdevtreeaddrzero--\Rdevtreeaddrthree registers hold the address of the
+* Device Tree.
+ */
+#define DMI_DMSTATUS_DEVTREEVALID_OFFSET 4
+#define DMI_DMSTATUS_DEVTREEVALID_LENGTH 1
+#define DMI_DMSTATUS_DEVTREEVALID (0x1U << DMI_DMSTATUS_DEVTREEVALID_OFFSET)
+/*
+* 0: There is no Debug Module present.
+*
+* 1: There is a Debug Module and it conforms to version 0.11 of this
+* specification.
+*
+* 2: There is a Debug Module and it conforms to version 0.13 of this
+* specification.
+*
+* 15: There is a Debug Module but it does not conform to any
+* available version of this spec.
+ */
+#define DMI_DMSTATUS_VERSION_OFFSET 0
+#define DMI_DMSTATUS_VERSION_LENGTH 4
+#define DMI_DMSTATUS_VERSION (0xfU << DMI_DMSTATUS_VERSION_OFFSET)
+/* dmcontrol: Debug Module Control (DMI address 0x10). */
+#define DMI_DMCONTROL 0x10
+/*
+* Writes the halt request bit for all currently selected harts.
+* When set to 1, each selected hart will halt if it is not currently
+* halted.
+*
+* Writing 1 or 0 has no effect on a hart which is already halted, but
+* the bit must be cleared to 0 before the hart is resumed.
+*
+* Writes apply to the new value of \Fhartsel and \Fhasel.
+ */
+#define DMI_DMCONTROL_HALTREQ_OFFSET 31
+#define DMI_DMCONTROL_HALTREQ_LENGTH 1
+#define DMI_DMCONTROL_HALTREQ (0x1U << DMI_DMCONTROL_HALTREQ_OFFSET)
+/*
+* Writes the resume request bit for all currently selected harts.
+* When set to 1, each selected hart will resume if it is currently
+* halted.
+*
+* The resume request bit is ignored while the halt request bit is
+* set.
+*
+* Writes apply to the new value of \Fhartsel and \Fhasel.
+ */
+#define DMI_DMCONTROL_RESUMEREQ_OFFSET 30
+#define DMI_DMCONTROL_RESUMEREQ_LENGTH 1
+#define DMI_DMCONTROL_RESUMEREQ (0x1U << DMI_DMCONTROL_RESUMEREQ_OFFSET)
+/*
+* This optional field writes the reset bit for all the currently
+* selected harts. To perform a reset the debugger writes 1, and then
+* writes 0 to deassert the reset signal.
+*
+* If this feature is not implemented, the bit always stays 0, so
+* after writing 1 the debugger can read the register back to see if
+* the feature is supported.
+*
+* Writes apply to the new value of \Fhartsel and \Fhasel.
+ */
+#define DMI_DMCONTROL_HARTRESET_OFFSET 29
+#define DMI_DMCONTROL_HARTRESET_LENGTH 1
+#define DMI_DMCONTROL_HARTRESET (0x1U << DMI_DMCONTROL_HARTRESET_OFFSET)
+/*
+* Writing 1 to this bit clears the {\tt havereset} bits for
+* any selected harts.
+*
+* Writes apply to the new value of \Fhartsel and \Fhasel.
+ */
+#define DMI_DMCONTROL_ACKHAVERESET_OFFSET 28
+#define DMI_DMCONTROL_ACKHAVERESET_LENGTH 1
+#define DMI_DMCONTROL_ACKHAVERESET (0x1U << DMI_DMCONTROL_ACKHAVERESET_OFFSET)
+/*
+* Selects the definition of currently selected harts.
+*
+* 0: There is a single currently selected hart, that selected by \Fhartsel.
+*
+* 1: There may be multiple currently selected harts -- that selected by \Fhartsel,
+* plus those selected by the hart array mask register.
+*
+* An implementation which does not implement the hart array mask register
+* must tie this field to 0. A debugger which wishes to use the hart array
+* mask register feature should set this bit and read back to see if the functionality
+* is supported.
+ */
+#define DMI_DMCONTROL_HASEL_OFFSET 26
+#define DMI_DMCONTROL_HASEL_LENGTH 1
+#define DMI_DMCONTROL_HASEL (0x1U << DMI_DMCONTROL_HASEL_OFFSET)
+/*
+* The low 10 bits of \Fhartsel: the DM-specific index of the hart to
+* select. This hart is always part of the currently selected harts.
+ */
+#define DMI_DMCONTROL_HARTSELLO_OFFSET 16
+#define DMI_DMCONTROL_HARTSELLO_LENGTH 10
+#define DMI_DMCONTROL_HARTSELLO (0x3ffU << DMI_DMCONTROL_HARTSELLO_OFFSET)
+/*
+* The high 10 bits of \Fhartsel: the DM-specific index of the hart to
+* select. This hart is always part of the currently selected harts.
+ */
+#define DMI_DMCONTROL_HARTSELHI_OFFSET 6
+#define DMI_DMCONTROL_HARTSELHI_LENGTH 10
+#define DMI_DMCONTROL_HARTSELHI (0x3ffU << DMI_DMCONTROL_HARTSELHI_OFFSET)
+/*
+* This optional field writes the halt-on-reset request bit for all
+* currently selected harts.
+* When set to 1, each selected hart will halt upon the next deassertion
+* of its reset. The halt-on-reset request bit is not automatically
+* cleared. The debugger must write to \Fclrresethaltreq to clear it.
+*
+* Writes apply to the new value of \Fhartsel and \Fhasel.
+*
+* If \Fhasresethaltreq is 0, this field is not implemented.
+ */
+#define DMI_DMCONTROL_SETRESETHALTREQ_OFFSET 3
+#define DMI_DMCONTROL_SETRESETHALTREQ_LENGTH 1
+#define DMI_DMCONTROL_SETRESETHALTREQ (0x1U << DMI_DMCONTROL_SETRESETHALTREQ_OFFSET)
+/*
+* This optional field clears the halt-on-reset request bit for all
+* currently selected harts.
+*
+* Writes apply to the new value of \Fhartsel and \Fhasel.
+ */
+#define DMI_DMCONTROL_CLRRESETHALTREQ_OFFSET 2
+#define DMI_DMCONTROL_CLRRESETHALTREQ_LENGTH 1
+#define DMI_DMCONTROL_CLRRESETHALTREQ (0x1U << DMI_DMCONTROL_CLRRESETHALTREQ_OFFSET)
+/*
+* This bit controls the reset signal from the DM to the rest of the
+* system. The signal should reset every part of the system, including
+* every hart, except for the DM and any logic required to access the
+* DM.
+* To perform a system reset the debugger writes 1,
+* and then writes 0
+* to deassert the reset.
+ */
+#define DMI_DMCONTROL_NDMRESET_OFFSET 1
+#define DMI_DMCONTROL_NDMRESET_LENGTH 1
+#define DMI_DMCONTROL_NDMRESET (0x1U << DMI_DMCONTROL_NDMRESET_OFFSET)
+/*
+* This bit serves as a reset signal for the Debug Module itself.
+*
+* 0: The module's state, including authentication mechanism,
+* takes its reset values (the \Fdmactive bit is the only bit which can
+* be written to something other than its reset value).
+*
+* 1: The module functions normally.
+*
+* No other mechanism should exist that may result in resetting the
+* Debug Module after power up, including the platform's system reset
+* or Debug Transport reset signals.
+*
+* A debugger may pulse this bit low to get the Debug Module into a
+* known state.
+*
+* Implementations may use this bit to aid debugging, for example by
+* preventing the Debug Module from being power gated while debugging
+* is active.
+ */
+#define DMI_DMCONTROL_DMACTIVE_OFFSET 0
+#define DMI_DMCONTROL_DMACTIVE_LENGTH 1
+#define DMI_DMCONTROL_DMACTIVE (0x1U << DMI_DMCONTROL_DMACTIVE_OFFSET)
+/* hartinfo: information about the currently selected hart (DMI address 0x12). */
+#define DMI_HARTINFO 0x12
+/*
+* Number of {\tt dscratch} registers available for the debugger
+* to use during program buffer execution, starting from \Rdscratchzero.
+* The debugger can make no assumptions about the contents of these
+* registers between commands.
+ */
+#define DMI_HARTINFO_NSCRATCH_OFFSET 20
+#define DMI_HARTINFO_NSCRATCH_LENGTH 4
+#define DMI_HARTINFO_NSCRATCH (0xfU << DMI_HARTINFO_NSCRATCH_OFFSET)
+/*
+* 0: The {\tt data} registers are shadowed in the hart by CSR
+* registers. Each CSR register is MXLEN bits in size, and corresponds
+* to a single argument, per Table~\ref{tab:datareg}.
+*
+* 1: The {\tt data} registers are shadowed in the hart's memory map.
+* Each register takes up 4 bytes in the memory map.
+ */
+#define DMI_HARTINFO_DATAACCESS_OFFSET 16
+#define DMI_HARTINFO_DATAACCESS_LENGTH 1
+#define DMI_HARTINFO_DATAACCESS (0x1U << DMI_HARTINFO_DATAACCESS_OFFSET)
+/*
+* If \Fdataaccess is 0: Number of CSR registers dedicated to
+* shadowing the {\tt data} registers.
+*
+* If \Fdataaccess is 1: Number of 32-bit words in the memory map
+* dedicated to shadowing the {\tt data} registers.
+*
+* Since there are at most 12 {\tt data} registers, the value in this
+* register must be 12 or smaller.
+ */
+#define DMI_HARTINFO_DATASIZE_OFFSET 12
+#define DMI_HARTINFO_DATASIZE_LENGTH 4
+#define DMI_HARTINFO_DATASIZE (0xfU << DMI_HARTINFO_DATASIZE_OFFSET)
+/*
+* If \Fdataaccess is 0: The number of the first CSR dedicated to
+* shadowing the {\tt data} registers.
+*
+* If \Fdataaccess is 1: Signed address of RAM where the {\tt data}
+* registers are shadowed, to be used to access relative to \Rzero.
+ */
+#define DMI_HARTINFO_DATAADDR_OFFSET 0
+#define DMI_HARTINFO_DATAADDR_LENGTH 12
+#define DMI_HARTINFO_DATAADDR (0xfffU << DMI_HARTINFO_DATAADDR_OFFSET)
+/* hawindowsel: selects which 32-bit window of the hart array mask is
+* visible through hawindow (DMI address 0x14). */
+#define DMI_HAWINDOWSEL 0x14
+/*
+* The high bits of this field may be tied to 0, depending on how large
+* the array mask register is. Eg. on a system with 48 harts only bit 0
+* of this field may actually be writable.
+ */
+#define DMI_HAWINDOWSEL_HAWINDOWSEL_OFFSET 0
+#define DMI_HAWINDOWSEL_HAWINDOWSEL_LENGTH 15
+#define DMI_HAWINDOWSEL_HAWINDOWSEL (0x7fffU << DMI_HAWINDOWSEL_HAWINDOWSEL_OFFSET)
+/* hawindow: the selected 32-bit window of the hart array mask (DMI address 0x15). */
+#define DMI_HAWINDOW 0x15
+#define DMI_HAWINDOW_MASKDATA_OFFSET 0
+#define DMI_HAWINDOW_MASKDATA_LENGTH 32
+#define DMI_HAWINDOW_MASKDATA (0xffffffffU << DMI_HAWINDOW_MASKDATA_OFFSET)
+/* abstractcs: Abstract command Control and Status (DMI address 0x16). */
+#define DMI_ABSTRACTCS 0x16
+/*
+* Size of the Program Buffer, in 32-bit words. Valid sizes are 0 - 16.
+ */
+#define DMI_ABSTRACTCS_PROGBUFSIZE_OFFSET 24
+#define DMI_ABSTRACTCS_PROGBUFSIZE_LENGTH 5
+#define DMI_ABSTRACTCS_PROGBUFSIZE (0x1fU << DMI_ABSTRACTCS_PROGBUFSIZE_OFFSET)
+/*
+* 1: An abstract command is currently being executed.
+*
+* This bit is set as soon as \Rcommand is written, and is
+* not cleared until that command has completed.
+ */
+#define DMI_ABSTRACTCS_BUSY_OFFSET 12
+#define DMI_ABSTRACTCS_BUSY_LENGTH 1
+#define DMI_ABSTRACTCS_BUSY (0x1U << DMI_ABSTRACTCS_BUSY_OFFSET)
+/*
+* Gets set if an abstract command fails. The bits in this field remain set until
+* they are cleared by writing 1 to them. No abstract command is
+* started until the value is reset to 0.
+*
+* 0 (none): No error.
+*
+* 1 (busy): An abstract command was executing while \Rcommand,
+* \Rabstractcs, \Rabstractauto was written, or when one
+* of the {\tt data} or {\tt progbuf} registers was read or written.
+*
+* 2 (not supported): The requested command is not supported,
+* regardless of whether the hart is running or not.
+*
+* 3 (exception): An exception occurred while executing the command
+* (eg. while executing the Program Buffer).
+*
+* 4 (halt/resume): The abstract command couldn't execute because the
+* hart wasn't in the required state (running/halted).
+*
+* 7 (other): The command failed for another reason.
+ */
+#define DMI_ABSTRACTCS_CMDERR_OFFSET 8
+#define DMI_ABSTRACTCS_CMDERR_LENGTH 3
+#define DMI_ABSTRACTCS_CMDERR (0x7U << DMI_ABSTRACTCS_CMDERR_OFFSET)
+/*
+* Number of {\tt data} registers that are implemented as part of the
+* abstract command interface. Valid sizes are 0 - 12.
+ */
+#define DMI_ABSTRACTCS_DATACOUNT_OFFSET 0
+#define DMI_ABSTRACTCS_DATACOUNT_LENGTH 4
+#define DMI_ABSTRACTCS_DATACOUNT (0xfU << DMI_ABSTRACTCS_DATACOUNT_OFFSET)
+/* command: writing this register starts an abstract command (DMI address 0x17). */
+#define DMI_COMMAND 0x17
+/*
+* The type determines the overall functionality of this
+* abstract command.
+ */
+#define DMI_COMMAND_CMDTYPE_OFFSET 24
+#define DMI_COMMAND_CMDTYPE_LENGTH 8
+#define DMI_COMMAND_CMDTYPE (0xffU << DMI_COMMAND_CMDTYPE_OFFSET)
+/*
+* This field is interpreted in a command-specific manner,
+* described for each abstract command.
+ */
+#define DMI_COMMAND_CONTROL_OFFSET 0
+#define DMI_COMMAND_CONTROL_LENGTH 24
+#define DMI_COMMAND_CONTROL (0xffffffU << DMI_COMMAND_CONTROL_OFFSET)
+/* abstractauto: auto-execute the abstract command on data/progbuf access
+* (DMI address 0x18). */
+#define DMI_ABSTRACTAUTO 0x18
+/*
+* When a bit in this field is 1, read or write accesses to the corresponding {\tt progbuf} word
+* cause the command in \Rcommand to be executed again.
+ */
+#define DMI_ABSTRACTAUTO_AUTOEXECPROGBUF_OFFSET 16
+#define DMI_ABSTRACTAUTO_AUTOEXECPROGBUF_LENGTH 16
+#define DMI_ABSTRACTAUTO_AUTOEXECPROGBUF (0xffffU << DMI_ABSTRACTAUTO_AUTOEXECPROGBUF_OFFSET)
+/*
+* When a bit in this field is 1, read or write accesses to the corresponding {\tt data} word
+* cause the command in \Rcommand to be executed again.
+ */
+#define DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET 0
+#define DMI_ABSTRACTAUTO_AUTOEXECDATA_LENGTH 12
+#define DMI_ABSTRACTAUTO_AUTOEXECDATA (0xfffU << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET)
+/* devtreeaddr0..3: Device Tree address words, valid when \Fdevtreevalid
+* is set in dmstatus (DMI addresses 0x19-0x1c). */
+#define DMI_DEVTREEADDR0 0x19
+#define DMI_DEVTREEADDR0_ADDR_OFFSET 0
+#define DMI_DEVTREEADDR0_ADDR_LENGTH 32
+#define DMI_DEVTREEADDR0_ADDR (0xffffffffU << DMI_DEVTREEADDR0_ADDR_OFFSET)
+#define DMI_DEVTREEADDR1 0x1a
+#define DMI_DEVTREEADDR2 0x1b
+#define DMI_DEVTREEADDR3 0x1c
+/* nextdm: DMI address of the next Debug Module in a chain, if any. */
+#define DMI_NEXTDM 0x1d
+#define DMI_NEXTDM_ADDR_OFFSET 0
+#define DMI_NEXTDM_ADDR_LENGTH 32
+#define DMI_NEXTDM_ADDR (0xffffffffU << DMI_NEXTDM_ADDR_OFFSET)
+/* data0..data11: abstract command argument registers (DMI addresses 0x04-0x0f). */
+#define DMI_DATA0 0x04
+#define DMI_DATA0_DATA_OFFSET 0
+#define DMI_DATA0_DATA_LENGTH 32
+#define DMI_DATA0_DATA (0xffffffffU << DMI_DATA0_DATA_OFFSET)
+#define DMI_DATA11 0x0f
+/* progbuf0..progbuf15: Program Buffer words (DMI addresses 0x20-0x2f). */
+#define DMI_PROGBUF0 0x20
+#define DMI_PROGBUF0_DATA_OFFSET 0
+#define DMI_PROGBUF0_DATA_LENGTH 32
+#define DMI_PROGBUF0_DATA (0xffffffffU << DMI_PROGBUF0_DATA_OFFSET)
+#define DMI_PROGBUF15 0x2f
+/* authdata: authentication data exchange register (DMI address 0x30);
+* see the authenticated/authbusy bits in dmstatus. */
+#define DMI_AUTHDATA 0x30
+#define DMI_AUTHDATA_DATA_OFFSET 0
+#define DMI_AUTHDATA_DATA_LENGTH 32
+#define DMI_AUTHDATA_DATA (0xffffffffU << DMI_AUTHDATA_DATA_OFFSET)
+/* haltsum0..haltsum3: summaries of halted harts. NOTE(review): the DMI
+* addresses (0x40, 0x13, 0x34, 0x35) are non-contiguous as generated
+* from the spec -- do not "fix" them to be sequential. */
+#define DMI_HALTSUM0 0x40
+#define DMI_HALTSUM0_HALTSUM0_OFFSET 0
+#define DMI_HALTSUM0_HALTSUM0_LENGTH 32
+#define DMI_HALTSUM0_HALTSUM0 (0xffffffffU << DMI_HALTSUM0_HALTSUM0_OFFSET)
+#define DMI_HALTSUM1 0x13
+#define DMI_HALTSUM1_HALTSUM1_OFFSET 0
+#define DMI_HALTSUM1_HALTSUM1_LENGTH 32
+#define DMI_HALTSUM1_HALTSUM1 (0xffffffffU << DMI_HALTSUM1_HALTSUM1_OFFSET)
+#define DMI_HALTSUM2 0x34
+#define DMI_HALTSUM2_HALTSUM2_OFFSET 0
+#define DMI_HALTSUM2_HALTSUM2_LENGTH 32
+#define DMI_HALTSUM2_HALTSUM2 (0xffffffffU << DMI_HALTSUM2_HALTSUM2_OFFSET)
+#define DMI_HALTSUM3 0x35
+#define DMI_HALTSUM3_HALTSUM3_OFFSET 0
+#define DMI_HALTSUM3_HALTSUM3_LENGTH 32
+#define DMI_HALTSUM3_HALTSUM3 (0xffffffffU << DMI_HALTSUM3_HALTSUM3_OFFSET)
+/* sbaddress3: highest word of the system bus address (DMI address 0x37). */
+#define DMI_SBADDRESS3 0x37
+/*
+* Accesses bits 127:96 of the physical address in {\tt sbaddress} (if
+* the system address bus is that wide).
+ */
+#define DMI_SBADDRESS3_ADDRESS_OFFSET 0
+#define DMI_SBADDRESS3_ADDRESS_LENGTH 32
+#define DMI_SBADDRESS3_ADDRESS (0xffffffffU << DMI_SBADDRESS3_ADDRESS_OFFSET)
+/* sbcs: System Bus access Control and Status (DMI address 0x38). */
+#define DMI_SBCS 0x38
+/*
+* 0: The System Bus interface conforms to mainline drafts of this
+* spec older than 1 January, 2018.
+*
+* 1: The System Bus interface conforms to this version of the spec.
+*
+* Other values are reserved for future versions.
+ */
+#define DMI_SBCS_SBVERSION_OFFSET 29
+#define DMI_SBCS_SBVERSION_LENGTH 3
+#define DMI_SBCS_SBVERSION (0x7U << DMI_SBCS_SBVERSION_OFFSET)
+/*
+* Set when the debugger attempts to read data while a read is in
+* progress, or when the debugger initiates a new access while one is
+* already in progress (while \Fsbbusy is set). It remains set until
+* it's explicitly cleared by the debugger.
+*
+* While this field is non-zero, no more system bus accesses can be
+* initiated by the Debug Module.
+ */
+#define DMI_SBCS_SBBUSYERROR_OFFSET 22
+#define DMI_SBCS_SBBUSYERROR_LENGTH 1
+#define DMI_SBCS_SBBUSYERROR (0x1U << DMI_SBCS_SBBUSYERROR_OFFSET)
+/*
+* When 1, indicates the system bus master is busy. (Whether the
+* system bus itself is busy is related, but not the same thing.) This
+* bit goes high immediately when a read or write is requested for any
+* reason, and does not go low until the access is fully completed.
+*
+* Writes to \Rsbcs while \Fsbbusy is high result in undefined
+* behavior. A debugger must not write to \Rsbcs until it reads
+* \Fsbbusy as 0.
+ */
+#define DMI_SBCS_SBBUSY_OFFSET 21
+#define DMI_SBCS_SBBUSY_LENGTH 1
+#define DMI_SBCS_SBBUSY (0x1U << DMI_SBCS_SBBUSY_OFFSET)
+/*
+* When 1, every write to \Rsbaddresszero automatically triggers a
+* system bus read at the new address.
+ */
+#define DMI_SBCS_SBREADONADDR_OFFSET 20
+#define DMI_SBCS_SBREADONADDR_LENGTH 1
+#define DMI_SBCS_SBREADONADDR (0x1U << DMI_SBCS_SBREADONADDR_OFFSET)
+/*
+* Select the access size to use for system bus accesses.
+*
+* 0: 8-bit
+*
+* 1: 16-bit
+*
+* 2: 32-bit
+*
+* 3: 64-bit
+*
+* 4: 128-bit
+*
+* If \Fsbaccess has an unsupported value when the DM starts a bus
+* access, the access is not performed and \Fsberror is set to 3.
+ */
+#define DMI_SBCS_SBACCESS_OFFSET 17
+#define DMI_SBCS_SBACCESS_LENGTH 3
+#define DMI_SBCS_SBACCESS (0x7U << DMI_SBCS_SBACCESS_OFFSET)
+/*
+* When 1, {\tt sbaddress} is incremented by the access size (in
+* bytes) selected in \Fsbaccess after every system bus access.
+ */
+#define DMI_SBCS_SBAUTOINCREMENT_OFFSET 16
+#define DMI_SBCS_SBAUTOINCREMENT_LENGTH 1
+#define DMI_SBCS_SBAUTOINCREMENT (0x1U << DMI_SBCS_SBAUTOINCREMENT_OFFSET)
+/*
+* When 1, every read from \Rsbdatazero automatically triggers a
+* system bus read at the (possibly auto-incremented) address.
+ */
+#define DMI_SBCS_SBREADONDATA_OFFSET 15
+#define DMI_SBCS_SBREADONDATA_LENGTH 1
+#define DMI_SBCS_SBREADONDATA (0x1U << DMI_SBCS_SBREADONDATA_OFFSET)
+/*
+* When the Debug Module's system bus
+* master causes a bus error, this field gets set. The bits in this
+* field remain set until they are cleared by writing 1 to them.
+* While this field is non-zero, no more system bus accesses can be
+* initiated by the Debug Module.
+*
+* An implementation may report "Other" (7) for any error condition.
+*
+* 0: There was no bus error.
+*
+* 1: There was a timeout.
+*
+* 2: A bad address was accessed.
+*
+* 3: There was an alignment error.
+*
+* 4: An access of unsupported size was requested.
+*
+* 7: Other.
+ */
+#define DMI_SBCS_SBERROR_OFFSET 12
+#define DMI_SBCS_SBERROR_LENGTH 3
+#define DMI_SBCS_SBERROR (0x7U << DMI_SBCS_SBERROR_OFFSET)
+/*
+* Width of system bus addresses in bits. (0 indicates there is no bus
+* access support.)
+ */
+#define DMI_SBCS_SBASIZE_OFFSET 5
+#define DMI_SBCS_SBASIZE_LENGTH 7
+#define DMI_SBCS_SBASIZE (0x7fU << DMI_SBCS_SBASIZE_OFFSET)
+/*
+* 1 when 128-bit system bus accesses are supported.
+ */
+#define DMI_SBCS_SBACCESS128_OFFSET 4
+#define DMI_SBCS_SBACCESS128_LENGTH 1
+#define DMI_SBCS_SBACCESS128 (0x1U << DMI_SBCS_SBACCESS128_OFFSET)
+/*
+* 1 when 64-bit system bus accesses are supported.
+ */
+#define DMI_SBCS_SBACCESS64_OFFSET 3
+#define DMI_SBCS_SBACCESS64_LENGTH 1
+#define DMI_SBCS_SBACCESS64 (0x1U << DMI_SBCS_SBACCESS64_OFFSET)
+/*
+* 1 when 32-bit system bus accesses are supported.
+ */
+#define DMI_SBCS_SBACCESS32_OFFSET 2
+#define DMI_SBCS_SBACCESS32_LENGTH 1
+#define DMI_SBCS_SBACCESS32 (0x1U << DMI_SBCS_SBACCESS32_OFFSET)
+/*
+* 1 when 16-bit system bus accesses are supported.
+ */
+#define DMI_SBCS_SBACCESS16_OFFSET 1
+#define DMI_SBCS_SBACCESS16_LENGTH 1
+#define DMI_SBCS_SBACCESS16 (0x1U << DMI_SBCS_SBACCESS16_OFFSET)
+/*
+* 1 when 8-bit system bus accesses are supported.
+ */
+#define DMI_SBCS_SBACCESS8_OFFSET 0
+#define DMI_SBCS_SBACCESS8_LENGTH 1
+#define DMI_SBCS_SBACCESS8 (0x1U << DMI_SBCS_SBACCESS8_OFFSET)
+/* sbaddress0..2 and sbdata0..3: system bus address/data words
+* (DMI addresses 0x39-0x3b and 0x3c-0x3f). */
+#define DMI_SBADDRESS0 0x39
+/*
+* Accesses bits 31:0 of the physical address in {\tt sbaddress}.
+ */
+#define DMI_SBADDRESS0_ADDRESS_OFFSET 0
+#define DMI_SBADDRESS0_ADDRESS_LENGTH 32
+#define DMI_SBADDRESS0_ADDRESS (0xffffffffU << DMI_SBADDRESS0_ADDRESS_OFFSET)
+#define DMI_SBADDRESS1 0x3a
+/*
+* Accesses bits 63:32 of the physical address in {\tt sbaddress} (if
+* the system address bus is that wide).
+ */
+#define DMI_SBADDRESS1_ADDRESS_OFFSET 0
+#define DMI_SBADDRESS1_ADDRESS_LENGTH 32
+#define DMI_SBADDRESS1_ADDRESS (0xffffffffU << DMI_SBADDRESS1_ADDRESS_OFFSET)
+#define DMI_SBADDRESS2 0x3b
+/*
+* Accesses bits 95:64 of the physical address in {\tt sbaddress} (if
+* the system address bus is that wide).
+ */
+#define DMI_SBADDRESS2_ADDRESS_OFFSET 0
+#define DMI_SBADDRESS2_ADDRESS_LENGTH 32
+#define DMI_SBADDRESS2_ADDRESS (0xffffffffU << DMI_SBADDRESS2_ADDRESS_OFFSET)
+#define DMI_SBDATA0 0x3c
+/*
+* Accesses bits 31:0 of {\tt sbdata}.
+ */
+#define DMI_SBDATA0_DATA_OFFSET 0
+#define DMI_SBDATA0_DATA_LENGTH 32
+#define DMI_SBDATA0_DATA (0xffffffffU << DMI_SBDATA0_DATA_OFFSET)
+#define DMI_SBDATA1 0x3d
+/*
+* Accesses bits 63:32 of {\tt sbdata} (if the system bus is that
+* wide).
+ */
+#define DMI_SBDATA1_DATA_OFFSET 0
+#define DMI_SBDATA1_DATA_LENGTH 32
+#define DMI_SBDATA1_DATA (0xffffffffU << DMI_SBDATA1_DATA_OFFSET)
+#define DMI_SBDATA2 0x3e
+/*
+* Accesses bits 95:64 of {\tt sbdata} (if the system bus is that
+* wide).
+ */
+#define DMI_SBDATA2_DATA_OFFSET 0
+#define DMI_SBDATA2_DATA_LENGTH 32
+#define DMI_SBDATA2_DATA (0xffffffffU << DMI_SBDATA2_DATA_OFFSET)
+#define DMI_SBDATA3 0x3f
+/*
+* Accesses bits 127:96 of {\tt sbdata} (if the system bus is that
+* wide).
+ */
+#define DMI_SBDATA3_DATA_OFFSET 0
+#define DMI_SBDATA3_DATA_LENGTH 32
+#define DMI_SBDATA3_DATA (0xffffffffU << DMI_SBDATA3_DATA_OFFSET)
+#define SHORTNAME 0x123
+/*
+* NOTE(review): placeholder text from the debug spec's
+* register-description template. SHORTNAME/SHORTNAME_FIELD are
+* example definitions leaked from the generator, not a real
+* debug-spec register; they should not be referenced by code.
+ */
+#define SHORTNAME_FIELD_OFFSET 0
+#define SHORTNAME_FIELD_LENGTH 8
+#define SHORTNAME_FIELD (0xffU << SHORTNAME_FIELD_OFFSET)
+#define AC_ACCESS_REGISTER None
+/*
+* This is 0 to indicate Access Register Command.
+* NOTE(review): AC_ACCESS_REGISTER above expands to the bare token
+* 'None' (abstract command types have no DMI address); it will fail
+* to compile if it is ever expanded in code.
+ */
+#define AC_ACCESS_REGISTER_CMDTYPE_OFFSET 24
+#define AC_ACCESS_REGISTER_CMDTYPE_LENGTH 8
+#define AC_ACCESS_REGISTER_CMDTYPE (0xffU << AC_ACCESS_REGISTER_CMDTYPE_OFFSET)
+/*
+* 2: Access the lowest 32 bits of the register.
+*
+* 3: Access the lowest 64 bits of the register.
+*
+* 4: Access the lowest 128 bits of the register.
+*
+* If this field specifies a size larger than the register's actual
+* size, then the access must fail. If a register is accessible, then
+* reads with a size less than or equal to the register's actual size
+* must be supported.
+*
+* This field controls the Argument Width of the data registers, as
+* described in the debug specification's data register table.
+ */
+#define AC_ACCESS_REGISTER_SIZE_OFFSET 20
+#define AC_ACCESS_REGISTER_SIZE_LENGTH 3
+#define AC_ACCESS_REGISTER_SIZE (0x7U << AC_ACCESS_REGISTER_SIZE_OFFSET)
+/*
+* When 1, execute the program in the Program Buffer exactly once
+* after performing the transfer, if any.
+ */
+#define AC_ACCESS_REGISTER_POSTEXEC_OFFSET 18
+#define AC_ACCESS_REGISTER_POSTEXEC_LENGTH 1
+#define AC_ACCESS_REGISTER_POSTEXEC (0x1U << AC_ACCESS_REGISTER_POSTEXEC_OFFSET)
+/*
+* 0: Don't do the operation specified by the write field.
+*
+* 1: Do the operation specified by the write field.
+*
+* This bit can be used to just execute the Program Buffer without
+* having to worry about placing valid values into the size or regno
+* fields.
+ */
+#define AC_ACCESS_REGISTER_TRANSFER_OFFSET 17
+#define AC_ACCESS_REGISTER_TRANSFER_LENGTH 1
+#define AC_ACCESS_REGISTER_TRANSFER (0x1U << AC_ACCESS_REGISTER_TRANSFER_OFFSET)
+/*
+* When the transfer bit is set:
+* 0: Copy data from the specified register into the arg0 portion
+* of the data registers.
+*
+* 1: Copy data from the arg0 portion of the data registers into the
+* specified register.
+ */
+#define AC_ACCESS_REGISTER_WRITE_OFFSET 16
+#define AC_ACCESS_REGISTER_WRITE_LENGTH 1
+#define AC_ACCESS_REGISTER_WRITE (0x1U << AC_ACCESS_REGISTER_WRITE_OFFSET)
+/*
+* Number of the register to access, as described in the register
+* number table of the debug specification.
+* dpc may be used as an alias for PC if this command is
+* supported on a non-halted hart.
+ */
+#define AC_ACCESS_REGISTER_REGNO_OFFSET 0
+#define AC_ACCESS_REGISTER_REGNO_LENGTH 16
+#define AC_ACCESS_REGISTER_REGNO (0xffffU << AC_ACCESS_REGISTER_REGNO_OFFSET)
+#define AC_QUICK_ACCESS None
+/*
+* This is 1 to indicate Quick Access command.
+ */
+#define AC_QUICK_ACCESS_CMDTYPE_OFFSET 24
+#define AC_QUICK_ACCESS_CMDTYPE_LENGTH 8
+#define AC_QUICK_ACCESS_CMDTYPE (0xffU << AC_QUICK_ACCESS_CMDTYPE_OFFSET)
+#define VIRT_PRIV virtual
+/*
+* Contains the privilege level the hart was operating in when Debug
+* Mode was entered. The encoding matches the privilege level encoding
+* from the RISC-V Privileged ISA Specification. A user can write this
+* value to change the hart's privilege level when exiting Debug Mode.
+* NOTE(review): VIRT_PRIV above expands to the bare token 'virtual'
+* (it is a virtual register with no DMI address, and 'virtual' is a
+* C++ keyword); it must never be expanded in code.
+ */
+#define VIRT_PRIV_PRV_OFFSET 0
+#define VIRT_PRIV_PRV_LENGTH 2
+#define VIRT_PRIV_PRV (0x3U << VIRT_PRIV_PRV_OFFSET)
--- /dev/null
+/* See LICENSE for license details. */
+
+#ifndef RISCV_CSR_ENCODING_H
+#define RISCV_CSR_ENCODING_H
+
+#define MSTATUS_UIE 0x00000001
+#define MSTATUS_SIE 0x00000002
+#define MSTATUS_HIE 0x00000004
+#define MSTATUS_MIE 0x00000008
+#define MSTATUS_UPIE 0x00000010
+#define MSTATUS_SPIE 0x00000020
+#define MSTATUS_HPIE 0x00000040
+#define MSTATUS_MPIE 0x00000080
+#define MSTATUS_SPP 0x00000100
+#define MSTATUS_HPP 0x00000600
+#define MSTATUS_MPP 0x00001800
+#define MSTATUS_FS 0x00006000
+#define MSTATUS_XS 0x00018000
+#define MSTATUS_MPRV 0x00020000
+#define MSTATUS_SUM 0x00040000
+#define MSTATUS_MXR 0x00080000
+#define MSTATUS_TVM 0x00100000
+#define MSTATUS_TW 0x00200000
+#define MSTATUS_TSR 0x00400000
+#define MSTATUS32_SD 0x80000000
+#define MSTATUS_UXL 0x0000000300000000
+#define MSTATUS_SXL 0x0000000C00000000
+#define MSTATUS64_SD 0x8000000000000000
+
+#define SSTATUS_UIE 0x00000001
+#define SSTATUS_SIE 0x00000002
+#define SSTATUS_UPIE 0x00000010
+#define SSTATUS_SPIE 0x00000020
+#define SSTATUS_SPP 0x00000100
+#define SSTATUS_FS 0x00006000
+#define SSTATUS_XS 0x00018000
+#define SSTATUS_SUM 0x00040000
+#define SSTATUS_MXR 0x00080000
+#define SSTATUS32_SD 0x80000000
+#define SSTATUS_UXL 0x0000000300000000
+#define SSTATUS64_SD 0x8000000000000000
+
+#define DCSR_XDEBUGVER (3U<<30)
+#define DCSR_NDRESET (1<<29)
+#define DCSR_FULLRESET (1<<28)
+#define DCSR_EBREAKM (1<<15)
+#define DCSR_EBREAKH (1<<14)
+#define DCSR_EBREAKS (1<<13)
+#define DCSR_EBREAKU (1<<12)
+#define DCSR_STOPCYCLE (1<<10)
+#define DCSR_STOPTIME (1<<9)
+#define DCSR_CAUSE (7<<6)
+#define DCSR_DEBUGINT (1<<5)
+#define DCSR_HALT (1<<3)
+#define DCSR_STEP (1<<2)
+#define DCSR_PRV (3<<0)
+
+#define DCSR_CAUSE_NONE 0
+#define DCSR_CAUSE_SWBP 1
+#define DCSR_CAUSE_HWBP 2
+#define DCSR_CAUSE_DEBUGINT 3
+#define DCSR_CAUSE_STEP 4
+#define DCSR_CAUSE_HALT 5
+
+#define MCONTROL_TYPE(xlen) (0xfULL<<((xlen)-4))
+#define MCONTROL_DMODE(xlen) (1ULL<<((xlen)-5))
+#define MCONTROL_MASKMAX(xlen) (0x3fULL<<((xlen)-11))
+
+#define MCONTROL_SELECT (1<<19)
+#define MCONTROL_TIMING (1<<18)
+#define MCONTROL_ACTION (0x3f<<12)
+#define MCONTROL_CHAIN (1<<11)
+#define MCONTROL_MATCH (0xf<<7)
+#define MCONTROL_M (1<<6)
+#define MCONTROL_H (1<<5)
+#define MCONTROL_S (1<<4)
+#define MCONTROL_U (1<<3)
+#define MCONTROL_EXECUTE (1<<2)
+#define MCONTROL_STORE (1<<1)
+#define MCONTROL_LOAD (1<<0)
+
+#define MCONTROL_TYPE_NONE 0
+#define MCONTROL_TYPE_MATCH 2
+
+#define MCONTROL_ACTION_DEBUG_EXCEPTION 0
+#define MCONTROL_ACTION_DEBUG_MODE 1
+#define MCONTROL_ACTION_TRACE_START 2
+#define MCONTROL_ACTION_TRACE_STOP 3
+#define MCONTROL_ACTION_TRACE_EMIT 4
+
+#define MCONTROL_MATCH_EQUAL 0
+#define MCONTROL_MATCH_NAPOT 1
+#define MCONTROL_MATCH_GE 2
+#define MCONTROL_MATCH_LT 3
+#define MCONTROL_MATCH_MASK_LOW 4
+#define MCONTROL_MATCH_MASK_HIGH 5
+
+#define MIP_SSIP (1 << IRQ_S_SOFT)
+#define MIP_HSIP (1 << IRQ_H_SOFT)
+#define MIP_MSIP (1 << IRQ_M_SOFT)
+#define MIP_STIP (1 << IRQ_S_TIMER)
+#define MIP_HTIP (1 << IRQ_H_TIMER)
+#define MIP_MTIP (1 << IRQ_M_TIMER)
+#define MIP_SEIP (1 << IRQ_S_EXT)
+#define MIP_HEIP (1 << IRQ_H_EXT)
+#define MIP_MEIP (1 << IRQ_M_EXT)
+
+#define SIP_SSIP MIP_SSIP
+#define SIP_STIP MIP_STIP
+
+#define PRV_U 0
+#define PRV_S 1
+#define PRV_H 2
+#define PRV_M 3
+
+#define SATP32_MODE 0x80000000
+#define SATP32_ASID 0x7FC00000
+#define SATP32_PPN 0x003FFFFF
+#define SATP64_MODE 0xF000000000000000
+#define SATP64_ASID 0x0FFFF00000000000
+#define SATP64_PPN 0x00000FFFFFFFFFFF
+
+#define SATP_MODE_OFF 0
+#define SATP_MODE_SV32 1
+#define SATP_MODE_SV39 8
+#define SATP_MODE_SV48 9
+#define SATP_MODE_SV57 10
+#define SATP_MODE_SV64 11
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define IRQ_S_SOFT 1
+#define IRQ_H_SOFT 2
+#define IRQ_M_SOFT 3
+#define IRQ_S_TIMER 5
+#define IRQ_H_TIMER 6
+#define IRQ_M_TIMER 7
+#define IRQ_S_EXT 9
+#define IRQ_H_EXT 10
+#define IRQ_M_EXT 11
+#define IRQ_COP 12
+#define IRQ_HOST 13
+
+#define DEFAULT_RSTVEC 0x00001000
+#define CLINT_BASE 0x02000000
+#define CLINT_SIZE 0x000c0000
+#define EXT_IO_BASE 0x40000000
+#define DRAM_BASE 0x80000000
+
+/* page table entry (PTE) fields */
+#define PTE_V 0x001 /* Valid */
+#define PTE_R 0x002 /* Read */
+#define PTE_W 0x004 /* Write */
+#define PTE_X 0x008 /* Execute */
+#define PTE_U 0x010 /* User */
+#define PTE_G 0x020 /* Global */
+#define PTE_A 0x040 /* Accessed */
+#define PTE_D 0x080 /* Dirty */
+#define PTE_SOFT 0x300 /* Reserved for Software */
+
+#define PTE_PPN_SHIFT 10
+
+#define PTE_TABLE(PTE) (((PTE) & (PTE_V | PTE_R | PTE_W | PTE_X)) == PTE_V)
+
+#ifdef __riscv
+
+#if __riscv_xlen == 64
+# define MSTATUS_SD MSTATUS64_SD
+# define SSTATUS_SD SSTATUS64_SD
+# define RISCV_PGLEVEL_BITS 9
+# define SATP_MODE SATP64_MODE
+#else
+# define MSTATUS_SD MSTATUS32_SD
+# define SSTATUS_SD SSTATUS32_SD
+# define RISCV_PGLEVEL_BITS 10
+# define SATP_MODE SATP32_MODE
+#endif
+#define RISCV_PGSHIFT 12
+#define RISCV_PGSIZE (1 << RISCV_PGSHIFT)
+
+#ifndef __ASSEMBLER__
+
+#ifdef __GNUC__
+
+/*
+#define read_csr(reg) ({ unsigned long __tmp; \
+ asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \
+ __tmp; })
+
+#define write_csr(reg, val) ({ \
+ asm volatile ("csrw " #reg ", %0" :: "rK"(val)); })
+
+#define swap_csr(reg, val) ({ unsigned long __tmp; \
+ asm volatile ("csrrw %0, " #reg ", %1" : "=r"(__tmp) : "rK"(val)); \
+ __tmp; })
+
+#define set_csr(reg, bit) ({ unsigned long __tmp; \
+ asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "rK"(bit)); \
+ __tmp; })
+
+#define clear_csr(reg, bit) ({ unsigned long __tmp; \
+ asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "rK"(bit)); \
+ __tmp; })
+ */
+
+#define rdtime() read_csr(time)
+#define rdcycle() read_csr(cycle)
+#define rdinstret() read_csr(instret)
+
+#endif
+
+#endif
+
+#endif
+
+#endif
+/* Automatically generated by parse-opcodes. */
+#ifndef RISCV_ENCODING_H
+#define RISCV_ENCODING_H
+#define MATCH_BEQ 0x63
+#define MASK_BEQ 0x707f
+#define MATCH_BNE 0x1063
+#define MASK_BNE 0x707f
+#define MATCH_BLT 0x4063
+#define MASK_BLT 0x707f
+#define MATCH_BGE 0x5063
+#define MASK_BGE 0x707f
+#define MATCH_BLTU 0x6063
+#define MASK_BLTU 0x707f
+#define MATCH_BGEU 0x7063
+#define MASK_BGEU 0x707f
+#define MATCH_JALR 0x67
+#define MASK_JALR 0x707f
+#define MATCH_JAL 0x6f
+#define MASK_JAL 0x7f
+#define MATCH_LUI 0x37
+#define MASK_LUI 0x7f
+#define MATCH_AUIPC 0x17
+#define MASK_AUIPC 0x7f
+#define MATCH_ADDI 0x13
+#define MASK_ADDI 0x707f
+#define MATCH_SLLI 0x1013
+#define MASK_SLLI 0xfc00707f
+#define MATCH_SLTI 0x2013
+#define MASK_SLTI 0x707f
+#define MATCH_SLTIU 0x3013
+#define MASK_SLTIU 0x707f
+#define MATCH_XORI 0x4013
+#define MASK_XORI 0x707f
+#define MATCH_SRLI 0x5013
+#define MASK_SRLI 0xfc00707f
+#define MATCH_SRAI 0x40005013
+#define MASK_SRAI 0xfc00707f
+#define MATCH_ORI 0x6013
+#define MASK_ORI 0x707f
+#define MATCH_ANDI 0x7013
+#define MASK_ANDI 0x707f
+#define MATCH_ADD 0x33
+#define MASK_ADD 0xfe00707f
+#define MATCH_SUB 0x40000033
+#define MASK_SUB 0xfe00707f
+#define MATCH_SLL 0x1033
+#define MASK_SLL 0xfe00707f
+#define MATCH_SLT 0x2033
+#define MASK_SLT 0xfe00707f
+#define MATCH_SLTU 0x3033
+#define MASK_SLTU 0xfe00707f
+#define MATCH_XOR 0x4033
+#define MASK_XOR 0xfe00707f
+#define MATCH_SRL 0x5033
+#define MASK_SRL 0xfe00707f
+#define MATCH_SRA 0x40005033
+#define MASK_SRA 0xfe00707f
+#define MATCH_OR 0x6033
+#define MASK_OR 0xfe00707f
+#define MATCH_AND 0x7033
+#define MASK_AND 0xfe00707f
+#define MATCH_ADDIW 0x1b
+#define MASK_ADDIW 0x707f
+#define MATCH_SLLIW 0x101b
+#define MASK_SLLIW 0xfe00707f
+#define MATCH_SRLIW 0x501b
+#define MASK_SRLIW 0xfe00707f
+#define MATCH_SRAIW 0x4000501b
+#define MASK_SRAIW 0xfe00707f
+#define MATCH_ADDW 0x3b
+#define MASK_ADDW 0xfe00707f
+#define MATCH_SUBW 0x4000003b
+#define MASK_SUBW 0xfe00707f
+#define MATCH_SLLW 0x103b
+#define MASK_SLLW 0xfe00707f
+#define MATCH_SRLW 0x503b
+#define MASK_SRLW 0xfe00707f
+#define MATCH_SRAW 0x4000503b
+#define MASK_SRAW 0xfe00707f
+#define MATCH_LB 0x3
+#define MASK_LB 0x707f
+#define MATCH_LH 0x1003
+#define MASK_LH 0x707f
+#define MATCH_LW 0x2003
+#define MASK_LW 0x707f
+#define MATCH_LD 0x3003
+#define MASK_LD 0x707f
+#define MATCH_LBU 0x4003
+#define MASK_LBU 0x707f
+#define MATCH_LHU 0x5003
+#define MASK_LHU 0x707f
+#define MATCH_LWU 0x6003
+#define MASK_LWU 0x707f
+#define MATCH_SB 0x23
+#define MASK_SB 0x707f
+#define MATCH_SH 0x1023
+#define MASK_SH 0x707f
+#define MATCH_SW 0x2023
+#define MASK_SW 0x707f
+#define MATCH_SD 0x3023
+#define MASK_SD 0x707f
+#define MATCH_FENCE 0xf
+#define MASK_FENCE 0x707f
+#define MATCH_FENCE_I 0x100f
+#define MASK_FENCE_I 0x707f
+#define MATCH_MUL 0x2000033
+#define MASK_MUL 0xfe00707f
+#define MATCH_MULH 0x2001033
+#define MASK_MULH 0xfe00707f
+#define MATCH_MULHSU 0x2002033
+#define MASK_MULHSU 0xfe00707f
+#define MATCH_MULHU 0x2003033
+#define MASK_MULHU 0xfe00707f
+#define MATCH_DIV 0x2004033
+#define MASK_DIV 0xfe00707f
+#define MATCH_DIVU 0x2005033
+#define MASK_DIVU 0xfe00707f
+#define MATCH_REM 0x2006033
+#define MASK_REM 0xfe00707f
+#define MATCH_REMU 0x2007033
+#define MASK_REMU 0xfe00707f
+#define MATCH_MULW 0x200003b
+#define MASK_MULW 0xfe00707f
+#define MATCH_DIVW 0x200403b
+#define MASK_DIVW 0xfe00707f
+#define MATCH_DIVUW 0x200503b
+#define MASK_DIVUW 0xfe00707f
+#define MATCH_REMW 0x200603b
+#define MASK_REMW 0xfe00707f
+#define MATCH_REMUW 0x200703b
+#define MASK_REMUW 0xfe00707f
+#define MATCH_AMOADD_W 0x202f
+#define MASK_AMOADD_W 0xf800707f
+#define MATCH_AMOXOR_W 0x2000202f
+#define MASK_AMOXOR_W 0xf800707f
+#define MATCH_AMOOR_W 0x4000202f
+#define MASK_AMOOR_W 0xf800707f
+#define MATCH_AMOAND_W 0x6000202f
+#define MASK_AMOAND_W 0xf800707f
+#define MATCH_AMOMIN_W 0x8000202f
+#define MASK_AMOMIN_W 0xf800707f
+#define MATCH_AMOMAX_W 0xa000202f
+#define MASK_AMOMAX_W 0xf800707f
+#define MATCH_AMOMINU_W 0xc000202f
+#define MASK_AMOMINU_W 0xf800707f
+#define MATCH_AMOMAXU_W 0xe000202f
+#define MASK_AMOMAXU_W 0xf800707f
+#define MATCH_AMOSWAP_W 0x800202f
+#define MASK_AMOSWAP_W 0xf800707f
+#define MATCH_LR_W 0x1000202f
+#define MASK_LR_W 0xf9f0707f
+#define MATCH_SC_W 0x1800202f
+#define MASK_SC_W 0xf800707f
+#define MATCH_AMOADD_D 0x302f
+#define MASK_AMOADD_D 0xf800707f
+#define MATCH_AMOXOR_D 0x2000302f
+#define MASK_AMOXOR_D 0xf800707f
+#define MATCH_AMOOR_D 0x4000302f
+#define MASK_AMOOR_D 0xf800707f
+#define MATCH_AMOAND_D 0x6000302f
+#define MASK_AMOAND_D 0xf800707f
+#define MATCH_AMOMIN_D 0x8000302f
+#define MASK_AMOMIN_D 0xf800707f
+#define MATCH_AMOMAX_D 0xa000302f
+#define MASK_AMOMAX_D 0xf800707f
+#define MATCH_AMOMINU_D 0xc000302f
+#define MASK_AMOMINU_D 0xf800707f
+#define MATCH_AMOMAXU_D 0xe000302f
+#define MASK_AMOMAXU_D 0xf800707f
+#define MATCH_AMOSWAP_D 0x800302f
+#define MASK_AMOSWAP_D 0xf800707f
+#define MATCH_LR_D 0x1000302f
+#define MASK_LR_D 0xf9f0707f
+#define MATCH_SC_D 0x1800302f
+#define MASK_SC_D 0xf800707f
+#define MATCH_ECALL 0x73
+#define MASK_ECALL 0xffffffff
+#define MATCH_EBREAK 0x100073
+#define MASK_EBREAK 0xffffffff
+#define MATCH_URET 0x200073
+#define MASK_URET 0xffffffff
+#define MATCH_SRET 0x10200073
+#define MASK_SRET 0xffffffff
+#define MATCH_MRET 0x30200073
+#define MASK_MRET 0xffffffff
+#define MATCH_DRET 0x7b200073
+#define MASK_DRET 0xffffffff
+#define MATCH_SFENCE_VMA 0x12000073
+#define MASK_SFENCE_VMA 0xfe007fff
+#define MATCH_WFI 0x10500073
+#define MASK_WFI 0xffffffff
+#define MATCH_CSRRW 0x1073
+#define MASK_CSRRW 0x707f
+#define MATCH_CSRRS 0x2073
+#define MASK_CSRRS 0x707f
+#define MATCH_CSRRC 0x3073
+#define MASK_CSRRC 0x707f
+#define MATCH_CSRRWI 0x5073
+#define MASK_CSRRWI 0x707f
+#define MATCH_CSRRSI 0x6073
+#define MASK_CSRRSI 0x707f
+#define MATCH_CSRRCI 0x7073
+#define MASK_CSRRCI 0x707f
+#define MATCH_FADD_S 0x53
+#define MASK_FADD_S 0xfe00007f
+#define MATCH_FSUB_S 0x8000053
+#define MASK_FSUB_S 0xfe00007f
+#define MATCH_FMUL_S 0x10000053
+#define MASK_FMUL_S 0xfe00007f
+#define MATCH_FDIV_S 0x18000053
+#define MASK_FDIV_S 0xfe00007f
+#define MATCH_FSGNJ_S 0x20000053
+#define MASK_FSGNJ_S 0xfe00707f
+#define MATCH_FSGNJN_S 0x20001053
+#define MASK_FSGNJN_S 0xfe00707f
+#define MATCH_FSGNJX_S 0x20002053
+#define MASK_FSGNJX_S 0xfe00707f
+#define MATCH_FMIN_S 0x28000053
+#define MASK_FMIN_S 0xfe00707f
+#define MATCH_FMAX_S 0x28001053
+#define MASK_FMAX_S 0xfe00707f
+#define MATCH_FSQRT_S 0x58000053
+#define MASK_FSQRT_S 0xfff0007f
+#define MATCH_FADD_D 0x2000053
+#define MASK_FADD_D 0xfe00007f
+#define MATCH_FSUB_D 0xa000053
+#define MASK_FSUB_D 0xfe00007f
+#define MATCH_FMUL_D 0x12000053
+#define MASK_FMUL_D 0xfe00007f
+#define MATCH_FDIV_D 0x1a000053
+#define MASK_FDIV_D 0xfe00007f
+#define MATCH_FSGNJ_D 0x22000053
+#define MASK_FSGNJ_D 0xfe00707f
+#define MATCH_FSGNJN_D 0x22001053
+#define MASK_FSGNJN_D 0xfe00707f
+#define MATCH_FSGNJX_D 0x22002053
+#define MASK_FSGNJX_D 0xfe00707f
+#define MATCH_FMIN_D 0x2a000053
+#define MASK_FMIN_D 0xfe00707f
+#define MATCH_FMAX_D 0x2a001053
+#define MASK_FMAX_D 0xfe00707f
+#define MATCH_FCVT_S_D 0x40100053
+#define MASK_FCVT_S_D 0xfff0007f
+#define MATCH_FCVT_D_S 0x42000053
+#define MASK_FCVT_D_S 0xfff0007f
+#define MATCH_FSQRT_D 0x5a000053
+#define MASK_FSQRT_D 0xfff0007f
+#define MATCH_FADD_Q 0x6000053
+#define MASK_FADD_Q 0xfe00007f
+#define MATCH_FSUB_Q 0xe000053
+#define MASK_FSUB_Q 0xfe00007f
+#define MATCH_FMUL_Q 0x16000053
+#define MASK_FMUL_Q 0xfe00007f
+#define MATCH_FDIV_Q 0x1e000053
+#define MASK_FDIV_Q 0xfe00007f
+#define MATCH_FSGNJ_Q 0x26000053
+#define MASK_FSGNJ_Q 0xfe00707f
+#define MATCH_FSGNJN_Q 0x26001053
+#define MASK_FSGNJN_Q 0xfe00707f
+#define MATCH_FSGNJX_Q 0x26002053
+#define MASK_FSGNJX_Q 0xfe00707f
+#define MATCH_FMIN_Q 0x2e000053
+#define MASK_FMIN_Q 0xfe00707f
+#define MATCH_FMAX_Q 0x2e001053
+#define MASK_FMAX_Q 0xfe00707f
+#define MATCH_FCVT_S_Q 0x40300053
+#define MASK_FCVT_S_Q 0xfff0007f
+#define MATCH_FCVT_Q_S 0x46000053
+#define MASK_FCVT_Q_S 0xfff0007f
+#define MATCH_FCVT_D_Q 0x42300053
+#define MASK_FCVT_D_Q 0xfff0007f
+#define MATCH_FCVT_Q_D 0x46100053
+#define MASK_FCVT_Q_D 0xfff0007f
+#define MATCH_FSQRT_Q 0x5e000053
+#define MASK_FSQRT_Q 0xfff0007f
+#define MATCH_FLE_S 0xa0000053
+#define MASK_FLE_S 0xfe00707f
+#define MATCH_FLT_S 0xa0001053
+#define MASK_FLT_S 0xfe00707f
+#define MATCH_FEQ_S 0xa0002053
+#define MASK_FEQ_S 0xfe00707f
+#define MATCH_FLE_D 0xa2000053
+#define MASK_FLE_D 0xfe00707f
+#define MATCH_FLT_D 0xa2001053
+#define MASK_FLT_D 0xfe00707f
+#define MATCH_FEQ_D 0xa2002053
+#define MASK_FEQ_D 0xfe00707f
+#define MATCH_FLE_Q 0xa6000053
+#define MASK_FLE_Q 0xfe00707f
+#define MATCH_FLT_Q 0xa6001053
+#define MASK_FLT_Q 0xfe00707f
+#define MATCH_FEQ_Q 0xa6002053
+#define MASK_FEQ_Q 0xfe00707f
+#define MATCH_FCVT_W_S 0xc0000053
+#define MASK_FCVT_W_S 0xfff0007f
+#define MATCH_FCVT_WU_S 0xc0100053
+#define MASK_FCVT_WU_S 0xfff0007f
+#define MATCH_FCVT_L_S 0xc0200053
+#define MASK_FCVT_L_S 0xfff0007f
+#define MATCH_FCVT_LU_S 0xc0300053
+#define MASK_FCVT_LU_S 0xfff0007f
+#define MATCH_FMV_X_W 0xe0000053
+#define MASK_FMV_X_W 0xfff0707f
+#define MATCH_FCLASS_S 0xe0001053
+#define MASK_FCLASS_S 0xfff0707f
+#define MATCH_FCVT_W_D 0xc2000053
+#define MASK_FCVT_W_D 0xfff0007f
+#define MATCH_FCVT_WU_D 0xc2100053
+#define MASK_FCVT_WU_D 0xfff0007f
+#define MATCH_FCVT_L_D 0xc2200053
+#define MASK_FCVT_L_D 0xfff0007f
+#define MATCH_FCVT_LU_D 0xc2300053
+#define MASK_FCVT_LU_D 0xfff0007f
+#define MATCH_FMV_X_D 0xe2000053
+#define MASK_FMV_X_D 0xfff0707f
+#define MATCH_FCLASS_D 0xe2001053
+#define MASK_FCLASS_D 0xfff0707f
+#define MATCH_FCVT_W_Q 0xc6000053
+#define MASK_FCVT_W_Q 0xfff0007f
+#define MATCH_FCVT_WU_Q 0xc6100053
+#define MASK_FCVT_WU_Q 0xfff0007f
+#define MATCH_FCVT_L_Q 0xc6200053
+#define MASK_FCVT_L_Q 0xfff0007f
+#define MATCH_FCVT_LU_Q 0xc6300053
+#define MASK_FCVT_LU_Q 0xfff0007f
+#define MATCH_FMV_X_Q 0xe6000053
+#define MASK_FMV_X_Q 0xfff0707f
+#define MATCH_FCLASS_Q 0xe6001053
+#define MASK_FCLASS_Q 0xfff0707f
+#define MATCH_FCVT_S_W 0xd0000053
+#define MASK_FCVT_S_W 0xfff0007f
+#define MATCH_FCVT_S_WU 0xd0100053
+#define MASK_FCVT_S_WU 0xfff0007f
+#define MATCH_FCVT_S_L 0xd0200053
+#define MASK_FCVT_S_L 0xfff0007f
+#define MATCH_FCVT_S_LU 0xd0300053
+#define MASK_FCVT_S_LU 0xfff0007f
+#define MATCH_FMV_W_X 0xf0000053
+#define MASK_FMV_W_X 0xfff0707f
+#define MATCH_FCVT_D_W 0xd2000053
+#define MASK_FCVT_D_W 0xfff0007f
+#define MATCH_FCVT_D_WU 0xd2100053
+#define MASK_FCVT_D_WU 0xfff0007f
+#define MATCH_FCVT_D_L 0xd2200053
+#define MASK_FCVT_D_L 0xfff0007f
+#define MATCH_FCVT_D_LU 0xd2300053
+#define MASK_FCVT_D_LU 0xfff0007f
+#define MATCH_FMV_D_X 0xf2000053
+#define MASK_FMV_D_X 0xfff0707f
+#define MATCH_FCVT_Q_W 0xd6000053
+#define MASK_FCVT_Q_W 0xfff0007f
+#define MATCH_FCVT_Q_WU 0xd6100053
+#define MASK_FCVT_Q_WU 0xfff0007f
+#define MATCH_FCVT_Q_L 0xd6200053
+#define MASK_FCVT_Q_L 0xfff0007f
+#define MATCH_FCVT_Q_LU 0xd6300053
+#define MASK_FCVT_Q_LU 0xfff0007f
+#define MATCH_FMV_Q_X 0xf6000053
+#define MASK_FMV_Q_X 0xfff0707f
+#define MATCH_FLW 0x2007
+#define MASK_FLW 0x707f
+#define MATCH_FLD 0x3007
+#define MASK_FLD 0x707f
+#define MATCH_FLQ 0x4007
+#define MASK_FLQ 0x707f
+#define MATCH_FSW 0x2027
+#define MASK_FSW 0x707f
+#define MATCH_FSD 0x3027
+#define MASK_FSD 0x707f
+#define MATCH_FSQ 0x4027
+#define MASK_FSQ 0x707f
+#define MATCH_FMADD_S 0x43
+#define MASK_FMADD_S 0x600007f
+#define MATCH_FMSUB_S 0x47
+#define MASK_FMSUB_S 0x600007f
+#define MATCH_FNMSUB_S 0x4b
+#define MASK_FNMSUB_S 0x600007f
+#define MATCH_FNMADD_S 0x4f
+#define MASK_FNMADD_S 0x600007f
+#define MATCH_FMADD_D 0x2000043
+#define MASK_FMADD_D 0x600007f
+#define MATCH_FMSUB_D 0x2000047
+#define MASK_FMSUB_D 0x600007f
+#define MATCH_FNMSUB_D 0x200004b
+#define MASK_FNMSUB_D 0x600007f
+#define MATCH_FNMADD_D 0x200004f
+#define MASK_FNMADD_D 0x600007f
+#define MATCH_FMADD_Q 0x6000043
+#define MASK_FMADD_Q 0x600007f
+#define MATCH_FMSUB_Q 0x6000047
+#define MASK_FMSUB_Q 0x600007f
+#define MATCH_FNMSUB_Q 0x600004b
+#define MASK_FNMSUB_Q 0x600007f
+#define MATCH_FNMADD_Q 0x600004f
+#define MASK_FNMADD_Q 0x600007f
+#define MATCH_C_NOP 0x1
+#define MASK_C_NOP 0xffff
+#define MATCH_C_ADDI16SP 0x6101
+#define MASK_C_ADDI16SP 0xef83
+#define MATCH_C_JR 0x8002
+#define MASK_C_JR 0xf07f
+#define MATCH_C_JALR 0x9002
+#define MASK_C_JALR 0xf07f
+#define MATCH_C_EBREAK 0x9002
+#define MASK_C_EBREAK 0xffff
+#define MATCH_C_LD 0x6000
+#define MASK_C_LD 0xe003
+#define MATCH_C_SD 0xe000
+#define MASK_C_SD 0xe003
+#define MATCH_C_ADDIW 0x2001
+#define MASK_C_ADDIW 0xe003
+#define MATCH_C_LDSP 0x6002
+#define MASK_C_LDSP 0xe003
+#define MATCH_C_SDSP 0xe002
+#define MASK_C_SDSP 0xe003
+#define MATCH_C_ADDI4SPN 0x0
+#define MASK_C_ADDI4SPN 0xe003
+#define MATCH_C_FLD 0x2000
+#define MASK_C_FLD 0xe003
+#define MATCH_C_LW 0x4000
+#define MASK_C_LW 0xe003
+#define MATCH_C_FLW 0x6000
+#define MASK_C_FLW 0xe003
+#define MATCH_C_FSD 0xa000
+#define MASK_C_FSD 0xe003
+#define MATCH_C_SW 0xc000
+#define MASK_C_SW 0xe003
+#define MATCH_C_FSW 0xe000
+#define MASK_C_FSW 0xe003
+#define MATCH_C_ADDI 0x1
+#define MASK_C_ADDI 0xe003
+#define MATCH_C_JAL 0x2001
+#define MASK_C_JAL 0xe003
+#define MATCH_C_LI 0x4001
+#define MASK_C_LI 0xe003
+#define MATCH_C_LUI 0x6001
+#define MASK_C_LUI 0xe003
+#define MATCH_C_SRLI 0x8001
+#define MASK_C_SRLI 0xec03
+#define MATCH_C_SRAI 0x8401
+#define MASK_C_SRAI 0xec03
+#define MATCH_C_ANDI 0x8801
+#define MASK_C_ANDI 0xec03
+#define MATCH_C_SUB 0x8c01
+#define MASK_C_SUB 0xfc63
+#define MATCH_C_XOR 0x8c21
+#define MASK_C_XOR 0xfc63
+#define MATCH_C_OR 0x8c41
+#define MASK_C_OR 0xfc63
+#define MATCH_C_AND 0x8c61
+#define MASK_C_AND 0xfc63
+#define MATCH_C_SUBW 0x9c01
+#define MASK_C_SUBW 0xfc63
+#define MATCH_C_ADDW 0x9c21
+#define MASK_C_ADDW 0xfc63
+#define MATCH_C_J 0xa001
+#define MASK_C_J 0xe003
+#define MATCH_C_BEQZ 0xc001
+#define MASK_C_BEQZ 0xe003
+#define MATCH_C_BNEZ 0xe001
+#define MASK_C_BNEZ 0xe003
+#define MATCH_C_SLLI 0x2
+#define MASK_C_SLLI 0xe003
+#define MATCH_C_FLDSP 0x2002
+#define MASK_C_FLDSP 0xe003
+#define MATCH_C_LWSP 0x4002
+#define MASK_C_LWSP 0xe003
+#define MATCH_C_FLWSP 0x6002
+#define MASK_C_FLWSP 0xe003
+#define MATCH_C_MV 0x8002
+#define MASK_C_MV 0xf003
+#define MATCH_C_ADD 0x9002
+#define MASK_C_ADD 0xf003
+#define MATCH_C_FSDSP 0xa002
+#define MASK_C_FSDSP 0xe003
+#define MATCH_C_SWSP 0xc002
+#define MASK_C_SWSP 0xe003
+#define MATCH_C_FSWSP 0xe002
+#define MASK_C_FSWSP 0xe003
+#define MATCH_CUSTOM0 0xb
+#define MASK_CUSTOM0 0x707f
+#define MATCH_CUSTOM0_RS1 0x200b
+#define MASK_CUSTOM0_RS1 0x707f
+#define MATCH_CUSTOM0_RS1_RS2 0x300b
+#define MASK_CUSTOM0_RS1_RS2 0x707f
+#define MATCH_CUSTOM0_RD 0x400b
+#define MASK_CUSTOM0_RD 0x707f
+#define MATCH_CUSTOM0_RD_RS1 0x600b
+#define MASK_CUSTOM0_RD_RS1 0x707f
+#define MATCH_CUSTOM0_RD_RS1_RS2 0x700b
+#define MASK_CUSTOM0_RD_RS1_RS2 0x707f
+#define MATCH_CUSTOM1 0x2b
+#define MASK_CUSTOM1 0x707f
+#define MATCH_CUSTOM1_RS1 0x202b
+#define MASK_CUSTOM1_RS1 0x707f
+#define MATCH_CUSTOM1_RS1_RS2 0x302b
+#define MASK_CUSTOM1_RS1_RS2 0x707f
+#define MATCH_CUSTOM1_RD 0x402b
+#define MASK_CUSTOM1_RD 0x707f
+#define MATCH_CUSTOM1_RD_RS1 0x602b
+#define MASK_CUSTOM1_RD_RS1 0x707f
+#define MATCH_CUSTOM1_RD_RS1_RS2 0x702b
+#define MASK_CUSTOM1_RD_RS1_RS2 0x707f
+#define MATCH_CUSTOM2 0x5b
+#define MASK_CUSTOM2 0x707f
+#define MATCH_CUSTOM2_RS1 0x205b
+#define MASK_CUSTOM2_RS1 0x707f
+#define MATCH_CUSTOM2_RS1_RS2 0x305b
+#define MASK_CUSTOM2_RS1_RS2 0x707f
+#define MATCH_CUSTOM2_RD 0x405b
+#define MASK_CUSTOM2_RD 0x707f
+#define MATCH_CUSTOM2_RD_RS1 0x605b
+#define MASK_CUSTOM2_RD_RS1 0x707f
+#define MATCH_CUSTOM2_RD_RS1_RS2 0x705b
+#define MASK_CUSTOM2_RD_RS1_RS2 0x707f
+#define MATCH_CUSTOM3 0x7b
+#define MASK_CUSTOM3 0x707f
+#define MATCH_CUSTOM3_RS1 0x207b
+#define MASK_CUSTOM3_RS1 0x707f
+#define MATCH_CUSTOM3_RS1_RS2 0x307b
+#define MASK_CUSTOM3_RS1_RS2 0x707f
+#define MATCH_CUSTOM3_RD 0x407b
+#define MASK_CUSTOM3_RD 0x707f
+#define MATCH_CUSTOM3_RD_RS1 0x607b
+#define MASK_CUSTOM3_RD_RS1 0x707f
+#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b
+#define MASK_CUSTOM3_RD_RS1_RS2 0x707f
+#define CSR_FFLAGS 0x1
+#define CSR_FRM 0x2
+#define CSR_FCSR 0x3
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+#define CSR_INSTRET 0xc02
+#define CSR_HPMCOUNTER3 0xc03
+#define CSR_HPMCOUNTER4 0xc04
+#define CSR_HPMCOUNTER5 0xc05
+#define CSR_HPMCOUNTER6 0xc06
+#define CSR_HPMCOUNTER7 0xc07
+#define CSR_HPMCOUNTER8 0xc08
+#define CSR_HPMCOUNTER9 0xc09
+#define CSR_HPMCOUNTER10 0xc0a
+#define CSR_HPMCOUNTER11 0xc0b
+#define CSR_HPMCOUNTER12 0xc0c
+#define CSR_HPMCOUNTER13 0xc0d
+#define CSR_HPMCOUNTER14 0xc0e
+#define CSR_HPMCOUNTER15 0xc0f
+#define CSR_HPMCOUNTER16 0xc10
+#define CSR_HPMCOUNTER17 0xc11
+#define CSR_HPMCOUNTER18 0xc12
+#define CSR_HPMCOUNTER19 0xc13
+#define CSR_HPMCOUNTER20 0xc14
+#define CSR_HPMCOUNTER21 0xc15
+#define CSR_HPMCOUNTER22 0xc16
+#define CSR_HPMCOUNTER23 0xc17
+#define CSR_HPMCOUNTER24 0xc18
+#define CSR_HPMCOUNTER25 0xc19
+#define CSR_HPMCOUNTER26 0xc1a
+#define CSR_HPMCOUNTER27 0xc1b
+#define CSR_HPMCOUNTER28 0xc1c
+#define CSR_HPMCOUNTER29 0xc1d
+#define CSR_HPMCOUNTER30 0xc1e
+#define CSR_HPMCOUNTER31 0xc1f
+#define CSR_SSTATUS 0x100
+#define CSR_SIE 0x104
+#define CSR_STVEC 0x105
+#define CSR_SCOUNTEREN 0x106
+#define CSR_SSCRATCH 0x140
+#define CSR_SEPC 0x141
+#define CSR_SCAUSE 0x142
+#define CSR_STVAL 0x143
+#define CSR_SIP 0x144
+#define CSR_SATP 0x180
+#define CSR_MSTATUS 0x300
+#define CSR_MISA 0x301
+#define CSR_MEDELEG 0x302
+#define CSR_MIDELEG 0x303
+#define CSR_MIE 0x304
+#define CSR_MTVEC 0x305
+#define CSR_MCOUNTEREN 0x306
+#define CSR_MSCRATCH 0x340
+#define CSR_MEPC 0x341
+#define CSR_MCAUSE 0x342
+#define CSR_MTVAL 0x343
+#define CSR_MIP 0x344
+#define CSR_PMPCFG0 0x3a0
+#define CSR_PMPCFG1 0x3a1
+#define CSR_PMPCFG2 0x3a2
+#define CSR_PMPCFG3 0x3a3
+#define CSR_PMPADDR0 0x3b0
+#define CSR_PMPADDR1 0x3b1
+#define CSR_PMPADDR2 0x3b2
+#define CSR_PMPADDR3 0x3b3
+#define CSR_PMPADDR4 0x3b4
+#define CSR_PMPADDR5 0x3b5
+#define CSR_PMPADDR6 0x3b6
+#define CSR_PMPADDR7 0x3b7
+#define CSR_PMPADDR8 0x3b8
+#define CSR_PMPADDR9 0x3b9
+#define CSR_PMPADDR10 0x3ba
+#define CSR_PMPADDR11 0x3bb
+#define CSR_PMPADDR12 0x3bc
+#define CSR_PMPADDR13 0x3bd
+#define CSR_PMPADDR14 0x3be
+#define CSR_PMPADDR15 0x3bf
+#define CSR_TSELECT 0x7a0
+#define CSR_TDATA1 0x7a1
+#define CSR_TDATA2 0x7a2
+#define CSR_TDATA3 0x7a3
+#define CSR_DCSR 0x7b0
+#define CSR_DPC 0x7b1
+#define CSR_DSCRATCH 0x7b2
+#define CSR_MCYCLE 0xb00
+#define CSR_MINSTRET 0xb02
+#define CSR_MHPMCOUNTER3 0xb03
+#define CSR_MHPMCOUNTER4 0xb04
+#define CSR_MHPMCOUNTER5 0xb05
+#define CSR_MHPMCOUNTER6 0xb06
+#define CSR_MHPMCOUNTER7 0xb07
+#define CSR_MHPMCOUNTER8 0xb08
+#define CSR_MHPMCOUNTER9 0xb09
+#define CSR_MHPMCOUNTER10 0xb0a
+#define CSR_MHPMCOUNTER11 0xb0b
+#define CSR_MHPMCOUNTER12 0xb0c
+#define CSR_MHPMCOUNTER13 0xb0d
+#define CSR_MHPMCOUNTER14 0xb0e
+#define CSR_MHPMCOUNTER15 0xb0f
+#define CSR_MHPMCOUNTER16 0xb10
+#define CSR_MHPMCOUNTER17 0xb11
+#define CSR_MHPMCOUNTER18 0xb12
+#define CSR_MHPMCOUNTER19 0xb13
+#define CSR_MHPMCOUNTER20 0xb14
+#define CSR_MHPMCOUNTER21 0xb15
+#define CSR_MHPMCOUNTER22 0xb16
+#define CSR_MHPMCOUNTER23 0xb17
+#define CSR_MHPMCOUNTER24 0xb18
+#define CSR_MHPMCOUNTER25 0xb19
+#define CSR_MHPMCOUNTER26 0xb1a
+#define CSR_MHPMCOUNTER27 0xb1b
+#define CSR_MHPMCOUNTER28 0xb1c
+#define CSR_MHPMCOUNTER29 0xb1d
+#define CSR_MHPMCOUNTER30 0xb1e
+#define CSR_MHPMCOUNTER31 0xb1f
+#define CSR_MHPMEVENT3 0x323
+#define CSR_MHPMEVENT4 0x324
+#define CSR_MHPMEVENT5 0x325
+#define CSR_MHPMEVENT6 0x326
+#define CSR_MHPMEVENT7 0x327
+#define CSR_MHPMEVENT8 0x328
+#define CSR_MHPMEVENT9 0x329
+#define CSR_MHPMEVENT10 0x32a
+#define CSR_MHPMEVENT11 0x32b
+#define CSR_MHPMEVENT12 0x32c
+#define CSR_MHPMEVENT13 0x32d
+#define CSR_MHPMEVENT14 0x32e
+#define CSR_MHPMEVENT15 0x32f
+#define CSR_MHPMEVENT16 0x330
+#define CSR_MHPMEVENT17 0x331
+#define CSR_MHPMEVENT18 0x332
+#define CSR_MHPMEVENT19 0x333
+#define CSR_MHPMEVENT20 0x334
+#define CSR_MHPMEVENT21 0x335
+#define CSR_MHPMEVENT22 0x336
+#define CSR_MHPMEVENT23 0x337
+#define CSR_MHPMEVENT24 0x338
+#define CSR_MHPMEVENT25 0x339
+#define CSR_MHPMEVENT26 0x33a
+#define CSR_MHPMEVENT27 0x33b
+#define CSR_MHPMEVENT28 0x33c
+#define CSR_MHPMEVENT29 0x33d
+#define CSR_MHPMEVENT30 0x33e
+#define CSR_MHPMEVENT31 0x33f
+#define CSR_MVENDORID 0xf11
+#define CSR_MARCHID 0xf12
+#define CSR_MIMPID 0xf13
+#define CSR_MHARTID 0xf14
+#define CSR_CYCLEH 0xc80
+#define CSR_TIMEH 0xc81
+#define CSR_INSTRETH 0xc82
+#define CSR_HPMCOUNTER3H 0xc83
+#define CSR_HPMCOUNTER4H 0xc84
+#define CSR_HPMCOUNTER5H 0xc85
+#define CSR_HPMCOUNTER6H 0xc86
+#define CSR_HPMCOUNTER7H 0xc87
+#define CSR_HPMCOUNTER8H 0xc88
+#define CSR_HPMCOUNTER9H 0xc89
+#define CSR_HPMCOUNTER10H 0xc8a
+#define CSR_HPMCOUNTER11H 0xc8b
+#define CSR_HPMCOUNTER12H 0xc8c
+#define CSR_HPMCOUNTER13H 0xc8d
+#define CSR_HPMCOUNTER14H 0xc8e
+#define CSR_HPMCOUNTER15H 0xc8f
+#define CSR_HPMCOUNTER16H 0xc90
+#define CSR_HPMCOUNTER17H 0xc91
+#define CSR_HPMCOUNTER18H 0xc92
+#define CSR_HPMCOUNTER19H 0xc93
+#define CSR_HPMCOUNTER20H 0xc94
+#define CSR_HPMCOUNTER21H 0xc95
+#define CSR_HPMCOUNTER22H 0xc96
+#define CSR_HPMCOUNTER23H 0xc97
+#define CSR_HPMCOUNTER24H 0xc98
+#define CSR_HPMCOUNTER25H 0xc99
+#define CSR_HPMCOUNTER26H 0xc9a
+#define CSR_HPMCOUNTER27H 0xc9b
+#define CSR_HPMCOUNTER28H 0xc9c
+#define CSR_HPMCOUNTER29H 0xc9d
+#define CSR_HPMCOUNTER30H 0xc9e
+#define CSR_HPMCOUNTER31H 0xc9f
+#define CSR_MCYCLEH 0xb80
+#define CSR_MINSTRETH 0xb82
+#define CSR_MHPMCOUNTER3H 0xb83
+#define CSR_MHPMCOUNTER4H 0xb84
+#define CSR_MHPMCOUNTER5H 0xb85
+#define CSR_MHPMCOUNTER6H 0xb86
+#define CSR_MHPMCOUNTER7H 0xb87
+#define CSR_MHPMCOUNTER8H 0xb88
+#define CSR_MHPMCOUNTER9H 0xb89
+#define CSR_MHPMCOUNTER10H 0xb8a
+#define CSR_MHPMCOUNTER11H 0xb8b
+#define CSR_MHPMCOUNTER12H 0xb8c
+#define CSR_MHPMCOUNTER13H 0xb8d
+#define CSR_MHPMCOUNTER14H 0xb8e
+#define CSR_MHPMCOUNTER15H 0xb8f
+#define CSR_MHPMCOUNTER16H 0xb90
+#define CSR_MHPMCOUNTER17H 0xb91
+#define CSR_MHPMCOUNTER18H 0xb92
+#define CSR_MHPMCOUNTER19H 0xb93
+#define CSR_MHPMCOUNTER20H 0xb94
+#define CSR_MHPMCOUNTER21H 0xb95
+#define CSR_MHPMCOUNTER22H 0xb96
+#define CSR_MHPMCOUNTER23H 0xb97
+#define CSR_MHPMCOUNTER24H 0xb98
+#define CSR_MHPMCOUNTER25H 0xb99
+#define CSR_MHPMCOUNTER26H 0xb9a
+#define CSR_MHPMCOUNTER27H 0xb9b
+#define CSR_MHPMCOUNTER28H 0xb9c
+#define CSR_MHPMCOUNTER29H 0xb9d
+#define CSR_MHPMCOUNTER30H 0xb9e
+#define CSR_MHPMCOUNTER31H 0xb9f
+#define CAUSE_MISALIGNED_FETCH 0x0
+#define CAUSE_FETCH_ACCESS 0x1
+#define CAUSE_ILLEGAL_INSTRUCTION 0x2
+#define CAUSE_BREAKPOINT 0x3
+#define CAUSE_MISALIGNED_LOAD 0x4
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_MISALIGNED_STORE 0x6
+#define CAUSE_STORE_ACCESS 0x7
+#define CAUSE_USER_ECALL 0x8
+#define CAUSE_SUPERVISOR_ECALL 0x9
+#define CAUSE_HYPERVISOR_ECALL 0xa
+#define CAUSE_MACHINE_ECALL 0xb
+#define CAUSE_FETCH_PAGE_FAULT 0xc
+#define CAUSE_LOAD_PAGE_FAULT 0xd
+#define CAUSE_STORE_PAGE_FAULT 0xf
+#endif
+#ifdef DECLARE_INSN
+DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
+DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
+DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
+DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
+DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
+DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
+DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
+DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
+DECLARE_INSN(lui, MATCH_LUI, MASK_LUI)
+DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC)
+DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI)
+DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI)
+DECLARE_INSN(slti, MATCH_SLTI, MASK_SLTI)
+DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU)
+DECLARE_INSN(xori, MATCH_XORI, MASK_XORI)
+DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI)
+DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI)
+DECLARE_INSN(ori, MATCH_ORI, MASK_ORI)
+DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI)
+DECLARE_INSN(add, MATCH_ADD, MASK_ADD)
+DECLARE_INSN(sub, MATCH_SUB, MASK_SUB)
+DECLARE_INSN(sll, MATCH_SLL, MASK_SLL)
+DECLARE_INSN(slt, MATCH_SLT, MASK_SLT)
+DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU)
+DECLARE_INSN(xor, MATCH_XOR, MASK_XOR)
+DECLARE_INSN(srl, MATCH_SRL, MASK_SRL)
+DECLARE_INSN(sra, MATCH_SRA, MASK_SRA)
+DECLARE_INSN(or, MATCH_OR, MASK_OR)
+DECLARE_INSN(and, MATCH_AND, MASK_AND)
+DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW)
+DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW)
+DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW)
+DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW)
+DECLARE_INSN(addw, MATCH_ADDW, MASK_ADDW)
+DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW)
+DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW)
+DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW)
+DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW)
+DECLARE_INSN(lb, MATCH_LB, MASK_LB)
+DECLARE_INSN(lh, MATCH_LH, MASK_LH)
+DECLARE_INSN(lw, MATCH_LW, MASK_LW)
+DECLARE_INSN(ld, MATCH_LD, MASK_LD)
+DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU)
+DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU)
+DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU)
+DECLARE_INSN(sb, MATCH_SB, MASK_SB)
+DECLARE_INSN(sh, MATCH_SH, MASK_SH)
+DECLARE_INSN(sw, MATCH_SW, MASK_SW)
+DECLARE_INSN(sd, MATCH_SD, MASK_SD)
+DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE)
+DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I)
+DECLARE_INSN(mul, MATCH_MUL, MASK_MUL)
+DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH)
+DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU)
+DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU)
+DECLARE_INSN(div, MATCH_DIV, MASK_DIV)
+DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU)
+DECLARE_INSN(rem, MATCH_REM, MASK_REM)
+DECLARE_INSN(remu, MATCH_REMU, MASK_REMU)
+DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW)
+DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW)
+DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW)
+DECLARE_INSN(remw, MATCH_REMW, MASK_REMW)
+DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW)
+DECLARE_INSN(amoadd_w, MATCH_AMOADD_W, MASK_AMOADD_W)
+DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W)
+DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W)
+DECLARE_INSN(amoand_w, MATCH_AMOAND_W, MASK_AMOAND_W)
+DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W)
+DECLARE_INSN(amomax_w, MATCH_AMOMAX_W, MASK_AMOMAX_W)
+DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W)
+DECLARE_INSN(amomaxu_w, MATCH_AMOMAXU_W, MASK_AMOMAXU_W)
+DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W)
+DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W)
+DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W)
+DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D)
+DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D)
+DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D)
+DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D)
+DECLARE_INSN(amomin_d, MATCH_AMOMIN_D, MASK_AMOMIN_D)
+DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D)
+DECLARE_INSN(amominu_d, MATCH_AMOMINU_D, MASK_AMOMINU_D)
+DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D)
+DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D)
+DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D)
+DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D)
+DECLARE_INSN(ecall, MATCH_ECALL, MASK_ECALL)
+DECLARE_INSN(ebreak, MATCH_EBREAK, MASK_EBREAK)
+DECLARE_INSN(uret, MATCH_URET, MASK_URET)
+DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
+DECLARE_INSN(mret, MATCH_MRET, MASK_MRET)
+DECLARE_INSN(dret, MATCH_DRET, MASK_DRET)
+DECLARE_INSN(sfence_vma, MATCH_SFENCE_VMA, MASK_SFENCE_VMA)
+DECLARE_INSN(wfi, MATCH_WFI, MASK_WFI)
+DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW)
+DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS)
+DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC)
+DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI)
+DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI)
+DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI)
+DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S)
+DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S)
+DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S)
+DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S)
+DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S)
+DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S)
+DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S)
+DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S)
+DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S)
+DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S)
+DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D)
+DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D)
+DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D)
+DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D)
+DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D)
+DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D)
+DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D)
+DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D)
+DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D)
+DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D)
+DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S)
+DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D)
+DECLARE_INSN(fadd_q, MATCH_FADD_Q, MASK_FADD_Q)
+DECLARE_INSN(fsub_q, MATCH_FSUB_Q, MASK_FSUB_Q)
+DECLARE_INSN(fmul_q, MATCH_FMUL_Q, MASK_FMUL_Q)
+DECLARE_INSN(fdiv_q, MATCH_FDIV_Q, MASK_FDIV_Q)
+DECLARE_INSN(fsgnj_q, MATCH_FSGNJ_Q, MASK_FSGNJ_Q)
+DECLARE_INSN(fsgnjn_q, MATCH_FSGNJN_Q, MASK_FSGNJN_Q)
+DECLARE_INSN(fsgnjx_q, MATCH_FSGNJX_Q, MASK_FSGNJX_Q)
+DECLARE_INSN(fmin_q, MATCH_FMIN_Q, MASK_FMIN_Q)
+DECLARE_INSN(fmax_q, MATCH_FMAX_Q, MASK_FMAX_Q)
+DECLARE_INSN(fcvt_s_q, MATCH_FCVT_S_Q, MASK_FCVT_S_Q)
+DECLARE_INSN(fcvt_q_s, MATCH_FCVT_Q_S, MASK_FCVT_Q_S)
+DECLARE_INSN(fcvt_d_q, MATCH_FCVT_D_Q, MASK_FCVT_D_Q)
+DECLARE_INSN(fcvt_q_d, MATCH_FCVT_Q_D, MASK_FCVT_Q_D)
+DECLARE_INSN(fsqrt_q, MATCH_FSQRT_Q, MASK_FSQRT_Q)
+DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S)
+DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S)
+DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S)
+DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D)
+DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D)
+DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D)
+DECLARE_INSN(fle_q, MATCH_FLE_Q, MASK_FLE_Q)
+DECLARE_INSN(flt_q, MATCH_FLT_Q, MASK_FLT_Q)
+DECLARE_INSN(feq_q, MATCH_FEQ_Q, MASK_FEQ_Q)
+DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S)
+DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S)
+DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S)
+DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S)
+DECLARE_INSN(fmv_x_w, MATCH_FMV_X_W, MASK_FMV_X_W)
+DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S)
+DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D)
+DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D)
+DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D)
+DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D)
+DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D)
+DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D)
+DECLARE_INSN(fcvt_w_q, MATCH_FCVT_W_Q, MASK_FCVT_W_Q)
+DECLARE_INSN(fcvt_wu_q, MATCH_FCVT_WU_Q, MASK_FCVT_WU_Q)
+DECLARE_INSN(fcvt_l_q, MATCH_FCVT_L_Q, MASK_FCVT_L_Q)
+DECLARE_INSN(fcvt_lu_q, MATCH_FCVT_LU_Q, MASK_FCVT_LU_Q)
+DECLARE_INSN(fmv_x_q, MATCH_FMV_X_Q, MASK_FMV_X_Q)
+DECLARE_INSN(fclass_q, MATCH_FCLASS_Q, MASK_FCLASS_Q)
+DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W)
+DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU)
+DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L)
+DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU)
+DECLARE_INSN(fmv_w_x, MATCH_FMV_W_X, MASK_FMV_W_X)
+DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W)
+DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU)
+DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L)
+DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU)
+DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X)
+DECLARE_INSN(fcvt_q_w, MATCH_FCVT_Q_W, MASK_FCVT_Q_W)
+DECLARE_INSN(fcvt_q_wu, MATCH_FCVT_Q_WU, MASK_FCVT_Q_WU)
+DECLARE_INSN(fcvt_q_l, MATCH_FCVT_Q_L, MASK_FCVT_Q_L)
+DECLARE_INSN(fcvt_q_lu, MATCH_FCVT_Q_LU, MASK_FCVT_Q_LU)
+DECLARE_INSN(fmv_q_x, MATCH_FMV_Q_X, MASK_FMV_Q_X)
+DECLARE_INSN(flw, MATCH_FLW, MASK_FLW)
+DECLARE_INSN(fld, MATCH_FLD, MASK_FLD)
+DECLARE_INSN(flq, MATCH_FLQ, MASK_FLQ)
+DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW)
+DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD)
+DECLARE_INSN(fsq, MATCH_FSQ, MASK_FSQ)
+DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S)
+DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S)
+DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S)
+DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S)
+DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D)
+DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D)
+DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D)
+DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D)
+DECLARE_INSN(fmadd_q, MATCH_FMADD_Q, MASK_FMADD_Q)
+DECLARE_INSN(fmsub_q, MATCH_FMSUB_Q, MASK_FMSUB_Q)
+DECLARE_INSN(fnmsub_q, MATCH_FNMSUB_Q, MASK_FNMSUB_Q)
+DECLARE_INSN(fnmadd_q, MATCH_FNMADD_Q, MASK_FNMADD_Q)
+DECLARE_INSN(c_nop, MATCH_C_NOP, MASK_C_NOP)
+DECLARE_INSN(c_addi16sp, MATCH_C_ADDI16SP, MASK_C_ADDI16SP)
+DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
+DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
+DECLARE_INSN(c_ebreak, MATCH_C_EBREAK, MASK_C_EBREAK)
+DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD)
+DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD)
+DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW)
+DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP)
+DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP)
+DECLARE_INSN(c_addi4spn, MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN)
+DECLARE_INSN(c_fld, MATCH_C_FLD, MASK_C_FLD)
+DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW)
+DECLARE_INSN(c_flw, MATCH_C_FLW, MASK_C_FLW)
+DECLARE_INSN(c_fsd, MATCH_C_FSD, MASK_C_FSD)
+DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW)
+DECLARE_INSN(c_fsw, MATCH_C_FSW, MASK_C_FSW)
+DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI)
+DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
+DECLARE_INSN(c_li, MATCH_C_LI, MASK_C_LI)
+DECLARE_INSN(c_lui, MATCH_C_LUI, MASK_C_LUI)
+DECLARE_INSN(c_srli, MATCH_C_SRLI, MASK_C_SRLI)
+DECLARE_INSN(c_srai, MATCH_C_SRAI, MASK_C_SRAI)
+DECLARE_INSN(c_andi, MATCH_C_ANDI, MASK_C_ANDI)
+DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB)
+DECLARE_INSN(c_xor, MATCH_C_XOR, MASK_C_XOR)
+DECLARE_INSN(c_or, MATCH_C_OR, MASK_C_OR)
+DECLARE_INSN(c_and, MATCH_C_AND, MASK_C_AND)
+DECLARE_INSN(c_subw, MATCH_C_SUBW, MASK_C_SUBW)
+DECLARE_INSN(c_addw, MATCH_C_ADDW, MASK_C_ADDW)
+DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
+DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
+DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
+DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI)
+DECLARE_INSN(c_fldsp, MATCH_C_FLDSP, MASK_C_FLDSP)
+DECLARE_INSN(c_lwsp, MATCH_C_LWSP, MASK_C_LWSP)
+DECLARE_INSN(c_flwsp, MATCH_C_FLWSP, MASK_C_FLWSP)
+DECLARE_INSN(c_mv, MATCH_C_MV, MASK_C_MV)
+DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD)
+DECLARE_INSN(c_fsdsp, MATCH_C_FSDSP, MASK_C_FSDSP)
+DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP)
+DECLARE_INSN(c_fswsp, MATCH_C_FSWSP, MASK_C_FSWSP)
+DECLARE_INSN(custom0, MATCH_CUSTOM0, MASK_CUSTOM0)
+DECLARE_INSN(custom0_rs1, MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1)
+DECLARE_INSN(custom0_rs1_rs2, MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2)
+DECLARE_INSN(custom0_rd, MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD)
+DECLARE_INSN(custom0_rd_rs1, MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1)
+DECLARE_INSN(custom0_rd_rs1_rs2, MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2)
+DECLARE_INSN(custom1, MATCH_CUSTOM1, MASK_CUSTOM1)
+DECLARE_INSN(custom1_rs1, MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1)
+DECLARE_INSN(custom1_rs1_rs2, MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2)
+DECLARE_INSN(custom1_rd, MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD)
+DECLARE_INSN(custom1_rd_rs1, MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1)
+DECLARE_INSN(custom1_rd_rs1_rs2, MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2)
+DECLARE_INSN(custom2, MATCH_CUSTOM2, MASK_CUSTOM2)
+DECLARE_INSN(custom2_rs1, MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1)
+DECLARE_INSN(custom2_rs1_rs2, MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2)
+DECLARE_INSN(custom2_rd, MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD)
+DECLARE_INSN(custom2_rd_rs1, MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1)
+DECLARE_INSN(custom2_rd_rs1_rs2, MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2)
+DECLARE_INSN(custom3, MATCH_CUSTOM3, MASK_CUSTOM3)
+DECLARE_INSN(custom3_rs1, MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1)
+DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2)
+DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD)
+DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1)
+DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2)
+#endif
+#ifdef DECLARE_CSR
+DECLARE_CSR(fflags, CSR_FFLAGS)
+DECLARE_CSR(frm, CSR_FRM)
+DECLARE_CSR(fcsr, CSR_FCSR)
+DECLARE_CSR(cycle, CSR_CYCLE)
+DECLARE_CSR(time, CSR_TIME)
+DECLARE_CSR(instret, CSR_INSTRET)
+DECLARE_CSR(hpmcounter3, CSR_HPMCOUNTER3)
+DECLARE_CSR(hpmcounter4, CSR_HPMCOUNTER4)
+DECLARE_CSR(hpmcounter5, CSR_HPMCOUNTER5)
+DECLARE_CSR(hpmcounter6, CSR_HPMCOUNTER6)
+DECLARE_CSR(hpmcounter7, CSR_HPMCOUNTER7)
+DECLARE_CSR(hpmcounter8, CSR_HPMCOUNTER8)
+DECLARE_CSR(hpmcounter9, CSR_HPMCOUNTER9)
+DECLARE_CSR(hpmcounter10, CSR_HPMCOUNTER10)
+DECLARE_CSR(hpmcounter11, CSR_HPMCOUNTER11)
+DECLARE_CSR(hpmcounter12, CSR_HPMCOUNTER12)
+DECLARE_CSR(hpmcounter13, CSR_HPMCOUNTER13)
+DECLARE_CSR(hpmcounter14, CSR_HPMCOUNTER14)
+DECLARE_CSR(hpmcounter15, CSR_HPMCOUNTER15)
+DECLARE_CSR(hpmcounter16, CSR_HPMCOUNTER16)
+DECLARE_CSR(hpmcounter17, CSR_HPMCOUNTER17)
+DECLARE_CSR(hpmcounter18, CSR_HPMCOUNTER18)
+DECLARE_CSR(hpmcounter19, CSR_HPMCOUNTER19)
+DECLARE_CSR(hpmcounter20, CSR_HPMCOUNTER20)
+DECLARE_CSR(hpmcounter21, CSR_HPMCOUNTER21)
+DECLARE_CSR(hpmcounter22, CSR_HPMCOUNTER22)
+DECLARE_CSR(hpmcounter23, CSR_HPMCOUNTER23)
+DECLARE_CSR(hpmcounter24, CSR_HPMCOUNTER24)
+DECLARE_CSR(hpmcounter25, CSR_HPMCOUNTER25)
+DECLARE_CSR(hpmcounter26, CSR_HPMCOUNTER26)
+DECLARE_CSR(hpmcounter27, CSR_HPMCOUNTER27)
+DECLARE_CSR(hpmcounter28, CSR_HPMCOUNTER28)
+DECLARE_CSR(hpmcounter29, CSR_HPMCOUNTER29)
+DECLARE_CSR(hpmcounter30, CSR_HPMCOUNTER30)
+DECLARE_CSR(hpmcounter31, CSR_HPMCOUNTER31)
+DECLARE_CSR(sstatus, CSR_SSTATUS)
+DECLARE_CSR(sie, CSR_SIE)
+DECLARE_CSR(stvec, CSR_STVEC)
+DECLARE_CSR(scounteren, CSR_SCOUNTEREN)
+DECLARE_CSR(sscratch, CSR_SSCRATCH)
+DECLARE_CSR(sepc, CSR_SEPC)
+DECLARE_CSR(scause, CSR_SCAUSE)
+DECLARE_CSR(stval, CSR_STVAL)
+DECLARE_CSR(sip, CSR_SIP)
+DECLARE_CSR(satp, CSR_SATP)
+DECLARE_CSR(mstatus, CSR_MSTATUS)
+DECLARE_CSR(misa, CSR_MISA)
+DECLARE_CSR(medeleg, CSR_MEDELEG)
+DECLARE_CSR(mideleg, CSR_MIDELEG)
+DECLARE_CSR(mie, CSR_MIE)
+DECLARE_CSR(mtvec, CSR_MTVEC)
+DECLARE_CSR(mcounteren, CSR_MCOUNTEREN)
+DECLARE_CSR(mscratch, CSR_MSCRATCH)
+DECLARE_CSR(mepc, CSR_MEPC)
+DECLARE_CSR(mcause, CSR_MCAUSE)
+DECLARE_CSR(mtval, CSR_MTVAL)
+DECLARE_CSR(mip, CSR_MIP)
+DECLARE_CSR(pmpcfg0, CSR_PMPCFG0)
+DECLARE_CSR(pmpcfg1, CSR_PMPCFG1)
+DECLARE_CSR(pmpcfg2, CSR_PMPCFG2)
+DECLARE_CSR(pmpcfg3, CSR_PMPCFG3)
+DECLARE_CSR(pmpaddr0, CSR_PMPADDR0)
+DECLARE_CSR(pmpaddr1, CSR_PMPADDR1)
+DECLARE_CSR(pmpaddr2, CSR_PMPADDR2)
+DECLARE_CSR(pmpaddr3, CSR_PMPADDR3)
+DECLARE_CSR(pmpaddr4, CSR_PMPADDR4)
+DECLARE_CSR(pmpaddr5, CSR_PMPADDR5)
+DECLARE_CSR(pmpaddr6, CSR_PMPADDR6)
+DECLARE_CSR(pmpaddr7, CSR_PMPADDR7)
+DECLARE_CSR(pmpaddr8, CSR_PMPADDR8)
+DECLARE_CSR(pmpaddr9, CSR_PMPADDR9)
+DECLARE_CSR(pmpaddr10, CSR_PMPADDR10)
+DECLARE_CSR(pmpaddr11, CSR_PMPADDR11)
+DECLARE_CSR(pmpaddr12, CSR_PMPADDR12)
+DECLARE_CSR(pmpaddr13, CSR_PMPADDR13)
+DECLARE_CSR(pmpaddr14, CSR_PMPADDR14)
+DECLARE_CSR(pmpaddr15, CSR_PMPADDR15)
+DECLARE_CSR(tselect, CSR_TSELECT)
+DECLARE_CSR(tdata1, CSR_TDATA1)
+DECLARE_CSR(tdata2, CSR_TDATA2)
+DECLARE_CSR(tdata3, CSR_TDATA3)
+DECLARE_CSR(dcsr, CSR_DCSR)
+DECLARE_CSR(dpc, CSR_DPC)
+DECLARE_CSR(dscratch, CSR_DSCRATCH)
+DECLARE_CSR(mcycle, CSR_MCYCLE)
+DECLARE_CSR(minstret, CSR_MINSTRET)
+DECLARE_CSR(mhpmcounter3, CSR_MHPMCOUNTER3)
+DECLARE_CSR(mhpmcounter4, CSR_MHPMCOUNTER4)
+DECLARE_CSR(mhpmcounter5, CSR_MHPMCOUNTER5)
+DECLARE_CSR(mhpmcounter6, CSR_MHPMCOUNTER6)
+DECLARE_CSR(mhpmcounter7, CSR_MHPMCOUNTER7)
+DECLARE_CSR(mhpmcounter8, CSR_MHPMCOUNTER8)
+DECLARE_CSR(mhpmcounter9, CSR_MHPMCOUNTER9)
+DECLARE_CSR(mhpmcounter10, CSR_MHPMCOUNTER10)
+DECLARE_CSR(mhpmcounter11, CSR_MHPMCOUNTER11)
+DECLARE_CSR(mhpmcounter12, CSR_MHPMCOUNTER12)
+DECLARE_CSR(mhpmcounter13, CSR_MHPMCOUNTER13)
+DECLARE_CSR(mhpmcounter14, CSR_MHPMCOUNTER14)
+DECLARE_CSR(mhpmcounter15, CSR_MHPMCOUNTER15)
+DECLARE_CSR(mhpmcounter16, CSR_MHPMCOUNTER16)
+DECLARE_CSR(mhpmcounter17, CSR_MHPMCOUNTER17)
+DECLARE_CSR(mhpmcounter18, CSR_MHPMCOUNTER18)
+DECLARE_CSR(mhpmcounter19, CSR_MHPMCOUNTER19)
+DECLARE_CSR(mhpmcounter20, CSR_MHPMCOUNTER20)
+DECLARE_CSR(mhpmcounter21, CSR_MHPMCOUNTER21)
+DECLARE_CSR(mhpmcounter22, CSR_MHPMCOUNTER22)
+DECLARE_CSR(mhpmcounter23, CSR_MHPMCOUNTER23)
+DECLARE_CSR(mhpmcounter24, CSR_MHPMCOUNTER24)
+DECLARE_CSR(mhpmcounter25, CSR_MHPMCOUNTER25)
+DECLARE_CSR(mhpmcounter26, CSR_MHPMCOUNTER26)
+DECLARE_CSR(mhpmcounter27, CSR_MHPMCOUNTER27)
+DECLARE_CSR(mhpmcounter28, CSR_MHPMCOUNTER28)
+DECLARE_CSR(mhpmcounter29, CSR_MHPMCOUNTER29)
+DECLARE_CSR(mhpmcounter30, CSR_MHPMCOUNTER30)
+DECLARE_CSR(mhpmcounter31, CSR_MHPMCOUNTER31)
+DECLARE_CSR(mhpmevent3, CSR_MHPMEVENT3)
+DECLARE_CSR(mhpmevent4, CSR_MHPMEVENT4)
+DECLARE_CSR(mhpmevent5, CSR_MHPMEVENT5)
+DECLARE_CSR(mhpmevent6, CSR_MHPMEVENT6)
+DECLARE_CSR(mhpmevent7, CSR_MHPMEVENT7)
+DECLARE_CSR(mhpmevent8, CSR_MHPMEVENT8)
+DECLARE_CSR(mhpmevent9, CSR_MHPMEVENT9)
+DECLARE_CSR(mhpmevent10, CSR_MHPMEVENT10)
+DECLARE_CSR(mhpmevent11, CSR_MHPMEVENT11)
+DECLARE_CSR(mhpmevent12, CSR_MHPMEVENT12)
+DECLARE_CSR(mhpmevent13, CSR_MHPMEVENT13)
+DECLARE_CSR(mhpmevent14, CSR_MHPMEVENT14)
+DECLARE_CSR(mhpmevent15, CSR_MHPMEVENT15)
+DECLARE_CSR(mhpmevent16, CSR_MHPMEVENT16)
+DECLARE_CSR(mhpmevent17, CSR_MHPMEVENT17)
+DECLARE_CSR(mhpmevent18, CSR_MHPMEVENT18)
+DECLARE_CSR(mhpmevent19, CSR_MHPMEVENT19)
+DECLARE_CSR(mhpmevent20, CSR_MHPMEVENT20)
+DECLARE_CSR(mhpmevent21, CSR_MHPMEVENT21)
+DECLARE_CSR(mhpmevent22, CSR_MHPMEVENT22)
+DECLARE_CSR(mhpmevent23, CSR_MHPMEVENT23)
+DECLARE_CSR(mhpmevent24, CSR_MHPMEVENT24)
+DECLARE_CSR(mhpmevent25, CSR_MHPMEVENT25)
+DECLARE_CSR(mhpmevent26, CSR_MHPMEVENT26)
+DECLARE_CSR(mhpmevent27, CSR_MHPMEVENT27)
+DECLARE_CSR(mhpmevent28, CSR_MHPMEVENT28)
+DECLARE_CSR(mhpmevent29, CSR_MHPMEVENT29)
+DECLARE_CSR(mhpmevent30, CSR_MHPMEVENT30)
+DECLARE_CSR(mhpmevent31, CSR_MHPMEVENT31)
+DECLARE_CSR(mvendorid, CSR_MVENDORID)
+DECLARE_CSR(marchid, CSR_MARCHID)
+DECLARE_CSR(mimpid, CSR_MIMPID)
+DECLARE_CSR(mhartid, CSR_MHARTID)
+DECLARE_CSR(cycleh, CSR_CYCLEH)
+DECLARE_CSR(timeh, CSR_TIMEH)
+DECLARE_CSR(instreth, CSR_INSTRETH)
+DECLARE_CSR(hpmcounter3h, CSR_HPMCOUNTER3H)
+DECLARE_CSR(hpmcounter4h, CSR_HPMCOUNTER4H)
+DECLARE_CSR(hpmcounter5h, CSR_HPMCOUNTER5H)
+DECLARE_CSR(hpmcounter6h, CSR_HPMCOUNTER6H)
+DECLARE_CSR(hpmcounter7h, CSR_HPMCOUNTER7H)
+DECLARE_CSR(hpmcounter8h, CSR_HPMCOUNTER8H)
+DECLARE_CSR(hpmcounter9h, CSR_HPMCOUNTER9H)
+DECLARE_CSR(hpmcounter10h, CSR_HPMCOUNTER10H)
+DECLARE_CSR(hpmcounter11h, CSR_HPMCOUNTER11H)
+DECLARE_CSR(hpmcounter12h, CSR_HPMCOUNTER12H)
+DECLARE_CSR(hpmcounter13h, CSR_HPMCOUNTER13H)
+DECLARE_CSR(hpmcounter14h, CSR_HPMCOUNTER14H)
+DECLARE_CSR(hpmcounter15h, CSR_HPMCOUNTER15H)
+DECLARE_CSR(hpmcounter16h, CSR_HPMCOUNTER16H)
+DECLARE_CSR(hpmcounter17h, CSR_HPMCOUNTER17H)
+DECLARE_CSR(hpmcounter18h, CSR_HPMCOUNTER18H)
+DECLARE_CSR(hpmcounter19h, CSR_HPMCOUNTER19H)
+DECLARE_CSR(hpmcounter20h, CSR_HPMCOUNTER20H)
+DECLARE_CSR(hpmcounter21h, CSR_HPMCOUNTER21H)
+DECLARE_CSR(hpmcounter22h, CSR_HPMCOUNTER22H)
+DECLARE_CSR(hpmcounter23h, CSR_HPMCOUNTER23H)
+DECLARE_CSR(hpmcounter24h, CSR_HPMCOUNTER24H)
+DECLARE_CSR(hpmcounter25h, CSR_HPMCOUNTER25H)
+DECLARE_CSR(hpmcounter26h, CSR_HPMCOUNTER26H)
+DECLARE_CSR(hpmcounter27h, CSR_HPMCOUNTER27H)
+DECLARE_CSR(hpmcounter28h, CSR_HPMCOUNTER28H)
+DECLARE_CSR(hpmcounter29h, CSR_HPMCOUNTER29H)
+DECLARE_CSR(hpmcounter30h, CSR_HPMCOUNTER30H)
+DECLARE_CSR(hpmcounter31h, CSR_HPMCOUNTER31H)
+DECLARE_CSR(mcycleh, CSR_MCYCLEH)
+DECLARE_CSR(minstreth, CSR_MINSTRETH)
+DECLARE_CSR(mhpmcounter3h, CSR_MHPMCOUNTER3H)
+DECLARE_CSR(mhpmcounter4h, CSR_MHPMCOUNTER4H)
+DECLARE_CSR(mhpmcounter5h, CSR_MHPMCOUNTER5H)
+DECLARE_CSR(mhpmcounter6h, CSR_MHPMCOUNTER6H)
+DECLARE_CSR(mhpmcounter7h, CSR_MHPMCOUNTER7H)
+DECLARE_CSR(mhpmcounter8h, CSR_MHPMCOUNTER8H)
+DECLARE_CSR(mhpmcounter9h, CSR_MHPMCOUNTER9H)
+DECLARE_CSR(mhpmcounter10h, CSR_MHPMCOUNTER10H)
+DECLARE_CSR(mhpmcounter11h, CSR_MHPMCOUNTER11H)
+DECLARE_CSR(mhpmcounter12h, CSR_MHPMCOUNTER12H)
+DECLARE_CSR(mhpmcounter13h, CSR_MHPMCOUNTER13H)
+DECLARE_CSR(mhpmcounter14h, CSR_MHPMCOUNTER14H)
+DECLARE_CSR(mhpmcounter15h, CSR_MHPMCOUNTER15H)
+DECLARE_CSR(mhpmcounter16h, CSR_MHPMCOUNTER16H)
+DECLARE_CSR(mhpmcounter17h, CSR_MHPMCOUNTER17H)
+DECLARE_CSR(mhpmcounter18h, CSR_MHPMCOUNTER18H)
+DECLARE_CSR(mhpmcounter19h, CSR_MHPMCOUNTER19H)
+DECLARE_CSR(mhpmcounter20h, CSR_MHPMCOUNTER20H)
+DECLARE_CSR(mhpmcounter21h, CSR_MHPMCOUNTER21H)
+DECLARE_CSR(mhpmcounter22h, CSR_MHPMCOUNTER22H)
+DECLARE_CSR(mhpmcounter23h, CSR_MHPMCOUNTER23H)
+DECLARE_CSR(mhpmcounter24h, CSR_MHPMCOUNTER24H)
+DECLARE_CSR(mhpmcounter25h, CSR_MHPMCOUNTER25H)
+DECLARE_CSR(mhpmcounter26h, CSR_MHPMCOUNTER26H)
+DECLARE_CSR(mhpmcounter27h, CSR_MHPMCOUNTER27H)
+DECLARE_CSR(mhpmcounter28h, CSR_MHPMCOUNTER28H)
+DECLARE_CSR(mhpmcounter29h, CSR_MHPMCOUNTER29H)
+DECLARE_CSR(mhpmcounter30h, CSR_MHPMCOUNTER30H)
+DECLARE_CSR(mhpmcounter31h, CSR_MHPMCOUNTER31H)
+#endif
+#ifdef DECLARE_CAUSE
+DECLARE_CAUSE("misaligned fetch", CAUSE_MISALIGNED_FETCH)
+DECLARE_CAUSE("fetch access", CAUSE_FETCH_ACCESS)
+DECLARE_CAUSE("illegal instruction", CAUSE_ILLEGAL_INSTRUCTION)
+DECLARE_CAUSE("breakpoint", CAUSE_BREAKPOINT)
+DECLARE_CAUSE("misaligned load", CAUSE_MISALIGNED_LOAD)
+DECLARE_CAUSE("load access", CAUSE_LOAD_ACCESS)
+DECLARE_CAUSE("misaligned store", CAUSE_MISALIGNED_STORE)
+DECLARE_CAUSE("store access", CAUSE_STORE_ACCESS)
+DECLARE_CAUSE("user_ecall", CAUSE_USER_ECALL)
+DECLARE_CAUSE("supervisor_ecall", CAUSE_SUPERVISOR_ECALL)
+DECLARE_CAUSE("hypervisor_ecall", CAUSE_HYPERVISOR_ECALL)
+DECLARE_CAUSE("machine_ecall", CAUSE_MACHINE_ECALL)
+DECLARE_CAUSE("fetch page fault", CAUSE_FETCH_PAGE_FAULT)
+DECLARE_CAUSE("load page fault", CAUSE_LOAD_PAGE_FAULT)
+DECLARE_CAUSE("store page fault", CAUSE_STORE_PAGE_FAULT)
+#endif
--- /dev/null
+/* Identifiers with a double underscore are reserved for the implementation
+ * (C11 7.1.3), so the guard avoids "__". */
+#ifndef TARGET_RISCV_GDB_REGS_H
+#define TARGET_RISCV_GDB_REGS_H
+
+/* gdb's register list is defined in riscv_gdb_reg_names gdb/riscv-tdep.c in
+ * its source tree. We must interpret the numbers the same here. */
+enum gdb_regno {
+	GDB_REGNO_ZERO = 0,	/* Read-only register, always 0. */
+	GDB_REGNO_RA = 1,	/* Return Address. */
+	GDB_REGNO_SP = 2,	/* Stack Pointer. */
+	GDB_REGNO_GP = 3,	/* Global Pointer. */
+	GDB_REGNO_TP = 4,	/* Thread Pointer. */
+	GDB_REGNO_T0,
+	GDB_REGNO_T1,
+	GDB_REGNO_T2,
+	GDB_REGNO_S0 = 8,
+	GDB_REGNO_FP = 8,	/* Frame Pointer. */
+	GDB_REGNO_S1,
+	GDB_REGNO_A0 = 10,	/* First argument. */
+	GDB_REGNO_A1 = 11,	/* Second argument. */
+	GDB_REGNO_A2,
+	GDB_REGNO_A3,
+	GDB_REGNO_A4,
+	GDB_REGNO_A5,
+	GDB_REGNO_A6,
+	GDB_REGNO_A7,
+	GDB_REGNO_S2,
+	GDB_REGNO_S3,
+	GDB_REGNO_S4,
+	GDB_REGNO_S5,
+	GDB_REGNO_S6,
+	GDB_REGNO_S7,
+	GDB_REGNO_S8,
+	GDB_REGNO_S9,
+	GDB_REGNO_S10,
+	GDB_REGNO_S11,
+	GDB_REGNO_T3,
+	GDB_REGNO_T4,
+	GDB_REGNO_T5,
+	GDB_REGNO_T6,
+	GDB_REGNO_XPR31 = GDB_REGNO_T6,	/* Last integer register (x31). */
+
+	GDB_REGNO_PC = 32,
+	/* Floating-point registers f0..f31 follow the PC. */
+	GDB_REGNO_FPR0 = 33,
+	GDB_REGNO_FT0 = GDB_REGNO_FPR0,
+	GDB_REGNO_FT1,
+	GDB_REGNO_FT2,
+	GDB_REGNO_FT3,
+	GDB_REGNO_FT4,
+	GDB_REGNO_FT5,
+	GDB_REGNO_FT6,
+	GDB_REGNO_FT7,
+	GDB_REGNO_FS0,
+	GDB_REGNO_FS1,
+	GDB_REGNO_FA0,
+	GDB_REGNO_FA1,
+	GDB_REGNO_FA2,
+	GDB_REGNO_FA3,
+	GDB_REGNO_FA4,
+	GDB_REGNO_FA5,
+	GDB_REGNO_FA6,
+	GDB_REGNO_FA7,
+	GDB_REGNO_FS2,
+	GDB_REGNO_FS3,
+	GDB_REGNO_FS4,
+	GDB_REGNO_FS5,
+	GDB_REGNO_FS6,
+	GDB_REGNO_FS7,
+	GDB_REGNO_FS8,
+	GDB_REGNO_FS9,
+	GDB_REGNO_FS10,
+	GDB_REGNO_FS11,
+	GDB_REGNO_FT8,
+	GDB_REGNO_FT9,
+	GDB_REGNO_FT10,
+	GDB_REGNO_FT11,
+	GDB_REGNO_FPR31 = GDB_REGNO_FT11,	/* Last FP register (f31). */
+	/* CSR number n is exposed to gdb as GDB_REGNO_CSR0 + n. */
+	GDB_REGNO_CSR0 = 65,
+	GDB_REGNO_TSELECT = CSR_TSELECT + GDB_REGNO_CSR0,
+	GDB_REGNO_TDATA1 = CSR_TDATA1 + GDB_REGNO_CSR0,
+	GDB_REGNO_TDATA2 = CSR_TDATA2 + GDB_REGNO_CSR0,
+	GDB_REGNO_MISA = CSR_MISA + GDB_REGNO_CSR0,
+	GDB_REGNO_DPC = CSR_DPC + GDB_REGNO_CSR0,
+	GDB_REGNO_DCSR = CSR_DCSR + GDB_REGNO_CSR0,
+	GDB_REGNO_DSCRATCH = CSR_DSCRATCH + GDB_REGNO_CSR0,
+	GDB_REGNO_MSTATUS = CSR_MSTATUS + GDB_REGNO_CSR0,
+	GDB_REGNO_CSR4095 = GDB_REGNO_CSR0 + 4095,
+	/* Virtual register holding the current privilege level
+	 * (== GDB_REGNO_CSR4095 + 1). */
+	GDB_REGNO_PRIV = 4161,
+	GDB_REGNO_COUNT
+};
+
+/* Return a human-readable name for @p regno (for log/debug output —
+ * implementation lives elsewhere; TODO confirm exact semantics there). */
+const char *gdb_regno_name(enum gdb_regno regno);
+
+#endif
--- /dev/null
+#include "encoding.h"
+
+/* Integer (x-) register numbers used by the instruction-assembly helpers
+ * below. */
+#define ZERO 0
+#define T0 5
+#define S0 8
+#define S1 9
+
+/* Extract bit field [hi:lo] of value (inclusive, hi >= lo).
+ * NOTE(review): the mask is built from a signed `1`, so a field width of 31
+ * or more bits would be undefined behavior; current callers stay well below
+ * that — consider `1u` anyway. */
+static uint32_t bits(uint32_t value, unsigned int hi, unsigned int lo)
+{
+	return (value >> lo) & ((1 << (hi+1-lo)) - 1);
+}
+
+/* Extract single bit b of value (result is 0 or 1). */
+static uint32_t bit(uint32_t value, unsigned int b)
+{
+	return (value >> b) & 1;
+}
+
+/* Encode `jal rd, imm` (UJ-type). The immediate is scattered into the
+ * instruction as imm[20|10:1|11|19:12]; bit 0 of imm is not encoded. */
+static uint32_t jal(unsigned int rd, uint32_t imm) __attribute__ ((unused));
+static uint32_t jal(unsigned int rd, uint32_t imm)
+{
+	return (bit(imm, 20) << 31) |
+		(bits(imm, 10, 1) << 21) |
+		(bit(imm, 11) << 20) |
+		(bits(imm, 19, 12) << 12) |
+		(rd << 7) |
+		MATCH_JAL;
+}
+
+/* Encode `csrrsi zero, csr, imm[4:0]`: set the given bits in csr. The rd
+ * field is left 0 so the old CSR value is discarded. */
+static uint32_t csrsi(unsigned int csr, uint16_t imm) __attribute__ ((unused));
+static uint32_t csrsi(unsigned int csr, uint16_t imm)
+{
+	return (csr << 20) |
+		(bits(imm, 4, 0) << 15) |
+		MATCH_CSRRSI;
+}
+
+/* Encode `sw src, offset(base)` (S-type store word; the 12-bit offset is
+ * split into imm[11:5] and imm[4:0] fields). */
+static uint32_t sw(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused));
+static uint32_t sw(unsigned int src, unsigned int base, uint16_t offset)
+{
+	return (bits(offset, 11, 5) << 25) |
+		(src << 20) |
+		(base << 15) |
+		(bits(offset, 4, 0) << 7) |
+		MATCH_SW;
+}
+
+/* Encode `sd src, offset(base)` (S-type store doubleword, RV64). */
+static uint32_t sd(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused));
+static uint32_t sd(unsigned int src, unsigned int base, uint16_t offset)
+{
+	return (bits(offset, 11, 5) << 25) |
+		(src << 20) |
+		(base << 15) |
+		(bits(offset, 4, 0) << 7) |
+		MATCH_SD;
+}
+
+/* Encode `sh src, offset(base)` (S-type store halfword). */
+static uint32_t sh(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused));
+static uint32_t sh(unsigned int src, unsigned int base, uint16_t offset)
+{
+	return (bits(offset, 11, 5) << 25) |
+		(src << 20) |
+		(base << 15) |
+		(bits(offset, 4, 0) << 7) |
+		MATCH_SH;
+}
+
+/* Encode `sb src, offset(base)` (S-type store byte). */
+static uint32_t sb(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused));
+static uint32_t sb(unsigned int src, unsigned int base, uint16_t offset)
+{
+	return (bits(offset, 11, 5) << 25) |
+		(src << 20) |
+		(base << 15) |
+		(bits(offset, 4, 0) << 7) |
+		MATCH_SB;
+}
+
+/* Encode `ld rd, offset(base)` (I-type load doubleword, RV64). */
+static uint32_t ld(unsigned int rd, unsigned int base, uint16_t offset) __attribute__ ((unused));
+static uint32_t ld(unsigned int rd, unsigned int base, uint16_t offset)
+{
+	return (bits(offset, 11, 0) << 20) |
+		(base << 15) |
+		(bits(rd, 4, 0) << 7) |
+		MATCH_LD;
+}
+
+/* Encode `lw rd, offset(base)` (I-type load word). */
+static uint32_t lw(unsigned int rd, unsigned int base, uint16_t offset) __attribute__ ((unused));
+static uint32_t lw(unsigned int rd, unsigned int base, uint16_t offset)
+{
+	return (bits(offset, 11, 0) << 20) |
+		(base << 15) |
+		(bits(rd, 4, 0) << 7) |
+		MATCH_LW;
+}
+
+/* Encode `lh rd, offset(base)` (I-type load halfword, sign-extended). */
+static uint32_t lh(unsigned int rd, unsigned int base, uint16_t offset) __attribute__ ((unused));
+static uint32_t lh(unsigned int rd, unsigned int base, uint16_t offset)
+{
+	return (bits(offset, 11, 0) << 20) |
+		(base << 15) |
+		(bits(rd, 4, 0) << 7) |
+		MATCH_LH;
+}
+
+/* Encode `lb rd, offset(base)` (I-type load byte, sign-extended). */
+static uint32_t lb(unsigned int rd, unsigned int base, uint16_t offset) __attribute__ ((unused));
+static uint32_t lb(unsigned int rd, unsigned int base, uint16_t offset)
+{
+	return (bits(offset, 11, 0) << 20) |
+		(base << 15) |
+		(bits(rd, 4, 0) << 7) |
+		MATCH_LB;
+}
+
+/* Encode `csrrw zero, csr, source`: write source into csr; old value is
+ * discarded (rd field left 0). */
+static uint32_t csrw(unsigned int source, unsigned int csr) __attribute__ ((unused));
+static uint32_t csrw(unsigned int source, unsigned int csr)
+{
+	return (csr << 20) | (source << 15) | MATCH_CSRRW;
+}
+
+/* Encode `addi dest, src, imm` (I-type; imm truncated to its low 12 bits). */
+static uint32_t addi(unsigned int dest, unsigned int src, uint16_t imm) __attribute__ ((unused));
+static uint32_t addi(unsigned int dest, unsigned int src, uint16_t imm)
+{
+	return (bits(imm, 11, 0) << 20) |
+		(src << 15) |
+		(dest << 7) |
+		MATCH_ADDI;
+}
+
+/* Encode `csrrs rd, csr, zero`: read csr into rd without modifying it. */
+static uint32_t csrr(unsigned int rd, unsigned int csr) __attribute__ ((unused));
+static uint32_t csrr(unsigned int rd, unsigned int csr)
+{
+	return (csr << 20) | (rd << 7) | MATCH_CSRRS;
+}
+
+/* Encode `csrrs rd, csr, rs`: read csr into rd and set the bits given in
+ * rs. */
+static uint32_t csrrs(unsigned int rd, unsigned int rs, unsigned int csr) __attribute__ ((unused));
+static uint32_t csrrs(unsigned int rd, unsigned int rs, unsigned int csr)
+{
+	return (csr << 20) | (rs << 15) | (rd << 7) | MATCH_CSRRS;
+}
+
+/* Encode `csrrw rd, csr, rs`: swap rs into csr, previous value lands in
+ * rd. */
+static uint32_t csrrw(unsigned int rd, unsigned int rs, unsigned int csr) __attribute__ ((unused));
+static uint32_t csrrw(unsigned int rd, unsigned int rs, unsigned int csr)
+{
+	return (csr << 20) | (rs << 15) | (rd << 7) | MATCH_CSRRW;
+}
+
+/* Encode `fsw src, offset(base)`: store single-precision FP register
+ * (S-type). */
+static uint32_t fsw(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused));
+static uint32_t fsw(unsigned int src, unsigned int base, uint16_t offset)
+{
+	return (bits(offset, 11, 5) << 25) |
+		(bits(src, 4, 0) << 20) |
+		(base << 15) |
+		(bits(offset, 4, 0) << 7) |
+		MATCH_FSW;
+}
+
+/* Encode `fsd src, offset(base)`: store double-precision FP register
+ * (S-type). */
+static uint32_t fsd(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused));
+static uint32_t fsd(unsigned int src, unsigned int base, uint16_t offset)
+{
+	return (bits(offset, 11, 5) << 25) |
+		(bits(src, 4, 0) << 20) |
+		(base << 15) |
+		(bits(offset, 4, 0) << 7) |
+		MATCH_FSD;
+}
+
+/* Encode `flw dest, offset(base)`: load single-precision FP register
+ * (I-type). */
+static uint32_t flw(unsigned int dest, unsigned int base, uint16_t offset) __attribute__ ((unused));
+static uint32_t flw(unsigned int dest, unsigned int base, uint16_t offset)
+{
+	return (bits(offset, 11, 0) << 20) |
+		(base << 15) |
+		(bits(dest, 4, 0) << 7) |
+		MATCH_FLW;
+}
+
+/* Encode `fld dest, offset(base)`: load double-precision FP register
+ * (I-type). */
+static uint32_t fld(unsigned int dest, unsigned int base, uint16_t offset) __attribute__ ((unused));
+static uint32_t fld(unsigned int dest, unsigned int base, uint16_t offset)
+{
+	return (bits(offset, 11, 0) << 20) |
+		(base << 15) |
+		(bits(dest, 4, 0) << 7) |
+		MATCH_FLD;
+}
+
+/* Encode `fmv.x.w dest, src`: copy the bit pattern of FP register src into
+ * integer register dest (low 32 bits). */
+static uint32_t fmv_x_w(unsigned dest, unsigned src) __attribute__ ((unused));
+static uint32_t fmv_x_w(unsigned dest, unsigned src)
+{
+	return src << 15 |
+		dest << 7 |
+		MATCH_FMV_X_W;
+}
+
+/* Encode `fmv.x.d dest, src`: copy the 64-bit pattern of FP register src
+ * into integer register dest (RV64). */
+static uint32_t fmv_x_d(unsigned dest, unsigned src) __attribute__ ((unused));
+static uint32_t fmv_x_d(unsigned dest, unsigned src)
+{
+	return src << 15 |
+		dest << 7 |
+		MATCH_FMV_X_D;
+}
+
+/* Encode `fmv.w.x dest, src`: copy integer register src's low 32 bits into
+ * FP register dest. */
+static uint32_t fmv_w_x(unsigned dest, unsigned src) __attribute__ ((unused));
+static uint32_t fmv_w_x(unsigned dest, unsigned src)
+{
+	return src << 15 |
+		dest << 7 |
+		MATCH_FMV_W_X;
+}
+
+/* Encode `fmv.d.x dest, src`: copy integer register src's 64 bits into FP
+ * register dest (RV64). */
+static uint32_t fmv_d_x(unsigned dest, unsigned src) __attribute__ ((unused));
+static uint32_t fmv_d_x(unsigned dest, unsigned src)
+{
+	return src << 15 |
+		dest << 7 |
+		MATCH_FMV_D_X;
+}
+
+/* 32-bit `ebreak` (breakpoint) opcode. */
+static uint32_t ebreak(void) __attribute__ ((unused));
+static uint32_t ebreak(void)
+{
+	return MATCH_EBREAK;
+}
+/* 16-bit compressed `c.ebreak` opcode. */
+static uint32_t ebreak_c(void) __attribute__ ((unused));
+static uint32_t ebreak_c(void)
+{
+	return MATCH_C_EBREAK;
+}
+
+static uint32_t fence_i(void) __attribute__ ((unused));
+/* Encode fence.i (synchronize instruction fetch with prior stores). */
+static uint32_t fence_i(void)
+{
+ return MATCH_FENCE_I;
+}
+
+static uint32_t lui(unsigned int dest, uint32_t imm) __attribute__ ((unused));
+/* Encode lui: load the low 20 bits of `imm` into the upper bits of `dest`. */
+static uint32_t lui(unsigned int dest, uint32_t imm)
+{
+ return MATCH_LUI | (bits(imm, 19, 0) << 12) | (dest << 7);
+}
+
+/*
+static uint32_t csrci(unsigned int csr, uint16_t imm) __attribute__ ((unused));
+static uint32_t csrci(unsigned int csr, uint16_t imm)
+{
+ return (csr << 20) |
+ (bits(imm, 4, 0) << 15) |
+ MATCH_CSRRCI;
+}
+
+static uint32_t li(unsigned int dest, uint16_t imm) __attribute__ ((unused));
+static uint32_t li(unsigned int dest, uint16_t imm)
+{
+ return addi(dest, 0, imm);
+}
+
+static uint32_t fsd(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused));
+static uint32_t fsd(unsigned int src, unsigned int base, uint16_t offset)
+{
+ return (bits(offset, 11, 5) << 25) |
+ (bits(src, 4, 0) << 20) |
+ (base << 15) |
+ (bits(offset, 4, 0) << 7) |
+ MATCH_FSD;
+}
+
+static uint32_t ori(unsigned int dest, unsigned int src, uint16_t imm) __attribute__ ((unused));
+static uint32_t ori(unsigned int dest, unsigned int src, uint16_t imm)
+{
+ return (bits(imm, 11, 0) << 20) |
+ (src << 15) |
+ (dest << 7) |
+ MATCH_ORI;
+}
+
+static uint32_t nop(void) __attribute__ ((unused));
+static uint32_t nop(void)
+{
+ return addi(0, 0, 0);
+}
+*/
+
+static uint32_t xori(unsigned int dest, unsigned int src, uint16_t imm) __attribute__ ((unused));
+/* Encode xori: dest = src ^ imm[11:0] (I-type encoding). */
+static uint32_t xori(unsigned int dest, unsigned int src, uint16_t imm)
+{
+ return MATCH_XORI | (bits(imm, 11, 0) << 20) | (src << 15) | (dest << 7);
+}
+
+static uint32_t srli(unsigned int dest, unsigned int src, uint8_t shamt) __attribute__ ((unused));
+/* Encode srli: dest = src >> shamt (logical; shamt truncated to 5 bits). */
+static uint32_t srli(unsigned int dest, unsigned int src, uint8_t shamt)
+{
+ return MATCH_SRLI | (bits(shamt, 4, 0) << 20) | (src << 15) | (dest << 7);
+}
+
+static uint32_t fence(void) __attribute__((unused));
+/* Encode fence (memory ordering barrier). */
+static uint32_t fence(void)
+{
+ return MATCH_FENCE;
+}
+
+static uint32_t auipc(unsigned int dest) __attribute__((unused));
+/* Encode auipc with a zero immediate: dest = pc. */
+static uint32_t auipc(unsigned int dest)
+{
+ return (dest << 7) | MATCH_AUIPC;
+}
--- /dev/null
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "target/target.h"
+#include "target/register.h"
+#include "riscv.h"
+#include "program.h"
+#include "helper/log.h"
+
+#include "asm.h"
+#include "encoding.h"
+
+/* Program interface. */
+/* Program interface. */
+/* Initialize *p as an empty program for `target`. The debug buffer is
+ * filled with an all-ones pattern so unwritten slots stand out in logs. */
+int riscv_program_init(struct riscv_program *p, struct target *target)
+{
+ memset(p, 0, sizeof(*p));
+ p->target = target;
+ p->target_xlen = riscv_xlen(target);
+
+ /* memset() above already zeroed instruction_count and writes_xreg[];
+  * re-initializing them, as the original code did, was redundant. */
+ for (size_t i = 0; i < RISCV_MAX_DEBUG_BUFFER_SIZE; ++i)
+  p->debug_buffer[i] = -1;
+
+ return ERROR_OK;
+}
+
+/* Copy every staged instruction into the target's debug buffer. */
+int riscv_program_write(struct riscv_program *program)
+{
+ for (unsigned i = 0; i < program->instruction_count; ++i) {
+  LOG_DEBUG("%p: debug_buffer[%02x] = DASM(0x%08x)", program, i, program->debug_buffer[i]);
+  int result = riscv_write_debug_buffer(program->target, i,
+    program->debug_buffer[i]);
+  if (result != ERROR_OK)
+   return ERROR_FAIL;
+ }
+ return ERROR_OK;
+}
+
+/** Add ebreak and execute the program. */
+int riscv_program_exec(struct riscv_program *p, struct target *t)
+{
+ keep_alive();
+
+ /* Save any GPRs the program is known to clobber so they can be
+  * restored afterwards. x0 is hardwired to zero and never saved. */
+ riscv_reg_t saved_registers[GDB_REGNO_XPR31 + 1];
+ for (size_t i = GDB_REGNO_ZERO + 1; i <= GDB_REGNO_XPR31; ++i) {
+  if (p->writes_xreg[i]) {
+   LOG_DEBUG("Saving register %d as used by program", (int)i);
+   int result = riscv_get_register(t, &saved_registers[i], i);
+   if (result != ERROR_OK)
+    return result;
+  }
+ }
+
+ if (riscv_program_ebreak(p) != ERROR_OK) {
+  LOG_ERROR("Unable to write ebreak");
+  for (size_t i = 0; i < riscv_debug_buffer_size(p->target); ++i)
+   LOG_ERROR("ram[%02x]: DASM(0x%08lx) [0x%08lx]", (int)i, (long)p->debug_buffer[i], (long)p->debug_buffer[i]);
+  return ERROR_FAIL;
+ }
+
+ if (riscv_program_write(p) != ERROR_OK)
+  return ERROR_FAIL;
+
+ if (riscv_execute_debug_buffer(t) != ERROR_OK) {
+  LOG_DEBUG("Unable to execute program %p", p);
+  return ERROR_FAIL;
+ }
+
+ /* (A read-back loop guarded by `i >= riscv_debug_buffer_size(...)` used
+  * to live here; that condition can never hold inside the loop bound, so
+  * it was dead code and has been removed.) */
+
+ /* Restore registers saved above. The bounds now match the save loop;
+  * starting at GDB_REGNO_ZERO, as before, could have restored an
+  * uninitialized slot had writes_xreg[GDB_REGNO_ZERO] ever been set. */
+ for (size_t i = GDB_REGNO_ZERO + 1; i <= GDB_REGNO_XPR31; ++i)
+  if (p->writes_xreg[i])
+   riscv_set_register(t, i, saved_registers[i]);
+
+ return ERROR_OK;
+}
+
+/* Append a store-word of register d to address b+offset. */
+int riscv_program_swr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno b, int offset)
+{
+ return riscv_program_insert(p, sw(d, b, offset));
+}
+
+/* Append a store-halfword of register d to address b+offset. */
+int riscv_program_shr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno b, int offset)
+{
+ return riscv_program_insert(p, sh(d, b, offset));
+}
+
+/* Append a store-byte of register d to address b+offset. */
+int riscv_program_sbr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno b, int offset)
+{
+ return riscv_program_insert(p, sb(d, b, offset));
+}
+
+/* Append a load-word into register d from address b+offset. */
+int riscv_program_lwr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno b, int offset)
+{
+ return riscv_program_insert(p, lw(d, b, offset));
+}
+
+/* Append a load-halfword into register d from address b+offset. */
+int riscv_program_lhr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno b, int offset)
+{
+ return riscv_program_insert(p, lh(d, b, offset));
+}
+
+/* Append a load-byte into register d from address b+offset. */
+int riscv_program_lbr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno b, int offset)
+{
+ return riscv_program_insert(p, lb(d, b, offset));
+}
+
+/* Append a read of CSR `csr` into register d (csrrs with rs1=x0). */
+int riscv_program_csrr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno csr)
+{
+ assert(csr >= GDB_REGNO_CSR0 && csr <= GDB_REGNO_CSR4095);
+ return riscv_program_insert(p, csrrs(d, GDB_REGNO_ZERO, csr - GDB_REGNO_CSR0));
+}
+
+/* Append a write of register s into CSR `csr` (csrrw with rd=x0). */
+int riscv_program_csrw(struct riscv_program *p, enum gdb_regno s, enum gdb_regno csr)
+{
+ assert(csr >= GDB_REGNO_CSR0);
+ return riscv_program_insert(p, csrrw(GDB_REGNO_ZERO, s, csr - GDB_REGNO_CSR0));
+}
+
+/* Append a fence.i (instruction fetch synchronization) to the program. */
+int riscv_program_fence_i(struct riscv_program *p)
+{
+ return riscv_program_insert(p, fence_i());
+}
+
+/* Append a fence (memory ordering barrier) to the program. */
+int riscv_program_fence(struct riscv_program *p)
+{
+ return riscv_program_insert(p, fence());
+}
+
+/* Terminate the program with an ebreak. If the program exactly fills the
+ * debug buffer and the target supplies an implicit ebreak after the buffer
+ * (impebreak), no instruction needs to be appended. */
+int riscv_program_ebreak(struct riscv_program *p)
+{
+ struct target *target = p->target;
+ RISCV_INFO(r);
+ if (p->instruction_count == riscv_debug_buffer_size(p->target) &&
+ r->impebreak) {
+  return ERROR_OK;
+ }
+ return riscv_program_insert(p, ebreak());
+}
+
+/* Append addi d = s + u (also used to synthesize li/mv sequences). */
+int riscv_program_addi(struct riscv_program *p, enum gdb_regno d, enum gdb_regno s, int16_t u)
+{
+ return riscv_program_insert(p, addi(d, s, u));
+}
+
+/* Append instruction i to the program; fails when the target's debug
+ * buffer is already full. */
+int riscv_program_insert(struct riscv_program *p, riscv_insn_t i)
+{
+ size_t capacity = riscv_debug_buffer_size(p->target);
+ if (p->instruction_count >= capacity) {
+  LOG_ERROR("Unable to insert instruction:");
+  LOG_ERROR(" instruction_count=%d", (int)p->instruction_count);
+  LOG_ERROR(" buffer size =%d", (int)capacity);
+  return ERROR_FAIL;
+ }
+
+ p->debug_buffer[p->instruction_count++] = i;
+ return ERROR_OK;
+}
--- /dev/null
+#ifndef TARGET__RISCV__PROGRAM_H
+#define TARGET__RISCV__PROGRAM_H
+
+#include "riscv.h"
+
+#define RISCV_MAX_DEBUG_BUFFER_SIZE 32
+#define RISCV_REGISTER_COUNT 32
+#define RISCV_DSCRATCH_COUNT 2
+
+/* The various RISC-V debug specifications all revolve around setting up
+ * program buffers and executing them on the target. This structure contains a
+ * single program, which can then be executed on targets. */
+struct riscv_program {
+ /* Target the program will be written to and executed on. */
+ struct target *target;
+
+ /* Instruction words, staged here before being written to the target's
+  * debug buffer. */
+ uint32_t debug_buffer[RISCV_MAX_DEBUG_BUFFER_SIZE];
+
+ /* Number of 32-bit instructions in the program. */
+ size_t instruction_count;
+
+ /* Side effects of executing this program. These must be accounted for
+  * in order to maintain correct executing of the target system. */
+ bool writes_xreg[RISCV_REGISTER_COUNT];
+
+ /* XLEN on the target. */
+ int target_xlen;
+};
+
+/* Initializes a program with the header. */
+int riscv_program_init(struct riscv_program *p, struct target *t);
+
+/* Write the program to the program buffer. */
+int riscv_program_write(struct riscv_program *program);
+
+/* Executes a program, returning 0 if the program successfully executed. Note
+ * that this may cause registers to be saved or restored, which could result in
+ * calls to things like riscv_save_register which itself could require a
+ * program to execute. That's OK, just make sure this eventually terminates.
+ * */
+int riscv_program_exec(struct riscv_program *p, struct target *t);
+int riscv_program_load(struct riscv_program *p, struct target *t);
+
+/* Clears a program, removing all the state associated with it. */
+int riscv_program_clear(struct riscv_program *p, struct target *t);
+
+/* A lower level interface, you shouldn't use this unless you have a reason. */
+int riscv_program_insert(struct riscv_program *p, riscv_insn_t i);
+
+/* There is hardware support for saving at least one register. This register
+ * doesn't need to be saved/restored the usual way, which is useful during
+ * early initialization when we can't save/restore arbitrary registers to host
+ * memory. */
+int riscv_program_save_to_dscratch(struct riscv_program *p, enum gdb_regno to_save);
+
+/* Helpers to assemble various instructions. Return 0 on success. These might
+ * assemble into a multi-instruction sequence that overwrites some other
+ * register, but those will be properly saved and restored. */
+int riscv_program_lwr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno a, int o);
+int riscv_program_lhr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno a, int o);
+int riscv_program_lbr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno a, int o);
+
+int riscv_program_swr(struct riscv_program *p, enum gdb_regno s, enum gdb_regno a, int o);
+int riscv_program_shr(struct riscv_program *p, enum gdb_regno s, enum gdb_regno a, int o);
+int riscv_program_sbr(struct riscv_program *p, enum gdb_regno s, enum gdb_regno a, int o);
+
+int riscv_program_csrr(struct riscv_program *p, enum gdb_regno d, enum gdb_regno csr);
+int riscv_program_csrw(struct riscv_program *p, enum gdb_regno s, enum gdb_regno csr);
+
+int riscv_program_fence_i(struct riscv_program *p);
+int riscv_program_fence(struct riscv_program *p);
+int riscv_program_ebreak(struct riscv_program *p);
+
+int riscv_program_addi(struct riscv_program *p, enum gdb_regno d, enum gdb_regno s, int16_t i);
+
+#endif
--- /dev/null
+/*
+ * Support for RISC-V, debug version 0.11. This was never an officially adopted
+ * spec, but SiFive made some silicon that uses it.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <time.h>
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "target/target.h"
+#include "target/algorithm.h"
+#include "target/target_type.h"
+#include "log.h"
+#include "jtag/jtag.h"
+#include "target/register.h"
+#include "target/breakpoints.h"
+#include "helper/time_support.h"
+#include "riscv.h"
+#include "asm.h"
+#include "gdb_regs.h"
+
+/**
+ * Since almost everything can be accomplished by scanning the dbus register,
+ * all functions here assume dbus is already selected. The exceptions are
+ * functions called directly by OpenOCD, which can't assume anything about
+ * what's currently in IR. They should set IR to dbus explicitly.
+ */
+
+/**
+ * Code structure
+ *
+ * At the bottom of the stack are the OpenOCD JTAG functions:
+ * jtag_add_[id]r_scan
+ * jtag_execute_query
+ * jtag_add_runtest
+ *
+ * There are a few functions to just instantly shift a register and get its
+ * value:
+ * dtmcontrol_scan
+ * idcode_scan
+ * dbus_scan
+ *
+ * Because doing one scan and waiting for the result is slow, most functions
+ * batch up a bunch of dbus writes and then execute them all at once. They use
+ * the scans "class" for this:
+ * scans_new
+ * scans_delete
+ * scans_execute
+ * scans_add_...
+ * Usually you new(), call a bunch of add functions, then execute() and look
+ * at the results by calling scans_get...()
+ *
+ * Optimized functions will directly use the scans class above, but slightly
+ * lazier code will use the cache functions that in turn use the scans
+ * functions:
+ * cache_get...
+ * cache_set...
+ * cache_write
+ * cache_set... update a local structure, which is then synced to the target
+ * with cache_write(). Only Debug RAM words that are actually changed are sent
+ * to the target. Afterwards use cache_get... to read results.
+ */
+
+/* Extract the field selected by `mask` from `reg`; dividing by the mask's
+ * lowest set bit shifts the field down to bit 0. */
+#define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
+/* Return `reg` with the field selected by `mask` replaced by `val`. */
+#define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
+
+/* Number of elements in a statically-sized array. */
+#define DIM(x) (sizeof(x)/sizeof(*x))
+
+/* Constants for legacy SiFive hardware breakpoints. */
+#define CSR_BPCONTROL_X (1<<0)
+#define CSR_BPCONTROL_W (1<<1)
+#define CSR_BPCONTROL_R (1<<2)
+#define CSR_BPCONTROL_U (1<<3)
+#define CSR_BPCONTROL_S (1<<4)
+#define CSR_BPCONTROL_H (1<<5)
+#define CSR_BPCONTROL_M (1<<6)
+#define CSR_BPCONTROL_BPMATCH (0xf<<7)
+#define CSR_BPCONTROL_BPACTION (0xff<<11)
+
+#define DEBUG_ROM_START 0x800
+#define DEBUG_ROM_RESUME (DEBUG_ROM_START + 4)
+#define DEBUG_ROM_EXCEPTION (DEBUG_ROM_START + 8)
+#define DEBUG_RAM_START 0x400
+
+#define SETHALTNOT 0x10c
+
+/*** JTAG registers. ***/
+
+#define DTMCONTROL 0x10
+#define DTMCONTROL_DBUS_RESET (1<<16)
+#define DTMCONTROL_IDLE (7<<10)
+#define DTMCONTROL_ADDRBITS (0xf<<4)
+#define DTMCONTROL_VERSION (0xf)
+
+#define DBUS 0x11
+#define DBUS_OP_START 0
+#define DBUS_OP_SIZE 2
+/* dbus operation codes (the 2-bit `op` field scanned in). */
+typedef enum {
+ DBUS_OP_NOP = 0,
+ DBUS_OP_READ = 1,
+ DBUS_OP_WRITE = 2
+} dbus_op_t;
+/* dbus status, returned in the same 2-bit field on scan-out. */
+typedef enum {
+ DBUS_STATUS_SUCCESS = 0,
+ DBUS_STATUS_FAILED = 2,
+ DBUS_STATUS_BUSY = 3
+} dbus_status_t;
+#define DBUS_DATA_START 2
+#define DBUS_DATA_SIZE 34
+#define DBUS_ADDRESS_START 36
+
+/* Generic success/failure/retry result used by polling logic. */
+typedef enum {
+ RE_OK,
+ RE_FAIL,
+ RE_AGAIN
+} riscv_error_t;
+
+/* Debug RAM "slots": XLEN-wide values exchanged with the target (see
+ * slot_offset() for where each slot lives). */
+typedef enum slot {
+ SLOT0,
+ SLOT1,
+ SLOT_LAST,
+} slot_t;
+
+/*** Debug Bus registers. ***/
+
+#define DMCONTROL 0x10
+#define DMCONTROL_INTERRUPT (((uint64_t)1)<<33)
+#define DMCONTROL_HALTNOT (((uint64_t)1)<<32)
+#define DMCONTROL_BUSERROR (7<<19)
+#define DMCONTROL_SERIAL (3<<16)
+#define DMCONTROL_AUTOINCREMENT (1<<15)
+#define DMCONTROL_ACCESS (7<<12)
+#define DMCONTROL_HARTID (0x3ff<<2)
+#define DMCONTROL_NDRESET (1<<1)
+#define DMCONTROL_FULLRESET 1
+
+#define DMINFO 0x11
+#define DMINFO_ABUSSIZE (0x7fU<<25)
+#define DMINFO_SERIALCOUNT (0xf<<21)
+#define DMINFO_ACCESS128 (1<<20)
+#define DMINFO_ACCESS64 (1<<19)
+#define DMINFO_ACCESS32 (1<<18)
+#define DMINFO_ACCESS16 (1<<17)
+#define DMINFO_ACCESS8 (1<<16)
+#define DMINFO_DRAMSIZE (0x3f<<10)
+#define DMINFO_AUTHENTICATED (1<<5)
+#define DMINFO_AUTHBUSY (1<<4)
+#define DMINFO_AUTHTYPE (3<<2)
+#define DMINFO_VERSION 3
+
+/*** Info about the core being debugged. ***/
+
+#define DBUS_ADDRESS_UNKNOWN 0xffff
+
+#define DRAM_CACHE_SIZE 16
+
+/* Parameters of a hardware trigger (watchpoint/breakpoint) being set. */
+struct trigger {
+ uint64_t address;
+ uint32_t length;
+ uint64_t mask;
+ uint64_t value;
+ /* Which accesses fire the trigger. */
+ bool read, write, execute;
+ int unique_id;
+};
+
+/* One cached word of Debug RAM (see dram_cache in riscv011_info_t). */
+struct memory_cache_line {
+ uint32_t data;
+ bool valid;
+ bool dirty;
+};
+
+/* Per-target state specific to the 0.11 debug spec implementation. */
+typedef struct {
+ /* Number of address bits in the dbus register. */
+ uint8_t addrbits;
+ /* Number of words in Debug RAM. */
+ unsigned int dramsize;
+ /* Cached copies of debug CSRs read from the hart. */
+ uint64_t dcsr;
+ uint64_t dpc;
+ uint64_t tselect;
+ /* True when the cached tselect no longer matches the target. */
+ bool tselect_dirty;
+ /* The value that mstatus actually has on the target right now. This is not
+  * the value we present to the user. That one may be stored in the
+  * reg_cache. */
+ uint64_t mstatus_actual;
+
+ struct memory_cache_line dram_cache[DRAM_CACHE_SIZE];
+
+ /* Number of run-test/idle cycles the target requests we do after each dbus
+  * access. */
+ unsigned int dtmcontrol_idle;
+
+ /* This value is incremented every time a dbus access comes back as "busy".
+  * It's used to determine how many run-test/idle cycles to feed the target
+  * in between accesses. */
+ unsigned int dbus_busy_delay;
+
+ /* This value is incremented every time we read the debug interrupt as
+  * high. It's used to add extra run-test/idle cycles after setting debug
+  * interrupt high, so ideally we never have to perform a whole extra scan
+  * before the interrupt is cleared. */
+ unsigned int interrupt_high_delay;
+
+ bool need_strict_step;
+ bool never_halted;
+} riscv011_info_t;
+
+/* The haltnot/interrupt flag bits read back from a dbus scan. */
+typedef struct {
+ bool haltnot;
+ bool interrupt;
+} bits_t;
+
+/*** Necessary prototypes. ***/
+
+static int poll_target(struct target *target, bool announce);
+static int riscv011_poll(struct target *target);
+static int get_register(struct target *target, riscv_reg_t *value, int hartid,
+ int regid);
+
+/*** Utility functions. ***/
+
+#define DEBUG_LENGTH 264
+
+/* Fetch the 0.11-specific state hanging off the generic riscv_info_t. */
+static riscv011_info_t *get_info(const struct target *target)
+{
+ riscv_info_t *info = (riscv_info_t *) target->arch_info;
+ return (riscv011_info_t *) info->version_specific;
+}
+
+/* Return the Debug RAM word index used to hold `slot`. The layout depends
+ * on XLEN because 64-bit harts need two 32-bit words per slot. */
+static unsigned int slot_offset(const struct target *target, slot_t slot)
+{
+ riscv011_info_t *info = get_info(target);
+ switch (riscv_xlen(target)) {
+  case 32:
+   switch (slot) {
+    case SLOT0: return 4;
+    case SLOT1: return 5;
+    case SLOT_LAST: return info->dramsize-1;
+   }
+   /* Break added: the original fell through into `case 64` for an
+    * out-of-range slot (same end result, but an implicit fallthrough). */
+   break;
+  case 64:
+   switch (slot) {
+    case SLOT0: return 4;
+    case SLOT1: return 6;
+    case SLOT_LAST: return info->dramsize-2;
+   }
+   break;
+ }
+ LOG_ERROR("slot_offset called with xlen=%d, slot=%d",
+   riscv_xlen(target), slot);
+ assert(0);
+ return 0; /* Silence -Werror=return-type */
+}
+
+/* Encode a load of Debug RAM `slot` into register `dest`. */
+static uint32_t load_slot(const struct target *target, unsigned int dest,
+ slot_t slot)
+{
+ unsigned int offset = DEBUG_RAM_START + 4 * slot_offset(target, slot);
+ return load(target, dest, ZERO, offset);
+}
+
+/* Encode a store of register `src` into Debug RAM `slot`. */
+static uint32_t store_slot(const struct target *target, unsigned int src,
+ slot_t slot)
+{
+ unsigned int offset = DEBUG_RAM_START + 4 * slot_offset(target, slot);
+ return store(target, src, ZERO, offset);
+}
+
+/* Map a Debug RAM word index to its dbus address; words 0x10 and above
+ * live starting at dbus address 0x40. */
+static uint16_t dram_address(unsigned int index)
+{
+ return index < 0x10 ? index : 0x40 + index - 0x10;
+}
+
+/* Shift `out` through the 32-bit DTMCONTROL register and return the value
+ * scanned out. Leaves IR pointing back at dbus.
+ * NOTE(review): on a failed JTAG queue the OpenOCD error code is returned
+ * as if it were scan data; callers cannot tell the two apart. */
+static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
+{
+ struct scan_field field;
+ uint8_t in_value[4];
+ uint8_t out_value[4];
+
+ buf_set_u32(out_value, 0, 32, out);
+
+ jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);
+
+ field.num_bits = 32;
+ field.out_value = out_value;
+ field.in_value = in_value;
+ jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
+
+ /* Always return to dbus. */
+ jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
+
+ int retval = jtag_execute_queue();
+ if (retval != ERROR_OK) {
+  LOG_ERROR("failed jtag scan: %d", retval);
+  return retval;
+ }
+
+ uint32_t in = buf_get_u32(field.in_value, 0, 32);
+ LOG_DEBUG("DTMCONTROL: 0x%x -> 0x%x", out, in);
+
+ return in;
+}
+
+/* Read the 32-bit JTAG IDCODE register. Queues a return to dbus afterwards.
+ * NOTE(review): on a failed JTAG queue the OpenOCD error code is returned
+ * as if it were scan data. */
+static uint32_t idcode_scan(struct target *target)
+{
+ struct scan_field field;
+ uint8_t in_value[4];
+
+ jtag_add_ir_scan(target->tap, &select_idcode, TAP_IDLE);
+
+ field.num_bits = 32;
+ field.out_value = NULL;
+ field.in_value = in_value;
+ jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
+
+ int retval = jtag_execute_queue();
+ if (retval != ERROR_OK) {
+  LOG_ERROR("failed jtag scan: %d", retval);
+  return retval;
+ }
+
+ /* Always return to dbus. */
+ jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
+
+ uint32_t in = buf_get_u32(field.in_value, 0, 32);
+ LOG_DEBUG("IDCODE: 0x0 -> 0x%x", in);
+
+ return in;
+}
+
+/* Back off after a dbus access came back "busy": grow the extra
+ * run-test/idle count by ~10% and clear the sticky busy state with a dbus
+ * reset. */
+static void increase_dbus_busy_delay(struct target *target)
+{
+ riscv011_info_t *info = get_info(target);
+ info->dbus_busy_delay += info->dbus_busy_delay / 10 + 1;
+ LOG_DEBUG("dtmcontrol_idle=%d, dbus_busy_delay=%d, interrupt_high_delay=%d",
+   info->dtmcontrol_idle, info->dbus_busy_delay,
+   info->interrupt_high_delay);
+
+ dtmcontrol_scan(target, DTMCONTROL_DBUS_RESET);
+}
+
+/* Back off after observing the debug interrupt still high: grow the extra
+ * run-test/idle cycles used after raising the interrupt by ~10%. */
+static void increase_interrupt_high_delay(struct target *target)
+{
+ riscv011_info_t *info = get_info(target);
+ info->interrupt_high_delay += info->interrupt_high_delay / 10 + 1;
+ LOG_DEBUG("dtmcontrol_idle=%d, dbus_busy_delay=%d, interrupt_high_delay=%d",
+   info->dtmcontrol_idle, info->dbus_busy_delay,
+   info->interrupt_high_delay);
+}
+
+/* Queue (but do not execute) one dbus scan, followed by however many
+ * run-test/idle cycles the target needs between accesses. out_value and
+ * in_value must remain valid until the JTAG queue is executed. */
+static void add_dbus_scan(const struct target *target, struct scan_field *field,
+  uint8_t *out_value, uint8_t *in_value, dbus_op_t op,
+  uint16_t address, uint64_t data)
+{
+ riscv011_info_t *info = get_info(target);
+
+ field->num_bits = info->addrbits + DBUS_OP_SIZE + DBUS_DATA_SIZE;
+ field->in_value = in_value;
+ field->out_value = out_value;
+
+ buf_set_u64(out_value, DBUS_OP_START, DBUS_OP_SIZE, op);
+ buf_set_u64(out_value, DBUS_DATA_START, DBUS_DATA_SIZE, data);
+ buf_set_u64(out_value, DBUS_ADDRESS_START, info->addrbits, address);
+
+ jtag_add_dr_scan(target->tap, 1, field, TAP_IDLE);
+
+ int idle_count = info->dtmcontrol_idle + info->dbus_busy_delay;
+ /* Raising the debug interrupt starts execution; give the hart extra idle
+  * cycles so the interrupt has likely cleared before the next scan. */
+ if (data & DMCONTROL_INTERRUPT)
+  idle_count += info->interrupt_high_delay;
+
+ if (idle_count)
+  jtag_add_runtest(idle_count, TAP_IDLE);
+}
+
+/* Pretty-print one completed dbus scan (both directions) at debug level. */
+static void dump_field(const struct scan_field *field)
+{
+ static const char * const op_string[] = {"nop", "r", "w", "?"};
+ static const char * const status_string[] = {"+", "?", "F", "b"};
+
+ if (debug_level < LOG_LVL_DEBUG)
+  return;
+
+ uint64_t out = buf_get_u64(field->out_value, 0, field->num_bits);
+ unsigned int out_op = (out >> DBUS_OP_START) & ((1 << DBUS_OP_SIZE) - 1);
+ char out_interrupt = ((out >> DBUS_DATA_START) & DMCONTROL_INTERRUPT) ? 'i' : '.';
+ char out_haltnot = ((out >> DBUS_DATA_START) & DMCONTROL_HALTNOT) ? 'h' : '.';
+ /* Use DBUS_DATA_START instead of the magic constant 2 (same value). */
+ unsigned int out_data = out >> DBUS_DATA_START;
+ unsigned int out_address = out >> DBUS_ADDRESS_START;
+ uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
+ unsigned int in_op = (in >> DBUS_OP_START) & ((1 << DBUS_OP_SIZE) - 1);
+ char in_interrupt = ((in >> DBUS_DATA_START) & DMCONTROL_INTERRUPT) ? 'i' : '.';
+ char in_haltnot = ((in >> DBUS_DATA_START) & DMCONTROL_HALTNOT) ? 'h' : '.';
+ unsigned int in_data = in >> DBUS_DATA_START;
+ unsigned int in_address = in >> DBUS_ADDRESS_START;
+
+ log_printf_lf(LOG_LVL_DEBUG,
+   __FILE__, __LINE__, "scan",
+   "%db %s %c%c:%08x @%02x -> %s %c%c:%08x @%02x",
+   field->num_bits,
+   op_string[out_op], out_interrupt, out_haltnot, out_data,
+   out_address,
+   status_string[in_op], in_interrupt, in_haltnot, in_data,
+   in_address);
+}
+
+/* Perform one synchronous dbus access and return its status. On success the
+ * scanned-out address/data are stored through address_in/data_in when those
+ * pointers are non-NULL. Assumes IR already selects dbus. */
+static dbus_status_t dbus_scan(struct target *target, uint16_t *address_in,
+  uint64_t *data_in, dbus_op_t op, uint16_t address_out, uint64_t data_out)
+{
+ riscv011_info_t *info = get_info(target);
+ uint8_t in[8] = {0};
+ uint8_t out[8];
+ struct scan_field field = {
+  .num_bits = info->addrbits + DBUS_OP_SIZE + DBUS_DATA_SIZE,
+  .out_value = out,
+  .in_value = in
+ };
+
+ /* addrbits == 0 means examine() hasn't run; scanning now would be bogus. */
+ assert(info->addrbits != 0);
+
+ buf_set_u64(out, DBUS_OP_START, DBUS_OP_SIZE, op);
+ buf_set_u64(out, DBUS_DATA_START, DBUS_DATA_SIZE, data_out);
+ buf_set_u64(out, DBUS_ADDRESS_START, info->addrbits, address_out);
+
+ /* Assume dbus is already selected. */
+ jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
+
+ int idle_count = info->dtmcontrol_idle + info->dbus_busy_delay;
+
+ if (idle_count)
+  jtag_add_runtest(idle_count, TAP_IDLE);
+
+ int retval = jtag_execute_queue();
+ if (retval != ERROR_OK) {
+  LOG_ERROR("dbus_scan failed jtag scan");
+  return DBUS_STATUS_FAILED;
+ }
+
+ if (data_in)
+  *data_in = buf_get_u64(in, DBUS_DATA_START, DBUS_DATA_SIZE);
+
+ if (address_in)
+  *address_in = buf_get_u32(in, DBUS_ADDRESS_START, info->addrbits);
+
+ dump_field(&field);
+
+ /* The 2-bit op field scanned out doubles as the status code. */
+ return buf_get_u32(in, DBUS_OP_START, DBUS_OP_SIZE);
+}
+
+/* Read the dbus register at `address`, retrying while busy (bounded at 256
+ * attempts). Because a dbus read returns the data of the *previous* access,
+ * the loop also repeats until the scanned-out address matches the request.
+ * NOTE(review): returns 0 on failure, which is indistinguishable from a
+ * legitimate zero read. */
+static uint64_t dbus_read(struct target *target, uint16_t address)
+{
+ uint64_t value;
+ dbus_status_t status;
+ uint16_t address_in;
+
+ /* If the previous read/write was to the same address, we will get the read data
+  * from the previous access.
+  * While somewhat nonintuitive, this is an efficient way to get the data.
+  */
+
+ unsigned i = 0;
+ do {
+  status = dbus_scan(target, &address_in, &value, DBUS_OP_READ, address, 0);
+  if (status == DBUS_STATUS_BUSY)
+   increase_dbus_busy_delay(target);
+  if (status == DBUS_STATUS_FAILED) {
+   LOG_ERROR("dbus_read(0x%x) failed!", address);
+   return 0;
+  }
+ } while (((status == DBUS_STATUS_BUSY) || (address_in != address)) &&
+   i++ < 256);
+
+ if (status != DBUS_STATUS_SUCCESS)
+  LOG_ERROR("failed read from 0x%x; value=0x%" PRIx64 ", status=%d\n", address, value, status);
+
+ return value;
+}
+
+/* Write `value` to dbus register `address`, retrying while the bus reports
+ * busy (bounded at 256 attempts). */
+static void dbus_write(struct target *target, uint16_t address, uint64_t value)
+{
+ dbus_status_t status = DBUS_STATUS_BUSY;
+ for (unsigned attempt = 0; status == DBUS_STATUS_BUSY && attempt < 256; attempt++) {
+  status = dbus_scan(target, NULL, NULL, DBUS_OP_WRITE, address, value);
+  if (status == DBUS_STATUS_BUSY)
+   increase_dbus_busy_delay(target);
+ }
+ if (status != DBUS_STATUS_SUCCESS)
+  LOG_ERROR("failed to write 0x%" PRIx64 " to 0x%x; status=%d\n", value, address, status);
+}
+
+/*** scans "class" ***/
+
+/* A batch of dbus scans queued together so they can be shifted in one JTAG
+ * round trip. */
+typedef struct {
+ /* Number of scans that space is reserved for. */
+ unsigned int scan_count;
+ /* Size reserved in memory for each scan, in bytes. */
+ unsigned int scan_size;
+ /* Index of the next unused scan slot. */
+ unsigned int next_scan;
+ /* Capture and drive buffers, scan_size bytes per scan. */
+ uint8_t *in;
+ uint8_t *out;
+ /* One scan_field per scan, referencing in/out above. */
+ struct scan_field *field;
+ const struct target *target;
+} scans_t;
+
+/* Allocate a scans_t with room for `scan_count` scans. Returns NULL on
+ * allocation failure (the original dereferenced unchecked malloc/calloc
+ * results). */
+static scans_t *scans_new(struct target *target, unsigned int scan_count)
+{
+ scans_t *scans = malloc(sizeof(scans_t));
+ if (!scans)
+  return NULL;
+ scans->scan_count = scan_count;
+ /* This code also gets called before xlen is detected. */
+ if (riscv_xlen(target))
+  scans->scan_size = 2 + riscv_xlen(target) / 8;
+ else
+  scans->scan_size = 2 + 128 / 8;
+ scans->next_scan = 0;
+ scans->in = calloc(scans->scan_size, scans->scan_count);
+ scans->out = calloc(scans->scan_size, scans->scan_count);
+ scans->field = calloc(scans->scan_count, sizeof(struct scan_field));
+ scans->target = target;
+ if (!scans->in || !scans->out || !scans->field) {
+  /* free(NULL) is a no-op, so partial failures are handled too. */
+  free(scans->field);
+  free(scans->out);
+  free(scans->in);
+  free(scans);
+  return NULL;
+ }
+ return scans;
+}
+
+/* Free a scans_t and its buffers; returns NULL so callers can clear their
+ * pointer in one statement. */
+static scans_t *scans_delete(scans_t *scans)
+{
+ assert(scans);
+ free(scans->field);
+ free(scans->out);
+ free(scans->in);
+ free(scans);
+ return NULL;
+}
+
+/* Discard any queued scans so the structure can be reused. */
+static void scans_reset(scans_t *scans)
+{
+ scans->next_scan = 0;
+}
+
+/* Log every queued scan's result (debug level only). */
+static void scans_dump(scans_t *scans)
+{
+ for (unsigned int i = 0; i < scans->next_scan; i++)
+  dump_field(&scans->field[i]);
+}
+
+/* Execute all queued scans in one JTAG round trip and log the results. */
+static int scans_execute(scans_t *scans)
+{
+ int retval = jtag_execute_queue();
+ if (retval != ERROR_OK) {
+  LOG_ERROR("failed jtag scan: %d", retval);
+  return retval;
+ }
+
+ scans_dump(scans);
+
+ return ERROR_OK;
+}
+
+/** Add a 32-bit dbus write to the scans structure. */
+static void scans_add_write32(scans_t *scans, uint16_t address, uint32_t data,
+  bool set_interrupt)
+{
+ /* Check capacity before touching the buffers, matching scans_add_read32;
+  * the original only asserted after the out-of-bounds write had already
+  * happened. */
+ assert(scans->next_scan < scans->scan_count);
+ const unsigned int i = scans->next_scan;
+ int data_offset = scans->scan_size * i;
+ add_dbus_scan(scans->target, &scans->field[i], scans->out + data_offset,
+   scans->in + data_offset, DBUS_OP_WRITE, address,
+   (set_interrupt ? DMCONTROL_INTERRUPT : 0) | DMCONTROL_HALTNOT | data);
+ scans->next_scan++;
+}
+
+/** Add a 32-bit dbus write for an instruction that jumps to the beginning of
+ * debug RAM. */
+static void scans_add_write_jump(scans_t *scans, uint16_t address,
+  bool set_interrupt)
+{
+ /* jal x0: relative jump back to the Debug ROM resume point. */
+ scans_add_write32(scans, address,
+   jal(0, (uint32_t) (DEBUG_ROM_RESUME - (DEBUG_RAM_START + 4*address))),
+   set_interrupt);
+}
+
+/** Add a 32-bit dbus write for an instruction that loads from the indicated
+ * slot. */
+static void scans_add_write_load(scans_t *scans, uint16_t address,
+  unsigned int reg, slot_t slot, bool set_interrupt)
+{
+ scans_add_write32(scans, address, load_slot(scans->target, reg, slot),
+   set_interrupt);
+}
+
+/** Add a 32-bit dbus write for an instruction that stores to the indicated
+ * slot. */
+static void scans_add_write_store(scans_t *scans, uint16_t address,
+  unsigned int reg, slot_t slot, bool set_interrupt)
+{
+ scans_add_write32(scans, address, store_slot(scans->target, reg, slot),
+   set_interrupt);
+}
+
+/** Add a 32-bit dbus read. */
+static void scans_add_read32(scans_t *scans, uint16_t address, bool set_interrupt)
+{
+ assert(scans->next_scan < scans->scan_count);
+ const unsigned int i = scans->next_scan;
+ int data_offset = scans->scan_size * i;
+ add_dbus_scan(scans->target, &scans->field[i], scans->out + data_offset,
+   scans->in + data_offset, DBUS_OP_READ, address,
+   (set_interrupt ? DMCONTROL_INTERRUPT : 0) | DMCONTROL_HALTNOT);
+ scans->next_scan++;
+}
+
+/** Add one or more scans to read the indicated slot. */
+static void scans_add_read(scans_t *scans, slot_t slot, bool set_interrupt)
+{
+ const struct target *target = scans->target;
+ switch (riscv_xlen(target)) {
+  case 32:
+   scans_add_read32(scans, slot_offset(target, slot), set_interrupt);
+   break;
+  case 64:
+   /* Two reads for a 64-bit slot; the interrupt (if requested) is only
+    * raised on the final scan. */
+   scans_add_read32(scans, slot_offset(target, slot), false);
+   scans_add_read32(scans, slot_offset(target, slot) + 1, set_interrupt);
+   break;
+ }
+}
+
+/* Extract `num` bits starting at `first` from scan result `index`. */
+static uint32_t scans_get_u32(scans_t *scans, unsigned int index,
+  unsigned first, unsigned num)
+{
+ return buf_get_u32(scans->in + scans->scan_size * index, first, num);
+}
+
+/* 64-bit variant of scans_get_u32(). */
+static uint64_t scans_get_u64(scans_t *scans, unsigned int index,
+  unsigned first, unsigned num)
+{
+ return buf_get_u64(scans->in + scans->scan_size * index, first, num);
+}
+
+/*** end of scans class ***/
+
+/* Read one 32-bit word of Debug RAM. */
+static uint32_t dram_read32(struct target *target, unsigned int index)
+{
+ return dbus_read(target, dram_address(index));
+}
+
+/* Write one 32-bit word of Debug RAM, optionally raising the debug
+ * interrupt to start execution. */
+static void dram_write32(struct target *target, unsigned int index, uint32_t value,
+  bool set_interrupt)
+{
+ uint64_t dbus_value = DMCONTROL_HALTNOT | value |
+  (set_interrupt ? DMCONTROL_INTERRUPT : 0);
+ dbus_write(target, dram_address(index), dbus_value);
+}
+
+/** Read the haltnot and interrupt bits. */
+/* Repeatedly scans dbus address 0 until a response for a plausible address
+ * comes back; returns all-zero bits on any failure. */
+static bits_t read_bits(struct target *target)
+{
+ uint64_t value;
+ dbus_status_t status;
+ uint16_t address_in;
+ riscv011_info_t *info = get_info(target);
+
+ bits_t err_result = {
+  .haltnot = 0,
+  .interrupt = 0
+ };
+
+ do {
+  unsigned i = 0;
+  do {
+   status = dbus_scan(target, &address_in, &value, DBUS_OP_READ, 0, 0);
+   if (status == DBUS_STATUS_BUSY) {
+    /* An all-ones response (max address, all data bits set) looks
+     * like TDO stuck high rather than a genuinely busy bus. */
+    if (address_in == (1<<info->addrbits) - 1 &&
+      value == (1ULL<<DBUS_DATA_SIZE) - 1) {
+     LOG_ERROR("TDO seems to be stuck high.");
+     return err_result;
+    }
+    increase_dbus_busy_delay(target);
+   } else if (status == DBUS_STATUS_FAILED) {
+    /* TODO: return an actual error */
+    return err_result;
+   }
+  } while (status == DBUS_STATUS_BUSY && i++ < 256);
+
+  if (i >= 256) {
+   LOG_ERROR("Failed to read from 0x%x; status=%d", address_in, status);
+   return err_result;
+  }
+ } while (address_in > 0x10 && address_in != DMCONTROL);
+
+ bits_t result = {
+  .haltnot = get_field(value, DMCONTROL_HALTNOT),
+  .interrupt = get_field(value, DMCONTROL_INTERRUPT)
+ };
+ return result;
+}
+
+/* Poll until the debug interrupt bit drops, or the configurable wall-clock
+ * timeout expires. */
+static int wait_for_debugint_clear(struct target *target, bool ignore_first)
+{
+ time_t start = time(NULL);
+ if (ignore_first) {
+  /* Throw away the results of the first read, since they'll contain the
+   * result of the read that happened just before debugint was set.
+   * (Assuming the last scan before calling this function was one that
+   * sets debugint.) */
+  read_bits(target);
+ }
+ while (1) {
+  bits_t bits = read_bits(target);
+  if (!bits.interrupt)
+   return ERROR_OK;
+  if (time(NULL) - start > riscv_command_timeout_sec) {
+   /* The two string pieces previously concatenated without a space,
+    * producing "...clear.Increase...". */
+   LOG_ERROR("Timed out waiting for debug int to clear. "
+     "Increase timeout with riscv set_command_timeout_sec.");
+   return ERROR_FAIL;
+  }
+ }
+}
+
+/* Verify that Debug RAM word `index` reads back as `expected`. */
+static int dram_check32(struct target *target, unsigned int index,
+  uint32_t expected)
+{
+ uint32_t actual = dbus_read(target, dram_address(index));
+ if (expected == actual)
+  return ERROR_OK;
+ LOG_ERROR("Wrote 0x%x to Debug RAM at %d, but read back 0x%x",
+   expected, index, actual);
+ return ERROR_FAIL;
+}
+
+/* Stage `data` into the Debug RAM cache at `index`; the line is only marked
+ * dirty when the value actually changes. */
+static void cache_set32(struct target *target, unsigned int index, uint32_t data)
+{
+ struct memory_cache_line *line = &get_info(target)->dram_cache[index];
+ if (line->valid && line->data == data) {
+  /* This is already preset on the target. */
+  LOG_DEBUG("cache[0x%x] = 0x%08x: DASM(0x%x) (hit)", index, data, data);
+  return;
+ }
+ LOG_DEBUG("cache[0x%x] = 0x%08x: DASM(0x%x)", index, data, data);
+ line->data = data;
+ line->valid = true;
+ line->dirty = true;
+}
+
+/* Stage an XLEN-wide value into the Debug RAM slot: low word always, high
+ * word only on targets wider than 32 bits. */
+static void cache_set(struct target *target, slot_t slot, uint64_t data)
+{
+	unsigned int base = slot_offset(target, slot);
+	cache_set32(target, base, (uint32_t) data);
+	if (riscv_xlen(target) > 32)
+		cache_set32(target, base + 1, (uint32_t) (data >> 32));
+}
+
+/* Stage a `jal` at Debug RAM word `index` that jumps back to the resume
+ * entry point in Debug ROM. */
+static void cache_set_jump(struct target *target, unsigned int index)
+{
+	cache_set32(target, index,
+			jal(0, (uint32_t) (DEBUG_ROM_RESUME - (DEBUG_RAM_START + 4*index))));
+}
+
+/* Stage, at Debug RAM word `index`, an instruction that loads register `reg`
+ * from the Debug RAM data slot `slot`. */
+static void cache_set_load(struct target *target, unsigned int index,
+		unsigned int reg, slot_t slot)
+{
+	uint16_t offset = DEBUG_RAM_START + 4 * slot_offset(target, slot);
+	cache_set32(target, index, load(target, reg, ZERO, offset));
+}
+
+/* Stage, at Debug RAM word `index`, an instruction that stores register `reg`
+ * into the Debug RAM data slot `slot`. */
+static void cache_set_store(struct target *target, unsigned int index,
+		unsigned int reg, slot_t slot)
+{
+	uint16_t offset = DEBUG_RAM_START + 4 * slot_offset(target, slot);
+	cache_set32(target, index, store(target, reg, ZERO, offset));
+}
+
+static void dump_debug_ram(struct target *target)
+{
+ for (unsigned int i = 0; i < DRAM_CACHE_SIZE; i++) {
+ uint32_t value = dram_read32(target, i);
+ LOG_ERROR("Debug RAM 0x%x: 0x%08x", i, value);
+ }
+}
+
+/* Call this if the code you just ran writes to debug RAM entries 0 through 3. */
+static void cache_invalidate(struct target *target)
+{
+	riscv011_info_t *info = get_info(target);
+	/* Forget everything: every entry must be re-read from the target. */
+	for (unsigned int i = 0; i < info->dramsize; i++) {
+		info->dram_cache[i].valid = false;
+		info->dram_cache[i].dirty = false;
+	}
+}
+
+/* Called by cache_write() after the program has run. Also call this if you're
+ * running programs without calling cache_write(). */
+static void cache_clean(struct target *target)
+{
+	riscv011_info_t *info = get_info(target);
+	/* Words 0-3 hold our own code and stay valid; everything past that may
+	 * have been clobbered by the program, so invalidate it. Clear all dirty
+	 * flags either way. */
+	for (unsigned int i = 0; i < info->dramsize; i++) {
+		info->dram_cache[i].dirty = false;
+		if (i >= 4)
+			info->dram_cache[i].valid = false;
+	}
+}
+
+/* Read back every clean, valid cache entry from the target and verify it
+ * matches. Dumps Debug RAM and fails if anything differs. */
+static int cache_check(struct target *target)
+{
+	riscv011_info_t *info = get_info(target);
+	unsigned int mismatches = 0;
+
+	for (unsigned int i = 0; i < info->dramsize; i++) {
+		if (!info->dram_cache[i].valid || info->dram_cache[i].dirty)
+			continue;
+		if (dram_check32(target, i, info->dram_cache[i].data) != ERROR_OK)
+			mismatches++;
+	}
+
+	if (!mismatches)
+		return ERROR_OK;
+
+	dump_debug_ram(target);
+	return ERROR_FAIL;
+}
+
+/** Write cache to the target, and optionally run the program.
+ * Then read the value at address into the cache, assuming address < 128. */
+#define CACHE_NO_READ 128
+static int cache_write(struct target *target, unsigned int address, bool run)
+{
+	LOG_DEBUG("enter");
+	riscv011_info_t *info = get_info(target);
+	/* +2 leaves room for the two trailing reads queued below. */
+	scans_t *scans = scans_new(target, info->dramsize + 2);
+
+	/* Find the highest-indexed dirty entry; the interrupt bit must be set on
+	 * the write of that entry so the program only starts once everything is
+	 * in place. */
+	unsigned int last = info->dramsize;
+	for (unsigned int i = 0; i < info->dramsize; i++) {
+		if (info->dram_cache[i].dirty)
+			last = i;
+	}
+
+	if (last == info->dramsize) {
+		/* Nothing needs to be written to RAM. */
+		dbus_write(target, DMCONTROL, DMCONTROL_HALTNOT | (run ? DMCONTROL_INTERRUPT : 0));
+
+	} else {
+		for (unsigned int i = 0; i < info->dramsize; i++) {
+			if (info->dram_cache[i].dirty) {
+				bool set_interrupt = (i == last && run);
+				scans_add_write32(scans, i, info->dram_cache[i].data,
+						set_interrupt);
+			}
+		}
+	}
+
+	if (run || address < CACHE_NO_READ) {
+		/* Throw away the results of the first read, since it'll contain the
+		 * result of the read that happened just before debugint was set. */
+		scans_add_read32(scans, address, false);
+
+		/* This scan contains the results of the read the caller requested, as
+		 * well as an interrupt bit worth looking at. */
+		scans_add_read32(scans, address, false);
+	}
+
+	int retval = scans_execute(scans);
+	if (retval != ERROR_OK) {
+		scans_delete(scans);
+		LOG_ERROR("JTAG execute failed.");
+		return retval;
+	}
+
+	/* Count busy responses; any of them means the batched scan was too fast
+	 * and has to be redone carefully below. */
+	int errors = 0;
+	for (unsigned int i = 0; i < scans->next_scan; i++) {
+		dbus_status_t status = scans_get_u32(scans, i, DBUS_OP_START,
+				DBUS_OP_SIZE);
+		switch (status) {
+			case DBUS_STATUS_SUCCESS:
+				break;
+			case DBUS_STATUS_FAILED:
+				LOG_ERROR("Debug RAM write failed. Hardware error?");
+				scans_delete(scans);
+				return ERROR_FAIL;
+			case DBUS_STATUS_BUSY:
+				errors++;
+				break;
+			default:
+				LOG_ERROR("Got invalid bus access status: %d", status);
+				scans_delete(scans);
+				return ERROR_FAIL;
+		}
+	}
+
+	if (errors) {
+		increase_dbus_busy_delay(target);
+
+		/* Try again, using the slow careful code.
+		 * Write all RAM, just to be extra cautious. */
+		for (unsigned int i = 0; i < info->dramsize; i++) {
+			if (i == last && run)
+				dram_write32(target, last, info->dram_cache[last].data, true);
+			else
+				dram_write32(target, i, info->dram_cache[i].data, false);
+			info->dram_cache[i].dirty = false;
+		}
+		if (run)
+			cache_clean(target);
+
+		if (wait_for_debugint_clear(target, true) != ERROR_OK) {
+			LOG_ERROR("Debug interrupt didn't clear.");
+			dump_debug_ram(target);
+			scans_delete(scans);
+			return ERROR_FAIL;
+		}
+
+	} else {
+		/* Fast path succeeded; just update cache bookkeeping. */
+		if (run) {
+			cache_clean(target);
+		} else {
+			for (unsigned int i = 0; i < info->dramsize; i++)
+				info->dram_cache[i].dirty = false;
+		}
+
+		if (run || address < CACHE_NO_READ) {
+			/* Bit 33 of the scan result carries the interrupt state. */
+			int interrupt = scans_get_u32(scans, scans->next_scan-1,
+					DBUS_DATA_START + 33, 1);
+			if (interrupt) {
+				increase_interrupt_high_delay(target);
+				/* Slow path wait for it to clear. */
+				if (wait_for_debugint_clear(target, false) != ERROR_OK) {
+					LOG_ERROR("Debug interrupt didn't clear.");
+					dump_debug_ram(target);
+					scans_delete(scans);
+					return ERROR_FAIL;
+				}
+			} else {
+				/* We read a useful value in that last scan. */
+				unsigned int read_addr = scans_get_u32(scans, scans->next_scan-1,
+						DBUS_ADDRESS_START, info->addrbits);
+				if (read_addr != address) {
+					LOG_INFO("Got data from 0x%x but expected it from 0x%x",
+							read_addr, address);
+				}
+				info->dram_cache[read_addr].data =
+					scans_get_u32(scans, scans->next_scan-1, DBUS_DATA_START, 32);
+				info->dram_cache[read_addr].valid = true;
+			}
+		}
+	}
+
+	scans_delete(scans);
+	LOG_DEBUG("exit");
+
+	return ERROR_OK;
+}
+
+/* Fetch a Debug RAM word via the cache, filling the entry from the target
+ * on demand. */
+static uint32_t cache_get32(struct target *target, unsigned int address)
+{
+	riscv011_info_t *info = get_info(target);
+	if (!info->dram_cache[address].valid) {
+		info->dram_cache[address].valid = true;
+		info->dram_cache[address].data = dram_read32(target, address);
+	}
+	return info->dram_cache[address].data;
+}
+
+/* Fetch an XLEN-wide value from a Debug RAM slot via the cache. */
+static uint64_t cache_get(struct target *target, slot_t slot)
+{
+	unsigned int base = slot_offset(target, slot);
+	uint64_t result = cache_get32(target, base);
+	if (riscv_xlen(target) > 32) {
+		uint64_t high = cache_get32(target, base + 1);
+		result |= high << 32;
+	}
+	return result;
+}
+
+/* Write instruction that jumps from the specified word in Debug RAM to resume
+ * in Debug ROM. Unlike cache_set_jump(), this writes straight to the target,
+ * bypassing the cache. */
+static void dram_write_jump(struct target *target, unsigned int index,
+		bool set_interrupt)
+{
+	dram_write32(target, index,
+			jal(0, (uint32_t) (DEBUG_ROM_RESUME - (DEBUG_RAM_START + 4*index))),
+			set_interrupt);
+}
+
+/* Poll the target until it reaches `state` or riscv_command_timeout_sec
+ * wall-clock seconds elapse. Returns ERROR_OK, a poll error, or ERROR_FAIL
+ * on timeout. */
+static int wait_for_state(struct target *target, enum target_state state)
+{
+	time_t start = time(NULL);
+	while (1) {
+		int result = riscv011_poll(target);
+		if (result != ERROR_OK)
+			return result;
+		if (target->state == state)
+			return ERROR_OK;
+		if (time(NULL) - start > riscv_command_timeout_sec) {
+			LOG_ERROR("Timed out waiting for state %d. "
+					"Increase timeout with riscv set_command_timeout_sec.", state);
+			return ERROR_FAIL;
+		}
+	}
+}
+
+/* Read CSR `csr` into *value by running a csrr+store program from Debug RAM.
+ * On an exception, *value is set to all-ones and ERROR_FAIL is returned
+ * (clobbers S0 on the target; Debug ROM restores it). */
+static int read_csr(struct target *target, uint64_t *value, uint32_t csr)
+{
+	riscv011_info_t *info = get_info(target);
+	cache_set32(target, 0, csrr(S0, csr));
+	cache_set_store(target, 1, S0, SLOT0);
+	cache_set_jump(target, 2);
+	if (cache_write(target, 4, true) != ERROR_OK)
+		return ERROR_FAIL;
+	*value = cache_get(target, SLOT0);
+	LOG_DEBUG("csr 0x%x = 0x%" PRIx64, csr, *value);
+
+	/* The last Debug RAM word holds the exception flag written by Debug ROM. */
+	uint32_t exception = cache_get32(target, info->dramsize-1);
+	if (exception) {
+		LOG_WARNING("Got exception 0x%x when reading %s", exception,
+				gdb_regno_name(GDB_REGNO_CSR0 + csr));
+		*value = ~0;
+		return ERROR_FAIL;
+	}
+
+	return ERROR_OK;
+}
+
+/* Write `value` to CSR `csr` by running a load+csrw program from Debug RAM.
+ * (Clobbers S0 on the target; Debug ROM restores it.) */
+static int write_csr(struct target *target, uint32_t csr, uint64_t value)
+{
+	LOG_DEBUG("csr 0x%x <- 0x%" PRIx64, csr, value);
+	cache_set_load(target, 0, S0, SLOT0);
+	cache_set32(target, 1, csrw(S0, csr));
+	cache_set_jump(target, 2);
+	cache_set(target, SLOT0, value);
+	if (cache_write(target, 4, true) != ERROR_OK)
+		return ERROR_FAIL;
+
+	return ERROR_OK;
+}
+
+/* Write `value` into general-purpose register `gpr` by running a load from
+ * the Debug RAM slot. */
+static int write_gpr(struct target *target, unsigned int gpr, uint64_t value)
+{
+	cache_set_load(target, 0, gpr, SLOT0);
+	cache_set_jump(target, 1);
+	cache_set(target, SLOT0, value);
+	int result = cache_write(target, 4, true);
+	return result == ERROR_OK ? ERROR_OK : ERROR_FAIL;
+}
+
+/* Refresh the cached tselect value from the target if the cache is stale.
+ * NOTE(review): tselect_dirty appears to mean "info->tselect no longer
+ * reflects the target's tselect CSR" — confirm against callers. */
+static int maybe_read_tselect(struct target *target)
+{
+	riscv011_info_t *info = get_info(target);
+
+	if (info->tselect_dirty) {
+		int result = read_csr(target, &info->tselect, CSR_TSELECT);
+		if (result != ERROR_OK)
+			return result;
+		info->tselect_dirty = false;
+	}
+
+	return ERROR_OK;
+}
+
+/* Write the cached tselect value back to the target unless it is already
+ * marked dirty. NOTE(review): the dirty flag is set after a successful write
+ * here and cleared by maybe_read_tselect(); the two functions form a pair —
+ * verify the intended invariant against their call sites. */
+static int maybe_write_tselect(struct target *target)
+{
+	riscv011_info_t *info = get_info(target);
+
+	if (!info->tselect_dirty) {
+		int result = write_csr(target, CSR_TSELECT, info->tselect);
+		if (result != ERROR_OK)
+			return result;
+		info->tselect_dirty = true;
+	}
+
+	return ERROR_OK;
+}
+
+/* Restore dpc (and mstatus if the user changed it), program Debug RAM with
+ * the dcsr update sequence, and let the hart run (optionally single-step).
+ * Returns ERROR_OK once the debug interrupt has cleared. */
+static int execute_resume(struct target *target, bool step)
+{
+	riscv011_info_t *info = get_info(target);
+
+	LOG_DEBUG("step=%d", step);
+
+	/* Propagate failure instead of silently discarding the result, as the
+	 * original code did. */
+	int result = maybe_write_tselect(target);
+	if (result != ERROR_OK)
+		return result;
+
+	/* TODO: check if dpc is dirty (which also is true if an exception was hit
+	 * at any time) */
+	cache_set_load(target, 0, S0, SLOT0);
+	cache_set32(target, 1, csrw(S0, CSR_DPC));
+	cache_set_jump(target, 2);
+	cache_set(target, SLOT0, info->dpc);
+	if (cache_write(target, 4, true) != ERROR_OK)
+		return ERROR_FAIL;
+
+	/* If gdb changed mstatus, write the user's value back before resuming. */
+	struct reg *mstatus_reg = &target->reg_cache->reg_list[GDB_REGNO_MSTATUS];
+	if (mstatus_reg->valid) {
+		uint64_t mstatus_user = buf_get_u64(mstatus_reg->value, 0, riscv_xlen(target));
+		if (mstatus_user != info->mstatus_actual) {
+			cache_set_load(target, 0, S0, SLOT0);
+			cache_set32(target, 1, csrw(S0, CSR_MSTATUS));
+			cache_set_jump(target, 2);
+			cache_set(target, SLOT0, mstatus_user);
+			if (cache_write(target, 4, true) != ERROR_OK)
+				return ERROR_FAIL;
+		}
+	}
+
+	/* Trap ebreaks in every privilege mode, and clear the halt request. */
+	info->dcsr |= DCSR_EBREAKM | DCSR_EBREAKH | DCSR_EBREAKS | DCSR_EBREAKU;
+	info->dcsr &= ~DCSR_HALT;
+
+	if (step)
+		info->dcsr |= DCSR_STEP;
+	else
+		info->dcsr &= ~DCSR_STEP;
+
+	dram_write32(target, 0, lw(S0, ZERO, DEBUG_RAM_START + 16), false);
+	dram_write32(target, 1, csrw(S0, CSR_DCSR), false);
+	dram_write32(target, 2, fence_i(), false);
+	dram_write_jump(target, 3, false);
+
+	/* Write DCSR value, set interrupt and clear haltnot. */
+	uint64_t dbus_value = DMCONTROL_INTERRUPT | info->dcsr;
+	dbus_write(target, dram_address(4), dbus_value);
+
+	cache_invalidate(target);
+
+	if (wait_for_debugint_clear(target, true) != ERROR_OK) {
+		LOG_ERROR("Debug interrupt didn't clear.");
+		return ERROR_FAIL;
+	}
+
+	target->state = TARGET_RUNNING;
+	register_cache_invalidate(target->reg_cache);
+
+	return ERROR_OK;
+}
+
+/* Execute a step, and wait for reentry into Debug Mode. */
+static int full_step(struct target *target, bool announce)
+{
+	int result = execute_resume(target, true);
+	if (result != ERROR_OK)
+		return result;
+	time_t start = time(NULL);
+	while (1) {
+		result = poll_target(target, announce);
+		if (result != ERROR_OK)
+			return result;
+		if (target->state != TARGET_DEBUG_RUNNING)
+			break;
+		if (time(NULL) - start > riscv_command_timeout_sec) {
+			/* Fixed: the two string halves concatenated without a space,
+			 * producing "complete.Increase". */
+			LOG_ERROR("Timed out waiting for step to complete. "
+					"Increase timeout with riscv set_command_timeout_sec");
+			return ERROR_FAIL;
+		}
+	}
+	return ERROR_OK;
+}
+
+/* Resume (or step) the hart. debug_execution mode is not implemented. */
+static int resume(struct target *target, int debug_execution, bool step)
+{
+	if (!debug_execution)
+		return execute_resume(target, step);
+
+	LOG_ERROR("TODO: debug_execution is true");
+	return ERROR_FAIL;
+}
+
+/* Return the cached value of register `number`. The entry must already be
+ * valid (populated by handle_halt_routine() or a register read); asserting
+ * here catches logic errors rather than returning garbage. */
+static uint64_t reg_cache_get(struct target *target, unsigned int number)
+{
+	struct reg *r = &target->reg_cache->reg_list[number];
+	if (!r->valid) {
+		LOG_ERROR("Register cache entry for %d is invalid!", number);
+		assert(r->valid);
+	}
+	uint64_t value = buf_get_u64(r->value, 0, r->size);
+	LOG_DEBUG("%s = 0x%" PRIx64, r->name, value);
+	return value;
+}
+
+/* Store `value` into the register cache entry for `number` and mark it
+ * valid. */
+static void reg_cache_set(struct target *target, unsigned int number,
+		uint64_t value)
+{
+	struct reg *entry = &target->reg_cache->reg_list[number];
+	LOG_DEBUG("%s <= 0x%" PRIx64, entry->name, value);
+	buf_set_u64(entry->value, 0, entry->size, value);
+	entry->valid = true;
+}
+
+/* Ensure info->mstatus_actual reflects the hart's real mstatus, by forcing a
+ * register read when the cached copy has never been populated. */
+static int update_mstatus_actual(struct target *target)
+{
+	struct reg *mstatus_reg = &target->reg_cache->reg_list[GDB_REGNO_MSTATUS];
+	if (mstatus_reg->valid) {
+		/* We previously made it valid. */
+		return ERROR_OK;
+	}
+
+	/* Force reading the register. In that process mstatus_actual will be
+	 * updated. */
+	riscv_reg_t mstatus;
+	return get_register(target, &mstatus, 0, GDB_REGNO_MSTATUS);
+}
+
+/*** OpenOCD target functions. ***/
+
+/* Read a register the slow way by running a program from Debug RAM.
+ * Currently only CSRs are supported; GPRs/PC/FPRs are handled by
+ * get_register() before it falls back to this. On an exception *value is set
+ * to all-ones and ERROR_FAIL is returned. */
+static int register_read(struct target *target, riscv_reg_t *value, int regnum)
+{
+	riscv011_info_t *info = get_info(target);
+	if (regnum >= GDB_REGNO_CSR0 && regnum <= GDB_REGNO_CSR4095) {
+		cache_set32(target, 0, csrr(S0, regnum - GDB_REGNO_CSR0));
+		cache_set_store(target, 1, S0, SLOT0);
+		cache_set_jump(target, 2);
+	} else {
+		LOG_ERROR("Don't know how to read register %d", regnum);
+		return ERROR_FAIL;
+	}
+
+	if (cache_write(target, 4, true) != ERROR_OK)
+		return ERROR_FAIL;
+
+	/* The last Debug RAM word holds the exception flag written by Debug ROM. */
+	uint32_t exception = cache_get32(target, info->dramsize-1);
+	if (exception) {
+		LOG_WARNING("Got exception 0x%x when reading %s", exception, gdb_regno_name(regnum));
+		*value = ~0;
+		return ERROR_FAIL;
+	}
+
+	*value = cache_get(target, SLOT0);
+	LOG_DEBUG("reg[%d]=0x%" PRIx64, regnum, *value);
+
+	/* Keep the shadow copy of mstatus in sync for execute_resume(). */
+	if (regnum == GDB_REGNO_MSTATUS)
+		info->mstatus_actual = *value;
+
+	return ERROR_OK;
+}
+
+/* Write the register. No caching or games. */
+static int register_write(struct target *target, unsigned int number,
+		uint64_t value)
+{
+	riscv011_info_t *info = get_info(target);
+
+	maybe_write_tselect(target);
+
+	/* Stage a small program in Debug RAM that moves the value from SLOT0
+	 * into the destination register. S0/S1 need special handling because
+	 * Debug ROM itself uses them (S0 lives in dscratch while halted). */
+	if (number == S0) {
+		cache_set_load(target, 0, S0, SLOT0);
+		cache_set32(target, 1, csrw(S0, CSR_DSCRATCH));
+		cache_set_jump(target, 2);
+	} else if (number == S1) {
+		cache_set_load(target, 0, S0, SLOT0);
+		cache_set_store(target, 1, S0, SLOT_LAST);
+		cache_set_jump(target, 2);
+	} else if (number <= GDB_REGNO_XPR31) {
+		cache_set_load(target, 0, number - GDB_REGNO_ZERO, SLOT0);
+		cache_set_jump(target, 1);
+	} else if (number == GDB_REGNO_PC) {
+		/* dpc is only written out on resume, so just cache it. */
+		info->dpc = value;
+		return ERROR_OK;
+	} else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
+		int result = update_mstatus_actual(target);
+		if (result != ERROR_OK)
+			return result;
+		unsigned i = 0;
+		/* The FPU must be enabled (mstatus.FS != 0) before FPR access. */
+		if ((info->mstatus_actual & MSTATUS_FS) == 0) {
+			info->mstatus_actual = set_field(info->mstatus_actual, MSTATUS_FS, 1);
+			cache_set_load(target, i++, S0, SLOT1);
+			cache_set32(target, i++, csrw(S0, CSR_MSTATUS));
+			cache_set(target, SLOT1, info->mstatus_actual);
+		}
+
+		if (riscv_xlen(target) == 32)
+			cache_set32(target, i++, flw(number - GDB_REGNO_FPR0, 0, DEBUG_RAM_START + 16));
+		else
+			cache_set32(target, i++, fld(number - GDB_REGNO_FPR0, 0, DEBUG_RAM_START + 16));
+		cache_set_jump(target, i++);
+	} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
+		cache_set_load(target, 0, S0, SLOT0);
+		cache_set32(target, 1, csrw(S0, number - GDB_REGNO_CSR0));
+		cache_set_jump(target, 2);
+
+		/* Keep the shadow copy in sync for execute_resume(). */
+		if (number == GDB_REGNO_MSTATUS)
+			info->mstatus_actual = value;
+	} else if (number == GDB_REGNO_PRIV) {
+		/* Privilege level is a dcsr field, applied on resume. */
+		info->dcsr = set_field(info->dcsr, DCSR_PRV, value);
+		return ERROR_OK;
+	} else {
+		LOG_ERROR("Don't know how to write register %d", number);
+		return ERROR_FAIL;
+	}
+
+	cache_set(target, SLOT0, value);
+	if (cache_write(target, info->dramsize - 1, true) != ERROR_OK)
+		return ERROR_FAIL;
+
+	/* The last Debug RAM word holds the exception flag written by Debug ROM. */
+	uint32_t exception = cache_get32(target, info->dramsize-1);
+	if (exception) {
+		LOG_WARNING("Got exception 0x%x when writing %s", exception,
+				gdb_regno_name(number));
+		return ERROR_FAIL;
+	}
+
+	return ERROR_OK;
+}
+
+/* Read register `regid` of hart 0 into *value, using the register cache, the
+ * cached dpc/dcsr, or a program run from Debug RAM, as appropriate. */
+static int get_register(struct target *target, riscv_reg_t *value, int hartid,
+		int regid)
+{
+	assert(hartid == 0);
+	riscv011_info_t *info = get_info(target);
+
+	maybe_write_tselect(target);
+
+	if (regid <= GDB_REGNO_XPR31) {
+		*value = reg_cache_get(target, regid);
+	} else if (regid == GDB_REGNO_PC) {
+		*value = info->dpc;
+	} else if (regid >= GDB_REGNO_FPR0 && regid <= GDB_REGNO_FPR31) {
+		int result = update_mstatus_actual(target);
+		if (result != ERROR_OK)
+			return result;
+		unsigned i = 0;
+		/* The FPU must be enabled (mstatus.FS != 0) before FPR access. */
+		if ((info->mstatus_actual & MSTATUS_FS) == 0) {
+			info->mstatus_actual = set_field(info->mstatus_actual, MSTATUS_FS, 1);
+			cache_set_load(target, i++, S0, SLOT1);
+			cache_set32(target, i++, csrw(S0, CSR_MSTATUS));
+			cache_set(target, SLOT1, info->mstatus_actual);
+		}
+
+		if (riscv_xlen(target) == 32)
+			cache_set32(target, i++, fsw(regid - GDB_REGNO_FPR0, 0, DEBUG_RAM_START + 16));
+		else
+			cache_set32(target, i++, fsd(regid - GDB_REGNO_FPR0, 0, DEBUG_RAM_START + 16));
+		cache_set_jump(target, i++);
+
+		if (cache_write(target, 4, true) != ERROR_OK)
+			return ERROR_FAIL;
+
+		/* The store above put the FPR into the Debug RAM slot. Previously the
+		 * value was never copied out, leaving *value uninitialized. */
+		*value = cache_get(target, SLOT0);
+	} else if (regid == GDB_REGNO_PRIV) {
+		*value = get_field(info->dcsr, DCSR_PRV);
+	} else {
+		int result = register_read(target, value, regid);
+		if (result != ERROR_OK)
+			return result;
+	}
+
+	if (regid == GDB_REGNO_MSTATUS)
+		target->reg_cache->reg_list[regid].valid = true;
+
+	return ERROR_OK;
+}
+
+/* riscv_info_t hook: write a register. Only hart 0 exists in 0.11. */
+static int set_register(struct target *target, int hartid, int regid,
+		uint64_t value)
+{
+	assert(hartid == 0);
+
+	return register_write(target, (unsigned int) regid, value);
+}
+
+/* Request a halt: run a program that sets dcsr.halt and stores mhartid to
+ * the SETHALTNOT address so poll() sees the halt. */
+static int halt(struct target *target)
+{
+	LOG_DEBUG("riscv_halt()");
+	jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
+
+	cache_set32(target, 0, csrsi(CSR_DCSR, DCSR_HALT));
+	cache_set32(target, 1, csrr(S0, CSR_MHARTID));
+	cache_set32(target, 2, sw(S0, ZERO, SETHALTNOT));
+	cache_set_jump(target, 3);
+
+	if (cache_write(target, 4, true) != ERROR_OK) {
+		LOG_ERROR("cache_write() failed.");
+		return ERROR_FAIL;
+	}
+
+	return ERROR_OK;
+}
+
+/* Target init hook: install the 0.11 register accessors and allocate the
+ * version-specific state. */
+static int init_target(struct command_context *cmd_ctx,
+		struct target *target)
+{
+	LOG_DEBUG("init");
+	riscv_info_t *generic_info = (riscv_info_t *) target->arch_info;
+	generic_info->get_register = get_register;
+	generic_info->set_register = set_register;
+
+	generic_info->version_specific = calloc(1, sizeof(riscv011_info_t));
+	if (!generic_info->version_specific)
+		return ERROR_FAIL;
+
+	/* Assume 32-bit until we discover the real value in examine(). */
+	generic_info->xlen[0] = 32;
+	riscv_init_registers(target);
+
+	return ERROR_OK;
+}
+
+/* Target teardown hook: release the 0.11-specific state. */
+static void deinit_target(struct target *target)
+{
+	LOG_DEBUG("riscv_deinit_target()");
+	riscv_info_t *generic = (riscv_info_t *) target->arch_info;
+	/* free(NULL) is harmless, so no guard is needed. */
+	free(generic->version_specific);
+	generic->version_specific = NULL;
+}
+
+/* Single-step with all breakpoints and watchpoints temporarily removed, so
+ * the step cannot immediately re-trigger the halt condition. Used when gdb
+ * doesn't know to do the remove-step-reinsert dance itself. */
+static int strict_step(struct target *target, bool announce)
+{
+	riscv011_info_t *info = get_info(target);
+
+	LOG_DEBUG("enter");
+
+	struct breakpoint *breakpoint = target->breakpoints;
+	while (breakpoint) {
+		riscv_remove_breakpoint(target, breakpoint);
+		breakpoint = breakpoint->next;
+	}
+
+	struct watchpoint *watchpoint = target->watchpoints;
+	while (watchpoint) {
+		riscv_remove_watchpoint(target, watchpoint);
+		watchpoint = watchpoint->next;
+	}
+
+	int result = full_step(target, announce);
+	if (result != ERROR_OK)
+		return result;
+
+	/* Reinsert everything that was removed above. */
+	breakpoint = target->breakpoints;
+	while (breakpoint) {
+		riscv_add_breakpoint(target, breakpoint);
+		breakpoint = breakpoint->next;
+	}
+
+	watchpoint = target->watchpoints;
+	while (watchpoint) {
+		riscv_add_watchpoint(target, watchpoint);
+		watchpoint = watchpoint->next;
+	}
+
+	info->need_strict_step = false;
+
+	return ERROR_OK;
+}
+
+/* Target step hook. When `current` is false, first set the PC to `address`.
+ * Uses strict_step() when breakpoints must be handled, otherwise a plain
+ * single-step resume. */
+static int step(struct target *target, int current, target_addr_t address,
+		int handle_breakpoints)
+{
+	riscv011_info_t *info = get_info(target);
+
+	jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
+
+	if (!current) {
+		/* target_addr_t may be wider than the hart; warn on truncation risk. */
+		if (riscv_xlen(target) > 32) {
+			LOG_WARNING("Asked to resume at 32-bit PC on %d-bit target.",
+					riscv_xlen(target));
+		}
+		int result = register_write(target, GDB_REGNO_PC, address);
+		if (result != ERROR_OK)
+			return result;
+	}
+
+	if (info->need_strict_step || handle_breakpoints) {
+		/* strict_step() performs the whole step itself, including the wait. */
+		int result = strict_step(target, true);
+		if (result != ERROR_OK)
+			return result;
+	} else {
+		return resume(target, 0, true);
+	}
+
+	return ERROR_OK;
+}
+
+/* Target examine hook: probe dtmcontrol/dminfo, verify the 0.11 debug module,
+ * discover XLEN by running a shift test in Debug RAM, read misa, and mark the
+ * target examined. */
+static int examine(struct target *target)
+{
+	/* Don't need to select dbus, since the first thing we do is read dtmcontrol. */
+
+	uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
+	LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
+	LOG_DEBUG("  addrbits=%d", get_field(dtmcontrol, DTMCONTROL_ADDRBITS));
+	LOG_DEBUG("  version=%d", get_field(dtmcontrol, DTMCONTROL_VERSION));
+	LOG_DEBUG("  idle=%d", get_field(dtmcontrol, DTMCONTROL_IDLE));
+	if (dtmcontrol == 0) {
+		LOG_ERROR("dtmcontrol is 0. Check JTAG connectivity/board power.");
+		return ERROR_FAIL;
+	}
+	/* DTM version 0 corresponds to the 0.11 debug spec this file implements. */
+	if (get_field(dtmcontrol, DTMCONTROL_VERSION) != 0) {
+		LOG_ERROR("Unsupported DTM version %d. (dtmcontrol=0x%x)",
+				get_field(dtmcontrol, DTMCONTROL_VERSION), dtmcontrol);
+		return ERROR_FAIL;
+	}
+
+	RISCV_INFO(r);
+	r->hart_count = 1;
+
+	riscv011_info_t *info = get_info(target);
+	info->addrbits = get_field(dtmcontrol, DTMCONTROL_ADDRBITS);
+	info->dtmcontrol_idle = get_field(dtmcontrol, DTMCONTROL_IDLE);
+	if (info->dtmcontrol_idle == 0) {
+		/* Some old SiFive cores don't set idle but need it to be 1. */
+		uint32_t idcode = idcode_scan(target);
+		if (idcode == 0x10e31913)
+			info->dtmcontrol_idle = 1;
+	}
+
+	uint32_t dminfo = dbus_read(target, DMINFO);
+	LOG_DEBUG("dminfo: 0x%08x", dminfo);
+	LOG_DEBUG("  abussize=0x%x", get_field(dminfo, DMINFO_ABUSSIZE));
+	LOG_DEBUG("  serialcount=0x%x", get_field(dminfo, DMINFO_SERIALCOUNT));
+	LOG_DEBUG("  access128=%d", get_field(dminfo, DMINFO_ACCESS128));
+	LOG_DEBUG("  access64=%d", get_field(dminfo, DMINFO_ACCESS64));
+	LOG_DEBUG("  access32=%d", get_field(dminfo, DMINFO_ACCESS32));
+	LOG_DEBUG("  access16=%d", get_field(dminfo, DMINFO_ACCESS16));
+	LOG_DEBUG("  access8=%d", get_field(dminfo, DMINFO_ACCESS8));
+	LOG_DEBUG("  dramsize=0x%x", get_field(dminfo, DMINFO_DRAMSIZE));
+	LOG_DEBUG("  authenticated=0x%x", get_field(dminfo, DMINFO_AUTHENTICATED));
+	LOG_DEBUG("  authbusy=0x%x", get_field(dminfo, DMINFO_AUTHBUSY));
+	LOG_DEBUG("  authtype=0x%x", get_field(dminfo, DMINFO_AUTHTYPE));
+	LOG_DEBUG("  version=0x%x", get_field(dminfo, DMINFO_VERSION));
+
+	if (get_field(dminfo, DMINFO_VERSION) != 1) {
+		LOG_ERROR("OpenOCD only supports Debug Module version 1, not %d "
+				"(dminfo=0x%x)", get_field(dminfo, DMINFO_VERSION), dminfo);
+		return ERROR_FAIL;
+	}
+
+	/* DRAMSIZE is encoded as (number of words - 1). */
+	info->dramsize = get_field(dminfo, DMINFO_DRAMSIZE) + 1;
+
+	if (get_field(dminfo, DMINFO_AUTHTYPE) != 0) {
+		LOG_ERROR("Authentication required by RISC-V core but not "
+				"supported by OpenOCD. dminfo=0x%x", dminfo);
+		return ERROR_FAIL;
+	}
+
+	/* Pretend this is a 32-bit system until we have found out the true value. */
+	r->xlen[0] = 32;
+
+	/* Figure out XLEN, and test writing all of Debug RAM while we're at it. */
+	cache_set32(target, 0, xori(S1, ZERO, -1));
+	/* 0xffffffff  0xffffffff:ffffffff  0xffffffff:ffffffff:ffffffff:ffffffff */
+	cache_set32(target, 1, srli(S1, S1, 31));
+	/* 0x00000001  0x00000001:ffffffff  0x00000001:ffffffff:ffffffff:ffffffff */
+	cache_set32(target, 2, sw(S1, ZERO, DEBUG_RAM_START));
+	cache_set32(target, 3, srli(S1, S1, 31));
+	/* 0x00000000  0x00000000:00000003  0x00000000:00000003:ffffffff:ffffffff */
+	cache_set32(target, 4, sw(S1, ZERO, DEBUG_RAM_START + 4));
+	cache_set_jump(target, 5);
+	/* Fill the remaining words with a known pattern so cache_check() can
+	 * verify every word of Debug RAM is writable. */
+	for (unsigned i = 6; i < info->dramsize; i++)
+		cache_set32(target, i, i * 0x01020304);
+
+	cache_write(target, 0, false);
+
+	/* Check that we can actually read/write dram. */
+	if (cache_check(target) != ERROR_OK)
+		return ERROR_FAIL;
+
+	cache_write(target, 0, true);
+	cache_invalidate(target);
+
+	/* The shift test leaves a distinct two-word signature per XLEN. */
+	uint32_t word0 = cache_get32(target, 0);
+	uint32_t word1 = cache_get32(target, 1);
+	riscv_info_t *generic_info = (riscv_info_t *) target->arch_info;
+	if (word0 == 1 && word1 == 0) {
+		generic_info->xlen[0] = 32;
+	} else if (word0 == 0xffffffff && word1 == 3) {
+		generic_info->xlen[0] = 64;
+	} else if (word0 == 0xffffffff && word1 == 0xffffffff) {
+		generic_info->xlen[0] = 128;
+	} else {
+		uint32_t exception = cache_get32(target, info->dramsize-1);
+		LOG_ERROR("Failed to discover xlen; word0=0x%x, word1=0x%x, exception=0x%x",
+				word0, word1, exception);
+		dump_debug_ram(target);
+		return ERROR_FAIL;
+	}
+	LOG_DEBUG("Discovered XLEN is %d", riscv_xlen(target));
+
+	if (read_csr(target, &r->misa[0], CSR_MISA) != ERROR_OK) {
+		const unsigned old_csr_misa = 0xf10;
+		LOG_WARNING("Failed to read misa at 0x%x; trying 0x%x.", CSR_MISA,
+				old_csr_misa);
+		if (read_csr(target, &r->misa[0], old_csr_misa) != ERROR_OK) {
+			/* Maybe this is an old core that still has $misa at the old
+			 * address. */
+			LOG_ERROR("Failed to read misa at 0x%x.", old_csr_misa);
+			return ERROR_FAIL;
+		}
+	}
+
+	/* Update register list to match discovered XLEN/supported extensions. */
+	riscv_init_registers(target);
+
+	info->never_halted = true;
+
+	int result = riscv011_poll(target);
+	if (result != ERROR_OK)
+		return result;
+
+	target_set_examined(target);
+	riscv_set_current_hartid(target, 0);
+	/* Seed the GPR cache; real values arrive on the first halt. */
+	for (size_t i = 0; i < 32; ++i)
+		reg_cache_set(target, i, -1);
+	LOG_INFO("Examined RISCV core; XLEN=%d, misa=0x%" PRIx64,
+			riscv_xlen(target), r->misa[0]);
+
+	return ERROR_OK;
+}
+
+/* On halt, batch-read all GPRs plus dpc/dcsr into the register cache in a
+ * single JTAG batch. Returns RE_OK, RE_AGAIN (bus busy / interrupt still
+ * high; caller retries), or RE_FAIL. */
+static riscv_error_t handle_halt_routine(struct target *target)
+{
+	riscv011_info_t *info = get_info(target);
+
+	scans_t *scans = scans_new(target, 256);
+
+	/* Read all GPRs as fast as we can, because gdb is going to ask for them
+	 * anyway. Reading them one at a time is much slower. */
+
+	/* Write the jump back to address 1. */
+	scans_add_write_jump(scans, 1, false);
+	for (int reg = 1; reg < 32; reg++) {
+		if (reg == S0 || reg == S1)
+			continue;
+
+		/* Write store instruction. */
+		scans_add_write_store(scans, 0, reg, SLOT0, true);
+
+		/* Read value. */
+		scans_add_read(scans, SLOT0, false);
+	}
+
+	/* Write store of s0 at index 1. */
+	scans_add_write_store(scans, 1, S0, SLOT0, false);
+	/* Write jump at index 2. */
+	scans_add_write_jump(scans, 2, false);
+
+	/* Read S1 from debug RAM */
+	scans_add_write_load(scans, 0, S0, SLOT_LAST, true);
+	/* Read value. */
+	scans_add_read(scans, SLOT0, false);
+
+	/* Read S0 from dscratch */
+	unsigned int csr[] = {CSR_DSCRATCH, CSR_DPC, CSR_DCSR};
+	for (unsigned int i = 0; i < DIM(csr); i++) {
+		scans_add_write32(scans, 0, csrr(S0, csr[i]), true);
+		scans_add_read(scans, SLOT0, false);
+	}
+
+	/* Final read to get the last value out. */
+	scans_add_read32(scans, 4, false);
+
+	int retval = scans_execute(scans);
+	if (retval != ERROR_OK) {
+		LOG_ERROR("JTAG execute failed: %d", retval);
+		goto error;
+	}
+
+	/* Maps "number of values decoded so far" to the register the next value
+	 * belongs to. Mirrors the scan order queued above: x1-x7, x10-x31
+	 * (skipping s0/s1 = x8/x9), then s1, s0, dpc, dcsr. Replaces a 33-case
+	 * switch. */
+	static const uint16_t reg_for_result[] = {
+		1, 2, 3, 4, 5, 6, 7,
+		10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+		21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+		S1, S0, CSR_DPC, CSR_DCSR
+	};
+
+	unsigned int dbus_busy = 0;
+	unsigned int interrupt_set = 0;
+	unsigned result = 0;
+	uint64_t value = 0;
+	reg_cache_set(target, 0, 0);
+	/* The first scan result is the result from something old we don't care
+	 * about. */
+	for (unsigned int i = 1; i < scans->next_scan && dbus_busy == 0; i++) {
+		dbus_status_t status = scans_get_u32(scans, i, DBUS_OP_START,
+				DBUS_OP_SIZE);
+		uint64_t data = scans_get_u64(scans, i, DBUS_DATA_START, DBUS_DATA_SIZE);
+		uint32_t address = scans_get_u32(scans, i, DBUS_ADDRESS_START,
+				info->addrbits);
+		switch (status) {
+			case DBUS_STATUS_SUCCESS:
+				break;
+			case DBUS_STATUS_FAILED:
+				LOG_ERROR("Debug access failed. Hardware error?");
+				goto error;
+			case DBUS_STATUS_BUSY:
+				dbus_busy++;
+				break;
+			default:
+				LOG_ERROR("Got invalid bus access status: %d", status);
+				/* Was `return ERROR_FAIL;`, which leaked `scans` and returned
+				 * the wrong enum type from a riscv_error_t function. */
+				goto error;
+		}
+		if (data & DMCONTROL_INTERRUPT) {
+			interrupt_set++;
+			break;
+		}
+		if (address == 4 || address == 5) {
+			assert(result < DIM(reg_for_result));
+			unsigned int reg = reg_for_result[result];
+			if (riscv_xlen(target) == 32) {
+				reg_cache_set(target, reg, data & 0xffffffff);
+				result++;
+			} else if (riscv_xlen(target) == 64) {
+				/* 64-bit values arrive as two words: low at address 4,
+				 * high at address 5. */
+				if (address == 4) {
+					value = data & 0xffffffff;
+				} else if (address == 5) {
+					reg_cache_set(target, reg, ((data & 0xffffffff) << 32) | value);
+					value = 0;
+					result++;
+				}
+			}
+		}
+	}
+
+	if (dbus_busy) {
+		increase_dbus_busy_delay(target);
+		return RE_AGAIN;
+	}
+	if (interrupt_set) {
+		increase_interrupt_high_delay(target);
+		return RE_AGAIN;
+	}
+
+	/* TODO: get rid of those 2 variables and talk to the cache directly. */
+	info->dpc = reg_cache_get(target, CSR_DPC);
+	info->dcsr = reg_cache_get(target, CSR_DCSR);
+
+	scans_delete(scans);
+
+	cache_invalidate(target);
+
+	return RE_OK;
+
+error:
+	scans_delete(scans);
+	return RE_FAIL;
+}
+
+/* Called when poll detects the hart has halted: read back its state, work out
+ * the halt cause, run first-halt discovery, and notify callbacks. */
+static int handle_halt(struct target *target, bool announce)
+{
+	riscv011_info_t *info = get_info(target);
+	target->state = TARGET_HALTED;
+
+	riscv_error_t re;
+	do {
+		re = handle_halt_routine(target);
+	} while (re == RE_AGAIN);
+	if (re != RE_OK) {
+		LOG_ERROR("handle_halt_routine failed");
+		return ERROR_FAIL;
+	}
+
+	int cause = get_field(info->dcsr, DCSR_CAUSE);
+	switch (cause) {
+		case DCSR_CAUSE_SWBP:
+			target->debug_reason = DBG_REASON_BREAKPOINT;
+			break;
+		case DCSR_CAUSE_HWBP:
+			target->debug_reason = DBG_REASON_WPTANDBKPT;
+			/* If we halted because of a data trigger, gdb doesn't know to do
+			 * the disable-breakpoints-step-enable-breakpoints dance. */
+			info->need_strict_step = true;
+			break;
+		case DCSR_CAUSE_DEBUGINT:
+			target->debug_reason = DBG_REASON_DBGRQ;
+			break;
+		case DCSR_CAUSE_STEP:
+			target->debug_reason = DBG_REASON_SINGLESTEP;
+			break;
+		case DCSR_CAUSE_HALT:
+		default:
+			LOG_ERROR("Invalid halt cause %d in DCSR (0x%" PRIx64 ")",
+					cause, info->dcsr);
+	}
+
+	if (info->never_halted) {
+		info->never_halted = false;
+
+		/* First halt: capture tselect and discover the trigger hardware. */
+		int result = maybe_read_tselect(target);
+		if (result != ERROR_OK)
+			return result;
+		riscv_enumerate_triggers(target);
+	}
+
+	if (target->debug_reason == DBG_REASON_BREAKPOINT) {
+		int retval;
+		if (riscv_semihosting(target, &retval) != 0)
+			return retval;
+	}
+
+	if (announce)
+		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
+
+	static const char * const cause_string[] = {
+		"none",
+		"software breakpoint",
+		"hardware trigger",
+		"debug interrupt",
+		"step",
+		"halt"
+	};
+	/* The cause field can hold values beyond this table (the default case
+	 * above logs them); guard the index instead of reading out of bounds. */
+	const char *cause_text =
+		(unsigned int) cause < DIM(cause_string) ? cause_string[cause] : "unknown";
+	/* This is logged to the user so that gdb will show it when a user types
+	 * 'monitor reset init'. At that time gdb appears to have the pc cached
+	 * still so if a user manually inspects the pc it will still have the old
+	 * value. */
+	LOG_USER("halted at 0x%" PRIx64 " due to %s", info->dpc, cause_text);
+
+	return ERROR_OK;
+}
+
+/* Sample the haltnot/interrupt bits and update target->state accordingly.
+ * haltnot && interrupt  -> debugger-initiated code still running
+ * haltnot && !interrupt -> halted (triggers handle_halt on the transition)
+ * !haltnot && interrupt -> halting in progress, leave state alone
+ * neither               -> running normally */
+static int poll_target(struct target *target, bool announce)
+{
+	jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
+
+	/* Inhibit debug logging during poll(), which isn't usually interesting and
+	 * just fills up the screen/logs with clutter. */
+	int old_debug_level = debug_level;
+	if (debug_level >= LOG_LVL_DEBUG)
+		debug_level = LOG_LVL_INFO;
+	bits_t bits = read_bits(target);
+	debug_level = old_debug_level;
+
+	if (bits.haltnot && bits.interrupt) {
+		target->state = TARGET_DEBUG_RUNNING;
+		LOG_DEBUG("debug running");
+	} else if (bits.haltnot && !bits.interrupt) {
+		if (target->state != TARGET_HALTED)
+			return handle_halt(target, announce);
+	} else if (!bits.haltnot && bits.interrupt) {
+		/* Target is halting. There is no state for that, so don't change anything. */
+		LOG_DEBUG("halting");
+	} else if (!bits.haltnot && !bits.interrupt) {
+		target->state = TARGET_RUNNING;
+	}
+
+	return ERROR_OK;
+}
+
+/* target_type poll callback: poll status and announce halt events. */
+static int riscv011_poll(struct target *target)
+{
+ return poll_target(target, true);
+}
+
+/* target_type resume callback. If !current, first write 'address' to the PC.
+ * When the last halt was caused by a data trigger (or the caller asks for
+ * breakpoint handling), perform a strict single step first so resuming at a
+ * breakpointed address makes forward progress. */
+static int riscv011_resume(struct target *target, int current,
+ target_addr_t address, int handle_breakpoints, int debug_execution)
+{
+ riscv011_info_t *info = get_info(target);
+
+ jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
+
+ if (!current) {
+ /* 'address' is only 32 bits wide from gdb's perspective here. */
+ if (riscv_xlen(target) > 32) {
+ LOG_WARNING("Asked to resume at 32-bit PC on %d-bit target.",
+ riscv_xlen(target));
+ }
+ int result = register_write(target, GDB_REGNO_PC, address);
+ if (result != ERROR_OK)
+ return result;
+ }
+
+ if (info->need_strict_step || handle_breakpoints) {
+ int result = strict_step(target, false);
+ if (result != ERROR_OK)
+ return result;
+ }
+
+ return resume(target, debug_execution, false);
+}
+
+/* target_type assert_reset callback. Writes dcsr through Debug RAM so the
+ * hart performs either ndreset (when reset_halt is requested, leaving the
+ * hart halted with DCSR_HALT set) or fullreset. */
+static int assert_reset(struct target *target)
+{
+ riscv011_info_t *info = get_info(target);
+ /* TODO: Maybe what I implemented here is more like soft_reset_halt()? */
+
+ jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
+
+ /* The only assumption we can make is that the TAP was reset. */
+ if (wait_for_debugint_clear(target, true) != ERROR_OK) {
+ LOG_ERROR("Debug interrupt didn't clear.");
+ return ERROR_FAIL;
+ }
+
+ /* Not sure what we should do when there are multiple cores.
+ * Here just reset the single hart we're talking to. */
+ info->dcsr |= DCSR_EBREAKM | DCSR_EBREAKH | DCSR_EBREAKS |
+ DCSR_EBREAKU | DCSR_HALT;
+ if (target->reset_halt)
+ info->dcsr |= DCSR_NDRESET;
+ else
+ info->dcsr |= DCSR_FULLRESET;
+ /* Program: load new dcsr value from word 4, write it to CSR_DCSR. */
+ dram_write32(target, 0, lw(S0, ZERO, DEBUG_RAM_START + 16), false);
+ dram_write32(target, 1, csrw(S0, CSR_DCSR), false);
+ /* We shouldn't actually need the jump because a reset should happen. */
+ dram_write_jump(target, 2, false);
+ dram_write32(target, 4, info->dcsr, true);
+ /* The reset invalidates whatever we believed was in Debug RAM. */
+ cache_invalidate(target);
+
+ target->state = TARGET_RESET;
+
+ return ERROR_OK;
+}
+
+/* target_type deassert_reset callback: wait for the hart to come out of
+ * reset into the state requested by reset_halt. */
+static int deassert_reset(struct target *target)
+{
+ jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
+ if (target->reset_halt)
+ return wait_for_state(target, TARGET_HALTED);
+ else
+ return wait_for_state(target, TARGET_RUNNING);
+}
+
+/* target_type read_memory callback for debug spec 0.11.
+ * A small lb/lh/lw loop is placed in Debug RAM; each dbus write of the next
+ * address (with interrupt set) executes one load, whose result is read back
+ * on the following scan. Up to max_batch_size scans are queued per JTAG
+ * round trip. size must be 1, 2 or 4; count*size bytes land in buffer.
+ * On busy indications the delays are increased and the batch is retried. */
+static int read_memory(struct target *target, target_addr_t address,
+ uint32_t size, uint32_t count, uint8_t *buffer)
+{
+ jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
+
+ /* Word 0: fetch the target address from Debug RAM word 4.
+ * Words 1-2: load from that address, store result back to word 4. */
+ cache_set32(target, 0, lw(S0, ZERO, DEBUG_RAM_START + 16));
+ switch (size) {
+ case 1:
+ cache_set32(target, 1, lb(S1, S0, 0));
+ cache_set32(target, 2, sw(S1, ZERO, DEBUG_RAM_START + 16));
+ break;
+ case 2:
+ cache_set32(target, 1, lh(S1, S0, 0));
+ cache_set32(target, 2, sw(S1, ZERO, DEBUG_RAM_START + 16));
+ break;
+ case 4:
+ cache_set32(target, 1, lw(S1, S0, 0));
+ cache_set32(target, 2, sw(S1, ZERO, DEBUG_RAM_START + 16));
+ break;
+ default:
+ LOG_ERROR("Unsupported size: %d", size);
+ return ERROR_FAIL;
+ }
+ cache_set_jump(target, 3);
+ cache_write(target, CACHE_NO_READ, false);
+
+ riscv011_info_t *info = get_info(target);
+ const unsigned max_batch_size = 256;
+ scans_t *scans = scans_new(target, max_batch_size);
+
+ uint32_t result_value = 0x777;
+ uint32_t i = 0;
+ /* The pipeline is count reads plus 2 trailing scans to drain results,
+ * plus 1 to check for an exception. */
+ while (i < count + 3) {
+ unsigned int batch_size = MIN(count + 3 - i, max_batch_size);
+ scans_reset(scans);
+
+ for (unsigned int j = 0; j < batch_size; j++) {
+ if (i + j == count) {
+ /* Just insert a read so we can scan out the last value. */
+ scans_add_read32(scans, 4, false);
+ } else if (i + j >= count + 1) {
+ /* And check for errors. */
+ scans_add_read32(scans, info->dramsize-1, false);
+ } else {
+ /* Write the next address and set interrupt. */
+ uint32_t offset = size * (i + j);
+ scans_add_write32(scans, 4, address + offset, true);
+ }
+ }
+
+ int retval = scans_execute(scans);
+ if (retval != ERROR_OK) {
+ LOG_ERROR("JTAG execute failed: %d", retval);
+ goto error;
+ }
+
+ int dbus_busy = 0;
+ int execute_busy = 0;
+ for (unsigned int j = 0; j < batch_size; j++) {
+ dbus_status_t status = scans_get_u32(scans, j, DBUS_OP_START,
+ DBUS_OP_SIZE);
+ switch (status) {
+ case DBUS_STATUS_SUCCESS:
+ break;
+ case DBUS_STATUS_FAILED:
+ LOG_ERROR("Debug RAM write failed. Hardware error?");
+ goto error;
+ case DBUS_STATUS_BUSY:
+ dbus_busy++;
+ break;
+ default:
+ LOG_ERROR("Got invalid bus access status: %d", status);
+ /* Was a bare 'return ERROR_FAIL', which leaked 'scans'
+ * and skipped cache_clean(); clean up like the other
+ * failure paths instead. */
+ goto error;
+ }
+ uint64_t data = scans_get_u64(scans, j, DBUS_DATA_START,
+ DBUS_DATA_SIZE);
+ if (data & DMCONTROL_INTERRUPT)
+ execute_busy++;
+ if (i + j == count + 2) {
+ result_value = data;
+ } else if (i + j > 1) {
+ /* Results lag the requests by 2 scans. */
+ uint32_t offset = size * (i + j - 2);
+ switch (size) {
+ case 1:
+ buffer[offset] = data;
+ break;
+ case 2:
+ buffer[offset] = data;
+ buffer[offset+1] = data >> 8;
+ break;
+ case 4:
+ buffer[offset] = data;
+ buffer[offset+1] = data >> 8;
+ buffer[offset+2] = data >> 16;
+ buffer[offset+3] = data >> 24;
+ break;
+ }
+ }
+ LOG_DEBUG("j=%d status=%d data=%09" PRIx64, j, status, data);
+ }
+ if (dbus_busy)
+ increase_dbus_busy_delay(target);
+ if (execute_busy)
+ increase_interrupt_high_delay(target);
+ if (dbus_busy || execute_busy) {
+ wait_for_debugint_clear(target, false);
+
+ /* Retry. */
+ LOG_INFO("Retrying memory read starting from 0x%" TARGET_PRIxADDR
+ " with more delays", address + size * i);
+ } else {
+ i += batch_size;
+ }
+ }
+
+ if (result_value != 0) {
+ LOG_USER("Core got an exception (0x%x) while reading from 0x%"
+ TARGET_PRIxADDR, result_value, address + size * (count-1));
+ if (count > 1) {
+ LOG_USER("(It may have failed between 0x%" TARGET_PRIxADDR
+ " and 0x%" TARGET_PRIxADDR " as well, but we "
+ "didn't check then.)",
+ address, address + size * (count-2) + size - 1);
+ }
+ goto error;
+ }
+
+ scans_delete(scans);
+ cache_clean(target);
+ return ERROR_OK;
+
+error:
+ scans_delete(scans);
+ cache_clean(target);
+ return ERROR_FAIL;
+}
+
+/* Program Debug RAM words 0-3 with the store loop used by write_memory():
+ * load the next value from word 4, store it to the address in T0, advance
+ * T0 by 'size', jump back. Word 4 is then rewritten once per element.
+ * size must be 1, 2 or 4. */
+static int setup_write_memory(struct target *target, uint32_t size)
+{
+ switch (size) {
+ case 1:
+ cache_set32(target, 0, lb(S0, ZERO, DEBUG_RAM_START + 16));
+ cache_set32(target, 1, sb(S0, T0, 0));
+ break;
+ case 2:
+ cache_set32(target, 0, lh(S0, ZERO, DEBUG_RAM_START + 16));
+ cache_set32(target, 1, sh(S0, T0, 0));
+ break;
+ case 4:
+ cache_set32(target, 0, lw(S0, ZERO, DEBUG_RAM_START + 16));
+ cache_set32(target, 1, sw(S0, T0, 0));
+ break;
+ default:
+ LOG_ERROR("Unsupported size: %d", size);
+ return ERROR_FAIL;
+ }
+ cache_set32(target, 2, addi(T0, T0, size));
+ cache_set_jump(target, 3);
+ cache_write(target, 4, false);
+
+ return ERROR_OK;
+}
+
+/* target_type write_memory callback for debug spec 0.11.
+ * T0 is saved, loaded with 'address', and a store loop is placed in Debug
+ * RAM (see setup_write_memory()). Each dbus write of the next value (with
+ * interrupt set) stores one element; up to max_batch_size scans are queued
+ * per JTAG round trip. size must be 1, 2 or 4.
+ * On busy indications the delays are increased, T0 is rewound, and the
+ * batch is retried. T0 is restored before returning success. */
+static int write_memory(struct target *target, target_addr_t address,
+ uint32_t size, uint32_t count, const uint8_t *buffer)
+{
+ riscv011_info_t *info = get_info(target);
+ jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
+
+ /* Set up the address. */
+ cache_set_store(target, 0, T0, SLOT1);
+ cache_set_load(target, 1, T0, SLOT0);
+ cache_set_jump(target, 2);
+ cache_set(target, SLOT0, address);
+ if (cache_write(target, 5, true) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* Original T0, restored on success. */
+ uint64_t t0 = cache_get(target, SLOT1);
+ LOG_DEBUG("t0 is 0x%" PRIx64, t0);
+
+ if (setup_write_memory(target, size) != ERROR_OK)
+ return ERROR_FAIL;
+
+ const unsigned max_batch_size = 256;
+ scans_t *scans = scans_new(target, max_batch_size);
+
+ uint32_t result_value = 0x777;
+ uint32_t i = 0;
+ /* count writes plus 2 trailing scans to drain and check for exceptions. */
+ while (i < count + 2) {
+ unsigned int batch_size = MIN(count + 2 - i, max_batch_size);
+ scans_reset(scans);
+
+ for (unsigned int j = 0; j < batch_size; j++) {
+ if (i + j >= count) {
+ /* Check for an exception. */
+ scans_add_read32(scans, info->dramsize-1, false);
+ } else {
+ /* Write the next value and set interrupt. */
+ uint32_t value;
+ uint32_t offset = size * (i + j);
+ switch (size) {
+ case 1:
+ value = buffer[offset];
+ break;
+ case 2:
+ value = buffer[offset] |
+ (buffer[offset+1] << 8);
+ break;
+ case 4:
+ value = buffer[offset] |
+ ((uint32_t) buffer[offset+1] << 8) |
+ ((uint32_t) buffer[offset+2] << 16) |
+ ((uint32_t) buffer[offset+3] << 24);
+ break;
+ default:
+ goto error;
+ }
+
+ scans_add_write32(scans, 4, value, true);
+ }
+ }
+
+ int retval = scans_execute(scans);
+ if (retval != ERROR_OK) {
+ LOG_ERROR("JTAG execute failed: %d", retval);
+ goto error;
+ }
+
+ int dbus_busy = 0;
+ int execute_busy = 0;
+ for (unsigned int j = 0; j < batch_size; j++) {
+ dbus_status_t status = scans_get_u32(scans, j, DBUS_OP_START,
+ DBUS_OP_SIZE);
+ switch (status) {
+ case DBUS_STATUS_SUCCESS:
+ break;
+ case DBUS_STATUS_FAILED:
+ LOG_ERROR("Debug RAM write failed. Hardware error?");
+ goto error;
+ case DBUS_STATUS_BUSY:
+ dbus_busy++;
+ break;
+ default:
+ LOG_ERROR("Got invalid bus access status: %d", status);
+ /* Was a bare 'return ERROR_FAIL', which leaked 'scans'
+ * and skipped cache_clean(); clean up like the other
+ * failure paths instead. */
+ goto error;
+ }
+ int interrupt = scans_get_u32(scans, j, DBUS_DATA_START + 33, 1);
+ if (interrupt)
+ execute_busy++;
+ if (i + j == count + 1)
+ result_value = scans_get_u32(scans, j, DBUS_DATA_START, 32);
+ }
+ if (dbus_busy)
+ increase_dbus_busy_delay(target);
+ if (execute_busy)
+ increase_interrupt_high_delay(target);
+ if (dbus_busy || execute_busy) {
+ wait_for_debugint_clear(target, false);
+
+ /* Retry.
+ * Set t0 back to what it should have been at the beginning of this
+ * batch. */
+ LOG_INFO("Retrying memory write starting from 0x%" TARGET_PRIxADDR
+ " with more delays", address + size * i);
+
+ cache_clean(target);
+
+ if (write_gpr(target, T0, address + size * i) != ERROR_OK)
+ goto error;
+
+ if (setup_write_memory(target, size) != ERROR_OK)
+ goto error;
+ } else {
+ i += batch_size;
+ }
+ }
+
+ if (result_value != 0) {
+ LOG_ERROR("Core got an exception (0x%x) while writing to 0x%"
+ TARGET_PRIxADDR, result_value, address + size * (count-1));
+ if (count > 1) {
+ LOG_ERROR("(It may have failed between 0x%" TARGET_PRIxADDR
+ " and 0x%" TARGET_PRIxADDR " as well, but we "
+ "didn't check then.)",
+ address, address + size * (count-2) + size - 1);
+ }
+ goto error;
+ }
+
+ scans_delete(scans);
+ cache_clean(target);
+ return register_write(target, T0, t0);
+
+error:
+ scans_delete(scans);
+ cache_clean(target);
+ return ERROR_FAIL;
+}
+
+/* target_type arch_state callback: nothing architecture-specific to report. */
+static int arch_state(struct target *target)
+{
+ return ERROR_OK;
+}
+
+/* target_type for RISC-V debug spec 0.11. Callbacks not listed here
+ * (register access, breakpoints, ...) are handled by the generic riscv
+ * target layer that dispatches to this struct. */
+struct target_type riscv011_target = {
+ .name = "riscv",
+
+ .init_target = init_target,
+ .deinit_target = deinit_target,
+ .examine = examine,
+
+ /* poll current target status */
+ .poll = riscv011_poll,
+
+ .halt = halt,
+ .resume = riscv011_resume,
+ .step = step,
+
+ .assert_reset = assert_reset,
+ .deassert_reset = deassert_reset,
+
+ .read_memory = read_memory,
+ .write_memory = write_memory,
+
+ .arch_state = arch_state,
+};
--- /dev/null
+/*
+ * Support for RISC-V, debug version 0.13, which is currently (2/4/17) the
+ * latest draft.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <time.h>
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "target/target.h"
+#include "target/algorithm.h"
+#include "target/target_type.h"
+#include "log.h"
+#include "jtag/jtag.h"
+#include "target/register.h"
+#include "target/breakpoints.h"
+#include "helper/time_support.h"
+#include "helper/list.h"
+#include "riscv.h"
+#include "debug_defines.h"
+#include "rtos/rtos.h"
+#include "program.h"
+#include "asm.h"
+#include "batch.h"
+
+/* Registers adjacent to DATA0/PROGBUF0 in the DMI address map. */
+#define DMI_DATA1 (DMI_DATA0 + 1)
+#define DMI_PROGBUF1 (DMI_PROGBUF0 + 1)
+
+/* Forward declarations; definitions appear later in this file. */
+static int riscv013_on_step_or_resume(struct target *target, bool step);
+static int riscv013_step_or_resume_current_hart(struct target *target, bool step);
+static void riscv013_clear_abstract_error(struct target *target);
+
+/* Implementations of the functions in riscv_info_t. */
+static int riscv013_get_register(struct target *target,
+ riscv_reg_t *value, int hid, int rid);
+static int riscv013_set_register(struct target *target, int hartid, int regid, uint64_t value);
+static int riscv013_select_current_hart(struct target *target);
+static int riscv013_halt_current_hart(struct target *target);
+static int riscv013_resume_current_hart(struct target *target);
+static int riscv013_step_current_hart(struct target *target);
+static int riscv013_on_halt(struct target *target);
+static int riscv013_on_step(struct target *target);
+static int riscv013_on_resume(struct target *target);
+static bool riscv013_is_halted(struct target *target);
+static enum riscv_halt_reason riscv013_halt_reason(struct target *target);
+static int riscv013_write_debug_buffer(struct target *target, unsigned index,
+ riscv_insn_t d);
+static riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned
+ index);
+static int riscv013_execute_debug_buffer(struct target *target);
+static void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d);
+static void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a);
+static int riscv013_dmi_write_u64_bits(struct target *target);
+static void riscv013_fill_dmi_nop_u64(struct target *target, char *buf);
+static int register_read(struct target *target, uint64_t *value, uint32_t number);
+static int register_read_direct(struct target *target, uint64_t *value, uint32_t number);
+static int register_write_direct(struct target *target, unsigned number,
+ uint64_t value);
+static int read_memory(struct target *target, target_addr_t address,
+ uint32_t size, uint32_t count, uint8_t *buffer);
+static int write_memory(struct target *target, target_addr_t address,
+ uint32_t size, uint32_t count, const uint8_t *buffer);
+
+/**
+ * Since almost everything can be accomplished by scanning the dmi register,
+ * all functions here assume dmi is already selected. The exception are
+ * functions called directly by OpenOCD, which can't assume anything about
+ * what's currently in IR. They should set IR to dmi explicitly.
+ */
+
+/* Extract/insert the bit-field selected by a contiguous mask.
+ * ((mask) & ~((mask) << 1)) isolates the mask's lowest set bit, so the
+ * divide/multiply shift the value down/up by the field's bit offset. */
+#define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
+#define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
+
+/* Number of elements in a statically-sized array. */
+#define DIM(x) (sizeof(x)/sizeof(*x))
+
+/* dcsr.cause encodings (per the 0.13 Debug Specification). */
+#define CSR_DCSR_CAUSE_SWBP 1
+#define CSR_DCSR_CAUSE_TRIGGER 2
+#define CSR_DCSR_CAUSE_DEBUGINT 3
+#define CSR_DCSR_CAUSE_STEP 4
+#define CSR_DCSR_CAUSE_HALT 5
+
+/* Declare and initialize the 0.13-specific info struct for 'target'. */
+#define RISCV013_INFO(r) riscv013_info_t *r = get_info(target)
+
+/*** JTAG registers. ***/
+
+/* dmi.op request encodings. */
+typedef enum {
+ DMI_OP_NOP = 0,
+ DMI_OP_READ = 1,
+ DMI_OP_WRITE = 2
+} dmi_op_t;
+/* dmi.op status encodings returned by the DTM (1 is reserved). */
+typedef enum {
+ DMI_STATUS_SUCCESS = 0,
+ DMI_STATUS_FAILED = 2,
+ DMI_STATUS_BUSY = 3
+} dmi_status_t;
+
+/* Tri-state result for operations that may need to be retried. */
+typedef enum {
+ RE_OK,
+ RE_FAIL,
+ RE_AGAIN
+} riscv_error_t;
+
+/* Scratch slots used to pass 64-bit values through debug memory. */
+typedef enum slot {
+ SLOT0,
+ SLOT1,
+ SLOT_LAST,
+} slot_t;
+
+/*** Debug Bus registers. ***/
+
+/* abstractcs.cmderr values (5 and 6 are reserved). */
+#define CMDERR_NONE 0
+#define CMDERR_BUSY 1
+#define CMDERR_NOT_SUPPORTED 2
+#define CMDERR_EXCEPTION 3
+#define CMDERR_HALT_RESUME 4
+#define CMDERR_OTHER 7
+
+/*** Info about the core being debugged. ***/
+
+/* A hardware watchpoint/breakpoint trigger as requested by OpenOCD. */
+struct trigger {
+ uint64_t address;
+ uint32_t length;
+ uint64_t mask;
+ uint64_t value;
+ /* Which accesses fire this trigger. */
+ bool read, write, execute;
+ /* OpenOCD-assigned identifier, used to match set/clear requests. */
+ int unique_id;
+};
+
+/* Lazily-discovered capability: unknown until probed, then yes or no. */
+typedef enum {
+ YNM_MAYBE,
+ YNM_YES,
+ YNM_NO
+} yes_no_maybe_t;
+
+/* Per-Debug-Module state, shared by all targets behind the same DM. */
+typedef struct {
+ struct list_head list;
+ int abs_chain_position;
+ /* Indicates we already reset this DM, so don't need to do it again. */
+ bool was_reset;
+ /* Targets that are connected to this DM. */
+ struct list_head target_list;
+ /* The currently selected hartid on this DM. */
+ int current_hartid;
+} dm013_info_t;
+
+/* Node of a dm013_info_t's target_list. */
+typedef struct {
+ struct list_head list;
+ struct target *target;
+} target_list_t;
+
+/* Per-target state for debug spec 0.13. */
+typedef struct {
+ /* Number of address bits in the dbus register. */
+ unsigned abits;
+ /* Number of abstract command data registers. */
+ unsigned datacount;
+ /* Number of words in the Program Buffer. */
+ unsigned progbufsize;
+
+ /* We cache the read-only bits of sbcs here. */
+ uint32_t sbcs;
+
+ /* Whether progbuf words can be used as scratch memory (probed lazily). */
+ yes_no_maybe_t progbuf_writable;
+ /* We only need the address so that we know the alignment of the buffer. */
+ riscv_addr_t progbuf_address;
+
+ /* Number of run-test/idle cycles the target requests we do after each dbus
+ * access. */
+ unsigned int dtmcontrol_idle;
+
+ /* This value is incremented every time a dbus access comes back as "busy".
+ * It's used to determine how many run-test/idle cycles to feed the target
+ * in between accesses. */
+ unsigned int dmi_busy_delay;
+
+ /* Number of run-test/idle cycles to add between consecutive bus master
+ * reads/writes respectively. */
+ unsigned int bus_master_write_delay, bus_master_read_delay;
+
+ /* This value is increased every time we tried to execute two commands
+ * consecutively, and the second one failed because the previous hadn't
+ * completed yet. It's used to add extra run-test/idle cycles after
+ * starting a command, so we don't have to waste time checking for busy to
+ * go low. */
+ unsigned int ac_busy_delay;
+
+ /* Set when a halt was caused by a data trigger, so stepping requires the
+ * disable-breakpoints-step-enable-breakpoints dance. */
+ bool need_strict_step;
+
+ /* Whether abstract commands can access CSRs/FPRs (discovered on first
+ * failure and cached so we fall back to the program buffer). */
+ bool abstract_read_csr_supported;
+ bool abstract_write_csr_supported;
+ bool abstract_read_fpr_supported;
+ bool abstract_write_fpr_supported;
+
+ /* When a function returns some error due to a failure indicated by the
+ * target in cmderr, the caller can look here to see what that error was.
+ * (Compare with errno.) */
+ uint8_t cmderr;
+
+ /* Some fields from hartinfo. */
+ uint8_t datasize;
+ uint8_t dataaccess;
+ int16_t dataaddr;
+
+ /* The width of the hartsel field. */
+ unsigned hartsellen;
+
+ /* DM that provides access to this target. */
+ dm013_info_t *dm;
+} riscv013_info_t;
+
+/* Global list of all Debug Modules seen so far (dm013_info_t.list). */
+LIST_HEAD(dm_list);
+
+/* Fetch the 0.13-specific info struct hanging off target->arch_info. */
+static riscv013_info_t *get_info(const struct target *target)
+{
+ riscv_info_t *info = (riscv_info_t *) target->arch_info;
+ return (riscv013_info_t *) info->version_specific;
+}
+
+/**
+ * Return the DM structure for this target. If there isn't one, find it in the
+ * global list of DMs. If it's not in there, then create one and initialize it
+ * to 0.
+ *
+ * Also ensures the target is registered in the DM's target_list. DMs are
+ * keyed by the TAP's absolute chain position, so targets sharing a TAP share
+ * a DM.
+ */
+static dm013_info_t *get_dm(struct target *target)
+{
+ RISCV013_INFO(info);
+ if (info->dm)
+ return info->dm;
+
+ int abs_chain_position = target->tap->abs_chain_position;
+
+ dm013_info_t *entry;
+ dm013_info_t *dm = NULL;
+ list_for_each_entry(entry, &dm_list, list) {
+ if (entry->abs_chain_position == abs_chain_position) {
+ dm = entry;
+ break;
+ }
+ }
+
+ if (!dm) {
+ /* NOTE(review): calloc results are not checked here; on OOM this
+ * would crash. Confirm whether that matches project policy. */
+ dm = calloc(1, sizeof(dm013_info_t));
+ dm->abs_chain_position = abs_chain_position;
+ /* -1: no hart selected yet; forces a select on first use. */
+ dm->current_hartid = -1;
+ INIT_LIST_HEAD(&dm->target_list);
+ list_add(&dm->list, &dm_list);
+ }
+
+ info->dm = dm;
+ /* Make sure this target is in the DM's target list exactly once. */
+ target_list_t *target_entry;
+ list_for_each_entry(target_entry, &dm->target_list, list) {
+ if (target_entry->target == target)
+ return dm;
+ }
+ target_entry = calloc(1, sizeof(*target_entry));
+ target_entry->target = target;
+ list_add(&target_entry->list, &dm->target_list);
+
+ return dm;
+}
+
+/* Return 'initial' (a dmcontrol value) with hart 'index' spliced into the
+ * split hartsello/hartselhi fields. Asserts if the index doesn't fit. */
+static uint32_t set_hartsel(uint32_t initial, uint32_t index)
+{
+ initial &= ~DMI_DMCONTROL_HARTSELLO;
+ initial &= ~DMI_DMCONTROL_HARTSELHI;
+
+ uint32_t index_lo = index & ((1 << DMI_DMCONTROL_HARTSELLO_LENGTH) - 1);
+ initial |= index_lo << DMI_DMCONTROL_HARTSELLO_OFFSET;
+ uint32_t index_hi = index >> DMI_DMCONTROL_HARTSELLO_LENGTH;
+ assert(index_hi < 1 << DMI_DMCONTROL_HARTSELHI_LENGTH);
+ initial |= index_hi << DMI_DMCONTROL_HARTSELHI_OFFSET;
+
+ return initial;
+}
+
+/* Append a human-readable decoding of the DMI register at 'address' holding
+ * 'data' into 'text' (callers provide 500 bytes, which is ample). Only
+ * fields with non-zero values are printed, space-separated; unknown
+ * addresses produce an empty string. */
+static void decode_dmi(char *text, unsigned address, unsigned data)
+{
+ static const struct {
+ unsigned address;
+ uint64_t mask;
+ const char *name;
+ } description[] = {
+ { DMI_DMCONTROL, DMI_DMCONTROL_HALTREQ, "haltreq" },
+ { DMI_DMCONTROL, DMI_DMCONTROL_RESUMEREQ, "resumereq" },
+ { DMI_DMCONTROL, DMI_DMCONTROL_HARTRESET, "hartreset" },
+ { DMI_DMCONTROL, DMI_DMCONTROL_HASEL, "hasel" },
+ { DMI_DMCONTROL, DMI_DMCONTROL_HARTSELHI, "hartselhi" },
+ { DMI_DMCONTROL, DMI_DMCONTROL_HARTSELLO, "hartsello" },
+ { DMI_DMCONTROL, DMI_DMCONTROL_NDMRESET, "ndmreset" },
+ { DMI_DMCONTROL, DMI_DMCONTROL_DMACTIVE, "dmactive" },
+ { DMI_DMCONTROL, DMI_DMCONTROL_ACKHAVERESET, "ackhavereset" },
+
+ { DMI_DMSTATUS, DMI_DMSTATUS_IMPEBREAK, "impebreak" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_ALLHAVERESET, "allhavereset" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_ANYHAVERESET, "anyhavereset" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_ALLRESUMEACK, "allresumeack" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_ANYRESUMEACK, "anyresumeack" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_ALLNONEXISTENT, "allnonexistent" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_ANYNONEXISTENT, "anynonexistent" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_ALLUNAVAIL, "allunavail" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_ANYUNAVAIL, "anyunavail" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_ALLRUNNING, "allrunning" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_ANYRUNNING, "anyrunning" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_ALLHALTED, "allhalted" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_ANYHALTED, "anyhalted" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_AUTHENTICATED, "authenticated" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_AUTHBUSY, "authbusy" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_DEVTREEVALID, "devtreevalid" },
+ { DMI_DMSTATUS, DMI_DMSTATUS_VERSION, "version" },
+
+ { DMI_ABSTRACTCS, DMI_ABSTRACTCS_PROGBUFSIZE, "progbufsize" },
+ { DMI_ABSTRACTCS, DMI_ABSTRACTCS_BUSY, "busy" },
+ { DMI_ABSTRACTCS, DMI_ABSTRACTCS_CMDERR, "cmderr" },
+ { DMI_ABSTRACTCS, DMI_ABSTRACTCS_DATACOUNT, "datacount" },
+
+ { DMI_COMMAND, DMI_COMMAND_CMDTYPE, "cmdtype" },
+
+ { DMI_SBCS, DMI_SBCS_SBREADONADDR, "sbreadonaddr" },
+ { DMI_SBCS, DMI_SBCS_SBACCESS, "sbaccess" },
+ { DMI_SBCS, DMI_SBCS_SBAUTOINCREMENT, "sbautoincrement" },
+ { DMI_SBCS, DMI_SBCS_SBREADONDATA, "sbreadondata" },
+ { DMI_SBCS, DMI_SBCS_SBERROR, "sberror" },
+ { DMI_SBCS, DMI_SBCS_SBASIZE, "sbasize" },
+ { DMI_SBCS, DMI_SBCS_SBACCESS128, "sbaccess128" },
+ { DMI_SBCS, DMI_SBCS_SBACCESS64, "sbaccess64" },
+ { DMI_SBCS, DMI_SBCS_SBACCESS32, "sbaccess32" },
+ { DMI_SBCS, DMI_SBCS_SBACCESS16, "sbaccess16" },
+ { DMI_SBCS, DMI_SBCS_SBACCESS8, "sbaccess8" },
+ };
+
+ text[0] = 0;
+ char *start = text;
+ for (unsigned i = 0; i < DIM(description); i++) {
+ if (description[i].address == address) {
+ uint64_t mask = description[i].mask;
+ unsigned value = get_field(data, mask);
+ if (value) {
+ /* Separate fields with a space, but not before the first
+ * one. (The old check of 'i > 0' printed a spurious
+ * leading space whenever a register's first matching
+ * table entry wasn't index 0 -- i.e. for every register
+ * except DMCONTROL.) */
+ if (text != start)
+ *(text++) = ' ';
+ if (mask & (mask >> 1)) {
+ /* If the field is more than 1 bit wide. */
+ sprintf(text, "%s=%d", description[i].name, value);
+ } else {
+ strcpy(text, description[i].name);
+ }
+ text += strlen(text);
+ }
+ }
+ }
+}
+
+/* Debug-log one completed dmi scan: the raw out/in op, data and address,
+ * plus a decoded field-by-field line when decode_dmi() recognizes the
+ * register. No-op below LOG_LVL_DEBUG. */
+static void dump_field(const struct scan_field *field)
+{
+ static const char * const op_string[] = {"-", "r", "w", "?"};
+ static const char * const status_string[] = {"+", "?", "F", "b"};
+
+ if (debug_level < LOG_LVL_DEBUG)
+ return;
+
+ uint64_t out = buf_get_u64(field->out_value, 0, field->num_bits);
+ unsigned int out_op = get_field(out, DTM_DMI_OP);
+ unsigned int out_data = get_field(out, DTM_DMI_DATA);
+ unsigned int out_address = out >> DTM_DMI_ADDRESS_OFFSET;
+
+ uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
+ unsigned int in_op = get_field(in, DTM_DMI_OP);
+ unsigned int in_data = get_field(in, DTM_DMI_DATA);
+ unsigned int in_address = in >> DTM_DMI_ADDRESS_OFFSET;
+
+ log_printf_lf(LOG_LVL_DEBUG,
+ __FILE__, __LINE__, "scan",
+ "%db %s %08x @%02x -> %s %08x @%02x",
+ field->num_bits,
+ op_string[out_op], out_data, out_address,
+ status_string[in_op], in_data, in_address);
+
+ char out_text[500];
+ char in_text[500];
+ decode_dmi(out_text, out_address, out_data);
+ decode_dmi(in_text, in_address, in_data);
+ if (in_text[0] || out_text[0]) {
+ log_printf_lf(LOG_LVL_DEBUG, __FILE__, __LINE__, "scan", "%s -> %s",
+ out_text, in_text);
+ }
+}
+
+/*** Utility functions. ***/
+
+/* Queue an IR scan selecting the DMI register, so subsequent DR scans hit
+ * dmi. Functions called directly by OpenOCD must do this first. */
+static void select_dmi(struct target *target)
+{
+ static uint8_t ir_dmi[1] = {DTM_DMI};
+ struct scan_field field = {
+ .num_bits = target->tap->ir_length,
+ .out_value = ir_dmi,
+ .in_value = NULL,
+ .check_value = NULL,
+ .check_mask = NULL
+ };
+
+ jtag_add_ir_scan(target->tap, &field, TAP_IDLE);
+}
+
+/* Scan 'out' through the DTM control/status register and return what was
+ * scanned back in. IR is restored to dmi afterwards. */
+static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
+{
+ struct scan_field field;
+ uint8_t in_value[4];
+ uint8_t out_value[4];
+
+ buf_set_u32(out_value, 0, 32, out);
+
+ jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);
+
+ field.num_bits = 32;
+ field.out_value = out_value;
+ field.in_value = in_value;
+ jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
+
+ /* Always return to dmi. */
+ select_dmi(target);
+
+ int retval = jtag_execute_queue();
+ if (retval != ERROR_OK) {
+ LOG_ERROR("failed jtag scan: %d", retval);
+ /* NOTE(review): this returns the OpenOCD error code as if it were
+ * register data; callers can't tell the difference -- confirm. */
+ return retval;
+ }
+
+ uint32_t in = buf_get_u32(field.in_value, 0, 32);
+ LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);
+
+ return in;
+}
+
+/* Grow the per-access run-test/idle delay by ~10% (at least 1), then scan
+ * dmireset through dtmcs to clear the DTM's sticky busy error. */
+static void increase_dmi_busy_delay(struct target *target)
+{
+ riscv013_info_t *info = get_info(target);
+ info->dmi_busy_delay += info->dmi_busy_delay / 10 + 1;
+ LOG_DEBUG("dtmcontrol_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
+ info->dtmcontrol_idle, info->dmi_busy_delay,
+ info->ac_busy_delay);
+
+ dtmcontrol_scan(target, DTM_DTMCS_DMIRESET);
+}
+
+/**
+ * Perform one dmi scan: issue 'op' at 'address_out' with 'data_out', and
+ * return the status of the PREVIOUS operation (results lag by one scan).
+ * address_in/data_in, if non-NULL, receive the scanned-in address/data.
+ *
+ * exec: If this is set, assume the scan results in an execution, so more
+ * run-test/idle cycles may be required.
+ */
+static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
+ uint32_t *data_in, dmi_op_t op, uint32_t address_out, uint32_t data_out,
+ bool exec)
+{
+ riscv013_info_t *info = get_info(target);
+ uint8_t in[8] = {0};
+ uint8_t out[8];
+ struct scan_field field = {
+ .num_bits = info->abits + DTM_DMI_OP_LENGTH + DTM_DMI_DATA_LENGTH,
+ .out_value = out,
+ .in_value = in
+ };
+
+ /* abits == 0 means examine() hasn't run; the scan would be garbage. */
+ assert(info->abits != 0);
+
+ buf_set_u32(out, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, op);
+ buf_set_u32(out, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, data_out);
+ buf_set_u32(out, DTM_DMI_ADDRESS_OFFSET, info->abits, address_out);
+
+ /* Assume dbus is already selected. */
+ jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
+
+ int idle_count = info->dmi_busy_delay;
+ if (exec)
+ idle_count += info->ac_busy_delay;
+
+ if (idle_count)
+ jtag_add_runtest(idle_count, TAP_IDLE);
+
+ int retval = jtag_execute_queue();
+ if (retval != ERROR_OK) {
+ LOG_ERROR("dmi_scan failed jtag scan");
+ return DMI_STATUS_FAILED;
+ }
+
+ if (data_in)
+ *data_in = buf_get_u32(in, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);
+
+ if (address_in)
+ *address_in = buf_get_u32(in, DTM_DMI_ADDRESS_OFFSET, info->abits);
+
+ dump_field(&field);
+
+ return buf_get_u32(in, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
+}
+
+/* Perform a complete dmi operation (request scan, then a NOP scan to
+ * collect the result), retrying with longer delays while the DTM reports
+ * busy, up to timeout_sec of wall-clock time.
+ * data_in, if non-NULL, receives the result of a read.
+ * Returns ERROR_OK, ERROR_TIMEOUT_REACHED, or ERROR_FAIL. */
+static int dmi_op_timeout(struct target *target, uint32_t *data_in, int dmi_op,
+ uint32_t address, uint32_t data_out, int timeout_sec)
+{
+ select_dmi(target);
+
+ dmi_status_t status;
+ uint32_t address_in;
+
+ const char *op_name;
+ switch (dmi_op) {
+ case DMI_OP_NOP:
+ op_name = "nop";
+ break;
+ case DMI_OP_READ:
+ op_name = "read";
+ break;
+ case DMI_OP_WRITE:
+ op_name = "write";
+ break;
+ default:
+ LOG_ERROR("Invalid DMI operation: %d", dmi_op);
+ return ERROR_FAIL;
+ }
+
+ time_t start = time(NULL);
+ /* This first loop performs the request. Note that if for some reason this
+ * stays busy, it is actually due to the previous access. */
+ while (1) {
+ status = dmi_scan(target, NULL, NULL, dmi_op, address, data_out,
+ false);
+ if (status == DMI_STATUS_BUSY) {
+ increase_dmi_busy_delay(target);
+ } else if (status == DMI_STATUS_SUCCESS) {
+ break;
+ } else {
+ LOG_ERROR("failed %s at 0x%x, status=%d", op_name, address, status);
+ return ERROR_FAIL;
+ }
+ if (time(NULL) - start > timeout_sec)
+ return ERROR_TIMEOUT_REACHED;
+ }
+
+ /* NOTE(review): unreachable -- the loop above only exits via break on
+ * DMI_STATUS_SUCCESS or by returning. Kept as defensive code. */
+ if (status != DMI_STATUS_SUCCESS) {
+ LOG_ERROR("Failed %s at 0x%x; status=%d", op_name, address, status);
+ return ERROR_FAIL;
+ }
+
+ /* This second loop ensures the request succeeded, and gets back data.
+ * Note that NOP can result in a 'busy' result as well, but that would be
+ * noticed on the next DMI access we do. */
+ while (1) {
+ status = dmi_scan(target, &address_in, data_in, DMI_OP_NOP, address, 0,
+ false);
+ if (status == DMI_STATUS_BUSY) {
+ increase_dmi_busy_delay(target);
+ } else if (status == DMI_STATUS_SUCCESS) {
+ break;
+ } else {
+ LOG_ERROR("failed %s (NOP) at 0x%x, status=%d", op_name, address,
+ status);
+ return ERROR_FAIL;
+ }
+ if (time(NULL) - start > timeout_sec)
+ return ERROR_TIMEOUT_REACHED;
+ }
+
+ /* NOTE(review): likewise unreachable; see above. */
+ if (status != DMI_STATUS_SUCCESS) {
+ if (status == DMI_STATUS_FAILED || !data_in) {
+ LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name, address,
+ status);
+ } else {
+ LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
+ op_name, address, *data_in, status);
+ }
+ return ERROR_FAIL;
+ }
+
+ return ERROR_OK;
+}
+
+/* dmi_op_timeout() with the user-configurable command timeout, converting a
+ * timeout into ERROR_FAIL after logging advice on how to raise it. */
+static int dmi_op(struct target *target, uint32_t *data_in, int dmi_op,
+ uint32_t address, uint32_t data_out)
+{
+ int result = dmi_op_timeout(target, data_in, dmi_op, address, data_out,
+ riscv_command_timeout_sec);
+ if (result == ERROR_TIMEOUT_REACHED) {
+ LOG_ERROR("DMI operation didn't complete in %d seconds. The target is "
+ "either really slow or broken. You could increase the "
+ "timeout with riscv set_command_timeout_sec.",
+ riscv_command_timeout_sec);
+ return ERROR_FAIL;
+ }
+ return result;
+}
+
+/* Read the DMI register at 'address' into *value. */
+static int dmi_read(struct target *target, uint32_t *value, uint32_t address)
+{
+ return dmi_op(target, value, DMI_OP_READ, address, 0);
+}
+
+/* Write 'value' to the DMI register at 'address'. */
+static int dmi_write(struct target *target, uint32_t address, uint32_t value)
+{
+ return dmi_op(target, NULL, DMI_OP_WRITE, address, value);
+}
+
+/* Read dmstatus into *dmstatus with the given timeout. When 'authenticated'
+ * is set, additionally fail (with guidance) if the debugger has not
+ * authenticated to the Debug Module. */
+int dmstatus_read_timeout(struct target *target, uint32_t *dmstatus,
+ bool authenticated, unsigned timeout_sec)
+{
+ int result = dmi_op_timeout(target, dmstatus, DMI_OP_READ, DMI_DMSTATUS, 0,
+ timeout_sec);
+ if (result != ERROR_OK)
+ return result;
+ if (authenticated && !get_field(*dmstatus, DMI_DMSTATUS_AUTHENTICATED)) {
+ LOG_ERROR("Debugger is not authenticated to target Debug Module. "
+ "(dmstatus=0x%x). Use `riscv authdata_read` and "
+ "`riscv authdata_write` commands to authenticate.", *dmstatus);
+ return ERROR_FAIL;
+ }
+ return ERROR_OK;
+}
+
+/* dmstatus_read_timeout() with the user-configurable command timeout. */
+int dmstatus_read(struct target *target, uint32_t *dmstatus,
+ bool authenticated)
+{
+ return dmstatus_read_timeout(target, dmstatus, authenticated,
+ riscv_command_timeout_sec);
+}
+
+/* Grow the extra run-test/idle delay inserted after starting an abstract
+ * command by ~10% (at least 1). Called when a command reported busy. */
+static void increase_ac_busy_delay(struct target *target)
+{
+ riscv013_info_t *info = get_info(target);
+ info->ac_busy_delay += info->ac_busy_delay / 10 + 1;
+ LOG_DEBUG("dtmcontrol_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
+ info->dtmcontrol_idle, info->dmi_busy_delay,
+ info->ac_busy_delay);
+}
+
+/* Return the AC_ACCESS_REGISTER_SIZE field encoding for a register of
+ * 'width' bits, or 0 (not a valid size encoding for these widths) for an
+ * unsupported width. (The old body had unreachable 'break' statements
+ * after 'return'; dead code removed.) */
+uint32_t abstract_register_size(unsigned width)
+{
+ switch (width) {
+ case 32:
+ return set_field(0, AC_ACCESS_REGISTER_SIZE, 2);
+ case 64:
+ return set_field(0, AC_ACCESS_REGISTER_SIZE, 3);
+ case 128:
+ return set_field(0, AC_ACCESS_REGISTER_SIZE, 4);
+ default:
+ LOG_ERROR("Unsupported register width: %d", width);
+ return 0;
+ }
+}
+
+/* Poll abstractcs until busy clears, storing the final read in *abstractcs.
+ * On timeout, info->cmderr is updated from the last read and an error is
+ * logged; returns ERROR_FAIL in that case. */
+static int wait_for_idle(struct target *target, uint32_t *abstractcs)
+{
+ RISCV013_INFO(info);
+ time_t start = time(NULL);
+ while (1) {
+ if (dmi_read(target, abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (get_field(*abstractcs, DMI_ABSTRACTCS_BUSY) == 0)
+ return ERROR_OK;
+
+ if (time(NULL) - start > riscv_command_timeout_sec) {
+ info->cmderr = get_field(*abstractcs, DMI_ABSTRACTCS_CMDERR);
+ if (info->cmderr != CMDERR_NONE) {
+ /* Indexed by abstractcs.cmderr. */
+ const char *errors[8] = {
+ "none",
+ "busy",
+ "not supported",
+ "exception",
+ "halt/resume",
+ "reserved",
+ "reserved",
+ "other" };
+
+ LOG_ERROR("Abstract command ended in error '%s' (abstractcs=0x%x)",
+ errors[info->cmderr], *abstractcs);
+ }
+
+ LOG_ERROR("Timed out after %ds waiting for busy to go low (abstractcs=0x%x). "
+ "Increase the timeout with riscv set_command_timeout_sec.",
+ riscv_command_timeout_sec,
+ *abstractcs);
+ return ERROR_FAIL;
+ }
+ }
+}
+
+/* Write an abstract command to DMI_COMMAND and wait for it to complete.
+ * On command failure, cmderr is saved in info->cmderr (compare errno) and
+ * the sticky error bit is cleared so the next command can execute. */
+static int execute_abstract_command(struct target *target, uint32_t command)
+{
+ RISCV013_INFO(info);
+ LOG_DEBUG("command=0x%x", command);
+ dmi_write(target, DMI_COMMAND, command);
+
+ uint32_t abstractcs = 0;
+ /* The old code ignored this result, so a wait_for_idle() failure with
+ * abstractcs left at 0 (cmderr == 0) was misreported as success. */
+ if (wait_for_idle(target, &abstractcs) != ERROR_OK)
+ return ERROR_FAIL;
+
+ info->cmderr = get_field(abstractcs, DMI_ABSTRACTCS_CMDERR);
+ if (info->cmderr != 0) {
+ LOG_DEBUG("command 0x%x failed; abstractcs=0x%x", command, abstractcs);
+ /* Clear the error. */
+ dmi_write(target, DMI_ABSTRACTCS, set_field(0, DMI_ABSTRACTCS_CMDERR,
+ info->cmderr));
+ return ERROR_FAIL;
+ }
+
+ return ERROR_OK;
+}
+
+/* Read abstract argument 'index' out of the data0..n registers as a value
+ * size_bits wide (32 or 64). Returns ~0 for unsupported sizes.
+ * NOTE(review): the dmi_read() results are not checked here, so a DMI
+ * failure silently yields a partially-assembled value — confirm callers
+ * tolerate this. */
+static riscv_reg_t read_abstract_arg(struct target *target, unsigned index,
+ unsigned size_bits)
+{
+ riscv_reg_t value = 0;
+ uint32_t v;
+ unsigned offset = index * size_bits / 32;
+ switch (size_bits) {
+ default:
+ LOG_ERROR("Unsupported size: %d", size_bits);
+ return ~0;
+ case 64:
+ dmi_read(target, &v, DMI_DATA0 + offset + 1);
+ value |= ((uint64_t) v) << 32;
+ /* falls through */
+ case 32:
+ dmi_read(target, &v, DMI_DATA0 + offset);
+ value |= v;
+ }
+ return value;
+}
+
+/* Write 'value' into abstract argument 'index' (data0..n), size_bits wide
+ * (32 or 64). Returns ERROR_FAIL for unsupported sizes. */
+static int write_abstract_arg(struct target *target, unsigned index,
+ riscv_reg_t value, unsigned size_bits)
+{
+ unsigned offset = index * size_bits / 32;
+ switch (size_bits) {
+ default:
+ LOG_ERROR("Unsupported size: %d", size_bits);
+ return ERROR_FAIL;
+ case 64:
+ dmi_write(target, DMI_DATA0 + offset + 1, value >> 32);
+ /* falls through */
+ case 32:
+ dmi_write(target, DMI_DATA0 + offset, value);
+ }
+ return ERROR_OK;
+}
+
+/**
+ * Build an Access Register abstract command for the given GDB register
+ * number, OR-ing in any extra flags (e.g. transfer/write/postexec).
+ *
+ * Register numbers follow the debug spec's abstract regno map:
+ * 0x1000+ for GPRs, 0x1020+ for FPRs, and CSRs at their own number.
+ *
+ * @param size register width in bits (32 or 64 only)
+ */
+static uint32_t access_register_command(uint32_t number, unsigned size,
+ uint32_t flags)
+{
+ uint32_t command = set_field(0, DMI_COMMAND_CMDTYPE, 0);
+ switch (size) {
+ case 32:
+ command = set_field(command, AC_ACCESS_REGISTER_SIZE, 2);
+ break;
+ case 64:
+ command = set_field(command, AC_ACCESS_REGISTER_SIZE, 3);
+ break;
+ default:
+ assert(0);
+ }
+
+ if (number <= GDB_REGNO_XPR31) {
+ command = set_field(command, AC_ACCESS_REGISTER_REGNO,
+ 0x1000 + number - GDB_REGNO_ZERO);
+ } else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
+ command = set_field(command, AC_ACCESS_REGISTER_REGNO,
+ 0x1020 + number - GDB_REGNO_FPR0);
+ } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
+ command = set_field(command, AC_ACCESS_REGISTER_REGNO,
+ number - GDB_REGNO_CSR0);
+ } else {
+ assert(0);
+ }
+
+ command |= flags;
+
+ return command;
+}
+
+/* Read register 'number' via an abstract command. 'value' may be NULL to
+ * just probe whether the access works. If the DM reports the command as
+ * unsupported, remember that for the whole register class (FPRs or CSRs)
+ * so we don't keep retrying abstract reads that can't work. */
+static int register_read_abstract(struct target *target, uint64_t *value,
+ uint32_t number, unsigned size)
+{
+ RISCV013_INFO(info);
+
+ if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
+ !info->abstract_read_fpr_supported)
+ return ERROR_FAIL;
+ if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
+ !info->abstract_read_csr_supported)
+ return ERROR_FAIL;
+
+ uint32_t command = access_register_command(number, size,
+ AC_ACCESS_REGISTER_TRANSFER);
+
+ int result = execute_abstract_command(target, command);
+ if (result != ERROR_OK) {
+ if (info->cmderr == CMDERR_NOT_SUPPORTED) {
+ if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
+ info->abstract_read_fpr_supported = false;
+ LOG_INFO("Disabling abstract command reads from FPRs.");
+ } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
+ info->abstract_read_csr_supported = false;
+ LOG_INFO("Disabling abstract command reads from CSRs.");
+ }
+ }
+ return result;
+ }
+
+ if (value)
+ *value = read_abstract_arg(target, 0, size);
+
+ return ERROR_OK;
+}
+
+/* Write 'value' to register 'number' via an abstract command. Mirrors
+ * register_read_abstract(): a CMDERR_NOT_SUPPORTED response disables
+ * further abstract writes to that register class (FPRs or CSRs). */
+static int register_write_abstract(struct target *target, uint32_t number,
+ uint64_t value, unsigned size)
+{
+ RISCV013_INFO(info);
+
+ if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
+ !info->abstract_write_fpr_supported)
+ return ERROR_FAIL;
+ if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
+ !info->abstract_write_csr_supported)
+ return ERROR_FAIL;
+
+ uint32_t command = access_register_command(number, size,
+ AC_ACCESS_REGISTER_TRANSFER |
+ AC_ACCESS_REGISTER_WRITE);
+
+ /* The argument must be in place before the command executes. */
+ if (write_abstract_arg(target, 0, value, size) != ERROR_OK)
+ return ERROR_FAIL;
+
+ int result = execute_abstract_command(target, command);
+ if (result != ERROR_OK) {
+ if (info->cmderr == CMDERR_NOT_SUPPORTED) {
+ if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
+ info->abstract_write_fpr_supported = false;
+ LOG_INFO("Disabling abstract command writes to FPRs.");
+ } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
+ info->abstract_write_csr_supported = false;
+ LOG_INFO("Disabling abstract command writes to CSRs.");
+ }
+ }
+ return result;
+ }
+
+ return ERROR_OK;
+}
+
+/* Determine (once, caching the answer in info->progbuf_writable) whether the
+ * program buffer is visible in the hart's address space and writable by
+ * stores from the hart. Executes auipc from the progbuf to learn the address
+ * it runs at (info->progbuf_address), then stores s0 there and checks
+ * whether PROGBUF0 changed. s0 is saved and restored around the probe. */
+static int examine_progbuf(struct target *target)
+{
+ riscv013_info_t *info = get_info(target);
+
+ if (info->progbuf_writable != YNM_MAYBE)
+ return ERROR_OK;
+
+ /* Figure out if progbuf is writable. */
+
+ if (info->progbufsize < 1) {
+ info->progbuf_writable = YNM_NO;
+ LOG_INFO("No program buffer present.");
+ return ERROR_OK;
+ }
+
+ uint64_t s0;
+ if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* auipc leaves the progbuf's execution address (plus upper-immediate 0)
+ * in s0. */
+ struct riscv_program program;
+ riscv_program_init(&program, target);
+ riscv_program_insert(&program, auipc(S0));
+ if (riscv_program_exec(&program, target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (register_read_direct(target, &info->progbuf_address, GDB_REGNO_S0) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* Try to store s0 to the discovered address. */
+ riscv_program_init(&program, target);
+ riscv_program_insert(&program, sw(S0, S0, 0));
+ int result = riscv_program_exec(&program, target);
+
+ if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (result != ERROR_OK) {
+ /* This program might have failed if the program buffer is not
+ * writable. */
+ info->progbuf_writable = YNM_NO;
+ return ERROR_OK;
+ }
+
+ uint32_t written;
+ if (dmi_read(target, &written, DMI_PROGBUF0) != ERROR_OK)
+ return ERROR_FAIL;
+ if (written == (uint32_t) info->progbuf_address) {
+ LOG_INFO("progbuf is writable at 0x%" PRIx64,
+ info->progbuf_address);
+ info->progbuf_writable = YNM_YES;
+
+ } else {
+ LOG_INFO("progbuf is not writeable at 0x%" PRIx64,
+ info->progbuf_address);
+ info->progbuf_writable = YNM_NO;
+ }
+
+ return ERROR_OK;
+}
+
+/* Where a scratch region lives, i.e. how the debugger reaches it over DMI. */
+typedef enum {
+ /* The data0..n abstract argument registers. */
+ SPACE_DMI_DATA,
+ /* Spare words at the end of the program buffer. */
+ SPACE_DMI_PROGBUF,
+ /* An OpenOCD work area in target RAM, accessed via read/write_memory. */
+ SPACE_DMI_RAM
+} memory_space_t;
+
+/* A small scratch region shared between the hart and the debugger; see
+ * scratch_reserve()/scratch_release(). */
+typedef struct {
+ /* How can the debugger access this memory? */
+ memory_space_t memory_space;
+ /* Memory address to access the scratch memory from the hart. */
+ riscv_addr_t hart_address;
+ /* Memory address to access the scratch memory from the debugger. */
+ riscv_addr_t debug_address;
+ /* Work area backing SPACE_DMI_RAM; NULL for the other spaces. */
+ struct working_area *area;
+} scratch_mem_t;
+
+/**
+ * Find some scratch memory to be used with the given program.
+ *
+ * Tries, in order: the data0..n registers (when the hart can access them as
+ * memory), spare words after the program in the program buffer, and finally
+ * an OpenOCD work area. The region is aligned to the smallest power of two
+ * >= size_bytes. Pair with scratch_release().
+ */
+static int scratch_reserve(struct target *target,
+ scratch_mem_t *scratch,
+ struct riscv_program *program,
+ unsigned size_bytes)
+{
+ riscv_addr_t alignment = 1;
+ while (alignment < size_bytes)
+ alignment *= 2;
+
+ scratch->area = NULL;
+
+ riscv013_info_t *info = get_info(target);
+
+ if (info->dataaccess == 1) {
+ /* Sign extend dataaddr. */
+ scratch->hart_address = info->dataaddr;
+ if (info->dataaddr & (1<<11))
+ scratch->hart_address |= 0xfffffffffffff000ULL;
+ /* Align. */
+ scratch->hart_address = (scratch->hart_address + alignment - 1) & ~(alignment - 1);
+
+ /* Use the data registers only when the aligned request actually
+ * fits in datasize 32-bit words. (The comparison was previously
+ * inverted, selecting this space exactly when it did NOT fit.) */
+ if ((size_bytes + scratch->hart_address - info->dataaddr + 3) / 4 <=
+ info->datasize) {
+ scratch->memory_space = SPACE_DMI_DATA;
+ scratch->debug_address = (scratch->hart_address - info->dataaddr) / 4;
+ return ERROR_OK;
+ }
+ }
+
+ if (examine_progbuf(target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* Allow for ebreak at the end of the program. */
+ unsigned program_size = (program->instruction_count + 1) * 4;
+ scratch->hart_address = (info->progbuf_address + program_size + alignment - 1) &
+ ~(alignment - 1);
+ /* Likewise, only use the program buffer when the request fits after the
+ * program itself. (Same inverted comparison fixed here.)
+ * NOTE(review): this path does not check info->progbuf_writable —
+ * confirm it is valid on targets whose progbuf the hart cannot write. */
+ if ((size_bytes + scratch->hart_address - info->progbuf_address + 3) / 4 <=
+ info->progbufsize) {
+ scratch->memory_space = SPACE_DMI_PROGBUF;
+ scratch->debug_address = (scratch->hart_address - info->progbuf_address) / 4;
+ return ERROR_OK;
+ }
+
+ /* Over-allocate so the aligned start still leaves size_bytes available. */
+ if (target_alloc_working_area(target, size_bytes + alignment - 1,
+ &scratch->area) == ERROR_OK) {
+ scratch->hart_address = (scratch->area->address + alignment - 1) &
+ ~(alignment - 1);
+ scratch->memory_space = SPACE_DMI_RAM;
+ scratch->debug_address = scratch->hart_address;
+ return ERROR_OK;
+ }
+
+ LOG_ERROR("Couldn't find %d bytes of scratch RAM to use. Please configure "
+ "a work area with 'configure -work-area-phys'.", size_bytes);
+ return ERROR_FAIL;
+}
+
+/* Release any work area that scratch_reserve() allocated; a no-op for
+ * scratch regions carved out of the data registers or program buffer. */
+static int scratch_release(struct target *target,
+ scratch_mem_t *scratch)
+{
+ if (!scratch->area)
+ return ERROR_OK;
+
+ return target_free_working_area(target, scratch->area);
+}
+
+/* Read a 64-bit value out of the scratch region, assembling it
+ * little-endian from two 32-bit DMI reads or an 8-byte memory read. */
+static int scratch_read64(struct target *target, scratch_mem_t *scratch,
+ uint64_t *value)
+{
+ uint32_t v;
+ switch (scratch->memory_space) {
+ case SPACE_DMI_DATA:
+ if (dmi_read(target, &v, DMI_DATA0 + scratch->debug_address) != ERROR_OK)
+ return ERROR_FAIL;
+ *value = v;
+ if (dmi_read(target, &v, DMI_DATA1 + scratch->debug_address) != ERROR_OK)
+ return ERROR_FAIL;
+ *value |= ((uint64_t) v) << 32;
+ break;
+ case SPACE_DMI_PROGBUF:
+ if (dmi_read(target, &v, DMI_PROGBUF0 + scratch->debug_address) != ERROR_OK)
+ return ERROR_FAIL;
+ *value = v;
+ if (dmi_read(target, &v, DMI_PROGBUF1 + scratch->debug_address) != ERROR_OK)
+ return ERROR_FAIL;
+ *value |= ((uint64_t) v) << 32;
+ break;
+ case SPACE_DMI_RAM:
+ {
+ /* Two 4-byte accesses; decode the buffer as little-endian. */
+ uint8_t buffer[8];
+ if (read_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
+ return ERROR_FAIL;
+ *value = buffer[0] |
+ (((uint64_t) buffer[1]) << 8) |
+ (((uint64_t) buffer[2]) << 16) |
+ (((uint64_t) buffer[3]) << 24) |
+ (((uint64_t) buffer[4]) << 32) |
+ (((uint64_t) buffer[5]) << 40) |
+ (((uint64_t) buffer[6]) << 48) |
+ (((uint64_t) buffer[7]) << 56);
+ }
+ break;
+ }
+ return ERROR_OK;
+}
+
+/* Write a 64-bit value into the scratch region, little-endian, as two
+ * 32-bit DMI writes or an 8-byte memory write.
+ * NOTE(review): the DMI writes here are not checked for failure. */
+static int scratch_write64(struct target *target, scratch_mem_t *scratch,
+ uint64_t value)
+{
+ switch (scratch->memory_space) {
+ case SPACE_DMI_DATA:
+ dmi_write(target, DMI_DATA0 + scratch->debug_address, value);
+ dmi_write(target, DMI_DATA1 + scratch->debug_address, value >> 32);
+ break;
+ case SPACE_DMI_PROGBUF:
+ dmi_write(target, DMI_PROGBUF0 + scratch->debug_address, value);
+ dmi_write(target, DMI_PROGBUF1 + scratch->debug_address, value >> 32);
+ break;
+ case SPACE_DMI_RAM:
+ {
+ uint8_t buffer[8] = {
+ value,
+ value >> 8,
+ value >> 16,
+ value >> 24,
+ value >> 32,
+ value >> 40,
+ value >> 48,
+ value >> 56
+ };
+ if (write_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+ break;
+ }
+ return ERROR_OK;
+}
+
+/** Return register size in bits. */
+static unsigned register_size(struct target *target, unsigned number)
+{
+ /* If reg_cache hasn't been initialized yet, make a guess. We need this
+ * for when this function is called during examine(). */
+ return target->reg_cache ?
+ target->reg_cache->reg_list[number].size :
+ riscv_xlen(target);
+}
+
+/**
+ * Immediately write the new value to the requested register. This mechanism
+ * bypasses any caches.
+ *
+ * Tries an abstract command first; if that fails and the program buffer is
+ * usable (progbufsize + impebreak >= 2) and the hart is halted, falls back
+ * to executing a small program, clobbering and then restoring S0. The reg
+ * cache entry is updated on success.
+ */
+static int register_write_direct(struct target *target, unsigned number,
+ uint64_t value)
+{
+ RISCV013_INFO(info);
+ RISCV_INFO(r);
+
+ LOG_DEBUG("[%d] reg[0x%x] <- 0x%" PRIx64, riscv_current_hartid(target),
+ number, value);
+
+ int result = register_write_abstract(target, number, value,
+ register_size(target, number));
+ if (result == ERROR_OK && target->reg_cache) {
+ struct reg *reg = &target->reg_cache->reg_list[number];
+ buf_set_u64(reg->value, 0, reg->size, value);
+ reg->valid = true;
+ }
+ if (result == ERROR_OK || info->progbufsize + r->impebreak < 2 ||
+ !riscv_is_halted(target))
+ return result;
+
+ /* Fall back to executing a program that moves S0 into the register. */
+ struct riscv_program program;
+ riscv_program_init(&program, target);
+
+ uint64_t s0;
+ if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
+ return ERROR_FAIL;
+
+ scratch_mem_t scratch;
+ bool use_scratch = false;
+ if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
+ riscv_supports_extension(target, riscv_current_hartid(target), 'D') &&
+ riscv_xlen(target) < 64) {
+ /* There are no instructions to move all the bits from a register, so
+ * we need to use some scratch RAM. */
+ use_scratch = true;
+ riscv_program_insert(&program, fld(number - GDB_REGNO_FPR0, S0, 0));
+
+ if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* S0 points at the scratch area holding the 64-bit value. */
+ if (register_write_direct(target, GDB_REGNO_S0, scratch.hart_address)
+ != ERROR_OK) {
+ scratch_release(target, &scratch);
+ return ERROR_FAIL;
+ }
+
+ if (scratch_write64(target, &scratch, value) != ERROR_OK) {
+ scratch_release(target, &scratch);
+ return ERROR_FAIL;
+ }
+
+ } else {
+ if (register_write_direct(target, GDB_REGNO_S0, value) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
+ if (riscv_supports_extension(target, riscv_current_hartid(target), 'D'))
+ riscv_program_insert(&program, fmv_d_x(number - GDB_REGNO_FPR0, S0));
+ else
+ riscv_program_insert(&program, fmv_w_x(number - GDB_REGNO_FPR0, S0));
+ } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
+ riscv_program_csrw(&program, S0, number);
+ } else {
+ LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
+ return ERROR_FAIL;
+ }
+ }
+
+ int exec_out = riscv_program_exec(&program, target);
+ /* Don't message on error. Probably the register doesn't exist. */
+ if (exec_out == ERROR_OK && target->reg_cache) {
+ struct reg *reg = &target->reg_cache->reg_list[number];
+ buf_set_u64(reg->value, 0, reg->size, value);
+ reg->valid = true;
+ }
+
+ if (use_scratch)
+ scratch_release(target, &scratch);
+
+ /* Restore S0. */
+ if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
+ return ERROR_FAIL;
+
+ return exec_out;
+}
+
+/** Return the cached value, or read from the target if necessary. Only GPRs
+ * and FPRs are served from the cache; other registers (e.g. CSRs) may change
+ * spontaneously and are always read from the target. A fresh read updates
+ * the cache. */
+static int register_read(struct target *target, uint64_t *value, uint32_t number)
+{
+ if (number == GDB_REGNO_ZERO) {
+ *value = 0;
+ return ERROR_OK;
+ }
+ if (target->reg_cache &&
+ (number <= GDB_REGNO_XPR31 ||
+ (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31))) {
+ /* Only check the cache for registers that we know won't spontaneously
+ * change. */
+ struct reg *reg = &target->reg_cache->reg_list[number];
+ if (reg && reg->valid) {
+ *value = buf_get_u64(reg->value, 0, reg->size);
+ return ERROR_OK;
+ }
+ }
+ int result = register_read_direct(target, value, number);
+ if (result != ERROR_OK)
+ return ERROR_FAIL;
+ if (target->reg_cache) {
+ struct reg *reg = &target->reg_cache->reg_list[number];
+ buf_set_u64(reg->value, 0, reg->size, *value);
+ reg->valid = true;
+ }
+ return ERROR_OK;
+}
+
+/** Actually read registers from the target right now. Tries an abstract
+ * command first; on failure, when the program buffer is usable
+ * (progbufsize + impebreak >= 2), executes a program that moves the value
+ * into S0 (or through scratch RAM for 64-bit FPRs on RV32), saving and
+ * restoring S0 and, for FPR reads, mstatus.FS. */
+static int register_read_direct(struct target *target, uint64_t *value, uint32_t number)
+{
+ RISCV013_INFO(info);
+ RISCV_INFO(r);
+
+ int result = register_read_abstract(target, value, number,
+ register_size(target, number));
+
+ if (result != ERROR_OK &&
+ info->progbufsize + r->impebreak >= 2 &&
+ number > GDB_REGNO_XPR31) {
+ struct riscv_program program;
+ riscv_program_init(&program, target);
+
+ scratch_mem_t scratch;
+ bool use_scratch = false;
+
+ uint64_t s0;
+ if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* Write program to move data into s0. */
+
+ /* mstatus is only assigned (and later consulted) on the FPR path
+ * below; the guards on 'number' keep the other paths from reading
+ * it uninitialized. */
+ uint64_t mstatus;
+ if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
+ /* FPU access requires mstatus.FS to be non-zero. */
+ if (register_read(target, &mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
+ return ERROR_FAIL;
+ if ((mstatus & MSTATUS_FS) == 0)
+ if (register_write_direct(target, GDB_REGNO_MSTATUS,
+ set_field(mstatus, MSTATUS_FS, 1)) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (riscv_supports_extension(target, riscv_current_hartid(target), 'D')
+ && riscv_xlen(target) < 64) {
+ /* There are no instructions to move all the bits from a
+ * register, so we need to use some scratch RAM. */
+ riscv_program_insert(&program, fsd(number - GDB_REGNO_FPR0, S0,
+ 0));
+
+ if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
+ return ERROR_FAIL;
+ use_scratch = true;
+
+ if (register_write_direct(target, GDB_REGNO_S0,
+ scratch.hart_address) != ERROR_OK) {
+ scratch_release(target, &scratch);
+ return ERROR_FAIL;
+ }
+ } else if (riscv_supports_extension(target,
+ riscv_current_hartid(target), 'D')) {
+ riscv_program_insert(&program, fmv_x_d(S0, number - GDB_REGNO_FPR0));
+ } else {
+ riscv_program_insert(&program, fmv_x_w(S0, number - GDB_REGNO_FPR0));
+ }
+ } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
+ riscv_program_csrr(&program, S0, number);
+ } else {
+ LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
+ return ERROR_FAIL;
+ }
+
+ /* Execute program. */
+ result = riscv_program_exec(&program, target);
+ /* Don't message on error. Probably the register doesn't exist. */
+
+ if (use_scratch) {
+ result = scratch_read64(target, &scratch, value);
+ scratch_release(target, &scratch);
+ if (result != ERROR_OK)
+ return result;
+ } else {
+ /* Read S0 */
+ if (register_read_direct(target, value, GDB_REGNO_S0) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ /* Restore mstatus if we clobbered its FS field above. */
+ if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
+ (mstatus & MSTATUS_FS) == 0)
+ if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* Restore S0. */
+ if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
+ return ERROR_FAIL;
+ }
+
+ if (result == ERROR_OK) {
+ LOG_DEBUG("[%d] reg[0x%x] = 0x%" PRIx64, riscv_current_hartid(target),
+ number, *value);
+ }
+
+ return result;
+}
+
+/* Poll dmstatus until its authbusy flag clears, or riscv_command_timeout_sec
+ * elapses. If 'dmstatus' is non-NULL, it receives the last value read. */
+int wait_for_authbusy(struct target *target, uint32_t *dmstatus)
+{
+ time_t start = time(NULL);
+ while (1) {
+ uint32_t value;
+ if (dmstatus_read(target, &value, false) != ERROR_OK)
+ return ERROR_FAIL;
+ if (dmstatus)
+ *dmstatus = value;
+ if (!get_field(value, DMI_DMSTATUS_AUTHBUSY))
+ break;
+ if (time(NULL) - start > riscv_command_timeout_sec) {
+ LOG_ERROR("Timed out after %ds waiting for authbusy to go low (dmstatus=0x%x). "
+ "Increase the timeout with riscv set_command_timeout_sec.",
+ riscv_command_timeout_sec,
+ value);
+ return ERROR_FAIL;
+ }
+ }
+
+ return ERROR_OK;
+}
+
+/*** OpenOCD target functions. ***/
+
+/* Free the 0.13-specific state that init_target() allocated. */
+static void deinit_target(struct target *target)
+{
+ LOG_DEBUG("riscv_deinit_target()");
+ riscv_info_t *generic_info = (riscv_info_t *) target->arch_info;
+ free(generic_info->version_specific);
+ generic_info->version_specific = NULL;
+}
+
+/* Examine the target: verify DTM and Debug Module versions, reset the DM
+ * (once per DM), measure HARTSELLEN, read the hartinfo/abstractcs layout,
+ * then enumerate the harts — halting each one to probe its XLEN and read
+ * misa — before initializing the register cache and resuming. */
+static int examine(struct target *target)
+{
+ /* Don't need to select dbus, since the first thing we do is read dtmcontrol. */
+
+ uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
+ LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
+ LOG_DEBUG(" dmireset=%d", get_field(dtmcontrol, DTM_DTMCS_DMIRESET));
+ LOG_DEBUG(" idle=%d", get_field(dtmcontrol, DTM_DTMCS_IDLE));
+ LOG_DEBUG(" dmistat=%d", get_field(dtmcontrol, DTM_DTMCS_DMISTAT));
+ LOG_DEBUG(" abits=%d", get_field(dtmcontrol, DTM_DTMCS_ABITS));
+ LOG_DEBUG(" version=%d", get_field(dtmcontrol, DTM_DTMCS_VERSION));
+ if (dtmcontrol == 0) {
+ LOG_ERROR("dtmcontrol is 0. Check JTAG connectivity/board power.");
+ return ERROR_FAIL;
+ }
+ if (get_field(dtmcontrol, DTM_DTMCS_VERSION) != 1) {
+ LOG_ERROR("Unsupported DTM version %d. (dtmcontrol=0x%x)",
+ get_field(dtmcontrol, DTM_DTMCS_VERSION), dtmcontrol);
+ return ERROR_FAIL;
+ }
+
+ riscv013_info_t *info = get_info(target);
+ info->abits = get_field(dtmcontrol, DTM_DTMCS_ABITS);
+ info->dtmcontrol_idle = get_field(dtmcontrol, DTM_DTMCS_IDLE);
+
+ uint32_t dmstatus;
+ if (dmstatus_read(target, &dmstatus, false) != ERROR_OK)
+ return ERROR_FAIL;
+ LOG_DEBUG("dmstatus: 0x%08x", dmstatus);
+ if (get_field(dmstatus, DMI_DMSTATUS_VERSION) != 2) {
+ LOG_ERROR("OpenOCD only supports Debug Module version 2, not %d "
+ "(dmstatus=0x%x)", get_field(dmstatus, DMI_DMSTATUS_VERSION), dmstatus);
+ return ERROR_FAIL;
+ }
+
+ /* Reset the Debug Module. */
+ dm013_info_t *dm = get_dm(target);
+ if (!dm->was_reset) {
+ dmi_write(target, DMI_DMCONTROL, 0);
+ dmi_write(target, DMI_DMCONTROL, DMI_DMCONTROL_DMACTIVE);
+ dm->was_reset = true;
+ }
+
+ /* Write all-ones to hartsel to discover how many bits (HARTSELLEN) are
+ * actually implemented. */
+ dmi_write(target, DMI_DMCONTROL, DMI_DMCONTROL_HARTSELLO |
+ DMI_DMCONTROL_HARTSELHI | DMI_DMCONTROL_DMACTIVE);
+ uint32_t dmcontrol;
+ if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (!get_field(dmcontrol, DMI_DMCONTROL_DMACTIVE)) {
+ LOG_ERROR("Debug Module did not become active. dmcontrol=0x%x",
+ dmcontrol);
+ return ERROR_FAIL;
+ }
+
+ uint32_t hartsel =
+ (get_field(dmcontrol, DMI_DMCONTROL_HARTSELHI) <<
+ DMI_DMCONTROL_HARTSELLO_LENGTH) |
+ get_field(dmcontrol, DMI_DMCONTROL_HARTSELLO);
+ info->hartsellen = 0;
+ while (hartsel & 1) {
+ info->hartsellen++;
+ hartsel >>= 1;
+ }
+ LOG_DEBUG("hartsellen=%d", info->hartsellen);
+
+ uint32_t hartinfo;
+ if (dmi_read(target, &hartinfo, DMI_HARTINFO) != ERROR_OK)
+ return ERROR_FAIL;
+
+ info->datasize = get_field(hartinfo, DMI_HARTINFO_DATASIZE);
+ info->dataaccess = get_field(hartinfo, DMI_HARTINFO_DATAACCESS);
+ info->dataaddr = get_field(hartinfo, DMI_HARTINFO_DATAADDR);
+
+ if (!get_field(dmstatus, DMI_DMSTATUS_AUTHENTICATED)) {
+ LOG_ERROR("Debugger is not authenticated to target Debug Module. "
+ "(dmstatus=0x%x). Use `riscv authdata_read` and "
+ "`riscv authdata_write` commands to authenticate.", dmstatus);
+ /* If we return ERROR_FAIL here, then in a multicore setup the next
+ * core won't be examined, which means we won't set up the
+ * authentication commands for them, which means the config script
+ * needs to be a lot more complex. */
+ return ERROR_OK;
+ }
+
+ if (dmi_read(target, &info->sbcs, DMI_SBCS) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* Check that abstract data registers are accessible. */
+ uint32_t abstractcs;
+ if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK)
+ return ERROR_FAIL;
+ info->datacount = get_field(abstractcs, DMI_ABSTRACTCS_DATACOUNT);
+ info->progbufsize = get_field(abstractcs, DMI_ABSTRACTCS_PROGBUFSIZE);
+
+ LOG_INFO("datacount=%d progbufsize=%d", info->datacount, info->progbufsize);
+
+ RISCV_INFO(r);
+ r->impebreak = get_field(dmstatus, DMI_DMSTATUS_IMPEBREAK);
+
+ if (info->progbufsize + r->impebreak < 2) {
+ LOG_WARNING("We won't be able to execute fence instructions on this "
+ "target. Memory may not always appear consistent. "
+ "(progbufsize=%d, impebreak=%d)", info->progbufsize,
+ r->impebreak);
+ }
+
+ /* Before doing anything else we must first enumerate the harts. */
+
+ /* Don't call any riscv_* functions until after we've counted the number of
+ * cores and initialized registers. */
+ for (int i = 0; i < MIN(RISCV_MAX_HARTS, 1 << info->hartsellen); ++i) {
+ if (!riscv_rtos_enabled(target) && i != target->coreid)
+ continue;
+
+ r->current_hartid = i;
+ if (riscv013_select_current_hart(target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ uint32_t s;
+ if (dmstatus_read(target, &s, true) != ERROR_OK)
+ return ERROR_FAIL;
+ if (get_field(s, DMI_DMSTATUS_ANYNONEXISTENT))
+ break;
+ r->hart_count = i + 1;
+
+ if (get_field(s, DMI_DMSTATUS_ANYHAVERESET))
+ dmi_write(target, DMI_DMCONTROL,
+ set_hartsel(DMI_DMCONTROL_DMACTIVE | DMI_DMCONTROL_ACKHAVERESET, i));
+
+ if (!riscv_is_halted(target)) {
+ if (riscv013_halt_current_hart(target) != ERROR_OK) {
+ LOG_ERROR("Fatal: Hart %d failed to halt during examine()", i);
+ return ERROR_FAIL;
+ }
+ }
+
+ /* Without knowing anything else we can at least mess with the
+ * program buffer. */
+ r->debug_buffer_size[i] = info->progbufsize;
+
+ /* Probe XLEN by attempting a 64-bit abstract read of s0. */
+ int result = register_read_abstract(target, NULL, GDB_REGNO_S0, 64);
+ if (result == ERROR_OK)
+ r->xlen[i] = 64;
+ else
+ r->xlen[i] = 32;
+
+ if (register_read(target, &r->misa[i], GDB_REGNO_MISA)) {
+ LOG_ERROR("Fatal: Failed to read MISA from hart %d.", i);
+ return ERROR_FAIL;
+ }
+
+ /* Now init registers based on what we discovered. */
+ if (riscv_init_registers(target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* Display this as early as possible to help people who are using
+ * really slow simulators. */
+ LOG_DEBUG(" hart %d: XLEN=%d, misa=0x%" PRIx64, i, r->xlen[i],
+ r->misa[i]);
+ }
+
+ LOG_DEBUG("Enumerated %d harts", r->hart_count);
+
+ if (r->hart_count == 0) {
+ LOG_ERROR("No harts found!");
+ return ERROR_FAIL;
+ }
+
+ /* Resumes all the harts, so the debugger can later pause them. */
+ /* TODO: Only do this if the harts were halted to start with. */
+ riscv_resume_all_harts(target);
+ target->state = TARGET_RUNNING;
+
+ target_set_examined(target);
+
+ /* Some regression suites rely on seeing 'Examined RISC-V core' to know
+ * when they can connect with gdb/telnet.
+ * We will need to update those suites if we want to change that text. */
+ LOG_INFO("Examined RISC-V core; found %d harts",
+ riscv_count_harts(target));
+ for (int i = 0; i < riscv_count_harts(target); ++i) {
+ if (riscv_hart_enabled(target, i)) {
+ LOG_INFO(" hart %d: XLEN=%d, misa=0x%" PRIx64, i, r->xlen[i],
+ r->misa[i]);
+ } else {
+ LOG_INFO(" hart %d: currently disabled", i);
+ }
+ }
+ return ERROR_OK;
+}
+
+/* Read the authdata register, first waiting for any pending authentication
+ * operation (authbusy) to finish. */
+int riscv013_authdata_read(struct target *target, uint32_t *value)
+{
+ if (wait_for_authbusy(target, NULL) != ERROR_OK)
+ return ERROR_FAIL;
+
+ return dmi_read(target, value, DMI_AUTHDATA);
+}
+
+/* Write the authdata register. If this write flips the Debug Module from
+ * unauthenticated to authenticated, re-examine every target on this DM,
+ * since examine() bailed out early for them while unauthenticated. */
+int riscv013_authdata_write(struct target *target, uint32_t value)
+{
+ uint32_t before, after;
+ if (wait_for_authbusy(target, &before) != ERROR_OK)
+ return ERROR_FAIL;
+
+ dmi_write(target, DMI_AUTHDATA, value);
+
+ if (wait_for_authbusy(target, &after) != ERROR_OK)
+ return ERROR_FAIL;
+
+ if (!get_field(before, DMI_DMSTATUS_AUTHENTICATED) &&
+ get_field(after, DMI_DMSTATUS_AUTHENTICATED)) {
+ LOG_INFO("authdata_write resulted in successful authentication");
+ int result = ERROR_OK;
+ dm013_info_t *dm = get_dm(target);
+ target_list_t *entry;
+ list_for_each_entry(entry, &dm->target_list, list) {
+ if (examine(entry->target) != ERROR_OK)
+ result = ERROR_FAIL;
+ }
+ return result;
+ }
+
+ return ERROR_OK;
+}
+
+/* Allocate the 0.13-specific per-target state and wire the generic RISC-V
+ * layer's function pointers to the 0.13 implementations. Freed again in
+ * deinit_target(). */
+static int init_target(struct command_context *cmd_ctx,
+ struct target *target)
+{
+ LOG_DEBUG("init");
+ riscv_info_t *generic_info = (riscv_info_t *) target->arch_info;
+
+ generic_info->get_register = &riscv013_get_register;
+ generic_info->set_register = &riscv013_set_register;
+ generic_info->select_current_hart = &riscv013_select_current_hart;
+ generic_info->is_halted = &riscv013_is_halted;
+ generic_info->halt_current_hart = &riscv013_halt_current_hart;
+ generic_info->resume_current_hart = &riscv013_resume_current_hart;
+ generic_info->step_current_hart = &riscv013_step_current_hart;
+ generic_info->on_halt = &riscv013_on_halt;
+ generic_info->on_resume = &riscv013_on_resume;
+ generic_info->on_step = &riscv013_on_step;
+ generic_info->halt_reason = &riscv013_halt_reason;
+ generic_info->read_debug_buffer = &riscv013_read_debug_buffer;
+ generic_info->write_debug_buffer = &riscv013_write_debug_buffer;
+ generic_info->execute_debug_buffer = &riscv013_execute_debug_buffer;
+ generic_info->fill_dmi_write_u64 = &riscv013_fill_dmi_write_u64;
+ generic_info->fill_dmi_read_u64 = &riscv013_fill_dmi_read_u64;
+ generic_info->fill_dmi_nop_u64 = &riscv013_fill_dmi_nop_u64;
+ generic_info->dmi_write_u64_bits = &riscv013_dmi_write_u64_bits;
+ generic_info->authdata_read = &riscv013_authdata_read;
+ generic_info->authdata_write = &riscv013_authdata_write;
+ generic_info->dmi_read = &dmi_read;
+ generic_info->dmi_write = &dmi_write;
+ generic_info->version_specific = calloc(1, sizeof(riscv013_info_t));
+ if (!generic_info->version_specific)
+ return ERROR_FAIL;
+ riscv013_info_t *info = get_info(target);
+
+ /* -1 marks "not yet discovered"; examine() fills in the real value. */
+ info->progbufsize = -1;
+
+ info->dmi_busy_delay = 0;
+ info->bus_master_read_delay = 0;
+ info->bus_master_write_delay = 0;
+ info->ac_busy_delay = 0;
+
+ /* Assume all these abstract commands are supported until we learn
+ * otherwise.
+ * TODO: The spec allows eg. one CSR to be able to be accessed abstractly
+ * while another one isn't. We don't track that this closely here, but in
+ * the future we probably should. */
+ info->abstract_read_csr_supported = true;
+ info->abstract_write_csr_supported = true;
+ info->abstract_read_fpr_supported = true;
+ info->abstract_write_fpr_supported = true;
+
+ return ERROR_OK;
+}
+
+/* Assert ndmreset, optionally with haltreq set so harts halt on reset
+ * (target->reset_halt). With an RTOS configured, every enabled hart is
+ * selected in turn to latch haltreq before ndmreset is asserted; otherwise
+ * only the current hart is reset. */
+static int assert_reset(struct target *target)
+{
+ RISCV_INFO(r);
+
+ select_dmi(target);
+
+ uint32_t control_base = set_field(0, DMI_DMCONTROL_DMACTIVE, 1);
+
+ if (target->rtos) {
+ /* There's only one target, and OpenOCD thinks each hart is a thread.
+ * We must reset them all. */
+
+ /* TODO: Try to use hasel in dmcontrol */
+
+ /* Set haltreq for each hart. */
+ uint32_t control = control_base;
+ for (int i = 0; i < riscv_count_harts(target); ++i) {
+ if (!riscv_hart_enabled(target, i))
+ continue;
+
+ control = set_hartsel(control_base, i);
+ control = set_field(control, DMI_DMCONTROL_HALTREQ,
+ target->reset_halt ? 1 : 0);
+ dmi_write(target, DMI_DMCONTROL, control);
+ }
+ /* Assert ndmreset */
+ control = set_field(control, DMI_DMCONTROL_NDMRESET, 1);
+ dmi_write(target, DMI_DMCONTROL, control);
+
+ } else {
+ /* Reset just this hart. */
+ uint32_t control = set_hartsel(control_base, r->current_hartid);
+ control = set_field(control, DMI_DMCONTROL_HALTREQ,
+ target->reset_halt ? 1 : 0);
+ control = set_field(control, DMI_DMCONTROL_NDMRESET, 1);
+ dmi_write(target, DMI_DMCONTROL, control);
+ }
+
+ target->state = TARGET_RESET;
+
+ return ERROR_OK;
+}
+
+/* Deassert ndmreset (keeping haltreq if reset_halt was requested), then wait
+ * for each relevant hart to come out of reset — either halted or running,
+ * depending on reset_halt — within riscv_reset_timeout_sec, acking
+ * havereset as harts reappear. */
+static int deassert_reset(struct target *target)
+{
+ RISCV_INFO(r);
+ RISCV013_INFO(info);
+ select_dmi(target);
+
+ /* Clear the reset, but make sure haltreq is still set */
+ uint32_t control = 0;
+ control = set_field(control, DMI_DMCONTROL_HALTREQ, target->reset_halt ? 1 : 0);
+ control = set_field(control, DMI_DMCONTROL_DMACTIVE, 1);
+ dmi_write(target, DMI_DMCONTROL,
+ set_hartsel(control, r->current_hartid));
+
+ uint32_t dmstatus;
+ /* Save dmi_busy_delay and restore it at the end — presumably so delay
+ * inflation caused by DMI busy responses during reset doesn't stick;
+ * NOTE(review): confirm this intent. */
+ int dmi_busy_delay = info->dmi_busy_delay;
+ time_t start = time(NULL);
+
+ for (int i = 0; i < riscv_count_harts(target); ++i) {
+ int index = i;
+ if (target->rtos) {
+ if (!riscv_hart_enabled(target, index))
+ continue;
+ dmi_write(target, DMI_DMCONTROL,
+ set_hartsel(control, index));
+ } else {
+ index = r->current_hartid;
+ }
+
+ /* NOTE(review): 'operation' points at string literals; it should be
+ * const char *. */
+ char *operation;
+ uint32_t expected_field;
+ if (target->reset_halt) {
+ operation = "halt";
+ expected_field = DMI_DMSTATUS_ALLHALTED;
+ } else {
+ operation = "run";
+ expected_field = DMI_DMSTATUS_ALLRUNNING;
+ }
+ LOG_DEBUG("Waiting for hart %d to %s out of reset.", index, operation);
+ while (1) {
+ int result = dmstatus_read_timeout(target, &dmstatus, true,
+ riscv_reset_timeout_sec);
+ if (result == ERROR_TIMEOUT_REACHED)
+ LOG_ERROR("Hart %d didn't complete a DMI read coming out of "
+ "reset in %ds; Increase the timeout with riscv "
+ "set_reset_timeout_sec.",
+ index, riscv_reset_timeout_sec);
+ if (result != ERROR_OK)
+ return result;
+ if (get_field(dmstatus, expected_field))
+ break;
+ if (time(NULL) - start > riscv_reset_timeout_sec) {
+ LOG_ERROR("Hart %d didn't %s coming out of reset in %ds; "
+ "dmstatus=0x%x; "
+ "Increase the timeout with riscv set_reset_timeout_sec.",
+ index, operation, riscv_reset_timeout_sec, dmstatus);
+ return ERROR_FAIL;
+ }
+ }
+ target->state = TARGET_HALTED;
+
+ if (get_field(dmstatus, DMI_DMSTATUS_ALLHAVERESET)) {
+ /* Ack reset. */
+ dmi_write(target, DMI_DMCONTROL,
+ set_hartsel(control, index) |
+ DMI_DMCONTROL_ACKHAVERESET);
+ }
+
+ if (!target->rtos)
+ break;
+ }
+ info->dmi_busy_delay = dmi_busy_delay;
+ return ERROR_OK;
+}
+
+/**
+ * Store 'value' into 'buffer' little-endian.
+ *
+ * @size in bytes (1, 2, 4, or 8 only)
+ */
+static void write_to_buf(uint8_t *buffer, uint64_t value, unsigned size)
+{
+ assert(size == 1 || size == 2 || size == 4 || size == 8);
+ for (unsigned i = 0; i < size; i++)
+ buffer[i] = value >> (8 * i);
+}
+
+/* Execute a fence instruction on the target via the program buffer, logging
+ * an error on failure. */
+static int execute_fence(struct target *target)
+{
+ struct riscv_program program;
+ riscv_program_init(&program, target);
+ riscv_program_fence(&program);
+ int result = riscv_program_exec(&program, target);
+ if (result != ERROR_OK)
+ LOG_ERROR("Unable to execute fence");
+ return result;
+}
+
+/**
+ * Emit a LOG_DEBUG line describing a memory access, e.g.
+ * "M[0x80000000] reads 0x00001234". The value is zero-padded to the access
+ * width by building the format string at runtime.
+ *
+ * @param size_bytes access width in bytes (1, 2, 4, or 8)
+ * @param read true for a read, false for a write
+ */
+static void log_memory_access(target_addr_t address, uint64_t value,
+		unsigned size_bytes, bool read)
+{
+	if (debug_level < LOG_LVL_DEBUG)
+		return;
+
+	char fmt[80];
+	sprintf(fmt, "M[0x%" TARGET_PRIxADDR "] %ss 0x%%0%d" PRIx64,
+			address, read ? "read" : "write", size_bytes * 2);
+	/* Mask off bytes beyond the access size. Shifting a uint64_t by 64 bits
+	 * (size_bytes == 8) is undefined behavior in C, so only mask for
+	 * narrower accesses; an 8-byte access uses the full value anyway. */
+	if (size_bytes < 8)
+		value &= (((uint64_t) 0x1) << (size_bytes * 8)) - 1;
+	LOG_DEBUG(fmt, value);
+}
+
+/* Read the relevant sbdata regs depending on size, and put the results into
+ * buffer.
+ *
+ * The registers are read high-to-low (SBDATA3..SBDATA1 first, SBDATA0 last)
+ * because on the hardware side reading SBDATA0 is what may trigger the next
+ * system bus access when sbreadondata is set, so it must come last.
+ * @address is used only for logging. Buffer layout is little-endian. */
+static int read_memory_bus_word(struct target *target, target_addr_t address,
+		uint32_t size, uint8_t *buffer)
+{
+	uint32_t value;
+	if (size > 12) {
+		if (dmi_read(target, &value, DMI_SBDATA3) != ERROR_OK)
+			return ERROR_FAIL;
+		write_to_buf(buffer + 12, value, 4);
+		log_memory_access(address + 12, value, 4, true);
+	}
+	if (size > 8) {
+		if (dmi_read(target, &value, DMI_SBDATA2) != ERROR_OK)
+			return ERROR_FAIL;
+		write_to_buf(buffer + 8, value, 4);
+		log_memory_access(address + 8, value, 4, true);
+	}
+	if (size > 4) {
+		if (dmi_read(target, &value, DMI_SBDATA1) != ERROR_OK)
+			return ERROR_FAIL;
+		write_to_buf(buffer + 4, value, 4);
+		log_memory_access(address + 4, value, 4, true);
+	}
+	if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
+		return ERROR_FAIL;
+	write_to_buf(buffer, value, MIN(size, 4));
+	log_memory_access(address, value, MIN(size, 4), true);
+	return ERROR_OK;
+}
+
+/* Return an sbcs value with the sbaccess field encoding the given access
+ * width: 0=1 byte, 1=2 bytes, ... 4=16 bytes (i.e. log2 of the byte count).
+ * Asserts on any other width. */
+static uint32_t sb_sbaccess(unsigned size_bytes)
+{
+	switch (size_bytes) {
+		case 1:
+			return set_field(0, DMI_SBCS_SBACCESS, 0);
+		case 2:
+			return set_field(0, DMI_SBCS_SBACCESS, 1);
+		case 4:
+			return set_field(0, DMI_SBCS_SBACCESS, 2);
+		case 8:
+			return set_field(0, DMI_SBCS_SBACCESS, 3);
+		case 16:
+			return set_field(0, DMI_SBCS_SBACCESS, 4);
+	}
+	assert(0);
+	return 0;	/* Make mingw happy. */
+}
+
+/* Read back the current system bus address from SBADDRESS0 (and SBADDRESS1
+ * when the bus address is wider than 32 bits and OpenOCD was built with
+ * 64-bit target address support). Used to find out where a busy/error
+ * condition stopped an autoincrementing access.
+ * NOTE(review): dmi_read() failures are ignored here; on error the returned
+ * address may be stale — confirm callers tolerate that. */
+static target_addr_t sb_read_address(struct target *target)
+{
+	RISCV013_INFO(info);
+	unsigned sbasize = get_field(info->sbcs, DMI_SBCS_SBASIZE);
+	target_addr_t address = 0;
+	uint32_t v;
+	if (sbasize > 32) {
+#if BUILD_TARGET64
+		dmi_read(target, &v, DMI_SBADDRESS1);
+		address |= v;
+		address <<= 32;
+#endif
+	}
+	dmi_read(target, &v, DMI_SBADDRESS0);
+	address |= v;
+	return address;
+}
+
+/* Program the system bus address registers. Upper address words are zeroed
+ * when the bus address is wider than the address we can express. SBADDRESS0
+ * is written last since (with sbreadonaddr) that write triggers the access.
+ * Only the final dmi_write's status is returned. */
+static int sb_write_address(struct target *target, target_addr_t address)
+{
+	RISCV013_INFO(info);
+	unsigned sbasize = get_field(info->sbcs, DMI_SBCS_SBASIZE);
+	/* There currently is no support for >64-bit addresses in OpenOCD. */
+	if (sbasize > 96)
+		dmi_write(target, DMI_SBADDRESS3, 0);
+	if (sbasize > 64)
+		dmi_write(target, DMI_SBADDRESS2, 0);
+	if (sbasize > 32)
+		/* The #if selects which single statement the if controls. */
+#if BUILD_TARGET64
+		dmi_write(target, DMI_SBADDRESS1, address >> 32);
+#else
+		dmi_write(target, DMI_SBADDRESS1, 0);
+#endif
+	return dmi_write(target, DMI_SBADDRESS0, address);
+}
+
+/* Poll sbcs until sbbusy is clear, storing the final sbcs value in *sbcs.
+ * Gives up with ERROR_FAIL after riscv_command_timeout_sec seconds of
+ * wall-clock time. */
+static int read_sbcs_nonbusy(struct target *target, uint32_t *sbcs)
+{
+	time_t start = time(NULL);
+	while (1) {
+		if (dmi_read(target, sbcs, DMI_SBCS) != ERROR_OK)
+			return ERROR_FAIL;
+		if (!get_field(*sbcs, DMI_SBCS_SBBUSY))
+			return ERROR_OK;
+		if (time(NULL) - start > riscv_command_timeout_sec) {
+			LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
+					"Increase the timeout with riscv set_command_timeout_sec.",
+					riscv_command_timeout_sec, *sbcs);
+			return ERROR_FAIL;
+		}
+	}
+}
+
+/* Read memory over the system bus using the 0.11-era (sbversion=0) register
+ * layout: single reads via sbsingleread, block reads via
+ * sbautoread+sbautoincrement. The sbsingleread/sbautoread bits do not exist
+ * in the 0.13 spec, hence the local constants below. */
+static int read_memory_bus_v0(struct target *target, target_addr_t address,
+		uint32_t size, uint32_t count, uint8_t *buffer)
+{
+	LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
+			TARGET_PRIxADDR, size, count, address);
+	uint8_t *t_buffer = buffer;
+	riscv_addr_t cur_addr = address;
+	riscv_addr_t fin_addr = address + (count * size);
+	uint32_t access = 0;
+
+	const int DMI_SBCS_SBSINGLEREAD_OFFSET = 20;
+	const uint32_t DMI_SBCS_SBSINGLEREAD = (0x1U << DMI_SBCS_SBSINGLEREAD_OFFSET);
+
+	const int DMI_SBCS_SBAUTOREAD_OFFSET = 15;
+	const uint32_t DMI_SBCS_SBAUTOREAD = (0x1U << DMI_SBCS_SBAUTOREAD_OFFSET);
+
+	/* We favor a one-off read if there is an issue. */
+	if (count == 1) {
+		for (uint32_t i = 0; i < count; i++) {
+			if (dmi_read(target, &access, DMI_SBCS) != ERROR_OK)
+				return ERROR_FAIL;
+			dmi_write(target, DMI_SBADDRESS0, cur_addr);
+			/* size/2 matching the bit access of the spec 0.13 */
+			access = set_field(access, DMI_SBCS_SBACCESS, size/2);
+			access = set_field(access, DMI_SBCS_SBSINGLEREAD, 1);
+			LOG_DEBUG("\r\nread_memory: sab: access: 0x%08x", access);
+			dmi_write(target, DMI_SBCS, access);
+			/* 3) read */
+			uint32_t value;
+			if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
+				return ERROR_FAIL;
+			LOG_DEBUG("\r\nread_memory: sab: value: 0x%08x", value);
+			write_to_buf(t_buffer, value, size);
+			t_buffer += size;
+			cur_addr += size;
+		}
+		return ERROR_OK;
+	}
+
+	/* has to be the same size if we want to read a block */
+	LOG_DEBUG("reading block until final address 0x%" PRIx64, fin_addr);
+	if (dmi_read(target, &access, DMI_SBCS) != ERROR_OK)
+		return ERROR_FAIL;
+	/* set current address */
+	dmi_write(target, DMI_SBADDRESS0, cur_addr);
+	/* 2) write sbaccess=2, sbsingleread,sbautoread,sbautoincrement
+	 * size/2 matching the bit access of the spec 0.13 */
+	access = set_field(access, DMI_SBCS_SBACCESS, size/2);
+	access = set_field(access, DMI_SBCS_SBAUTOREAD, 1);
+	access = set_field(access, DMI_SBCS_SBSINGLEREAD, 1);
+	access = set_field(access, DMI_SBCS_SBAUTOINCREMENT, 1);
+	LOG_DEBUG("\r\naccess: 0x%08x", access);
+	dmi_write(target, DMI_SBCS, access);
+
+	while (cur_addr < fin_addr) {
+		LOG_DEBUG("\r\nsab:autoincrement: \r\n size: %d\tcount:%d\taddress: 0x%08"
+				PRIx64, size, count, cur_addr);
+		/* read */
+		uint32_t value;
+		if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
+			return ERROR_FAIL;
+		write_to_buf(t_buffer, value, size);
+		cur_addr += size;
+		t_buffer += size;
+
+		/* if we are reaching last address, we must clear autoread */
+		if (cur_addr == fin_addr && count != 1) {
+			/* NOTE(review): at this point t_buffer has already advanced to
+			 * buffer + count*size, so this write_to_buf appears to store one
+			 * word past the end of the caller's buffer. Presumably it was
+			 * meant to drain the autoread pipeline's final word — confirm
+			 * against the 0.11 system-bus semantics and fix the offset. */
+			dmi_write(target, DMI_SBCS, 0);
+			if (dmi_read(target, &value, DMI_SBDATA0) != ERROR_OK)
+				return ERROR_FAIL;
+			write_to_buf(t_buffer, value, size);
+		}
+	}
+
+	return ERROR_OK;
+}
+
+/**
+ * Read the requested memory using the system bus interface (sbversion=1,
+ * debug spec 0.13).
+ *
+ * Writing the address (with sbreadonaddr set) triggers the first bus read;
+ * each subsequent read of sbdata0 (with sbreadondata set) triggers the next,
+ * so the last word is fetched after clearing sbreadondata. On sbbusyerror
+ * the loop re-reads the stall address from sbaddress and retries with an
+ * increased bus_master_read_delay.
+ * NOTE(review): read_memory_bus_word() failures inside the burst loop are
+ * not checked — confirm whether that is intentional best-effort behavior.
+ */
+static int read_memory_bus_v1(struct target *target, target_addr_t address,
+		uint32_t size, uint32_t count, uint8_t *buffer)
+{
+	RISCV013_INFO(info);
+	target_addr_t next_address = address;
+	target_addr_t end_address = address + count * size;
+
+	while (next_address < end_address) {
+		uint32_t sbcs = set_field(0, DMI_SBCS_SBREADONADDR, 1);
+		sbcs |= sb_sbaccess(size);
+		sbcs = set_field(sbcs, DMI_SBCS_SBAUTOINCREMENT, 1);
+		sbcs = set_field(sbcs, DMI_SBCS_SBREADONDATA, count > 1);
+		dmi_write(target, DMI_SBCS, sbcs);
+
+		/* This address write will trigger the first read. */
+		sb_write_address(target, next_address);
+
+		if (info->bus_master_read_delay) {
+			/* Give the system bus time to complete the access. */
+			jtag_add_runtest(info->bus_master_read_delay, TAP_IDLE);
+			if (jtag_execute_queue() != ERROR_OK) {
+				LOG_ERROR("Failed to scan idle sequence");
+				return ERROR_FAIL;
+			}
+		}
+
+		for (uint32_t i = (next_address - address) / size; i < count - 1; i++) {
+			read_memory_bus_word(target, address + i * size, size,
+					buffer + i * size);
+		}
+
+		/* Clear sbreadondata so reading the final word does not trigger a
+		 * read past the end of the requested range. */
+		sbcs = set_field(sbcs, DMI_SBCS_SBREADONDATA, 0);
+		dmi_write(target, DMI_SBCS, sbcs);
+
+		read_memory_bus_word(target, address + (count - 1) * size, size,
+				buffer + (count - 1) * size);
+
+		if (read_sbcs_nonbusy(target, &sbcs) != ERROR_OK)
+			return ERROR_FAIL;
+
+		if (get_field(sbcs, DMI_SBCS_SBBUSYERROR)) {
+			/* We read while the target was busy. Slow down and try again. */
+			dmi_write(target, DMI_SBCS, DMI_SBCS_SBBUSYERROR);
+			next_address = sb_read_address(target);
+			info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
+			continue;
+		}
+
+		unsigned error = get_field(sbcs, DMI_SBCS_SBERROR);
+		if (error == 0) {
+			next_address = end_address;
+		} else {
+			/* Some error indicating the bus access failed, but not because of
+			 * something we did wrong. */
+			dmi_write(target, DMI_SBCS, DMI_SBCS_SBERROR);
+			return ERROR_FAIL;
+		}
+	}
+
+	return ERROR_OK;
+}
+
+/**
+ * Read the requested memory, taking care to execute every read exactly once,
+ * even if cmderr=busy is encountered.
+ *
+ * Uses the program buffer (load + address increment) with abstractauto, so
+ * each debugger read of DMI_DATA0 both returns one word and triggers the
+ * next load. s0 and s1 are saved on entry and restored on every exit path.
+ */
+static int read_memory_progbuf(struct target *target, target_addr_t address,
+		uint32_t size, uint32_t count, uint8_t *buffer)
+{
+	RISCV013_INFO(info);
+
+	int result = ERROR_OK;
+
+	LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
+			size, address);
+
+	select_dmi(target);
+
+	/* s0 holds the next address to write to
+	 * s1 holds the next data value to write
+	 */
+	uint64_t s0, s1;
+	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
+		return ERROR_FAIL;
+	if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
+		return ERROR_FAIL;
+
+	if (execute_fence(target) != ERROR_OK)
+		return ERROR_FAIL;
+
+	/* Write the program (load, increment) */
+	struct riscv_program program;
+	riscv_program_init(&program, target);
+	switch (size) {
+		case 1:
+			riscv_program_lbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
+			break;
+		case 2:
+			riscv_program_lhr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
+			break;
+		case 4:
+			riscv_program_lwr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
+			break;
+		default:
+			LOG_ERROR("Unsupported size: %d", size);
+			return ERROR_FAIL;
+	}
+	riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, size);
+
+	if (riscv_program_ebreak(&program) != ERROR_OK)
+		return ERROR_FAIL;
+	riscv_program_write(&program);
+
+	/* Write address to S0, and execute buffer. */
+	result = register_write_direct(target, GDB_REGNO_S0, address);
+	if (result != ERROR_OK)
+		goto error;
+	uint32_t command = access_register_command(GDB_REGNO_S1, riscv_xlen(target),
+			AC_ACCESS_REGISTER_TRANSFER |
+			AC_ACCESS_REGISTER_POSTEXEC);
+	result = execute_abstract_command(target, command);
+	if (result != ERROR_OK)
+		goto error;
+
+	/* First read has just triggered. Result is in s1. */
+
+	dmi_write(target, DMI_ABSTRACTAUTO,
+			1 << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
+
+	/* read_addr is the next address that the hart will read from, which is the
+	 * value in s0. */
+	riscv_addr_t read_addr = address + size;
+	/* The next address that we need to receive data for. */
+	riscv_addr_t receive_addr = address;
+	riscv_addr_t fin_addr = address + (count * size);
+	unsigned skip = 1;
+	while (read_addr < fin_addr) {
+		LOG_DEBUG("read_addr=0x%" PRIx64 ", receive_addr=0x%" PRIx64
+				", fin_addr=0x%" PRIx64, read_addr, receive_addr, fin_addr);
+		/* The pipeline looks like this:
+		 * memory -> s1 -> dm_data0 -> debugger
+		 * It advances every time the debugger reads dmdata0.
+		 * So at any time the debugger has just read mem[s0 - 3*size],
+		 * dm_data0 contains mem[s0 - 2*size]
+		 * s1 contains mem[s0-size] */
+
+		LOG_DEBUG("creating burst to read from 0x%" PRIx64
+				" up to 0x%" PRIx64, read_addr, fin_addr);
+		assert(read_addr >= address && read_addr < fin_addr);
+		struct riscv_batch *batch = riscv_batch_alloc(target, 32,
+				info->dmi_busy_delay + info->ac_busy_delay);
+
+		size_t reads = 0;
+		for (riscv_addr_t addr = read_addr; addr < fin_addr; addr += size) {
+			riscv_batch_add_dmi_read(batch, DMI_DATA0);
+
+			reads++;
+			if (riscv_batch_full(batch))
+				break;
+		}
+
+		riscv_batch_run(batch);
+
+		/* Wait for the target to finish performing the last abstract command,
+		 * and update our copy of cmderr. On DMI failure, free the batch and
+		 * take the error path so abstractauto is cleared and s0/s1 are
+		 * restored. */
+		uint32_t abstractcs;
+		if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK) {
+			riscv_batch_free(batch);
+			result = ERROR_FAIL;
+			goto error;
+		}
+		while (get_field(abstractcs, DMI_ABSTRACTCS_BUSY))
+			if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK) {
+				riscv_batch_free(batch);
+				result = ERROR_FAIL;
+				goto error;
+			}
+		info->cmderr = get_field(abstractcs, DMI_ABSTRACTCS_CMDERR);
+
+		unsigned cmderr = info->cmderr;
+		riscv_addr_t next_read_addr;
+		uint32_t dmi_data0 = -1;
+		switch (info->cmderr) {
+			case CMDERR_NONE:
+				LOG_DEBUG("successful (partial?) memory read");
+				next_read_addr = read_addr + reads * size;
+				break;
+			case CMDERR_BUSY:
+				LOG_DEBUG("memory read resulted in busy response");
+
+				/*
+				 * If you want to exercise this code path, apply the following patch to spike:
+--- a/riscv/debug_module.cc
++++ b/riscv/debug_module.cc
+@@ -1,3 +1,5 @@
++#include <unistd.h>
++
+ #include <cassert>
+
+ #include "debug_module.h"
+@@ -398,6 +400,15 @@ bool debug_module_t::perform_abstract_command()
+       // Since the next instruction is what we will use, just use nother NOP
+       // to get there.
+       write32(debug_abstract, 1, addi(ZERO, ZERO, 0));
++
++      if (abstractauto.autoexecdata &&
++          program_buffer[0] == 0x83 &&
++          program_buffer[1] == 0x24 &&
++          program_buffer[2] == 0x04 &&
++          program_buffer[3] == 0 &&
++          rand() < RAND_MAX / 10) {
++        usleep(1000000);
++      }
+     } else {
+       write32(debug_abstract, 1, ebreak());
+     }
+				 */
+				increase_ac_busy_delay(target);
+				riscv013_clear_abstract_error(target);
+
+				dmi_write(target, DMI_ABSTRACTAUTO, 0);
+
+				/* This is definitely a good version of the value that we
+				 * attempted to read when we discovered that the target was
+				 * busy. */
+				if (dmi_read(target, &dmi_data0, DMI_DATA0) != ERROR_OK) {
+					riscv_batch_free(batch);
+					result = ERROR_FAIL;
+					goto error;
+				}
+
+				/* Clobbers DMI_DATA0. */
+				result = register_read_direct(target, &next_read_addr,
+						GDB_REGNO_S0);
+				if (result != ERROR_OK) {
+					riscv_batch_free(batch);
+					goto error;
+				}
+				/* Restore the command, and execute it.
+				 * Now DMI_DATA0 contains the next value just as it would if no
+				 * error had occurred. */
+				dmi_write(target, DMI_COMMAND, command);
+
+				dmi_write(target, DMI_ABSTRACTAUTO,
+						1 << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
+				break;
+			default:
+				LOG_ERROR("error when reading memory, abstractcs=0x%08lx", (long)abstractcs);
+				riscv013_clear_abstract_error(target);
+				riscv_batch_free(batch);
+				result = ERROR_FAIL;
+				goto error;
+		}
+
+		/* Now read whatever we got out of the batch. */
+		for (size_t i = 0; i < reads; i++) {
+			if (read_addr >= next_read_addr)
+				break;
+
+			read_addr += size;
+
+			/* Skip the batch entries that are stale because of the pipeline
+			 * offset (the first read after setup). */
+			if (skip > 0) {
+				skip--;
+				continue;
+			}
+
+			riscv_addr_t offset = receive_addr - address;
+			uint64_t dmi_out = riscv_batch_get_dmi_read(batch, i);
+			uint32_t value = get_field(dmi_out, DTM_DMI_DATA);
+			write_to_buf(buffer + offset, value, size);
+			log_memory_access(receive_addr, value, size, true);
+
+			receive_addr += size;
+		}
+		riscv_batch_free(batch);
+
+		if (cmderr == CMDERR_BUSY) {
+			/* Account for the good value we salvaged from DMI_DATA0 above. */
+			riscv_addr_t offset = receive_addr - address;
+			write_to_buf(buffer + offset, dmi_data0, size);
+			log_memory_access(receive_addr, dmi_data0, size, true);
+			read_addr += size;
+			receive_addr += size;
+		}
+	}
+
+	dmi_write(target, DMI_ABSTRACTAUTO, 0);
+
+	if (count > 1) {
+		/* Read the penultimate word. */
+		uint32_t value;
+		if (dmi_read(target, &value, DMI_DATA0) != ERROR_OK) {
+			result = ERROR_FAIL;
+			goto error;
+		}
+		write_to_buf(buffer + receive_addr - address, value, size);
+		log_memory_access(receive_addr, value, size, true);
+		receive_addr += size;
+	}
+
+	/* Read the last word. */
+	uint64_t value;
+	result = register_read_direct(target, &value, GDB_REGNO_S1);
+	if (result != ERROR_OK)
+		goto error;
+	write_to_buf(buffer + receive_addr - address, value, size);
+	log_memory_access(receive_addr, value, size, true);
+
+	riscv_set_register(target, GDB_REGNO_S0, s0);
+	riscv_set_register(target, GDB_REGNO_S1, s1);
+	return ERROR_OK;
+
+error:
+	dmi_write(target, DMI_ABSTRACTAUTO, 0);
+
+	riscv_set_register(target, GDB_REGNO_S0, s0);
+	riscv_set_register(target, GDB_REGNO_S1, s1);
+	return result;
+}
+
+/* Dispatch a memory read to the best available mechanism: program buffer
+ * (unless the user prefers system bus access), then system bus v0/v1 when
+ * sbcs advertises support for this access size, then program buffer as a
+ * last resort. */
+static int read_memory(struct target *target, target_addr_t address,
+		uint32_t size, uint32_t count, uint8_t *buffer)
+{
+	RISCV013_INFO(info);
+
+	bool have_progbuf = info->progbufsize >= 2;
+	if (have_progbuf && !riscv_prefer_sba)
+		return read_memory_progbuf(target, address, size, count, buffer);
+
+	bool sba_size_ok =
+		(size == 1 && get_field(info->sbcs, DMI_SBCS_SBACCESS8)) ||
+		(size == 2 && get_field(info->sbcs, DMI_SBCS_SBACCESS16)) ||
+		(size == 4 && get_field(info->sbcs, DMI_SBCS_SBACCESS32)) ||
+		(size == 8 && get_field(info->sbcs, DMI_SBCS_SBACCESS64)) ||
+		(size == 16 && get_field(info->sbcs, DMI_SBCS_SBACCESS128));
+
+	if (sba_size_ok) {
+		unsigned sbversion = get_field(info->sbcs, DMI_SBCS_SBVERSION);
+		if (sbversion == 0)
+			return read_memory_bus_v0(target, address, size, count, buffer);
+		if (sbversion == 1)
+			return read_memory_bus_v1(target, address, size, count, buffer);
+	}
+
+	if (have_progbuf)
+		return read_memory_progbuf(target, address, size, count, buffer);
+
+	LOG_ERROR("Don't know how to read memory on this target.");
+	return ERROR_FAIL;
+}
+
+/* Write memory over the system bus using the 0.11-era (sbversion=0)
+ * layout: a single write for count==1, otherwise an autoincrementing
+ * stream of sbdata0 writes. */
+static int write_memory_bus_v0(struct target *target, target_addr_t address,
+		uint32_t size, uint32_t count, const uint8_t *buffer)
+{
+	/*1) write sbaddress: for singlewrite and autoincrement, we need to write the address once*/
+	LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
+			TARGET_PRIxADDR, size, count, address);
+	dmi_write(target, DMI_SBADDRESS0, address);
+	int64_t value = 0;
+	int64_t access = 0;
+	riscv_addr_t offset = 0;
+	riscv_addr_t t_addr = 0;
+	const uint8_t *t_buffer = buffer + offset;
+
+	/* B.8 Writing Memory, single write check if we write in one go */
+	if (count == 1) { /* count is in bytes here */
+		/* Assemble the (little-endian) value for the supported sizes. */
+		switch (size) {
+			case 1:
+				value = t_buffer[0];
+				break;
+			case 2:
+				value = t_buffer[0]
+					| ((uint32_t) t_buffer[1] << 8);
+				break;
+			case 4:
+				value = t_buffer[0]
+					| ((uint32_t) t_buffer[1] << 8)
+					| ((uint32_t) t_buffer[2] << 16)
+					| ((uint32_t) t_buffer[3] << 24);
+				break;
+			default:
+				LOG_ERROR("unsupported access size: %d", size);
+				return ERROR_FAIL;
+		}
+
+		access = 0;
+		access = set_field(access, DMI_SBCS_SBACCESS, size/2);
+		dmi_write(target, DMI_SBCS, access);
+		LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
+		LOG_DEBUG("\r\nwrite_memory:SAB: ONE OFF: value 0x%08" PRIx64, value);
+		dmi_write(target, DMI_SBDATA0, value);
+		return ERROR_OK;
+	}
+
+	/*B.8 Writing Memory, using autoincrement*/
+
+	access = 0;
+	access = set_field(access, DMI_SBCS_SBACCESS, size/2);
+	access = set_field(access, DMI_SBCS_SBAUTOINCREMENT, 1);
+	LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
+	dmi_write(target, DMI_SBCS, access);
+
+	/*2)set the value according to the size required and write*/
+	for (riscv_addr_t i = 0; i < count; ++i) {
+		offset = size*i;
+		/* for monitoring only */
+		t_addr = address + offset;
+		t_buffer = buffer + offset;
+
+		switch (size) {
+			case 1:
+				value = t_buffer[0];
+				break;
+			case 2:
+				value = t_buffer[0]
+					| ((uint32_t) t_buffer[1] << 8);
+				break;
+			case 4:
+				value = t_buffer[0]
+					| ((uint32_t) t_buffer[1] << 8)
+					| ((uint32_t) t_buffer[2] << 16)
+					| ((uint32_t) t_buffer[3] << 24);
+				break;
+			default:
+				LOG_ERROR("unsupported access size: %d", size);
+				return ERROR_FAIL;
+		}
+		/* Fixed: a stray PRIx64 token after "0x%08x" previously appended
+		 * literal junk ("llx") to every one of these log lines. */
+		LOG_DEBUG("SAB:autoincrement: expected address: 0x%08x value: 0x%08x",
+				(uint32_t)t_addr, (uint32_t)value);
+		dmi_write(target, DMI_SBDATA0, value);
+	}
+	/* Reset the autoincrement when finished (something weird happens if this
+	 * is not done at the end). */
+	access = set_field(access, DMI_SBCS_SBAUTOINCREMENT, 0);
+	dmi_write(target, DMI_SBCS, access);
+
+	return ERROR_OK;
+}
+
+/* Write memory over the system bus (sbversion=1, debug spec 0.13) with
+ * autoincrement. sbdata3..sbdata1 are written before sbdata0 because the
+ * sbdata0 write is what triggers the bus access. On sbbusyerror the loop
+ * restarts from the address read back out of sbaddress, with an increased
+ * bus_master_write_delay. */
+static int write_memory_bus_v1(struct target *target, target_addr_t address,
+		uint32_t size, uint32_t count, const uint8_t *buffer)
+{
+	RISCV013_INFO(info);
+	uint32_t sbcs = sb_sbaccess(size);
+	sbcs = set_field(sbcs, DMI_SBCS_SBAUTOINCREMENT, 1);
+	dmi_write(target, DMI_SBCS, sbcs);
+
+	target_addr_t next_address = address;
+	target_addr_t end_address = address + count * size;
+
+	sb_write_address(target, next_address);
+	while (next_address < end_address) {
+		for (uint32_t i = (next_address - address) / size; i < count; i++) {
+			const uint8_t *p = buffer + i * size;
+			if (size > 12)
+				dmi_write(target, DMI_SBDATA3,
+						((uint32_t) p[12]) |
+						(((uint32_t) p[13]) << 8) |
+						(((uint32_t) p[14]) << 16) |
+						(((uint32_t) p[15]) << 24));
+			if (size > 8)
+				dmi_write(target, DMI_SBDATA2,
+						((uint32_t) p[8]) |
+						(((uint32_t) p[9]) << 8) |
+						(((uint32_t) p[10]) << 16) |
+						(((uint32_t) p[11]) << 24));
+			if (size > 4)
+				dmi_write(target, DMI_SBDATA1,
+						((uint32_t) p[4]) |
+						(((uint32_t) p[5]) << 8) |
+						(((uint32_t) p[6]) << 16) |
+						(((uint32_t) p[7]) << 24));
+			/* Assemble the low word (little-endian) for this access size. */
+			uint32_t value = p[0];
+			if (size > 2) {
+				value |= ((uint32_t) p[2]) << 16;
+				value |= ((uint32_t) p[3]) << 24;
+			}
+			if (size > 1)
+				value |= ((uint32_t) p[1]) << 8;
+			dmi_write(target, DMI_SBDATA0, value);
+
+			log_memory_access(address + i * size, value, size, false);
+
+			if (info->bus_master_write_delay) {
+				/* Give the system bus time to complete the write. */
+				jtag_add_runtest(info->bus_master_write_delay, TAP_IDLE);
+				if (jtag_execute_queue() != ERROR_OK) {
+					LOG_ERROR("Failed to scan idle sequence");
+					return ERROR_FAIL;
+				}
+			}
+		}
+
+		if (read_sbcs_nonbusy(target, &sbcs) != ERROR_OK)
+			return ERROR_FAIL;
+
+		if (get_field(sbcs, DMI_SBCS_SBBUSYERROR)) {
+			/* We wrote while the target was busy. Slow down and try again. */
+			dmi_write(target, DMI_SBCS, DMI_SBCS_SBBUSYERROR);
+			next_address = sb_read_address(target);
+			info->bus_master_write_delay += info->bus_master_write_delay / 10 + 1;
+			continue;
+		}
+
+		unsigned error = get_field(sbcs, DMI_SBCS_SBERROR);
+		if (error == 0) {
+			next_address = end_address;
+		} else {
+			/* Some error indicating the bus access failed, but not because of
+			 * something we did wrong. */
+			dmi_write(target, DMI_SBCS, DMI_SBCS_SBERROR);
+			return ERROR_FAIL;
+		}
+	}
+
+	return ERROR_OK;
+}
+
+/* Write memory using the program buffer (store + address increment) with
+ * abstractauto, batching DMI_DATA0 writes. On cmderr=busy, the resume
+ * address is recovered from s0 and the transfer restarts from there.
+ * s0 and s1 are saved on entry and restored on every exit path (the error
+ * label is also the success path; @result carries the final status). */
+static int write_memory_progbuf(struct target *target, target_addr_t address,
+		uint32_t size, uint32_t count, const uint8_t *buffer)
+{
+	RISCV013_INFO(info);
+
+	LOG_DEBUG("writing %d words of %d bytes to 0x%08lx", count, size, (long)address);
+
+	select_dmi(target);
+
+	/* s0 holds the next address to write to
+	 * s1 holds the next data value to write
+	 */
+
+	int result = ERROR_OK;
+	uint64_t s0, s1;
+	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
+		return ERROR_FAIL;
+	if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
+		return ERROR_FAIL;
+
+	/* Write the program (store, increment) */
+	struct riscv_program program;
+	riscv_program_init(&program, target);
+
+	switch (size) {
+		case 1:
+			riscv_program_sbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
+			break;
+		case 2:
+			riscv_program_shr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
+			break;
+		case 4:
+			riscv_program_swr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
+			break;
+		default:
+			LOG_ERROR("Unsupported size: %d", size);
+			result = ERROR_FAIL;
+			goto error;
+	}
+
+	riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, size);
+
+	result = riscv_program_ebreak(&program);
+	if (result != ERROR_OK)
+		goto error;
+	riscv_program_write(&program);
+
+	riscv_addr_t cur_addr = address;
+	riscv_addr_t fin_addr = address + (count * size);
+	bool setup_needed = true;
+	LOG_DEBUG("writing until final address 0x%016" PRIx64, fin_addr);
+	while (cur_addr < fin_addr) {
+		LOG_DEBUG("transferring burst starting at address 0x%016" PRIx64,
+				cur_addr);
+
+		struct riscv_batch *batch = riscv_batch_alloc(
+				target,
+				32,
+				info->dmi_busy_delay + info->ac_busy_delay);
+
+		/* To write another word, we put it in S1 and execute the program. */
+		unsigned start = (cur_addr - address) / size;
+		for (unsigned i = start; i < count; ++i) {
+			unsigned offset = size*i;
+			const uint8_t *t_buffer = buffer + offset;
+
+			/* Assemble the (little-endian) value for this access size. */
+			uint32_t value;
+			switch (size) {
+				case 1:
+					value = t_buffer[0];
+					break;
+				case 2:
+					value = t_buffer[0]
+						| ((uint32_t) t_buffer[1] << 8);
+					break;
+				case 4:
+					value = t_buffer[0]
+						| ((uint32_t) t_buffer[1] << 8)
+						| ((uint32_t) t_buffer[2] << 16)
+						| ((uint32_t) t_buffer[3] << 24);
+					break;
+				default:
+					LOG_ERROR("unsupported access size: %d", size);
+					riscv_batch_free(batch);
+					result = ERROR_FAIL;
+					goto error;
+			}
+
+			log_memory_access(address + offset, value, size, false);
+			cur_addr += size;
+
+			if (setup_needed) {
+				/* First word (or first after a busy restart): set up s0,
+				 * data0, the abstract command, and abstractauto directly. */
+				result = register_write_direct(target, GDB_REGNO_S0,
+						address + offset);
+				if (result != ERROR_OK) {
+					riscv_batch_free(batch);
+					goto error;
+				}
+
+				/* Write value. */
+				dmi_write(target, DMI_DATA0, value);
+
+				/* Write and execute command that moves value into S1 and
+				 * executes program buffer. */
+				uint32_t command = access_register_command(GDB_REGNO_S1, 32,
+						AC_ACCESS_REGISTER_POSTEXEC |
+						AC_ACCESS_REGISTER_TRANSFER |
+						AC_ACCESS_REGISTER_WRITE);
+				result = execute_abstract_command(target, command);
+				if (result != ERROR_OK) {
+					riscv_batch_free(batch);
+					goto error;
+				}
+
+				/* Turn on autoexec */
+				dmi_write(target, DMI_ABSTRACTAUTO,
+						1 << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
+
+				setup_needed = false;
+			} else {
+				riscv_batch_add_dmi_write(batch, DMI_DATA0, value);
+				if (riscv_batch_full(batch))
+					break;
+			}
+		}
+
+		result = riscv_batch_run(batch);
+		riscv_batch_free(batch);
+		if (result != ERROR_OK)
+			goto error;
+
+		/* Note that if the scan resulted in a Busy DMI response, it
+		 * is this read to abstractcs that will cause the dmi_busy_delay
+		 * to be incremented if necessary. */
+
+		/* On DMI failure, set result and take the error path so abstractauto
+		 * is cleared and s0/s1 are restored (previously one path returned
+		 * with result still ERROR_OK and another skipped cleanup). */
+		uint32_t abstractcs;
+		if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK) {
+			result = ERROR_FAIL;
+			goto error;
+		}
+		while (get_field(abstractcs, DMI_ABSTRACTCS_BUSY))
+			if (dmi_read(target, &abstractcs, DMI_ABSTRACTCS) != ERROR_OK) {
+				result = ERROR_FAIL;
+				goto error;
+			}
+		info->cmderr = get_field(abstractcs, DMI_ABSTRACTCS_CMDERR);
+		switch (info->cmderr) {
+			case CMDERR_NONE:
+				LOG_DEBUG("successful (partial?) memory write");
+				break;
+			case CMDERR_BUSY:
+				LOG_DEBUG("memory write resulted in busy response");
+				riscv013_clear_abstract_error(target);
+				increase_ac_busy_delay(target);
+
+				dmi_write(target, DMI_ABSTRACTAUTO, 0);
+				/* s0 holds the address the hart got to; resume from there. */
+				result = register_read_direct(target, &cur_addr, GDB_REGNO_S0);
+				if (result != ERROR_OK)
+					goto error;
+				setup_needed = true;
+				break;
+
+			default:
+				LOG_ERROR("error when writing memory, abstractcs=0x%08lx", (long)abstractcs);
+				riscv013_clear_abstract_error(target);
+				result = ERROR_FAIL;
+				goto error;
+		}
+	}
+
+error:
+	dmi_write(target, DMI_ABSTRACTAUTO, 0);
+
+	if (register_write_direct(target, GDB_REGNO_S1, s1) != ERROR_OK)
+		return ERROR_FAIL;
+	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
+		return ERROR_FAIL;
+
+	if (execute_fence(target) != ERROR_OK)
+		return ERROR_FAIL;
+
+	return result;
+}
+
+/* Dispatch a memory write to the best available mechanism: program buffer
+ * (unless the user prefers system bus access), then system bus v0/v1 when
+ * sbcs advertises support for this access size, then program buffer as a
+ * last resort. */
+static int write_memory(struct target *target, target_addr_t address,
+		uint32_t size, uint32_t count, const uint8_t *buffer)
+{
+	RISCV013_INFO(info);
+
+	bool have_progbuf = info->progbufsize >= 2;
+	if (have_progbuf && !riscv_prefer_sba)
+		return write_memory_progbuf(target, address, size, count, buffer);
+
+	bool sba_size_ok =
+		(size == 1 && get_field(info->sbcs, DMI_SBCS_SBACCESS8)) ||
+		(size == 2 && get_field(info->sbcs, DMI_SBCS_SBACCESS16)) ||
+		(size == 4 && get_field(info->sbcs, DMI_SBCS_SBACCESS32)) ||
+		(size == 8 && get_field(info->sbcs, DMI_SBCS_SBACCESS64)) ||
+		(size == 16 && get_field(info->sbcs, DMI_SBCS_SBACCESS128));
+
+	if (sba_size_ok) {
+		unsigned sbversion = get_field(info->sbcs, DMI_SBCS_SBVERSION);
+		if (sbversion == 0)
+			return write_memory_bus_v0(target, address, size, count, buffer);
+		if (sbversion == 1)
+			return write_memory_bus_v1(target, address, size, count, buffer);
+	}
+
+	if (have_progbuf)
+		return write_memory_progbuf(target, address, size, count, buffer);
+
+	LOG_ERROR("Don't know how to write memory on this target.");
+	return ERROR_FAIL;
+}
+
+/* Nothing architecture-specific to report when the target halts. */
+static int arch_state(struct target *target)
+{
+	return ERROR_OK;
+}
+
+/* OpenOCD target driver for RISC-V debug spec 0.13. The run-control entry
+ * points are shared with the generic RISC-V layer; memory access and reset
+ * handling are 0.13-specific. */
+struct target_type riscv013_target = {
+	.name = "riscv",
+
+	.init_target = init_target,
+	.deinit_target = deinit_target,
+	.examine = examine,
+
+	/* Run-control is delegated to the generic riscv layer. */
+	.poll = &riscv_openocd_poll,
+	.halt = &riscv_openocd_halt,
+	.resume = &riscv_openocd_resume,
+	.step = &riscv_openocd_step,
+
+	.assert_reset = assert_reset,
+	.deassert_reset = deassert_reset,
+
+	.read_memory = read_memory,
+	.write_memory = write_memory,
+
+	.arch_state = arch_state,
+};
+
+/*** 0.13-specific implementations of various RISC-V helper functions. ***/
+/* Read register @rid of hart @hid into *value. PC is mapped to DPC and the
+ * virtual "priv" register to dcsr.prv. On failure *value is set to -1. */
+static int riscv013_get_register(struct target *target,
+		riscv_reg_t *value, int hid, int rid)
+{
+	LOG_DEBUG("reading register %s on hart %d", gdb_regno_name(rid), hid);
+
+	riscv_set_current_hartid(target, hid);
+
+	int result = ERROR_OK;
+	if (rid == GDB_REGNO_PC) {
+		result = register_read(target, value, GDB_REGNO_DPC);
+		LOG_DEBUG("read PC from DPC: 0x%016" PRIx64, *value);
+	} else if (rid == GDB_REGNO_PRIV) {
+		uint64_t dcsr;
+		result = register_read(target, &dcsr, GDB_REGNO_DCSR);
+		/* Only use dcsr if the read succeeded; previously a failed read
+		 * extracted a field from an uninitialized local. Mirror the generic
+		 * branch and report -1 on failure. */
+		if (result == ERROR_OK)
+			*value = get_field(dcsr, CSR_DCSR_PRV);
+		else
+			*value = -1;
+	} else {
+		result = register_read(target, value, rid);
+		if (result != ERROR_OK)
+			*value = -1;
+	}
+
+	return result;
+}
+
+/* Write @value to register @rid of hart @hid. PC is mapped to DPC (with a
+ * read-back check, since DPC may mask low bits or be narrower than 64 bits)
+ * and the virtual "priv" register to dcsr.prv. */
+static int riscv013_set_register(struct target *target, int hid, int rid, uint64_t value)
+{
+	LOG_DEBUG("writing 0x%" PRIx64 " to register %s on hart %d", value,
+			gdb_regno_name(rid), hid);
+
+	riscv_set_current_hartid(target, hid);
+
+	if (rid <= GDB_REGNO_XPR31) {
+		return register_write_direct(target, rid, value);
+	} else if (rid == GDB_REGNO_PC) {
+		LOG_DEBUG("writing PC to DPC: 0x%016" PRIx64, value);
+		register_write_direct(target, GDB_REGNO_DPC, value);
+		/* Read DPC back to catch writes the hart silently modified. */
+		uint64_t actual_value;
+		register_read_direct(target, &actual_value, GDB_REGNO_DPC);
+		LOG_DEBUG("  actual DPC written: 0x%016" PRIx64, actual_value);
+		if (value != actual_value) {
+			LOG_ERROR("Written PC (0x%" PRIx64 ") does not match read back "
+					"value (0x%" PRIx64 ")", value, actual_value);
+			return ERROR_FAIL;
+		}
+	} else if (rid == GDB_REGNO_PRIV) {
+		/* Read-modify-write dcsr, updating only the privilege field.
+		 * NOTE(review): the register_read result is unchecked here —
+		 * a failed read would rewrite dcsr from garbage; confirm. */
+		uint64_t dcsr;
+		register_read(target, &dcsr, GDB_REGNO_DCSR);
+		dcsr = set_field(dcsr, CSR_DCSR_PRV, value);
+		return register_write_direct(target, GDB_REGNO_DCSR, dcsr);
+	} else {
+		return register_write_direct(target, rid, value);
+	}
+
+	return ERROR_OK;
+}
+
+/* Make the DM's hart selection match r->current_hartid, writing dmcontrol
+ * only when the selection actually changes (the cached dm->current_hartid
+ * avoids redundant DMI traffic). */
+static int riscv013_select_current_hart(struct target *target)
+{
+	RISCV_INFO(r);
+
+	dm013_info_t *dm = get_dm(target);
+	if (r->current_hartid == dm->current_hartid)
+		return ERROR_OK;
+
+	uint32_t dmcontrol;
+	/* TODO: can't we just "dmcontrol = DMI_DMACTIVE"? */
+	if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
+		return ERROR_FAIL;
+	dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
+	int result = dmi_write(target, DMI_DMCONTROL, dmcontrol);
+	dm->current_hartid = r->current_hartid;
+	return result;
+}
+
+/* Request a halt of the current hart by setting dmcontrol.haltreq, then poll
+ * (up to 256 riscv_is_halted() checks, each a DMI round-trip) until it halts.
+ * haltreq is cleared again on success; on timeout, dmcontrol/dmstatus are
+ * dumped for diagnosis. */
+static int riscv013_halt_current_hart(struct target *target)
+{
+	RISCV_INFO(r);
+	LOG_DEBUG("halting hart %d", r->current_hartid);
+	if (riscv_is_halted(target))
+		LOG_ERROR("Hart %d is already halted!", r->current_hartid);
+
+	/* Issue the halt command, and then wait for the current hart to halt. */
+	uint32_t dmcontrol;
+	if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
+		return ERROR_FAIL;
+	dmcontrol = set_field(dmcontrol, DMI_DMCONTROL_HALTREQ, 1);
+	dmi_write(target, DMI_DMCONTROL, dmcontrol);
+	for (size_t i = 0; i < 256; ++i)
+		if (riscv_is_halted(target))
+			break;
+
+	if (!riscv_is_halted(target)) {
+		uint32_t dmstatus;
+		if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
+			return ERROR_FAIL;
+		if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
+			return ERROR_FAIL;
+
+		LOG_ERROR("unable to halt hart %d", r->current_hartid);
+		LOG_ERROR("  dmcontrol=0x%08x", dmcontrol);
+		LOG_ERROR("  dmstatus =0x%08x", dmstatus);
+		return ERROR_FAIL;
+	}
+
+	/* Clear the halt request so a later resume isn't immediately re-halted. */
+	dmcontrol = set_field(dmcontrol, DMI_DMCONTROL_HALTREQ, 0);
+	dmi_write(target, DMI_DMCONTROL, dmcontrol);
+
+	return ERROR_OK;
+}
+
+/* Resume the current hart (non-stepping variant of step-or-resume). */
+static int riscv013_resume_current_hart(struct target *target)
+{
+	return riscv013_step_or_resume_current_hart(target, false);
+}
+
+/* Single-step the current hart (stepping variant of step-or-resume). */
+static int riscv013_step_current_hart(struct target *target)
+{
+	return riscv013_step_or_resume_current_hart(target, true);
+}
+
+/* Prepare the current hart for a resume (dcsr setup, step=0). */
+static int riscv013_on_resume(struct target *target)
+{
+	return riscv013_on_step_or_resume(target, false);
+}
+
+/* Prepare the current hart for a single step (dcsr setup, step=1). */
+static int riscv013_on_step(struct target *target)
+{
+	return riscv013_on_step_or_resume(target, true);
+}
+
+/* Hook called when a hart halts; nothing 0.13-specific to do. */
+static int riscv013_on_halt(struct target *target)
+{
+	return ERROR_OK;
+}
+
+/* Return true when the currently selected hart is halted, per
+ * dmstatus.allhalted. Side effects: logs unavailable/nonexistent harts, and
+ * if the hart reset behind our back, acknowledges the reset (and re-requests
+ * a halt if we believed the hart was halted before the reset). */
+static bool riscv013_is_halted(struct target *target)
+{
+	uint32_t dmstatus;
+	if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
+		return false;
+	if (get_field(dmstatus, DMI_DMSTATUS_ANYUNAVAIL))
+		LOG_ERROR("Hart %d is unavailable.", riscv_current_hartid(target));
+	if (get_field(dmstatus, DMI_DMSTATUS_ANYNONEXISTENT))
+		LOG_ERROR("Hart %d doesn't exist.", riscv_current_hartid(target));
+	if (get_field(dmstatus, DMI_DMSTATUS_ANYHAVERESET)) {
+		int hartid = riscv_current_hartid(target);
+		LOG_INFO("Hart %d unexpectedly reset!", hartid);
+		/* TODO: Can we make this more obvious to eg. a gdb user? */
+		uint32_t dmcontrol = DMI_DMCONTROL_DMACTIVE |
+			DMI_DMCONTROL_ACKHAVERESET;
+		dmcontrol = set_hartsel(dmcontrol, hartid);
+		/* If we had been halted when we reset, request another halt. If we
+		 * ended up running out of reset, then the user will (hopefully) get a
+		 * message that a reset happened, that the target is running, and then
+		 * that it is halted again once the request goes through.
+		 */
+		if (target->state == TARGET_HALTED)
+			dmcontrol |= DMI_DMCONTROL_HALTREQ;
+		dmi_write(target, DMI_DMCONTROL, dmcontrol);
+	}
+	return get_field(dmstatus, DMI_DMSTATUS_ALLHALTED);
+}
+
+/* Map dcsr.cause to a generic riscv_halt_reason.
+ *
+ * Returns RISCV_HALT_UNKNOWN if dcsr can't be read or the cause field holds
+ * an unrecognized value. */
+static enum riscv_halt_reason riscv013_halt_reason(struct target *target)
+{
+ riscv_reg_t dcsr;
+ int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
+ if (result != ERROR_OK)
+ return RISCV_HALT_UNKNOWN;
+
+ switch (get_field(dcsr, CSR_DCSR_CAUSE)) {
+ case CSR_DCSR_CAUSE_SWBP:
+ return RISCV_HALT_BREAKPOINT;
+ case CSR_DCSR_CAUSE_TRIGGER:
+ /* We could get here before triggers are enumerated if a trigger was
+ * already set when we connected. Force enumeration now, which has the
+ * side effect of clearing any triggers we did not set. */
+ riscv_enumerate_triggers(target);
+ return RISCV_HALT_TRIGGER;
+ case CSR_DCSR_CAUSE_STEP:
+ return RISCV_HALT_SINGLESTEP;
+ case CSR_DCSR_CAUSE_DEBUGINT:
+ case CSR_DCSR_CAUSE_HALT:
+ return RISCV_HALT_INTERRUPT;
+ }
+
+ LOG_ERROR("Unknown DCSR cause field: %x", (int)get_field(dcsr, CSR_DCSR_CAUSE));
+ /* riscv_reg_t is 64-bit; casting to long truncated the upper half on
+ * 32-bit hosts, so use the fixed-width format macro instead. */
+ LOG_ERROR(" dcsr=0x%016" PRIx64, dcsr);
+ return RISCV_HALT_UNKNOWN;
+}
+
+/* Write one program-buffer word (progbuf[index]). */
+int riscv013_write_debug_buffer(struct target *target, unsigned index, riscv_insn_t data)
+{
+ return dmi_write(target, DMI_PROGBUF0 + index, data);
+}
+
+/* Read one program-buffer word.  NOTE(review): the dmi_read() result is
+ * ignored; on failure the returned value is whatever dmi_read left behind. */
+riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned index)
+{
+ uint32_t value;
+ dmi_read(target, &value, DMI_PROGBUF0 + index);
+ return value;
+}
+
+/* Run the program buffer via an abstract command with postexec set and
+ * transfer clear (regno 0x1000 is ignored since transfer=0). */
+int riscv013_execute_debug_buffer(struct target *target)
+{
+ uint32_t run_program = 0;
+ run_program = set_field(run_program, AC_ACCESS_REGISTER_SIZE, 2);
+ run_program = set_field(run_program, AC_ACCESS_REGISTER_POSTEXEC, 1);
+ run_program = set_field(run_program, AC_ACCESS_REGISTER_TRANSFER, 0);
+ run_program = set_field(run_program, AC_ACCESS_REGISTER_REGNO, 0x1000);
+
+ return execute_abstract_command(target, run_program);
+}
+
+/* Encode a DMI write (op/data/address) into a raw scan buffer. */
+void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d)
+{
+ RISCV013_INFO(info);
+ buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_WRITE);
+ buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, d);
+ buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
+}
+
+/* Encode a DMI read of address 'a' into a raw scan buffer. */
+void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a)
+{
+ RISCV013_INFO(info);
+ buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_READ);
+ buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
+ buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
+}
+
+/* Encode a DMI no-op into a raw scan buffer. */
+void riscv013_fill_dmi_nop_u64(struct target *target, char *buf)
+{
+ RISCV013_INFO(info);
+ buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_NOP);
+ buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
+ buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, 0);
+}
+
+/* Total width in bits of one DMI scan (address + data + op). */
+int riscv013_dmi_write_u64_bits(struct target *target)
+{
+ RISCV013_INFO(info);
+ return info->abits + DTM_DMI_DATA_LENGTH + DTM_DMI_OP_LENGTH;
+}
+
+/* Execute fence.i on the target, but only if the program buffer is big
+ * enough to hold it (fence.i plus, when impebreak is absent, an ebreak).
+ * Silently a no-op on targets without room; returns ERROR_OK then. */
+static int maybe_execute_fence_i(struct target *target)
+{
+ RISCV013_INFO(info);
+ RISCV_INFO(r);
+ if (info->progbufsize + r->impebreak >= 2) {
+ struct riscv_program program;
+ riscv_program_init(&program, target);
+ if (riscv_program_fence_i(&program) != ERROR_OK)
+ return ERROR_FAIL;
+ if (riscv_program_exec(&program, target) != ERROR_OK) {
+ LOG_ERROR("Failed to execute fence.i");
+ return ERROR_FAIL;
+ }
+ }
+ return ERROR_OK;
+}
+
+/* Helper Functions. */
+static int riscv013_on_step_or_resume(struct target *target, bool step)
+{
+ if (maybe_execute_fence_i(target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* We want to twiddle some bits in the debug CSR so debugging works. */
+ riscv_reg_t dcsr;
+ int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
+ if (result != ERROR_OK)
+ return result;
+ dcsr = set_field(dcsr, CSR_DCSR_STEP, step);
+ dcsr = set_field(dcsr, CSR_DCSR_EBREAKM, 1);
+ dcsr = set_field(dcsr, CSR_DCSR_EBREAKS, 1);
+ dcsr = set_field(dcsr, CSR_DCSR_EBREAKU, 1);
+ return riscv_set_register(target, GDB_REGNO_DCSR, dcsr);
+}
+
+/* Resume (or single-step, if 'step') the current hart and wait for
+ * dmstatus.allresumeack — and, when stepping, allhalted — to confirm it.
+ * On a step that times out, halt the hart and still return ERROR_OK so the
+ * step at least leaves the target in a known state. */
+static int riscv013_step_or_resume_current_hart(struct target *target, bool step)
+{
+ RISCV_INFO(r);
+ LOG_DEBUG("resuming hart %d (for step?=%d)", r->current_hartid, step);
+ if (!riscv_is_halted(target)) {
+ LOG_ERROR("Hart %d is not halted!", r->current_hartid);
+ return ERROR_FAIL;
+ }
+
+ /* NOTE(review): riscv013_on_step_or_resume() already ran fence.i via the
+ * on_step/on_resume callbacks; this looks redundant — confirm. */
+ if (maybe_execute_fence_i(target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* Issue the resume command, and then wait for the current hart to resume. */
+ uint32_t dmcontrol = DMI_DMCONTROL_DMACTIVE;
+ dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
+ dmi_write(target, DMI_DMCONTROL, dmcontrol | DMI_DMCONTROL_RESUMEREQ);
+
+ uint32_t dmstatus;
+ for (size_t i = 0; i < 256; ++i) {
+ usleep(10);
+ if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
+ return ERROR_FAIL;
+ if (get_field(dmstatus, DMI_DMSTATUS_ALLRESUMEACK) == 0)
+ continue;
+ if (step && get_field(dmstatus, DMI_DMSTATUS_ALLHALTED) == 0)
+ continue;
+
+ /* Deassert resumereq now that the hart acknowledged. */
+ dmi_write(target, DMI_DMCONTROL, dmcontrol);
+ return ERROR_OK;
+ }
+
+ LOG_ERROR("unable to resume hart %d", r->current_hartid);
+ if (dmi_read(target, &dmcontrol, DMI_DMCONTROL) != ERROR_OK)
+ return ERROR_FAIL;
+ LOG_ERROR(" dmcontrol=0x%08x", dmcontrol);
+ if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
+ return ERROR_FAIL;
+ LOG_ERROR(" dmstatus =0x%08x", dmstatus);
+
+ if (step) {
+ LOG_ERROR(" was stepping, halting");
+ riscv013_halt_current_hart(target);
+ return ERROR_OK;
+ }
+
+ return ERROR_FAIL;
+}
+
+/* Clear abstractcs.cmderr after a failed abstract command.
+ *
+ * First waits (up to riscv_command_timeout_sec) for abstractcs.busy to
+ * clear; on timeout it logs and proceeds to write anyway.  Writing the
+ * cmderr bits back clears them (write-1-to-clear per the 0.13 debug spec). */
+void riscv013_clear_abstract_error(struct target *target)
+{
+ /* Wait for busy to go away. */
+ time_t start = time(NULL);
+ uint32_t abstractcs;
+ dmi_read(target, &abstractcs, DMI_ABSTRACTCS);
+ while (get_field(abstractcs, DMI_ABSTRACTCS_BUSY)) {
+ dmi_read(target, &abstractcs, DMI_ABSTRACTCS);
+
+ if (time(NULL) - start > riscv_command_timeout_sec) {
+ LOG_ERROR("abstractcs.busy is not going low after %d seconds "
+ "(abstractcs=0x%x). The target is either really slow or "
+ "broken. You could increase the timeout with riscv "
+ "set_command_timeout_sec.",
+ riscv_command_timeout_sec, abstractcs);
+ break;
+ }
+ }
+ /* Clear the error status. */
+ dmi_write(target, DMI_ABSTRACTCS, abstractcs & DMI_ABSTRACTCS_CMDERR);
+}
--- /dev/null
+#include <assert.h>
+#include <stdlib.h>
+#include <time.h>
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "target/target.h"
+#include "target/algorithm.h"
+#include "target/target_type.h"
+#include "log.h"
+#include "jtag/jtag.h"
+#include "target/register.h"
+#include "target/breakpoints.h"
+#include "helper/time_support.h"
+#include "riscv.h"
+#include "gdb_regs.h"
+#include "rtos/rtos.h"
+
+/**
+ * Since almost everything can be accomplished by scanning the dbus register,
+ * all functions here assume dbus is already selected. The exceptions are
+ * functions called directly by OpenOCD, which can't assume anything about
+ * what's
+ * currently in IR. They should set IR to dbus explicitly.
+ */
+
+/**
+ * Code structure
+ *
+ * At the bottom of the stack are the OpenOCD JTAG functions:
+ * jtag_add_[id]r_scan
+ * jtag_execute_query
+ * jtag_add_runtest
+ *
+ * There are a few functions to just instantly shift a register and get its
+ * value:
+ * dtmcontrol_scan
+ * idcode_scan
+ * dbus_scan
+ *
+ * Because doing one scan and waiting for the result is slow, most functions
+ * batch up a bunch of dbus writes and then execute them all at once. They use
+ * the scans "class" for this:
+ * scans_new
+ * scans_delete
+ * scans_execute
+ * scans_add_...
+ * Usually you new(), call a bunch of add functions, then execute() and look
+ * at the results by calling scans_get...()
+ *
+ * Optimized functions will directly use the scans class above, but slightly
+ * lazier code will use the cache functions that in turn use the scans
+ * functions:
+ * cache_get...
+ * cache_set...
+ * cache_write
+ * cache_set... update a local structure, which is then synced to the target
+ * with cache_write(). Only Debug RAM words that are actually changed are sent
+ * to the target. Afterwards use cache_get... to read results.
+ */
+
+#define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
+#define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
+
+#define DIM(x) (sizeof(x)/sizeof(*x))
+
+/* Constants for legacy SiFive hardware breakpoints. */
+#define CSR_BPCONTROL_X (1<<0)
+#define CSR_BPCONTROL_W (1<<1)
+#define CSR_BPCONTROL_R (1<<2)
+#define CSR_BPCONTROL_U (1<<3)
+#define CSR_BPCONTROL_S (1<<4)
+#define CSR_BPCONTROL_H (1<<5)
+#define CSR_BPCONTROL_M (1<<6)
+#define CSR_BPCONTROL_BPMATCH (0xf<<7)
+#define CSR_BPCONTROL_BPACTION (0xff<<11)
+
+#define DEBUG_ROM_START 0x800
+#define DEBUG_ROM_RESUME (DEBUG_ROM_START + 4)
+#define DEBUG_ROM_EXCEPTION (DEBUG_ROM_START + 8)
+#define DEBUG_RAM_START 0x400
+
+#define SETHALTNOT 0x10c
+
+/*** JTAG registers. ***/
+
+#define DTMCONTROL 0x10
+#define DTMCONTROL_DBUS_RESET (1<<16)
+#define DTMCONTROL_IDLE (7<<10)
+#define DTMCONTROL_ADDRBITS (0xf<<4)
+#define DTMCONTROL_VERSION (0xf)
+
+#define DBUS 0x11
+#define DBUS_OP_START 0
+#define DBUS_OP_SIZE 2
+typedef enum {
+ DBUS_OP_NOP = 0,
+ DBUS_OP_READ = 1,
+ DBUS_OP_WRITE = 2
+} dbus_op_t;
+typedef enum {
+ DBUS_STATUS_SUCCESS = 0,
+ DBUS_STATUS_FAILED = 2,
+ DBUS_STATUS_BUSY = 3
+} dbus_status_t;
+#define DBUS_DATA_START 2
+#define DBUS_DATA_SIZE 34
+#define DBUS_ADDRESS_START 36
+
+typedef enum {
+ RE_OK,
+ RE_FAIL,
+ RE_AGAIN
+} riscv_error_t;
+
+typedef enum slot {
+ SLOT0,
+ SLOT1,
+ SLOT_LAST,
+} slot_t;
+
+/*** Debug Bus registers. ***/
+
+#define DMCONTROL 0x10
+#define DMCONTROL_INTERRUPT (((uint64_t)1)<<33)
+#define DMCONTROL_HALTNOT (((uint64_t)1)<<32)
+#define DMCONTROL_BUSERROR (7<<19)
+#define DMCONTROL_SERIAL (3<<16)
+#define DMCONTROL_AUTOINCREMENT (1<<15)
+#define DMCONTROL_ACCESS (7<<12)
+#define DMCONTROL_HARTID (0x3ff<<2)
+#define DMCONTROL_NDRESET (1<<1)
+#define DMCONTROL_FULLRESET 1
+
+#define DMINFO 0x11
+#define DMINFO_ABUSSIZE (0x7fU<<25)
+#define DMINFO_SERIALCOUNT (0xf<<21)
+#define DMINFO_ACCESS128 (1<<20)
+#define DMINFO_ACCESS64 (1<<19)
+#define DMINFO_ACCESS32 (1<<18)
+#define DMINFO_ACCESS16 (1<<17)
+#define DMINFO_ACCESS8 (1<<16)
+#define DMINFO_DRAMSIZE (0x3f<<10)
+#define DMINFO_AUTHENTICATED (1<<5)
+#define DMINFO_AUTHBUSY (1<<4)
+#define DMINFO_AUTHTYPE (3<<2)
+#define DMINFO_VERSION 3
+
+/*** Info about the core being debugged. ***/
+
+#define DBUS_ADDRESS_UNKNOWN 0xffff
+
+#define MAX_HWBPS 16
+#define DRAM_CACHE_SIZE 16
+
+uint8_t ir_dtmcontrol[1] = {DTMCONTROL};
+struct scan_field select_dtmcontrol = {
+ .in_value = NULL,
+ .out_value = ir_dtmcontrol
+};
+uint8_t ir_dbus[1] = {DBUS};
+struct scan_field select_dbus = {
+ .in_value = NULL,
+ .out_value = ir_dbus
+};
+uint8_t ir_idcode[1] = {0x1};
+struct scan_field select_idcode = {
+ .in_value = NULL,
+ .out_value = ir_idcode
+};
+
+struct trigger {
+ uint64_t address;
+ uint32_t length;
+ uint64_t mask;
+ uint64_t value;
+ bool read, write, execute;
+ int unique_id;
+};
+
+/* Wall-clock timeout for a command/access. Settable via RISC-V Target commands.*/
+int riscv_command_timeout_sec = DEFAULT_COMMAND_TIMEOUT_SEC;
+
+/* Wall-clock timeout after reset. Settable via RISC-V Target commands.*/
+int riscv_reset_timeout_sec = DEFAULT_RESET_TIMEOUT_SEC;
+
+bool riscv_prefer_sba;
+
+/* In addition to the ones in the standard spec, we'll also expose additional
+ * CSRs in this list.
+ * The list is either NULL, or a series of ranges (inclusive), terminated with
+ * 1,0. */
+struct {
+ uint16_t low, high;
+} *expose_csr;
+
+/* Shift 'out' through the 32-bit DTMCONTROL register and return what was
+ * scanned out.  Leaves IR pointing at dbus afterwards.
+ * NOTE(review): on jtag_execute_queue() failure this returns the error code
+ * through the uint32_t value channel, indistinguishable from scan data. */
+static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
+{
+ struct scan_field field;
+ uint8_t in_value[4];
+ uint8_t out_value[4];
+
+ buf_set_u32(out_value, 0, 32, out);
+
+ jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);
+
+ field.num_bits = 32;
+ field.out_value = out_value;
+ field.in_value = in_value;
+ jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
+
+ /* Always return to dbus. */
+ jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
+
+ int retval = jtag_execute_queue();
+ if (retval != ERROR_OK) {
+ LOG_ERROR("failed jtag scan: %d", retval);
+ return retval;
+ }
+
+ uint32_t in = buf_get_u32(field.in_value, 0, 32);
+ LOG_DEBUG("DTMCONTROL: 0x%x -> 0x%x", out, in);
+
+ return in;
+}
+
+/* Pick the version-specific target_type (0.11 vs 0.13) based on the DTM
+ * version discovered during examine.  Returns NULL (with a log message) if
+ * arch_info is missing or the version is unsupported. */
+static struct target_type *get_target_type(struct target *target)
+{
+ riscv_info_t *info = (riscv_info_t *) target->arch_info;
+
+ if (!info) {
+ LOG_ERROR("Target has not been initialized");
+ return NULL;
+ }
+
+ switch (info->dtm_version) {
+ case 0:
+ return &riscv011_target;
+ case 1:
+ return &riscv013_target;
+ default:
+ LOG_ERROR("Unsupported DTM version: %d", info->dtm_version);
+ return NULL;
+ }
+}
+
+/* Allocate and initialize the generic riscv_info_t for this target and size
+ * the shared IR scan fields to the TAP's IR length.  Version-specific init
+ * happens later, in riscv_examine(). */
+static int riscv_init_target(struct command_context *cmd_ctx,
+ struct target *target)
+{
+ LOG_DEBUG("riscv_init_target()");
+ target->arch_info = calloc(1, sizeof(riscv_info_t));
+ if (!target->arch_info)
+ return ERROR_FAIL;
+ riscv_info_t *info = (riscv_info_t *) target->arch_info;
+ riscv_info_init(target, info);
+ info->cmd_ctx = cmd_ctx;
+
+ select_dtmcontrol.num_bits = target->tap->ir_length;
+ select_dbus.num_bits = target->tap->ir_length;
+ select_idcode.num_bits = target->tap->ir_length;
+
+ riscv_semihosting_init(target);
+
+ return ERROR_OK;
+}
+
+/* Tear down the target: run the version-specific deinit (if we know the
+ * version) and free arch_info.
+ *
+ * arch_info is freed unconditionally: the old code only freed it when
+ * get_target_type() succeeded, leaking it for unsupported DTM versions. */
+static void riscv_deinit_target(struct target *target)
+{
+ LOG_DEBUG("riscv_deinit_target()");
+ struct target_type *tt = get_target_type(target);
+ if (tt)
+ tt->deinit_target(target);
+ free(target->arch_info);
+ target->arch_info = NULL;
+}
+
+/* Forward halt to the version-specific implementation (0.11 path). */
+static int oldriscv_halt(struct target *target)
+{
+ struct target_type *tt = get_target_type(target);
+ return tt->halt(target);
+}
+
+/* Translate an OpenOCD breakpoint into the generic trigger description
+ * consumed by add_trigger()/remove_trigger(). */
+static void trigger_from_breakpoint(struct trigger *trigger,
+ const struct breakpoint *breakpoint)
+{
+ trigger->address = breakpoint->address;
+ trigger->length = breakpoint->length;
+ trigger->mask = ~0LL;
+ /* The watchpoint counterpart fills in 'value'; set it here too so the
+ * struct never carries an indeterminate field. */
+ trigger->value = 0;
+ trigger->read = false;
+ trigger->write = false;
+ trigger->execute = true;
+ /* unique_id is unique across both breakpoints and watchpoints. */
+ trigger->unique_id = breakpoint->unique_id;
+}
+
+/* Program a type-1 (legacy SiFive bpcontrol) trigger on one hart.
+ *
+ * Assumes tselect is already set.  Returns
+ * ERROR_TARGET_RESOURCE_NOT_AVAILABLE if the trigger is in use or the
+ * hardware won't accept the requested configuration (detected by reading
+ * tdata1 back), ERROR_FAIL on access errors, ERROR_OK on success. */
+static int maybe_add_trigger_t1(struct target *target, unsigned hartid,
+ struct trigger *trigger, uint64_t tdata1)
+{
+ RISCV_INFO(r);
+
+ const uint32_t bpcontrol_x = 1<<0;
+ const uint32_t bpcontrol_w = 1<<1;
+ const uint32_t bpcontrol_r = 1<<2;
+ const uint32_t bpcontrol_u = 1<<3;
+ const uint32_t bpcontrol_s = 1<<4;
+ const uint32_t bpcontrol_h = 1<<5;
+ const uint32_t bpcontrol_m = 1<<6;
+ const uint32_t bpcontrol_bpmatch = 0xf << 7;
+ const uint32_t bpcontrol_bpaction = 0xff << 11;
+
+ if (tdata1 & (bpcontrol_r | bpcontrol_w | bpcontrol_x)) {
+ /* Trigger is already in use, presumably by user code. */
+ return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ }
+
+ tdata1 = set_field(tdata1, bpcontrol_r, trigger->read);
+ tdata1 = set_field(tdata1, bpcontrol_w, trigger->write);
+ tdata1 = set_field(tdata1, bpcontrol_x, trigger->execute);
+ /* Only match in privilege modes the hart actually implements (misa). */
+ tdata1 = set_field(tdata1, bpcontrol_u,
+ !!(r->misa[hartid] & (1 << ('U' - 'A'))));
+ tdata1 = set_field(tdata1, bpcontrol_s,
+ !!(r->misa[hartid] & (1 << ('S' - 'A'))));
+ tdata1 = set_field(tdata1, bpcontrol_h,
+ !!(r->misa[hartid] & (1 << ('H' - 'A'))));
+ tdata1 |= bpcontrol_m;
+ tdata1 = set_field(tdata1, bpcontrol_bpmatch, 0); /* exact match */
+ tdata1 = set_field(tdata1, bpcontrol_bpaction, 0); /* cause bp exception */
+
+ riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, tdata1);
+
+ /* Read back to verify the hardware accepted every bit we set. */
+ riscv_reg_t tdata1_rb;
+ if (riscv_get_register_on_hart(target, &tdata1_rb, hartid,
+ GDB_REGNO_TDATA1) != ERROR_OK)
+ return ERROR_FAIL;
+ LOG_DEBUG("tdata1=0x%" PRIx64, tdata1_rb);
+
+ if (tdata1 != tdata1_rb) {
+ LOG_DEBUG("Trigger doesn't support what we need; After writing 0x%"
+ PRIx64 " to tdata1 it contains 0x%" PRIx64,
+ tdata1, tdata1_rb);
+ riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
+ return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ }
+
+ riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA2, trigger->address);
+
+ return ERROR_OK;
+}
+
+/* Program a type-2 (mcontrol, debug-spec) trigger on one hart.
+ *
+ * Same contract as maybe_add_trigger_t1(): tselect already set; readback
+ * verifies hardware support; returns ERROR_TARGET_RESOURCE_NOT_AVAILABLE
+ * when the trigger is busy or can't express the request. */
+static int maybe_add_trigger_t2(struct target *target, unsigned hartid,
+ struct trigger *trigger, uint64_t tdata1)
+{
+ RISCV_INFO(r);
+
+ /* tselect is already set */
+ if (tdata1 & (MCONTROL_EXECUTE | MCONTROL_STORE | MCONTROL_LOAD)) {
+ /* Trigger is already in use, presumably by user code. */
+ return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ }
+
+ /* address/data match trigger */
+ tdata1 |= MCONTROL_DMODE(riscv_xlen(target));
+ tdata1 = set_field(tdata1, MCONTROL_ACTION,
+ MCONTROL_ACTION_DEBUG_MODE);
+ tdata1 = set_field(tdata1, MCONTROL_MATCH, MCONTROL_MATCH_EQUAL);
+ tdata1 |= MCONTROL_M;
+ /* Match only in modes this hart implements, per misa. */
+ if (r->misa[hartid] & (1 << ('H' - 'A')))
+ tdata1 |= MCONTROL_H;
+ if (r->misa[hartid] & (1 << ('S' - 'A')))
+ tdata1 |= MCONTROL_S;
+ if (r->misa[hartid] & (1 << ('U' - 'A')))
+ tdata1 |= MCONTROL_U;
+
+ if (trigger->execute)
+ tdata1 |= MCONTROL_EXECUTE;
+ if (trigger->read)
+ tdata1 |= MCONTROL_LOAD;
+ if (trigger->write)
+ tdata1 |= MCONTROL_STORE;
+
+ riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, tdata1);
+
+ /* Read back to verify the hardware accepted every bit we set. */
+ uint64_t tdata1_rb;
+ int result = riscv_get_register_on_hart(target, &tdata1_rb, hartid, GDB_REGNO_TDATA1);
+ if (result != ERROR_OK)
+ return result;
+ LOG_DEBUG("tdata1=0x%" PRIx64, tdata1_rb);
+
+ if (tdata1 != tdata1_rb) {
+ LOG_DEBUG("Trigger doesn't support what we need; After writing 0x%"
+ PRIx64 " to tdata1 it contains 0x%" PRIx64,
+ tdata1, tdata1_rb);
+ riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
+ return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ }
+
+ riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA2, trigger->address);
+
+ return ERROR_OK;
+}
+
+/* Find a free hardware trigger and program it (on every enabled hart in
+ * RTOS mode, on the single hart otherwise).  tselect is saved and restored
+ * on all harts.  Returns ERROR_TARGET_RESOURCE_NOT_AVAILABLE if no trigger
+ * can take the request. */
+static int add_trigger(struct target *target, struct trigger *trigger)
+{
+ RISCV_INFO(r);
+
+ if (riscv_enumerate_triggers(target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ /* In RTOS mode, we need to set the same trigger in the same slot on every
+ * hart, to keep up the illusion that each hart is a thread running on the
+ * same core. */
+
+ /* Otherwise, we just set the trigger on the one hart this target deals
+ * with. */
+
+ riscv_reg_t tselect[RISCV_MAX_HARTS];
+
+ int first_hart = -1;
+ for (int hartid = 0; hartid < riscv_count_harts(target); ++hartid) {
+ if (!riscv_hart_enabled(target, hartid))
+ continue;
+ if (first_hart < 0)
+ first_hart = hartid;
+ int result = riscv_get_register_on_hart(target, &tselect[hartid],
+ hartid, GDB_REGNO_TSELECT);
+ if (result != ERROR_OK)
+ return result;
+ }
+ assert(first_hart >= 0);
+
+ unsigned int i;
+ for (i = 0; i < r->trigger_count[first_hart]; i++) {
+ if (r->trigger_unique_id[i] != -1)
+ continue;
+
+ riscv_set_register_on_hart(target, first_hart, GDB_REGNO_TSELECT, i);
+
+ uint64_t tdata1;
+ int result = riscv_get_register_on_hart(target, &tdata1, first_hart,
+ GDB_REGNO_TDATA1);
+ if (result != ERROR_OK)
+ return result;
+ int type = get_field(tdata1, MCONTROL_TYPE(riscv_xlen(target)));
+
+ result = ERROR_OK;
+ for (int hartid = first_hart; hartid < riscv_count_harts(target); ++hartid) {
+ if (!riscv_hart_enabled(target, hartid))
+ continue;
+ if (hartid > first_hart)
+ riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, i);
+ switch (type) {
+ case 1:
+ result = maybe_add_trigger_t1(target, hartid, trigger, tdata1);
+ break;
+ case 2:
+ result = maybe_add_trigger_t2(target, hartid, trigger, tdata1);
+ break;
+ default:
+ LOG_DEBUG("trigger %d has unknown type %d", i, type);
+ continue;
+ }
+
+ /* Stop at the first hart that rejects the trigger; previously
+ * this was a no-op 'continue', so a later hart's success
+ * overwrote 'result' and a partially-programmed trigger was
+ * reported as fully set. */
+ if (result != ERROR_OK)
+ break;
+ }
+
+ if (result != ERROR_OK)
+ continue;
+
+ LOG_DEBUG("Using trigger %d (type %d) for bp %d", i, type,
+ trigger->unique_id);
+ r->trigger_unique_id[i] = trigger->unique_id;
+ break;
+ }
+
+ /* Restore tselect on every hart, whether or not we found a trigger. */
+ for (int hartid = first_hart; hartid < riscv_count_harts(target); ++hartid) {
+ if (!riscv_hart_enabled(target, hartid))
+ continue;
+ riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT,
+ tselect[hartid]);
+ }
+
+ if (i >= r->trigger_count[first_hart]) {
+ LOG_ERROR("Couldn't find an available hardware trigger.");
+ return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ }
+
+ return ERROR_OK;
+}
+
+/* Install a breakpoint: software (patch in ebreak/c.ebreak, saving the
+ * original instruction) or hardware (claim a trigger).  Other breakpoint
+ * types are rejected. */
+int riscv_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
+{
+ if (breakpoint->type == BKPT_SOFT) {
+ if (target_read_memory(target, breakpoint->address, breakpoint->length, 1,
+ breakpoint->orig_instr) != ERROR_OK) {
+ LOG_ERROR("Failed to read original instruction at 0x%" TARGET_PRIxADDR,
+ breakpoint->address);
+ return ERROR_FAIL;
+ }
+
+ /* 4-byte breakpoints get ebreak; 2-byte ones get the compressed
+ * c.ebreak encoding. */
+ int retval;
+ if (breakpoint->length == 4)
+ retval = target_write_u32(target, breakpoint->address, ebreak());
+ else
+ retval = target_write_u16(target, breakpoint->address, ebreak_c());
+ if (retval != ERROR_OK) {
+ LOG_ERROR("Failed to write %d-byte breakpoint instruction at 0x%"
+ TARGET_PRIxADDR, breakpoint->length, breakpoint->address);
+ return ERROR_FAIL;
+ }
+
+ } else if (breakpoint->type == BKPT_HARD) {
+ struct trigger trigger;
+ trigger_from_breakpoint(&trigger, breakpoint);
+ int result = add_trigger(target, &trigger);
+ if (result != ERROR_OK)
+ return result;
+
+ } else {
+ LOG_INFO("OpenOCD only supports hardware and software breakpoints.");
+ return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ }
+
+ breakpoint->set = true;
+
+ return ERROR_OK;
+}
+
+/* Release the hardware trigger that backs 'trigger': find its slot via the
+ * unique_id bookkeeping, clear tdata1 on every enabled hart (restoring
+ * tselect around the write), and mark the slot free. */
+static int remove_trigger(struct target *target, struct trigger *trigger)
+{
+ RISCV_INFO(r);
+
+ if (riscv_enumerate_triggers(target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ int first_hart = -1;
+ for (int hartid = 0; hartid < riscv_count_harts(target); ++hartid) {
+ if (!riscv_hart_enabled(target, hartid))
+ continue;
+ if (first_hart < 0) {
+ first_hart = hartid;
+ break;
+ }
+ }
+ assert(first_hart >= 0);
+
+ unsigned int i;
+ for (i = 0; i < r->trigger_count[first_hart]; i++) {
+ if (r->trigger_unique_id[i] == trigger->unique_id)
+ break;
+ }
+ if (i >= r->trigger_count[first_hart]) {
+ LOG_ERROR("Couldn't find the hardware resources used by hardware "
+ "trigger.");
+ return ERROR_FAIL;
+ }
+ LOG_DEBUG("Stop using resource %d for bp %d", i, trigger->unique_id);
+ for (int hartid = first_hart; hartid < riscv_count_harts(target); ++hartid) {
+ if (!riscv_hart_enabled(target, hartid))
+ continue;
+ riscv_reg_t tselect;
+ int result = riscv_get_register_on_hart(target, &tselect, hartid, GDB_REGNO_TSELECT);
+ if (result != ERROR_OK)
+ return result;
+ riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, i);
+ riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
+ riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, tselect);
+ }
+ r->trigger_unique_id[i] = -1;
+
+ return ERROR_OK;
+}
+
+/* Remove a breakpoint: restore the original instruction (software) or
+ * release the trigger (hardware).  Mirror of riscv_add_breakpoint(). */
+int riscv_remove_breakpoint(struct target *target,
+ struct breakpoint *breakpoint)
+{
+ if (breakpoint->type == BKPT_SOFT) {
+ if (target_write_memory(target, breakpoint->address, breakpoint->length, 1,
+ breakpoint->orig_instr) != ERROR_OK) {
+ LOG_ERROR("Failed to restore instruction for %d-byte breakpoint at "
+ "0x%" TARGET_PRIxADDR, breakpoint->length, breakpoint->address);
+ return ERROR_FAIL;
+ }
+
+ } else if (breakpoint->type == BKPT_HARD) {
+ struct trigger trigger;
+ trigger_from_breakpoint(&trigger, breakpoint);
+ int result = remove_trigger(target, &trigger);
+ if (result != ERROR_OK)
+ return result;
+
+ } else {
+ LOG_INFO("OpenOCD only supports hardware and software breakpoints.");
+ return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+ }
+
+ breakpoint->set = false;
+
+ return ERROR_OK;
+}
+
+/* Translate an OpenOCD watchpoint into the generic trigger description
+ * consumed by add_trigger()/remove_trigger(). */
+static void trigger_from_watchpoint(struct trigger *trigger,
+ const struct watchpoint *watchpoint)
+{
+ trigger->address = watchpoint->address;
+ trigger->length = watchpoint->length;
+ trigger->mask = watchpoint->mask;
+ trigger->value = watchpoint->value;
+ /* WPT_ACCESS means both read and write. */
+ trigger->read = (watchpoint->rw == WPT_READ || watchpoint->rw == WPT_ACCESS);
+ trigger->write = (watchpoint->rw == WPT_WRITE || watchpoint->rw == WPT_ACCESS);
+ trigger->execute = false;
+ /* unique_id is unique across both breakpoints and watchpoints. */
+ trigger->unique_id = watchpoint->unique_id;
+}
+
+/* Install a watchpoint by claiming a hardware trigger. */
+int riscv_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
+{
+ struct trigger trigger;
+ trigger_from_watchpoint(&trigger, watchpoint);
+
+ int result = add_trigger(target, &trigger);
+ if (result != ERROR_OK)
+ return result;
+ watchpoint->set = true;
+
+ return ERROR_OK;
+}
+
+/* Remove a watchpoint by releasing its hardware trigger. */
+int riscv_remove_watchpoint(struct target *target,
+ struct watchpoint *watchpoint)
+{
+ struct trigger trigger;
+ trigger_from_watchpoint(&trigger, watchpoint);
+
+ int result = remove_trigger(target, &trigger);
+ if (result != ERROR_OK)
+ return result;
+ watchpoint->set = false;
+
+ return ERROR_OK;
+}
+
+/* Forward step to the version-specific implementation (0.11 path). */
+static int oldriscv_step(struct target *target, int current, uint32_t address,
+ int handle_breakpoints)
+{
+ struct target_type *tt = get_target_type(target);
+ return tt->step(target, current, address, handle_breakpoints);
+}
+
+/* Dispatch step: the 0.11 code path implements step itself (is_halted is
+ * NULL there); 0.13 targets go through the generic riscv_openocd_step(). */
+static int old_or_new_riscv_step(
+ struct target *target,
+ int current,
+ target_addr_t address,
+ int handle_breakpoints
+){
+ RISCV_INFO(r);
+ LOG_DEBUG("handle_breakpoints=%d", handle_breakpoints);
+ if (r->is_halted == NULL)
+ return oldriscv_step(target, current, address, handle_breakpoints);
+ else
+ return riscv_openocd_step(target, current, address, handle_breakpoints);
+}
+
+
+/* Examine the target: read dtmcontrol to discover the DTM version, then
+ * hand off init and examination to the matching version-specific driver. */
+static int riscv_examine(struct target *target)
+{
+ LOG_DEBUG("riscv_examine()");
+ if (target_was_examined(target)) {
+ LOG_DEBUG("Target was already examined.");
+ return ERROR_OK;
+ }
+
+ /* Don't need to select dbus, since the first thing we do is read dtmcontrol. */
+
+ riscv_info_t *info = (riscv_info_t *) target->arch_info;
+ uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
+ LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
+ info->dtm_version = get_field(dtmcontrol, DTMCONTROL_VERSION);
+ LOG_DEBUG(" version=0x%x", info->dtm_version);
+
+ struct target_type *tt = get_target_type(target);
+ if (tt == NULL)
+ return ERROR_FAIL;
+
+ int result = tt->init_target(info->cmd_ctx, target);
+ if (result != ERROR_OK)
+ return result;
+
+ return tt->examine(target);
+}
+
+/* Forward poll to the version-specific implementation (0.11 path). */
+static int oldriscv_poll(struct target *target)
+{
+ struct target_type *tt = get_target_type(target);
+ return tt->poll(target);
+}
+
+/* Dispatch poll: 0.11 targets (is_halted == NULL) use their own poll;
+ * 0.13 targets use the generic riscv_openocd_poll(). */
+static int old_or_new_riscv_poll(struct target *target)
+{
+ RISCV_INFO(r);
+ if (r->is_halted == NULL)
+ return oldriscv_poll(target);
+ else
+ return riscv_openocd_poll(target);
+}
+
+/* Dispatch halt on the same 0.11-vs-0.13 criterion as poll above. */
+static int old_or_new_riscv_halt(struct target *target)
+{
+ RISCV_INFO(r);
+ if (r->is_halted == NULL)
+ return oldriscv_halt(target);
+ else
+ return riscv_openocd_halt(target);
+}
+
+/* Forward reset assertion to the version-specific driver. */
+static int riscv_assert_reset(struct target *target)
+{
+ struct target_type *tt = get_target_type(target);
+ return tt->assert_reset(target);
+}
+
+/* Forward reset deassertion to the version-specific driver. */
+static int riscv_deassert_reset(struct target *target)
+{
+ LOG_DEBUG("RISCV DEASSERT RESET");
+ struct target_type *tt = get_target_type(target);
+ return tt->deassert_reset(target);
+}
+
+
+/* Forward resume to the version-specific implementation (0.11 path). */
+static int oldriscv_resume(struct target *target, int current, uint32_t address,
+ int handle_breakpoints, int debug_execution)
+{
+ struct target_type *tt = get_target_type(target);
+ return tt->resume(target, current, address, handle_breakpoints,
+ debug_execution);
+}
+
+/* Dispatch resume: 0.11 targets (is_halted == NULL) use their own resume;
+ * 0.13 targets use the generic riscv_openocd_resume(). */
+static int old_or_new_riscv_resume(
+ struct target *target,
+ int current,
+ target_addr_t address,
+ int handle_breakpoints,
+ int debug_execution
+){
+ RISCV_INFO(r);
+ LOG_DEBUG("handle_breakpoints=%d", handle_breakpoints);
+ if (r->is_halted == NULL)
+ return oldriscv_resume(target, current, address, handle_breakpoints, debug_execution);
+ else
+ return riscv_openocd_resume(target, current, address, handle_breakpoints, debug_execution);
+}
+
+/* Select the hart this operation should act on: the RTOS-chosen hart in
+ * -rtos mode, otherwise the hart matching this target's coreid. */
+static int riscv_select_current_hart(struct target *target)
+{
+ RISCV_INFO(r);
+ if (r->rtos_hartid != -1 && riscv_rtos_enabled(target))
+ return riscv_set_current_hartid(target, r->rtos_hartid);
+ else
+ return riscv_set_current_hartid(target, target->coreid);
+}
+
+/* Read target memory through the version-specific driver, on the currently
+ * selected hart. */
+static int riscv_read_memory(struct target *target, target_addr_t address,
+ uint32_t size, uint32_t count, uint8_t *buffer)
+{
+ if (riscv_select_current_hart(target) != ERROR_OK)
+ return ERROR_FAIL;
+ struct target_type *tt = get_target_type(target);
+ return tt->read_memory(target, address, size, count, buffer);
+}
+
+/* Write target memory through the version-specific driver, on the currently
+ * selected hart. */
+static int riscv_write_memory(struct target *target, target_addr_t address,
+ uint32_t size, uint32_t count, const uint8_t *buffer)
+{
+ if (riscv_select_current_hart(target) != ERROR_OK)
+ return ERROR_FAIL;
+ struct target_type *tt = get_target_type(target);
+ return tt->write_memory(target, address, size, count, buffer);
+}
+
+/* Build the register list gdb asks for: the 32 GPRs for REG_CLASS_GENERAL,
+ * or every register we know about for REG_CLASS_ALL.  Allocates *reg_list;
+ * the caller (gdb server) owns and frees it — presumably; confirm against
+ * OpenOCD's get_gdb_reg_list contract. */
+static int riscv_get_gdb_reg_list(struct target *target,
+ struct reg **reg_list[], int *reg_list_size,
+ enum target_register_class reg_class)
+{
+ RISCV_INFO(r);
+ LOG_DEBUG("reg_class=%d", reg_class);
+ LOG_DEBUG("rtos_hartid=%d current_hartid=%d", r->rtos_hartid, r->current_hartid);
+
+ if (!target->reg_cache) {
+ LOG_ERROR("Target not initialized. Return ERROR_FAIL.");
+ return ERROR_FAIL;
+ }
+
+ if (riscv_select_current_hart(target) != ERROR_OK)
+ return ERROR_FAIL;
+
+ switch (reg_class) {
+ case REG_CLASS_GENERAL:
+ *reg_list_size = 32;
+ break;
+ case REG_CLASS_ALL:
+ *reg_list_size = GDB_REGNO_COUNT;
+ break;
+ default:
+ LOG_ERROR("Unsupported reg_class: %d", reg_class);
+ return ERROR_FAIL;
+ }
+
+ *reg_list = calloc(*reg_list_size, sizeof(struct reg *));
+ if (!*reg_list)
+ return ERROR_FAIL;
+
+ /* The returned entries point into the shared reg_cache; only the array
+ * of pointers is freshly allocated. */
+ for (int i = 0; i < *reg_list_size; i++) {
+ assert(!target->reg_cache->reg_list[i].valid ||
+ target->reg_cache->reg_list[i].size > 0);
+ (*reg_list)[i] = &target->reg_cache->reg_list[i];
+ }
+
+ return ERROR_OK;
+}
+
+/* Forward arch_state reporting to the version-specific driver. */
+static int riscv_arch_state(struct target *target)
+{
+ struct target_type *tt = get_target_type(target);
+ return tt->arch_state(target);
+}
+
+/* Run an algorithm on the target. The algorithm must end with a software
+ * breakpoint instruction. Only GPR reg_params are supported; mem_params are
+ * not. Saves pc, the argument GPRs and mstatus, disables interrupts, resumes
+ * at entry_point, polls until halt or timeout_ms, verifies the final PC
+ * equals exit_point, then restores everything that was saved. */
+static int riscv_run_algorithm(struct target *target, int num_mem_params,
+		struct mem_param *mem_params, int num_reg_params,
+		struct reg_param *reg_params, target_addr_t entry_point,
+		target_addr_t exit_point, int timeout_ms, void *arch_info)
+{
+	riscv_info_t *info = (riscv_info_t *) target->arch_info;
+
+	if (num_mem_params > 0) {
+		LOG_ERROR("Memory parameters are not supported for RISC-V algorithms.");
+		return ERROR_FAIL;
+	}
+
+	if (target->state != TARGET_HALTED) {
+		LOG_WARNING("target not halted");
+		return ERROR_TARGET_NOT_HALTED;
+	}
+
+	/* Save registers */
+	struct reg *reg_pc = register_get_by_name(target->reg_cache, "pc", 1);
+	if (!reg_pc || reg_pc->type->get(reg_pc) != ERROR_OK)
+		return ERROR_FAIL;
+	uint64_t saved_pc = buf_get_u64(reg_pc->value, 0, reg_pc->size);
+
+	uint64_t saved_regs[32];
+	for (int i = 0; i < num_reg_params; i++) {
+		LOG_DEBUG("save %s", reg_params[i].reg_name);
+		struct reg *r = register_get_by_name(target->reg_cache, reg_params[i].reg_name, 0);
+		if (!r) {
+			LOG_ERROR("Couldn't find register named '%s'", reg_params[i].reg_name);
+			return ERROR_FAIL;
+		}
+
+		if (r->size != reg_params[i].size) {
+			LOG_ERROR("Register %s is %d bits instead of %d bits.",
+					reg_params[i].reg_name, r->size, reg_params[i].size);
+			return ERROR_FAIL;
+		}
+
+		/* saved_regs[] is indexed by GPR number, so only GPRs fit. */
+		if (r->number > GDB_REGNO_XPR31) {
+			LOG_ERROR("Only GPRs can be use as argument registers.");
+			return ERROR_FAIL;
+		}
+
+		if (r->type->get(r) != ERROR_OK)
+			return ERROR_FAIL;
+		saved_regs[r->number] = buf_get_u64(r->value, 0, r->size);
+		if (r->type->set(r, reg_params[i].value) != ERROR_OK)
+			return ERROR_FAIL;
+	}
+
+	/* Disable Interrupts before attempting to run the algorithm. */
+	uint64_t current_mstatus;
+	uint8_t mstatus_bytes[8];
+
+	LOG_DEBUG("Disabling Interrupts");
+	struct reg *reg_mstatus = register_get_by_name(target->reg_cache,
+			"mstatus", 1);
+	if (!reg_mstatus) {
+		LOG_ERROR("Couldn't find mstatus!");
+		return ERROR_FAIL;
+	}
+
+	/* Check the read result; otherwise current_mstatus could be garbage
+	 * and a bogus value would be written back at the end. */
+	if (reg_mstatus->type->get(reg_mstatus) != ERROR_OK)
+		return ERROR_FAIL;
+	current_mstatus = buf_get_u64(reg_mstatus->value, 0, reg_mstatus->size);
+	uint64_t ie_mask = MSTATUS_MIE | MSTATUS_HIE | MSTATUS_SIE | MSTATUS_UIE;
+	buf_set_u64(mstatus_bytes, 0, info->xlen[0], set_field(current_mstatus,
+				ie_mask, 0));
+
+	if (reg_mstatus->type->set(reg_mstatus, mstatus_bytes) != ERROR_OK)
+		return ERROR_FAIL;
+
+	/* Run algorithm */
+	LOG_DEBUG("resume at 0x%" TARGET_PRIxADDR, entry_point);
+	if (oldriscv_resume(target, 0, entry_point, 0, 0) != ERROR_OK)
+		return ERROR_FAIL;
+
+	int64_t start = timeval_ms();
+	while (target->state != TARGET_HALTED) {
+		LOG_DEBUG("poll()");
+		int64_t now = timeval_ms();
+		if (now - start > timeout_ms) {
+			LOG_ERROR("Algorithm timed out after %d ms.", timeout_ms);
+			LOG_ERROR(" now = 0x%08x", (uint32_t) now);
+			LOG_ERROR(" start = 0x%08x", (uint32_t) start);
+			oldriscv_halt(target);
+			old_or_new_riscv_poll(target);
+			return ERROR_TARGET_TIMEOUT;
+		}
+
+		int result = old_or_new_riscv_poll(target);
+		if (result != ERROR_OK)
+			return result;
+	}
+
+	if (reg_pc->type->get(reg_pc) != ERROR_OK)
+		return ERROR_FAIL;
+	uint64_t final_pc = buf_get_u64(reg_pc->value, 0, reg_pc->size);
+	if (final_pc != exit_point) {
+		LOG_ERROR("PC ended up at 0x%" PRIx64 " instead of 0x%"
+				TARGET_PRIxADDR, final_pc, exit_point);
+		return ERROR_FAIL;
+	}
+
+	/* Restore Interrupts */
+	LOG_DEBUG("Restoring Interrupts");
+	buf_set_u64(mstatus_bytes, 0, info->xlen[0], current_mstatus);
+	if (reg_mstatus->type->set(reg_mstatus, mstatus_bytes) != ERROR_OK)
+		return ERROR_FAIL;
+
+	/* Restore registers */
+	uint8_t buf[8];
+	buf_set_u64(buf, 0, info->xlen[0], saved_pc);
+	if (reg_pc->type->set(reg_pc, buf) != ERROR_OK)
+		return ERROR_FAIL;
+
+	for (int i = 0; i < num_reg_params; i++) {
+		LOG_DEBUG("restore %s", reg_params[i].reg_name);
+		struct reg *r = register_get_by_name(target->reg_cache, reg_params[i].reg_name, 0);
+		/* Already validated in the save loop, but don't risk a NULL
+		 * dereference if the cache changed underneath us. */
+		if (!r)
+			return ERROR_FAIL;
+		buf_set_u64(buf, 0, info->xlen[0], saved_regs[r->number]);
+		if (r->type->set(r, buf) != ERROR_OK)
+			return ERROR_FAIL;
+	}
+
+	return ERROR_OK;
+}
+
+/* Should run code on the target to perform CRC of
+memory. Not yet implemented.
+*/
+
+static int riscv_checksum_memory(struct target *target,
+		target_addr_t address, uint32_t count,
+		uint32_t *checksum)
+{
+	/* Placeholder value; the error return tells callers (e.g. image
+	 * verification) to fall back to reading memory directly. */
+	*checksum = 0xFFFFFFFF;
+	return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
+}
+
+/*** OpenOCD Helper Functions ***/
+
+/* Outcome of polling a single hart: either nothing changed relative to what
+ * OpenOCD believed, or we discovered a state transition, or the poll failed. */
+enum riscv_poll_hart {
+	RPH_NO_CHANGE,
+	RPH_DISCOVERED_HALTED,
+	RPH_DISCOVERED_RUNNING,
+	RPH_ERROR
+};
+/* Select hartid and compare its actual halted/running state against
+ * target->state. On a newly-discovered halt, the on_halt hook is invoked
+ * before returning. */
+static enum riscv_poll_hart riscv_poll_hart(struct target *target, int hartid)
+{
+	RISCV_INFO(r);
+	if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
+		return RPH_ERROR;
+
+	LOG_DEBUG("polling hart %d, target->state=%d", hartid, target->state);
+
+	/* If OpenOCD thinks we're running but this hart is halted then it's time
+	 * to raise an event. */
+	bool halted = riscv_is_halted(target);
+	if (target->state != TARGET_HALTED && halted) {
+		LOG_DEBUG(" triggered a halt");
+		r->on_halt(target);
+		return RPH_DISCOVERED_HALTED;
+	} else if (target->state != TARGET_RUNNING && !halted) {
+		LOG_DEBUG(" triggered running");
+		target->state = TARGET_RUNNING;
+		return RPH_DISCOVERED_RUNNING;
+	}
+
+	return RPH_NO_CHANGE;
+}
+
+/*** OpenOCD Interface ***/
+/* Poll the target. With an RTOS configured, every hart is polled and, if any
+ * hart halted, all remaining harts are halted too (all-or-nothing invariant).
+ * Translates the halt reason into target->debug_reason, handles semihosting
+ * requests, and fires TARGET_EVENT_HALTED. */
+int riscv_openocd_poll(struct target *target)
+{
+	LOG_DEBUG("polling all harts");
+	int halted_hart = -1;
+	if (riscv_rtos_enabled(target)) {
+		/* Check every hart for an event. */
+		for (int i = 0; i < riscv_count_harts(target); ++i) {
+			enum riscv_poll_hart out = riscv_poll_hart(target, i);
+			switch (out) {
+			case RPH_NO_CHANGE:
+			case RPH_DISCOVERED_RUNNING:
+				continue;
+			case RPH_DISCOVERED_HALTED:
+				halted_hart = i;
+				break;
+			case RPH_ERROR:
+				return ERROR_FAIL;
+			}
+		}
+		if (halted_hart == -1) {
+			LOG_DEBUG(" no harts just halted, target->state=%d", target->state);
+			return ERROR_OK;
+		}
+		LOG_DEBUG(" hart %d halted", halted_hart);
+
+		/* If we're here then at least one hart triggered. That means
+		 * we want to go and halt _every_ hart in the system, as that's
+		 * the invariant we hold here. Some harts might have already
+		 * halted (as we're either in single-step mode or they also
+		 * triggered a breakpoint), so don't attempt to halt those
+		 * harts. */
+		for (int i = 0; i < riscv_count_harts(target); ++i)
+			riscv_halt_one_hart(target, i);
+	} else {
+		/* No RTOS: only the one hart this target represents matters. */
+		enum riscv_poll_hart out = riscv_poll_hart(target,
+				riscv_current_hartid(target));
+		if (out == RPH_NO_CHANGE || out == RPH_DISCOVERED_RUNNING)
+			return ERROR_OK;
+		else if (out == RPH_ERROR)
+			return ERROR_FAIL;
+
+		halted_hart = riscv_current_hartid(target);
+		LOG_DEBUG(" hart %d halted", halted_hart);
+	}
+
+	/* Set state before querying the halt reason, since the query selects
+	 * the hart and expects it to be halted. */
+	target->state = TARGET_HALTED;
+	switch (riscv_halt_reason(target, halted_hart)) {
+	case RISCV_HALT_BREAKPOINT:
+		target->debug_reason = DBG_REASON_BREAKPOINT;
+		break;
+	case RISCV_HALT_TRIGGER:
+		target->debug_reason = DBG_REASON_WATCHPOINT;
+		break;
+	case RISCV_HALT_INTERRUPT:
+		target->debug_reason = DBG_REASON_DBGRQ;
+		break;
+	case RISCV_HALT_SINGLESTEP:
+		target->debug_reason = DBG_REASON_SINGLESTEP;
+		break;
+	case RISCV_HALT_UNKNOWN:
+		target->debug_reason = DBG_REASON_UNDEFINED;
+		break;
+	case RISCV_HALT_ERROR:
+		return ERROR_FAIL;
+	}
+
+	if (riscv_rtos_enabled(target)) {
+		/* Thread ids are hartid + 1, since 0 is reserved. */
+		target->rtos->current_threadid = halted_hart + 1;
+		target->rtos->current_thread = halted_hart + 1;
+	}
+
+	target->state = TARGET_HALTED;
+
+	if (target->debug_reason == DBG_REASON_BREAKPOINT) {
+		int retval;
+		/* Non-zero means a semihosting request was handled (or errored);
+		 * either way the caller gets that result instead of HALTED. */
+		if (riscv_semihosting(target, &retval) != 0)
+			return retval;
+	}
+
+	target_call_event_callbacks(target, TARGET_EVENT_HALTED);
+	return ERROR_OK;
+}
+
+/* Halt every hart, invalidate the register cache, update the RTOS thread
+ * bookkeeping, and report the halt to OpenOCD's event machinery. */
+int riscv_openocd_halt(struct target *target)
+{
+	RISCV_INFO(r);
+
+	LOG_DEBUG("halting all harts");
+
+	int result = riscv_halt_all_harts(target);
+	if (result != ERROR_OK) {
+		LOG_ERROR("Unable to halt all harts");
+		return result;
+	}
+
+	register_cache_invalidate(target->reg_cache);
+	if (riscv_rtos_enabled(target)) {
+		/* Thread ids are hartid + 1. */
+		int64_t tid = r->rtos_hartid + 1;
+		target->rtos->current_threadid = tid;
+		target->rtos->current_thread = tid;
+	}
+
+	target->state = TARGET_HALTED;
+	target->debug_reason = DBG_REASON_DBGRQ;
+	target_call_event_callbacks(target, TARGET_EVENT_HALTED);
+	return result;
+}
+
+/* Resume all harts. If we halted on a watchpoint, temporarily remove every
+ * set watchpoint, single-step over the triggering instruction, and re-add
+ * them before resuming, so the same trigger doesn't fire again immediately. */
+int riscv_openocd_resume(
+		struct target *target,
+		int current,
+		target_addr_t address,
+		int handle_breakpoints,
+		int debug_execution)
+{
+	LOG_DEBUG("debug_reason=%d", target->debug_reason);
+
+	if (!current)
+		riscv_set_register(target, GDB_REGNO_PC, address);
+
+	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
+		/* To be able to run off a trigger, disable all the triggers, step, and
+		 * then resume as usual. */
+		struct watchpoint *watchpoint = target->watchpoints;
+		bool trigger_temporarily_cleared[RISCV_MAX_HWBPS] = {0};
+
+		/* First pass: remember which watchpoints were set, and remove
+		 * them. Stop removing on the first error, but keep walking so
+		 * the second pass sees a consistent cleared[] array. */
+		int i = 0;
+		int result = ERROR_OK;
+		while (watchpoint && result == ERROR_OK) {
+			LOG_DEBUG("watchpoint %d: set=%d", i, watchpoint->set);
+			trigger_temporarily_cleared[i] = watchpoint->set;
+			if (watchpoint->set)
+				result = riscv_remove_watchpoint(target, watchpoint);
+			watchpoint = watchpoint->next;
+			i++;
+		}
+
+		if (result == ERROR_OK)
+			result = riscv_step_rtos_hart(target);
+
+		/* Second pass: re-add everything we removed, even if an earlier
+		 * step failed, so the watchpoint state isn't silently lost. */
+		watchpoint = target->watchpoints;
+		i = 0;
+		while (watchpoint) {
+			LOG_DEBUG("watchpoint %d: cleared=%d", i, trigger_temporarily_cleared[i]);
+			if (trigger_temporarily_cleared[i]) {
+				if (result == ERROR_OK)
+					result = riscv_add_watchpoint(target, watchpoint);
+				else
+					riscv_add_watchpoint(target, watchpoint);
+			}
+			watchpoint = watchpoint->next;
+			i++;
+		}
+
+		if (result != ERROR_OK)
+			return result;
+	}
+
+	int out = riscv_resume_all_harts(target);
+	if (out != ERROR_OK) {
+		LOG_ERROR("unable to resume all harts");
+		return out;
+	}
+
+	register_cache_invalidate(target->reg_cache);
+	target->state = TARGET_RUNNING;
+	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
+	return out;
+}
+
+/* Single-step the RTOS hart (optionally from a new PC), then report a
+ * resume/halt event pair so debuggers observe the step. */
+int riscv_openocd_step(
+		struct target *target,
+		int current,
+		target_addr_t address,
+		int handle_breakpoints
+) {
+	LOG_DEBUG("stepping rtos hart");
+
+	if (current == 0)
+		riscv_set_register(target, GDB_REGNO_PC, address);
+
+	int result = riscv_step_rtos_hart(target);
+	if (result != ERROR_OK) {
+		LOG_ERROR("unable to step rtos hart");
+		return result;
+	}
+
+	register_cache_invalidate(target->reg_cache);
+	target->state = TARGET_RUNNING;
+	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
+	target->state = TARGET_HALTED;
+	target->debug_reason = DBG_REASON_SINGLESTEP;
+	target_call_event_callbacks(target, TARGET_EVENT_HALTED);
+	return result;
+}
+
+/* Command Handlers */
+/* riscv set_command_timeout_sec <sec>: set the per-command wall-clock
+ * timeout. atoi() returns 0 for non-numeric input, which the > 0 check
+ * rejects. */
+COMMAND_HANDLER(riscv_set_command_timeout_sec)
+{
+	if (CMD_ARGC != 1) {
+		LOG_ERROR("Command takes exactly 1 parameter");
+		return ERROR_COMMAND_SYNTAX_ERROR;
+	}
+
+	int timeout = atoi(CMD_ARGV[0]);
+	if (timeout > 0) {
+		riscv_command_timeout_sec = timeout;
+		return ERROR_OK;
+	}
+
+	LOG_ERROR("%s is not a valid integer argument for command.", CMD_ARGV[0]);
+	return ERROR_FAIL;
+}
+
+/* riscv set_reset_timeout_sec <sec>: set how long to wait for harts to come
+ * out of reset after reset is deasserted. */
+COMMAND_HANDLER(riscv_set_reset_timeout_sec)
+{
+	if (CMD_ARGC != 1) {
+		LOG_ERROR("Command takes exactly 1 parameter");
+		return ERROR_COMMAND_SYNTAX_ERROR;
+	}
+
+	int timeout = atoi(CMD_ARGV[0]);
+	if (timeout > 0) {
+		riscv_reset_timeout_sec = timeout;
+		return ERROR_OK;
+	}
+
+	LOG_ERROR("%s is not a valid integer argument for command.", CMD_ARGV[0]);
+	return ERROR_FAIL;
+}
+
+/* riscv set_prefer_sba on|off: choose System Bus Access over the Program
+ * Buffer for memory access. COMMAND_PARSE_ON_OFF returns a syntax error on
+ * anything other than on/off. */
+COMMAND_HANDLER(riscv_set_prefer_sba)
+{
+	if (CMD_ARGC != 1) {
+		LOG_ERROR("Command takes exactly 1 parameter");
+		return ERROR_COMMAND_SYNTAX_ERROR;
+	}
+	COMMAND_PARSE_ON_OFF(CMD_ARGV[0], riscv_prefer_sba);
+	return ERROR_OK;
+}
+
+void parse_error(const char *string, char c, unsigned position)
+{
+ char buf[position+2];
+ for (unsigned i = 0; i < position; i++)
+ buf[i] = ' ';
+ buf[position] = '^';
+ buf[position + 1] = 0;
+
+ LOG_ERROR("Parse error at character %c in:", c);
+ LOG_ERROR("%s", string);
+ LOG_ERROR("%s", buf);
+}
+
+/* riscv expose_csrs n0[-m0][,n1[-m1]]...: parse a comma-separated list of
+ * decimal CSR numbers/ranges into the global expose_csr array. Two passes
+ * over the same string: pass 0 only counts ranges (to size the allocation),
+ * pass 1 fills them in. */
+COMMAND_HANDLER(riscv_set_expose_csrs)
+{
+	if (CMD_ARGC != 1) {
+		LOG_ERROR("Command takes exactly 1 parameter");
+		return ERROR_COMMAND_SYNTAX_ERROR;
+	}
+
+	for (unsigned pass = 0; pass < 2; pass++) {
+		unsigned range = 0;
+		unsigned low = 0;
+		bool parse_low = true;
+		unsigned high = 0;
+		/* The loop condition deliberately runs one past the NUL so the
+		 * terminator (c == 0) closes the final range. */
+		for (unsigned i = 0; i == 0 || CMD_ARGV[0][i-1]; i++) {
+			char c = CMD_ARGV[0][i];
+			if (isspace(c)) {
+				/* Ignore whitespace. */
+				continue;
+			}
+
+			if (parse_low) {
+				if (isdigit(c)) {
+					low *= 10;
+					low += c - '0';
+				} else if (c == '-') {
+					parse_low = false;
+				} else if (c == ',' || c == 0) {
+					/* Single number: low == high. */
+					if (pass == 1) {
+						expose_csr[range].low = low;
+						expose_csr[range].high = low;
+					}
+					low = 0;
+					range++;
+				} else {
+					parse_error(CMD_ARGV[0], c, i);
+					return ERROR_COMMAND_SYNTAX_ERROR;
+				}
+
+			} else {
+				if (isdigit(c)) {
+					high *= 10;
+					high += c - '0';
+				} else if (c == ',' || c == 0) {
+					parse_low = true;
+					if (pass == 1) {
+						expose_csr[range].low = low;
+						expose_csr[range].high = high;
+					}
+					low = 0;
+					high = 0;
+					range++;
+				} else {
+					parse_error(CMD_ARGV[0], c, i);
+					return ERROR_COMMAND_SYNTAX_ERROR;
+				}
+			}
+		}
+
+		if (pass == 0) {
+			if (expose_csr)
+				free(expose_csr);
+			/* +2: one slot of headroom plus the terminator entry. */
+			expose_csr = calloc(range + 2, sizeof(*expose_csr));
+		} else {
+			/* low > high marks the end of the list for consumers. */
+			expose_csr[range].low = 1;
+			expose_csr[range].high = 0;
+		}
+	}
+	return ERROR_OK;
+}
+
+/* riscv authdata_read: print the 32-bit authdata value, if the DTM version
+ * in use implements authentication. */
+COMMAND_HANDLER(riscv_authdata_read)
+{
+	if (CMD_ARGC != 0) {
+		LOG_ERROR("Command takes no parameters");
+		return ERROR_COMMAND_SYNTAX_ERROR;
+	}
+
+	struct target *target = get_current_target(CMD_CTX);
+	if (!target) {
+		LOG_ERROR("target is NULL!");
+		return ERROR_FAIL;
+	}
+
+	RISCV_INFO(r);
+	if (!r) {
+		LOG_ERROR("riscv_info is NULL!");
+		return ERROR_FAIL;
+	}
+
+	/* Not every debug-spec version provides this hook. */
+	if (!r->authdata_read) {
+		LOG_ERROR("authdata_read is not implemented for this target.");
+		return ERROR_FAIL;
+	}
+
+	uint32_t value;
+	if (r->authdata_read(target, &value) != ERROR_OK)
+		return ERROR_FAIL;
+	command_print(CMD_CTX, "0x%" PRIx32, value);
+	return ERROR_OK;
+}
+
+/* riscv authdata_write <value>: write a 32-bit value to authdata. NULL
+ * checks mirror riscv_authdata_read for consistency; without them a missing
+ * target or uninitialized arch_info would crash in RISCV_INFO/dereference. */
+COMMAND_HANDLER(riscv_authdata_write)
+{
+	if (CMD_ARGC != 1) {
+		LOG_ERROR("Command takes exactly 1 argument");
+		return ERROR_COMMAND_SYNTAX_ERROR;
+	}
+
+	struct target *target = get_current_target(CMD_CTX);
+	if (!target) {
+		LOG_ERROR("target is NULL!");
+		return ERROR_FAIL;
+	}
+
+	RISCV_INFO(r);
+	if (!r) {
+		LOG_ERROR("riscv_info is NULL!");
+		return ERROR_FAIL;
+	}
+
+	uint32_t value;
+	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], value);
+
+	if (r->authdata_write) {
+		return r->authdata_write(target, value);
+	} else {
+		LOG_ERROR("authdata_write is not implemented for this target.");
+		return ERROR_FAIL;
+	}
+}
+
+/* riscv dmi_read <address>: perform a raw 32-bit DMI read and print the
+ * result. Useful for low-level debugging of the debug module itself. */
+COMMAND_HANDLER(riscv_dmi_read)
+{
+	if (CMD_ARGC != 1) {
+		LOG_ERROR("Command takes 1 parameter");
+		return ERROR_COMMAND_SYNTAX_ERROR;
+	}
+
+	struct target *target = get_current_target(CMD_CTX);
+	if (!target) {
+		LOG_ERROR("target is NULL!");
+		return ERROR_FAIL;
+	}
+
+	RISCV_INFO(r);
+	if (!r) {
+		LOG_ERROR("riscv_info is NULL!");
+		return ERROR_FAIL;
+	}
+
+	/* Only the 0.13 debug module exposes raw DMI access. */
+	if (!r->dmi_read) {
+		LOG_ERROR("dmi_read is not implemented for this target.");
+		return ERROR_FAIL;
+	}
+
+	uint32_t address, value;
+	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
+	if (r->dmi_read(target, &value, address) != ERROR_OK)
+		return ERROR_FAIL;
+	command_print(CMD_CTX, "0x%" PRIx32, value);
+	return ERROR_OK;
+}
+
+
+/* riscv dmi_write <address> <value>: perform a raw 32-bit DMI write. NULL
+ * checks added to match riscv_dmi_read; without them a missing target or
+ * uninitialized arch_info would crash before reaching the hook check. */
+COMMAND_HANDLER(riscv_dmi_write)
+{
+	if (CMD_ARGC != 2) {
+		LOG_ERROR("Command takes exactly 2 arguments");
+		return ERROR_COMMAND_SYNTAX_ERROR;
+	}
+
+	struct target *target = get_current_target(CMD_CTX);
+	if (!target) {
+		LOG_ERROR("target is NULL!");
+		return ERROR_FAIL;
+	}
+
+	RISCV_INFO(r);
+	if (!r) {
+		LOG_ERROR("riscv_info is NULL!");
+		return ERROR_FAIL;
+	}
+
+	uint32_t address, value;
+	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
+	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
+
+	if (r->dmi_write) {
+		return r->dmi_write(target, address, value);
+	} else {
+		LOG_ERROR("dmi_write is not implemented for this target.");
+		return ERROR_FAIL;
+	}
+}
+
+/* Subcommands of `riscv`, registered below via riscv_command_handlers. */
+static const struct command_registration riscv_exec_command_handlers[] = {
+	{
+		.name = "set_command_timeout_sec",
+		.handler = riscv_set_command_timeout_sec,
+		.mode = COMMAND_ANY,
+		.usage = "riscv set_command_timeout_sec [sec]",
+		.help = "Set the wall-clock timeout (in seconds) for individual commands"
+	},
+	{
+		.name = "set_reset_timeout_sec",
+		.handler = riscv_set_reset_timeout_sec,
+		.mode = COMMAND_ANY,
+		.usage = "riscv set_reset_timeout_sec [sec]",
+		.help = "Set the wall-clock timeout (in seconds) after reset is deasserted"
+	},
+	{
+		.name = "set_prefer_sba",
+		.handler = riscv_set_prefer_sba,
+		.mode = COMMAND_ANY,
+		.usage = "riscv set_prefer_sba on|off",
+		.help = "When on, prefer to use System Bus Access to access memory. "
+			"When off, prefer to use the Program Buffer to access memory."
+	},
+	{
+		.name = "expose_csrs",
+		.handler = riscv_set_expose_csrs,
+		.mode = COMMAND_ANY,
+		.usage = "riscv expose_csrs n0[-m0][,n1[-m1]]...",
+		.help = "Configure a list of inclusive ranges for CSRs to expose in "
+			"addition to the standard ones. This must be executed before "
+			"`init`."
+	},
+	{
+		.name = "authdata_read",
+		.handler = riscv_authdata_read,
+		.mode = COMMAND_ANY,
+		.usage = "riscv authdata_read",
+		.help = "Return the 32-bit value read from authdata."
+	},
+	{
+		.name = "authdata_write",
+		.handler = riscv_authdata_write,
+		.mode = COMMAND_ANY,
+		.usage = "riscv authdata_write value",
+		.help = "Write the 32-bit value to authdata."
+	},
+	{
+		.name = "dmi_read",
+		.handler = riscv_dmi_read,
+		.mode = COMMAND_ANY,
+		.usage = "riscv dmi_read address",
+		.help = "Perform a 32-bit DMI read at address, returning the value."
+	},
+	{
+		.name = "dmi_write",
+		.handler = riscv_dmi_write,
+		.mode = COMMAND_ANY,
+		.usage = "riscv dmi_write address value",
+		.help = "Perform a 32-bit DMI write of value at address."
+	},
+	COMMAND_REGISTRATION_DONE
+};
+
+extern __COMMAND_HANDLER(handle_common_semihosting_command);
+extern __COMMAND_HANDLER(handle_common_semihosting_fileio_command);
+extern __COMMAND_HANDLER(handle_common_semihosting_resumable_exit_command);
+extern __COMMAND_HANDLER(handle_common_semihosting_cmdline);
+
+/*
+ * Note that RISC-V targets use the same semihosting commands as
+ * ARM targets.
+ *
+ * The main reason is compatibility with existing tools. For example the
+ * Eclipse OpenOCD/SEGGER J-Link/QEMU plug-ins have several widgets to
+ * configure semihosting, which generate commands like `arm semihosting
+ * enable`.
+ * A secondary reason is that the protocol used is exactly the one
+ * specified by ARM. If RISC-V ever defines its own semihosting
+ * protocol, then a command like `riscv semihosting enable` will make
+ * sense, but for now all semihosting commands are prefixed with `arm`.
+ */
+/* Semihosting subcommands, registered under `arm` (see comment above).
+ * All fields use designated initializers; the original mixed a positional
+ * first field with designated ones, which is valid C99 but fragile if the
+ * struct layout ever changes. */
+static const struct command_registration arm_exec_command_handlers[] = {
+	{
+		.name = "semihosting",
+		.handler = handle_common_semihosting_command,
+		.mode = COMMAND_EXEC,
+		.usage = "['enable'|'disable']",
+		.help = "activate support for semihosting operations",
+	},
+	{
+		.name = "semihosting_cmdline",
+		.handler = handle_common_semihosting_cmdline,
+		.mode = COMMAND_EXEC,
+		.usage = "arguments",
+		.help = "command line arguments to be passed to program",
+	},
+	{
+		.name = "semihosting_fileio",
+		.handler = handle_common_semihosting_fileio_command,
+		.mode = COMMAND_EXEC,
+		.usage = "['enable'|'disable']",
+		.help = "activate support for semihosting fileio operations",
+	},
+	{
+		.name = "semihosting_resexit",
+		.handler = handle_common_semihosting_resumable_exit_command,
+		.mode = COMMAND_EXEC,
+		.usage = "['enable'|'disable']",
+		.help = "activate support for semihosting resumable exit",
+	},
+	COMMAND_REGISTRATION_DONE
+};
+
+/* Top-level command groups exported by this target: `riscv ...` plus an
+ * `arm ...` group for semihosting compatibility (see comment above the
+ * arm_exec_command_handlers table). */
+const struct command_registration riscv_command_handlers[] = {
+	{
+		.name = "riscv",
+		.mode = COMMAND_ANY,
+		.help = "RISC-V Command Group",
+		.usage = "",
+		.chain = riscv_exec_command_handlers
+	},
+	{
+		.name = "arm",
+		.mode = COMMAND_ANY,
+		.help = "ARM Command Group",
+		.usage = "",
+		.chain = arm_exec_command_handlers
+	},
+	COMMAND_REGISTRATION_DONE
+};
+
+/* Target-type vtable for `-target riscv`. Several entries dispatch through
+ * old_or_new_* wrappers that pick the 0.11 or 0.13 implementation at
+ * runtime. */
+struct target_type riscv_target = {
+	.name = "riscv",
+
+	.init_target = riscv_init_target,
+	.deinit_target = riscv_deinit_target,
+	.examine = riscv_examine,
+
+	/* poll current target status */
+	.poll = old_or_new_riscv_poll,
+
+	.halt = old_or_new_riscv_halt,
+	.resume = old_or_new_riscv_resume,
+	.step = old_or_new_riscv_step,
+
+	.assert_reset = riscv_assert_reset,
+	.deassert_reset = riscv_deassert_reset,
+
+	.read_memory = riscv_read_memory,
+	.write_memory = riscv_write_memory,
+
+	.checksum_memory = riscv_checksum_memory,
+
+	.get_gdb_reg_list = riscv_get_gdb_reg_list,
+
+	.add_breakpoint = riscv_add_breakpoint,
+	.remove_breakpoint = riscv_remove_breakpoint,
+
+	.add_watchpoint = riscv_add_watchpoint,
+	.remove_watchpoint = riscv_remove_watchpoint,
+
+	.arch_state = riscv_arch_state,
+
+	.run_algorithm = riscv_run_algorithm,
+
+	.commands = riscv_command_handlers
+};
+
+/*** RISC-V Interface ***/
+
+/* Initialize a riscv_info_t to its pre-examination defaults. */
+void riscv_info_init(struct target *target, riscv_info_t *r)
+{
+	/* Start from all-zero, then fill in the non-zero defaults. */
+	memset(r, 0, sizeof(*r));
+	r->dtm_version = 1;
+	r->registers_initialized = false;
+	r->current_hartid = target->coreid;
+
+	/* 0xff... marks every trigger slot as not claimed by this debugger. */
+	memset(r->trigger_unique_id, 0xff, sizeof(r->trigger_unique_id));
+
+	for (size_t hart = 0; hart < RISCV_MAX_HARTS; ++hart) {
+		/* XLEN is unknown until the hart has been examined. */
+		r->xlen[hart] = -1;
+
+		for (size_t reg = 0; reg < RISCV_MAX_REGISTERS; ++reg)
+			r->valid_saved_registers[hart][reg] = false;
+	}
+}
+
+/* Halt every enabled hart. Previously any per-hart failure was silently
+ * ignored; now the first failure is reflected in the return value (while
+ * still attempting to halt the remaining harts). */
+int riscv_halt_all_harts(struct target *target)
+{
+	int result = ERROR_OK;
+	for (int i = 0; i < riscv_count_harts(target); ++i) {
+		if (!riscv_hart_enabled(target, i))
+			continue;
+
+		if (riscv_halt_one_hart(target, i) != ERROR_OK)
+			result = ERROR_FAIL;
+	}
+
+	return result;
+}
+
+int riscv_halt_one_hart(struct target *target, int hartid)
+{
+ RISCV_INFO(r);
+ LOG_DEBUG("halting hart %d", hartid);
+ if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
+ return ERROR_FAIL;
+ if (riscv_is_halted(target)) {
+ LOG_DEBUG(" hart %d requested halt, but was already halted", hartid);
+ return ERROR_OK;
+ }
+
+ return r->halt_current_hart(target);
+}
+
+/* Resume every enabled hart, then invalidate the register cache. Previously
+ * any per-hart failure was silently ignored; now the first failure is
+ * reflected in the return value (while still attempting the rest). */
+int riscv_resume_all_harts(struct target *target)
+{
+	int result = ERROR_OK;
+	for (int i = 0; i < riscv_count_harts(target); ++i) {
+		if (!riscv_hart_enabled(target, i))
+			continue;
+
+		if (riscv_resume_one_hart(target, i) != ERROR_OK)
+			result = ERROR_FAIL;
+	}
+
+	riscv_invalidate_register_cache(target);
+	return result;
+}
+
+int riscv_resume_one_hart(struct target *target, int hartid)
+{
+ RISCV_INFO(r);
+ LOG_DEBUG("resuming hart %d", hartid);
+ if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
+ return ERROR_FAIL;
+ if (!riscv_is_halted(target)) {
+ LOG_DEBUG(" hart %d requested resume, but was already resumed", hartid);
+ return ERROR_OK;
+ }
+
+ r->on_resume(target);
+ return r->resume_current_hart(target);
+}
+
+/* Single-step the hart GDB cares about: the RTOS hart when an RTOS is
+ * configured (hart 0 if GDB asked for "any" thread), otherwise the current
+ * hart. The register cache is invalidated both before and after the step,
+ * since stepping clobbers cached values. */
+int riscv_step_rtos_hart(struct target *target)
+{
+	RISCV_INFO(r);
+	int hartid = r->current_hartid;
+	if (riscv_rtos_enabled(target)) {
+		hartid = r->rtos_hartid;
+		/* rtos_hartid == -1 means GDB selected the "any" thread. */
+		if (hartid == -1) {
+			LOG_USER("GDB has asked me to step \"any\" thread, so I'm stepping hart 0.");
+			hartid = 0;
+		}
+	}
+	if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
+		return ERROR_FAIL;
+	LOG_DEBUG("stepping hart %d", hartid);
+
+	if (!riscv_is_halted(target)) {
+		LOG_ERROR("Hart isn't halted before single step!");
+		return ERROR_FAIL;
+	}
+	riscv_invalidate_register_cache(target);
+	r->on_step(target);
+	if (r->step_current_hart(target) != ERROR_OK)
+		return ERROR_FAIL;
+	riscv_invalidate_register_cache(target);
+	r->on_halt(target);
+	/* A step must leave the hart halted again; anything else indicates a
+	 * debug-module problem. */
+	if (!riscv_is_halted(target)) {
+		LOG_ERROR("Hart was not halted after single step!");
+		return ERROR_FAIL;
+	}
+	return ERROR_OK;
+}
+
+/* Return true if the given hart's misa register advertises the single-letter
+ * extension `letter` (case-insensitive; 'a'..'z' map to misa bits 0..25). */
+bool riscv_supports_extension(struct target *target, int hartid, char letter)
+{
+	RISCV_INFO(r);
+	unsigned bit;
+	if (letter >= 'a' && letter <= 'z')
+		bit = letter - 'a';
+	else if (letter >= 'A' && letter <= 'Z')
+		bit = letter - 'A';
+	else
+		return false;
+	return (r->misa[hartid] >> bit) & 1;
+}
+
+/* XLEN (register width in bits) of the currently selected hart. */
+int riscv_xlen(const struct target *target)
+{
+	return riscv_xlen_of_hart(target, riscv_current_hartid(target));
+}
+
+/* XLEN of a specific hart. Asserts the hart has been examined (xlen is
+ * initialized to -1 in riscv_info_init until then). */
+int riscv_xlen_of_hart(const struct target *target, int hartid)
+{
+	RISCV_INFO(r);
+	assert(r->xlen[hartid] != -1);
+	return r->xlen[hartid];
+}
+
+/* RTOS mode (one OpenOCD target exposing every hart as a thread) is active
+ * exactly when an RTOS has been configured on the target. */
+bool riscv_rtos_enabled(const struct target *target)
+{
+	return target->rtos != NULL;
+}
+
+/* Make `hartid` the current hart, telling the debug module to select it.
+ * Re-initializes the register cache when the selection could have changed
+ * what the cache describes (different hart, different XLEN, or RTOS hart
+ * not yet pinned down). */
+int riscv_set_current_hartid(struct target *target, int hartid)
+{
+	RISCV_INFO(r);
+	/* 0.11 targets don't provide this hook; there's nothing to select. */
+	if (!r->select_current_hart)
+		return ERROR_OK;
+
+	int previous_hartid = riscv_current_hartid(target);
+	r->current_hartid = hartid;
+	assert(riscv_hart_enabled(target, hartid));
+	LOG_DEBUG("setting hartid to %d, was %d", hartid, previous_hartid);
+	if (r->select_current_hart(target) != ERROR_OK)
+		return ERROR_FAIL;
+
+	/* This might get called during init, in which case we shouldn't be
+	 * setting up the register cache. */
+	if (!target_was_examined(target))
+		return ERROR_OK;
+
+	/* Avoid invalidating the register cache all the time. */
+	if (r->registers_initialized
+			&& (!riscv_rtos_enabled(target) || (previous_hartid == hartid))
+			&& target->reg_cache->reg_list[GDB_REGNO_ZERO].size == (unsigned)riscv_xlen(target)
+			&& (!riscv_rtos_enabled(target) || (r->rtos_hartid != -1))) {
+		return ERROR_OK;
+	} else
+		LOG_DEBUG("Initializing registers: xlen=%d", riscv_xlen(target));
+
+	riscv_invalidate_register_cache(target);
+	return ERROR_OK;
+}
+
+/* Drop every cached register value. Also marks the cache as initialized,
+ * which riscv_set_current_hartid() uses to decide whether a re-init is
+ * needed. */
+void riscv_invalidate_register_cache(struct target *target)
+{
+	RISCV_INFO(r);
+
+	register_cache_invalidate(target->reg_cache);
+	for (size_t regno = 0; regno < GDB_REGNO_COUNT; ++regno)
+		target->reg_cache->reg_list[regno].valid = false;
+
+	r->registers_initialized = true;
+}
+
+/* Currently selected hart. */
+int riscv_current_hartid(const struct target *target)
+{
+	RISCV_INFO(r);
+	return r->current_hartid;
+}
+
+/* -1 means GDB selected the "all"/"any" thread rather than a specific hart. */
+void riscv_set_all_rtos_harts(struct target *target)
+{
+	RISCV_INFO(r);
+	r->rtos_hartid = -1;
+}
+
+/* Record which hart the RTOS layer currently considers the active thread. */
+void riscv_set_rtos_hartid(struct target *target, int hartid)
+{
+	LOG_DEBUG("setting RTOS hartid %d", hartid);
+	RISCV_INFO(r);
+	r->rtos_hartid = hartid;
+}
+
+/* Number of harts behind this target; defensively 1 when called before the
+ * target or its arch_info exists. */
+int riscv_count_harts(struct target *target)
+{
+	if (target == NULL)
+		return 1;
+	RISCV_INFO(r);
+	if (r == NULL)
+		return 1;
+	return r->hart_count;
+}
+
+/* Currently every register is assumed to exist on every hart; the
+ * parameters are kept for a future per-hart register map. */
+bool riscv_has_register(struct target *target, int hartid, int regid)
+{
+	return 1;
+}
+
+/**
+ * This function is called when the debug user wants to change the value of a
+ * register. The new value may be cached, and may not be written until the hart
+ * is resumed. */
+int riscv_set_register(struct target *target, enum gdb_regno r, riscv_reg_t v)
+{
+	return riscv_set_register_on_hart(target, riscv_current_hartid(target), r, v);
+}
+
+/* Write a register on a specific hart via the version-specific hook. */
+int riscv_set_register_on_hart(struct target *target, int hartid,
+		enum gdb_regno regid, uint64_t value)
+{
+	RISCV_INFO(r);
+	LOG_DEBUG("[%d] %s <- %" PRIx64, hartid, gdb_regno_name(regid), value);
+	assert(r->set_register);
+	return r->set_register(target, hartid, regid, value);
+}
+
+/* Read a register from the currently selected hart. */
+int riscv_get_register(struct target *target, riscv_reg_t *value,
+		enum gdb_regno r)
+{
+	return riscv_get_register_on_hart(target, value,
+			riscv_current_hartid(target), r);
+}
+
+/* Read a register from a specific hart via the version-specific hook.
+ * On failure *value is not logged: the hook may not have written it, so the
+ * old code's unconditional LOG_DEBUG could read an uninitialized variable. */
+int riscv_get_register_on_hart(struct target *target, riscv_reg_t *value,
+		int hartid, enum gdb_regno regid)
+{
+	RISCV_INFO(r);
+	int result = r->get_register(target, value, hartid, regid);
+	if (result == ERROR_OK)
+		LOG_DEBUG("[%d] %s: %" PRIx64, hartid, gdb_regno_name(regid), *value);
+	return result;
+}
+
+/* True if the currently selected hart is halted, per the debug module. */
+bool riscv_is_halted(struct target *target)
+{
+	RISCV_INFO(r);
+	assert(r->is_halted);
+	return r->is_halted(target);
+}
+
+/* Why did `hartid` halt? Selects the hart first; returns RISCV_HALT_ERROR /
+ * RISCV_HALT_UNKNOWN when the hart can't be selected or isn't halted. */
+enum riscv_halt_reason riscv_halt_reason(struct target *target, int hartid)
+{
+	RISCV_INFO(r);
+	if (riscv_set_current_hartid(target, hartid) != ERROR_OK)
+		return RISCV_HALT_ERROR;
+	if (!riscv_is_halted(target)) {
+		LOG_ERROR("Hart is not halted!");
+		return RISCV_HALT_UNKNOWN;
+	}
+	return r->halt_reason(target);
+}
+
+/* Size (in instructions) of the debug/program buffer on the current hart. */
+size_t riscv_debug_buffer_size(struct target *target)
+{
+	RISCV_INFO(r);
+	return r->debug_buffer_size[riscv_current_hartid(target)];
+}
+
+/* Write one instruction into the debug buffer at `index`. */
+int riscv_write_debug_buffer(struct target *target, int index, riscv_insn_t insn)
+{
+	RISCV_INFO(r);
+	r->write_debug_buffer(target, index, insn);
+	return ERROR_OK;
+}
+
+/* Read back one instruction from the debug buffer at `index`. */
+riscv_insn_t riscv_read_debug_buffer(struct target *target, int index)
+{
+	RISCV_INFO(r);
+	return r->read_debug_buffer(target, index);
+}
+
+/* Execute whatever has been staged in the debug buffer. */
+int riscv_execute_debug_buffer(struct target *target)
+{
+	RISCV_INFO(r);
+	return r->execute_debug_buffer(target);
+}
+
+/* The fill_dmi_* helpers build raw DMI scan fields into `buf` for batched
+ * JTAG operations: a write of `d` to address `a`, a read of `a`, or a nop. */
+void riscv_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d)
+{
+	RISCV_INFO(r);
+	r->fill_dmi_write_u64(target, buf, a, d);
+}
+
+void riscv_fill_dmi_read_u64(struct target *target, char *buf, int a)
+{
+	RISCV_INFO(r);
+	r->fill_dmi_read_u64(target, buf, a);
+}
+
+void riscv_fill_dmi_nop_u64(struct target *target, char *buf)
+{
+	RISCV_INFO(r);
+	r->fill_dmi_nop_u64(target, buf);
+}
+
+/* Width in bits of a DMI access for this target's debug module. */
+int riscv_dmi_write_u64_bits(struct target *target)
+{
+	RISCV_INFO(r);
+	return r->dmi_write_u64_bits(target);
+}
+
+/* Is `hartid` one this target instance controls? */
+bool riscv_hart_enabled(struct target *target, int hartid)
+{
+	/* FIXME: Add a hart mask to the RTOS. */
+	if (riscv_rtos_enabled(target))
+		return hartid < riscv_count_harts(target);
+
+	return hartid == target->coreid;
+}
+
+/**
+ * Count triggers, and initialize trigger_count for each hart.
+ * trigger_count is initialized even if this function fails to discover
+ * something.
+ * Disable any hardware triggers that have dmode set. We can't have set them
+ * ourselves. Maybe they're left over from some killed debug session.
+ * */
+int riscv_enumerate_triggers(struct target *target)
+{
+	RISCV_INFO(r);
+
+	/* Only enumerate once per session. */
+	if (r->triggers_enumerated)
+		return ERROR_OK;
+
+	r->triggers_enumerated = true;	/* At the very least we tried. */
+
+	for (int hartid = 0; hartid < riscv_count_harts(target); ++hartid) {
+		if (!riscv_hart_enabled(target, hartid))
+			continue;
+
+		/* Save tselect so it can be restored after probing. */
+		riscv_reg_t tselect;
+		int result = riscv_get_register_on_hart(target, &tselect, hartid,
+				GDB_REGNO_TSELECT);
+		if (result != ERROR_OK)
+			return result;
+
+		for (unsigned t = 0; t < RISCV_MAX_TRIGGERS; ++t) {
+			r->trigger_count[hartid] = t;
+
+			/* Probe trigger t: if tselect doesn't read back as t,
+			 * trigger t doesn't exist and we've found the count. */
+			riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, t);
+			uint64_t tselect_rb;
+			result = riscv_get_register_on_hart(target, &tselect_rb, hartid,
+					GDB_REGNO_TSELECT);
+			if (result != ERROR_OK)
+				return result;
+			/* Mask off the top bit, which is used as tdrmode in old
+			 * implementations. */
+			tselect_rb &= ~(1ULL << (riscv_xlen(target)-1));
+			if (tselect_rb != t)
+				break;
+			uint64_t tdata1;
+			result = riscv_get_register_on_hart(target, &tdata1, hartid,
+					GDB_REGNO_TDATA1);
+			if (result != ERROR_OK)
+				return result;
+
+			int type = get_field(tdata1, MCONTROL_TYPE(riscv_xlen(target)));
+			switch (type) {
+				case 1:
+					/* On these older cores we don't support software using
+					 * triggers. */
+					riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
+					break;
+				case 2:
+					/* dmode set by someone else: clear the stale trigger. */
+					if (tdata1 & MCONTROL_DMODE(riscv_xlen(target)))
+						riscv_set_register_on_hart(target, hartid, GDB_REGNO_TDATA1, 0);
+					break;
+			}
+		}
+
+		riscv_set_register_on_hart(target, hartid, GDB_REGNO_TSELECT, tselect);
+
+		LOG_INFO("[%d] Found %d triggers", hartid, r->trigger_count[hartid]);
+	}
+
+	return ERROR_OK;
+}
+
+/* Human-readable name for a GDB register number, for log messages.
+ * NOTE(review): returns a pointer to a static buffer for the generated
+ * names (x%d/csr%d/f%d), so it is not reentrant and a second call clobbers
+ * the previous result — fine for single-threaded logging. */
+const char *gdb_regno_name(enum gdb_regno regno)
+{
+	static char buf[32];
+
+	switch (regno) {
+		case GDB_REGNO_ZERO:
+			return "zero";
+		case GDB_REGNO_S0:
+			return "s0";
+		case GDB_REGNO_S1:
+			return "s1";
+		case GDB_REGNO_PC:
+			return "pc";
+		case GDB_REGNO_FPR0:
+			return "fpr0";
+		case GDB_REGNO_FPR31:
+			return "fpr31";
+		case GDB_REGNO_CSR0:
+			return "csr0";
+		case GDB_REGNO_TSELECT:
+			return "tselect";
+		case GDB_REGNO_TDATA1:
+			return "tdata1";
+		case GDB_REGNO_TDATA2:
+			return "tdata2";
+		case GDB_REGNO_MISA:
+			return "misa";
+		case GDB_REGNO_DPC:
+			return "dpc";
+		case GDB_REGNO_DCSR:
+			return "dcsr";
+		case GDB_REGNO_DSCRATCH:
+			return "dscratch";
+		case GDB_REGNO_MSTATUS:
+			return "mstatus";
+		case GDB_REGNO_PRIV:
+			return "priv";
+		default:
+			/* Fall back to a generated name for anything not special-cased
+			 * above. */
+			if (regno <= GDB_REGNO_XPR31)
+				sprintf(buf, "x%d", regno - GDB_REGNO_ZERO);
+			else if (regno >= GDB_REGNO_CSR0 && regno <= GDB_REGNO_CSR4095)
+				sprintf(buf, "csr%d", regno - GDB_REGNO_CSR0);
+			else if (regno >= GDB_REGNO_FPR0 && regno <= GDB_REGNO_FPR31)
+				sprintf(buf, "f%d", regno - GDB_REGNO_FPR0);
+			else
+				sprintf(buf, "gdb_regno_%d", regno);
+			return buf;
+	}
+}
+
+/* reg_arch_type get hook: fetch a register from the target and refresh the
+ * cached buffer on success. */
+static int register_get(struct reg *reg)
+{
+	struct target *target = (struct target *) reg->arch_info;
+	uint64_t value;
+	int result = riscv_get_register(target, &value, reg->number);
+	if (result == ERROR_OK)
+		buf_set_u64(reg->value, 0, reg->size, value);
+	return result;
+}
+
+/* reg_arch_type set hook: write a register and update the cache.
+ * The old code marked the cache entry valid and returned ERROR_OK even when
+ * riscv_set_register() failed, leaving a stale "valid" value in the cache;
+ * now the target write happens first and failures are propagated. */
+static int register_set(struct reg *reg, uint8_t *buf)
+{
+	struct target *target = (struct target *) reg->arch_info;
+
+	uint64_t value = buf_get_u64(buf, 0, reg->size);
+
+	LOG_DEBUG("write 0x%" PRIx64 " to %s", value, reg->name);
+	if (riscv_set_register(target, reg->number, value) != ERROR_OK)
+		return ERROR_FAIL;
+
+	struct reg *r = &target->reg_cache->reg_list[reg->number];
+	r->valid = true;
+	memcpy(r->value, buf, (r->size + 7) / 8);
+
+	return ERROR_OK;
+}
+
+/* get/set hooks installed on every entry of the register cache. */
+static struct reg_arch_type riscv_reg_arch_type = {
+	.get = register_get,
+	.set = register_set
+};
+
+/* CSR number/name pair, filled in from encoding.h's DECLARE_CSR list. */
+struct csr_info {
+	unsigned number;
+	const char *name;
+};
+
+/* qsort comparator ordering csr_info entries by CSR number (all numbers are
+ * < 4096, so the int subtraction cannot overflow). */
+static int cmp_csr_info(const void *p1, const void *p2)
+{
+	return (int) (((struct csr_info *)p1)->number) - (int) (((struct csr_info *)p2)->number);
+}
+
+/* (Re)build the OpenOCD register cache for this target: allocate one
+ * struct reg per GDB register number, name each register, and decide
+ * which registers exist based on XLEN and the extensions reported by the
+ * target (misa), plus any user-configured `riscv expose_csrs` ranges.
+ * Always returns ERROR_OK. */
+int riscv_init_registers(struct target *target)
+{
+	RISCV_INFO(info);
+
+	/* Throw away any previous cache (e.g. on re-examine). */
+	if (target->reg_cache) {
+		if (target->reg_cache->reg_list)
+			free(target->reg_cache->reg_list);
+		free(target->reg_cache);
+	}
+
+	/* NOTE(review): calloc results below are not checked; an allocation
+	 * failure would crash on the next dereference. */
+	target->reg_cache = calloc(1, sizeof(*target->reg_cache));
+	target->reg_cache->name = "RISC-V Registers";
+	target->reg_cache->num_regs = GDB_REGNO_COUNT;
+
+	target->reg_cache->reg_list = calloc(GDB_REGNO_COUNT, sizeof(struct reg));
+
+	/* Generated names are packed NUL-separated into one buffer that is
+	 * freed together with reg_list; 12 bytes covers the longest generated
+	 * name (e.g. "csr4095" plus terminator). */
+	const unsigned int max_reg_name_len = 12;
+	if (info->reg_names)
+		free(info->reg_names);
+	info->reg_names = calloc(1, GDB_REGNO_COUNT * max_reg_name_len);
+	char *reg_name = info->reg_names;
+
+	/* gdb target-description feature names; gdb groups registers by these. */
+	static struct reg_feature feature_cpu = {
+		.name = "org.gnu.gdb.riscv.cpu"
+	};
+	static struct reg_feature feature_fpu = {
+		.name = "org.gnu.gdb.riscv.fpu"
+	};
+	static struct reg_feature feature_csr = {
+		.name = "org.gnu.gdb.riscv.csr"
+	};
+	static struct reg_feature feature_virtual = {
+		.name = "org.gnu.gdb.riscv.virtual"
+	};
+
+	static struct reg_data_type type_ieee_single = {
+		.type = REG_TYPE_IEEE_SINGLE,
+		.id = "ieee_single"
+	};
+	static struct reg_data_type type_ieee_double = {
+		.type = REG_TYPE_IEEE_DOUBLE,
+		.id = "ieee_double"
+	};
+	/* Table of all spec-defined CSR names, generated from encoding.h. */
+	struct csr_info csr_info[] = {
+#define DECLARE_CSR(name, number) { number, #name },
+#include "encoding.h"
+#undef DECLARE_CSR
+	};
+	/* encoding.h does not contain the registers in sorted order. */
+	qsort(csr_info, DIM(csr_info), sizeof(*csr_info), cmp_csr_info);
+	unsigned csr_info_index = 0;
+
+	/* When gdb request register N, gdb_get_register_packet() assumes that this
+	 * is register at index N in reg_list. So if there are certain registers
+	 * that don't exist, we need to leave holes in the list (or renumber, but
+	 * it would be nice not to have yet another set of numbers to translate
+	 * between). */
+	for (uint32_t number = 0; number < GDB_REGNO_COUNT; number++) {
+		struct reg *r = &target->reg_cache->reg_list[number];
+		r->dirty = false;
+		r->valid = false;
+		r->exist = true;
+		r->type = &riscv_reg_arch_type;
+		r->arch_info = target;
+		r->number = number;
+		r->size = riscv_xlen(target);
+		/* r->size is set in riscv_invalidate_register_cache, maybe because the
+		 * target is in theory allowed to change XLEN on us. But I expect a lot
+		 * of other things to break in that case as well. */
+		if (number <= GDB_REGNO_XPR31) {
+			/* General-purpose registers x0..x31, named per the RISC-V ABI. */
+			r->caller_save = true;
+			switch (number) {
+				case GDB_REGNO_ZERO:
+					r->name = "zero";
+					break;
+				case GDB_REGNO_RA:
+					r->name = "ra";
+					break;
+				case GDB_REGNO_SP:
+					r->name = "sp";
+					break;
+				case GDB_REGNO_GP:
+					r->name = "gp";
+					break;
+				case GDB_REGNO_TP:
+					r->name = "tp";
+					break;
+				case GDB_REGNO_T0:
+					r->name = "t0";
+					break;
+				case GDB_REGNO_T1:
+					r->name = "t1";
+					break;
+				case GDB_REGNO_T2:
+					r->name = "t2";
+					break;
+				case GDB_REGNO_FP:
+					r->name = "fp";
+					break;
+				case GDB_REGNO_S1:
+					r->name = "s1";
+					break;
+				case GDB_REGNO_A0:
+					r->name = "a0";
+					break;
+				case GDB_REGNO_A1:
+					r->name = "a1";
+					break;
+				case GDB_REGNO_A2:
+					r->name = "a2";
+					break;
+				case GDB_REGNO_A3:
+					r->name = "a3";
+					break;
+				case GDB_REGNO_A4:
+					r->name = "a4";
+					break;
+				case GDB_REGNO_A5:
+					r->name = "a5";
+					break;
+				case GDB_REGNO_A6:
+					r->name = "a6";
+					break;
+				case GDB_REGNO_A7:
+					r->name = "a7";
+					break;
+				case GDB_REGNO_S2:
+					r->name = "s2";
+					break;
+				case GDB_REGNO_S3:
+					r->name = "s3";
+					break;
+				case GDB_REGNO_S4:
+					r->name = "s4";
+					break;
+				case GDB_REGNO_S5:
+					r->name = "s5";
+					break;
+				case GDB_REGNO_S6:
+					r->name = "s6";
+					break;
+				case GDB_REGNO_S7:
+					r->name = "s7";
+					break;
+				case GDB_REGNO_S8:
+					r->name = "s8";
+					break;
+				case GDB_REGNO_S9:
+					r->name = "s9";
+					break;
+				case GDB_REGNO_S10:
+					r->name = "s10";
+					break;
+				case GDB_REGNO_S11:
+					r->name = "s11";
+					break;
+				case GDB_REGNO_T3:
+					r->name = "t3";
+					break;
+				case GDB_REGNO_T4:
+					r->name = "t4";
+					break;
+				case GDB_REGNO_T5:
+					r->name = "t5";
+					break;
+				case GDB_REGNO_T6:
+					r->name = "t6";
+					break;
+			}
+			r->group = "general";
+			r->feature = &feature_cpu;
+		} else if (number == GDB_REGNO_PC) {
+			r->caller_save = true;
+			sprintf(reg_name, "pc");
+			r->group = "general";
+			r->feature = &feature_cpu;
+		} else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
+			/* FP registers exist only when the F or D extension is present;
+			 * their width follows the widest supported FP extension. */
+			r->caller_save = true;
+			if (riscv_supports_extension(target, riscv_current_hartid(target),
+						'D')) {
+				r->reg_data_type = &type_ieee_double;
+				r->size = 64;
+			} else if (riscv_supports_extension(target,
+						riscv_current_hartid(target), 'F')) {
+				r->reg_data_type = &type_ieee_single;
+				r->size = 32;
+			} else {
+				r->exist = false;
+			}
+			switch (number) {
+				case GDB_REGNO_FT0:
+					r->name = "ft0";
+					break;
+				case GDB_REGNO_FT1:
+					r->name = "ft1";
+					break;
+				case GDB_REGNO_FT2:
+					r->name = "ft2";
+					break;
+				case GDB_REGNO_FT3:
+					r->name = "ft3";
+					break;
+				case GDB_REGNO_FT4:
+					r->name = "ft4";
+					break;
+				case GDB_REGNO_FT5:
+					r->name = "ft5";
+					break;
+				case GDB_REGNO_FT6:
+					r->name = "ft6";
+					break;
+				case GDB_REGNO_FT7:
+					r->name = "ft7";
+					break;
+				case GDB_REGNO_FS0:
+					r->name = "fs0";
+					break;
+				case GDB_REGNO_FS1:
+					r->name = "fs1";
+					break;
+				case GDB_REGNO_FA0:
+					r->name = "fa0";
+					break;
+				case GDB_REGNO_FA1:
+					r->name = "fa1";
+					break;
+				case GDB_REGNO_FA2:
+					r->name = "fa2";
+					break;
+				case GDB_REGNO_FA3:
+					r->name = "fa3";
+					break;
+				case GDB_REGNO_FA4:
+					r->name = "fa4";
+					break;
+				case GDB_REGNO_FA5:
+					r->name = "fa5";
+					break;
+				case GDB_REGNO_FA6:
+					r->name = "fa6";
+					break;
+				case GDB_REGNO_FA7:
+					r->name = "fa7";
+					break;
+				case GDB_REGNO_FS2:
+					r->name = "fs2";
+					break;
+				case GDB_REGNO_FS3:
+					r->name = "fs3";
+					break;
+				case GDB_REGNO_FS4:
+					r->name = "fs4";
+					break;
+				case GDB_REGNO_FS5:
+					r->name = "fs5";
+					break;
+				case GDB_REGNO_FS6:
+					r->name = "fs6";
+					break;
+				case GDB_REGNO_FS7:
+					r->name = "fs7";
+					break;
+				case GDB_REGNO_FS8:
+					r->name = "fs8";
+					break;
+				case GDB_REGNO_FS9:
+					r->name = "fs9";
+					break;
+				case GDB_REGNO_FS10:
+					r->name = "fs10";
+					break;
+				case GDB_REGNO_FS11:
+					r->name = "fs11";
+					break;
+				case GDB_REGNO_FT8:
+					r->name = "ft8";
+					break;
+				case GDB_REGNO_FT9:
+					r->name = "ft9";
+					break;
+				case GDB_REGNO_FT10:
+					r->name = "ft10";
+					break;
+				case GDB_REGNO_FT11:
+					r->name = "ft11";
+					break;
+			}
+			r->group = "float";
+			r->feature = &feature_fpu;
+		} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
+			r->group = "csr";
+			r->feature = &feature_csr;
+			unsigned csr_number = number - GDB_REGNO_CSR0;
+
+			/* csr_info is sorted by number; advance to the candidate entry
+			 * for this csr_number (never walking past the last entry). */
+			while (csr_info[csr_info_index].number < csr_number &&
+					csr_info_index < DIM(csr_info) - 1) {
+				csr_info_index++;
+			}
+			if (csr_info[csr_info_index].number == csr_number) {
+				r->name = csr_info[csr_info_index].name;
+			} else {
+				sprintf(reg_name, "csr%d", csr_number);
+				/* Assume unnamed registers don't exist, unless we have some
+				 * configuration that tells us otherwise. That's important
+				 * because eg. Eclipse crashes if a target has too many
+				 * registers, and apparently has no way of only showing a
+				 * subset of registers in any case. */
+				r->exist = false;
+			}
+
+			/* Refine existence/grouping for CSRs that depend on extensions
+			 * or on XLEN. */
+			switch (csr_number) {
+				case CSR_FFLAGS:
+				case CSR_FRM:
+				case CSR_FCSR:
+					r->exist = riscv_supports_extension(target,
+							riscv_current_hartid(target), 'F');
+					r->group = "float";
+					r->feature = &feature_fpu;
+					break;
+				case CSR_SSTATUS:
+				case CSR_STVEC:
+				case CSR_SIP:
+				case CSR_SIE:
+				case CSR_SCOUNTEREN:
+				case CSR_SSCRATCH:
+				case CSR_SEPC:
+				case CSR_SCAUSE:
+				case CSR_STVAL:
+				case CSR_SATP:
+					r->exist = riscv_supports_extension(target,
+							riscv_current_hartid(target), 'S');
+					break;
+				case CSR_MEDELEG:
+				case CSR_MIDELEG:
+					/* "In systems with only M-mode, or with both M-mode and
+					 * U-mode but without U-mode trap support, the medeleg and
+					 * mideleg registers should not exist." */
+					r->exist = riscv_supports_extension(target, riscv_current_hartid(target), 'S') ||
+						riscv_supports_extension(target, riscv_current_hartid(target), 'N');
+					break;
+
+				/* The *H counter halves only exist on RV32. */
+				case CSR_CYCLEH:
+				case CSR_TIMEH:
+				case CSR_INSTRETH:
+				case CSR_HPMCOUNTER3H:
+				case CSR_HPMCOUNTER4H:
+				case CSR_HPMCOUNTER5H:
+				case CSR_HPMCOUNTER6H:
+				case CSR_HPMCOUNTER7H:
+				case CSR_HPMCOUNTER8H:
+				case CSR_HPMCOUNTER9H:
+				case CSR_HPMCOUNTER10H:
+				case CSR_HPMCOUNTER11H:
+				case CSR_HPMCOUNTER12H:
+				case CSR_HPMCOUNTER13H:
+				case CSR_HPMCOUNTER14H:
+				case CSR_HPMCOUNTER15H:
+				case CSR_HPMCOUNTER16H:
+				case CSR_HPMCOUNTER17H:
+				case CSR_HPMCOUNTER18H:
+				case CSR_HPMCOUNTER19H:
+				case CSR_HPMCOUNTER20H:
+				case CSR_HPMCOUNTER21H:
+				case CSR_HPMCOUNTER22H:
+				case CSR_HPMCOUNTER23H:
+				case CSR_HPMCOUNTER24H:
+				case CSR_HPMCOUNTER25H:
+				case CSR_HPMCOUNTER26H:
+				case CSR_HPMCOUNTER27H:
+				case CSR_HPMCOUNTER28H:
+				case CSR_HPMCOUNTER29H:
+				case CSR_HPMCOUNTER30H:
+				case CSR_HPMCOUNTER31H:
+				case CSR_MCYCLEH:
+				case CSR_MINSTRETH:
+				case CSR_MHPMCOUNTER3H:
+				case CSR_MHPMCOUNTER4H:
+				case CSR_MHPMCOUNTER5H:
+				case CSR_MHPMCOUNTER6H:
+				case CSR_MHPMCOUNTER7H:
+				case CSR_MHPMCOUNTER8H:
+				case CSR_MHPMCOUNTER9H:
+				case CSR_MHPMCOUNTER10H:
+				case CSR_MHPMCOUNTER11H:
+				case CSR_MHPMCOUNTER12H:
+				case CSR_MHPMCOUNTER13H:
+				case CSR_MHPMCOUNTER14H:
+				case CSR_MHPMCOUNTER15H:
+				case CSR_MHPMCOUNTER16H:
+				case CSR_MHPMCOUNTER17H:
+				case CSR_MHPMCOUNTER18H:
+				case CSR_MHPMCOUNTER19H:
+				case CSR_MHPMCOUNTER20H:
+				case CSR_MHPMCOUNTER21H:
+				case CSR_MHPMCOUNTER22H:
+				case CSR_MHPMCOUNTER23H:
+				case CSR_MHPMCOUNTER24H:
+				case CSR_MHPMCOUNTER25H:
+				case CSR_MHPMCOUNTER26H:
+				case CSR_MHPMCOUNTER27H:
+				case CSR_MHPMCOUNTER28H:
+				case CSR_MHPMCOUNTER29H:
+				case CSR_MHPMCOUNTER30H:
+				case CSR_MHPMCOUNTER31H:
+					r->exist = riscv_xlen(target) == 32;
+					break;
+			}
+
+			/* User-requested CSR ranges (riscv expose_csrs) override the
+			 * heuristics above. The loop stops at the first entry whose
+			 * low > high, which serves as the list terminator. */
+			if (!r->exist && expose_csr) {
+				for (unsigned i = 0; expose_csr[i].low <= expose_csr[i].high; i++) {
+					if (csr_number >= expose_csr[i].low && csr_number <= expose_csr[i].high) {
+						LOG_INFO("Exposing additional CSR %d", csr_number);
+						r->exist = true;
+						break;
+					}
+				}
+			}
+
+		} else if (number == GDB_REGNO_PRIV) {
+			/* Virtual register holding the current privilege level. */
+			sprintf(reg_name, "priv");
+			r->group = "general";
+			r->feature = &feature_virtual;
+			r->size = 8;
+		}
+		/* If a name was generated (sprintf'd) above, point at it and advance
+		 * past its terminator; string-literal names leave reg_name untouched
+		 * (the buffer is zeroed, so reg_name[0] is 0 in that case). */
+		if (reg_name[0])
+			r->name = reg_name;
+		reg_name += strlen(reg_name) + 1;
+		assert(reg_name < info->reg_names + GDB_REGNO_COUNT * max_reg_name_len);
+		/* Cache storage lives in the statically-sized riscv_info array. */
+		r->value = &info->reg_cache_values[number];
+	}
+
+	return ERROR_OK;
+}
--- /dev/null
+#ifndef RISCV_H
+#define RISCV_H
+
+struct riscv_program;
+
+#include <stdint.h>
+#include "opcodes.h"
+#include "gdb_regs.h"
+
+/* The register cache is statically allocated. */
+#define RISCV_MAX_HARTS 32
+#define RISCV_MAX_REGISTERS 5000
+#define RISCV_MAX_TRIGGERS 32
+#define RISCV_MAX_HWBPS 16
+
+#define DEFAULT_COMMAND_TIMEOUT_SEC 2
+#define DEFAULT_RESET_TIMEOUT_SEC 30
+
+/* Version-specific target implementations (0.11 and 0.13 debug specs). */
+extern struct target_type riscv011_target;
+extern struct target_type riscv013_target;
+
+/*
+ * Definitions shared by code supporting all RISC-V versions.
+ */
+typedef uint64_t riscv_reg_t;
+typedef uint32_t riscv_insn_t;
+typedef uint64_t riscv_addr_t;
+
+/* Why the current hart halted, as reported to OpenOCD/gdb. */
+enum riscv_halt_reason {
+	RISCV_HALT_INTERRUPT,
+	RISCV_HALT_BREAKPOINT,
+	RISCV_HALT_SINGLESTEP,
+	RISCV_HALT_TRIGGER,
+	RISCV_HALT_UNKNOWN,
+	RISCV_HALT_ERROR
+};
+
+/* Per-target state shared by both debug-spec implementations. */
+typedef struct {
+	unsigned dtm_version;
+
+	struct command_context *cmd_ctx;
+	/* Implementation-private data for riscv011/riscv013. */
+	void *version_specific;
+
+	/* The number of harts on this system. */
+	int hart_count;
+
+	/* The hart that the RTOS thinks is currently being debugged. */
+	int rtos_hartid;
+
+	/* The hart that is currently being debugged. Note that this is
+	 * different than the hartid that the RTOS is expected to use. This
+	 * one will change all the time, it's more of a global argument to
+	 * every function than actual state. */
+	int current_hartid;
+
+	/* Enough space to store all the registers we might need to save. */
+	/* FIXME: This should probably be a bunch of register caches. */
+	uint64_t saved_registers[RISCV_MAX_HARTS][RISCV_MAX_REGISTERS];
+	bool valid_saved_registers[RISCV_MAX_HARTS][RISCV_MAX_REGISTERS];
+
+	/* OpenOCD's register cache points into here. This is not per-hart because
+	 * we just invalidate the entire cache when we change which hart is
+	 * selected. */
+	uint64_t reg_cache_values[RISCV_MAX_REGISTERS];
+
+	/* Single buffer that contains all register names, instead of calling
+	 * malloc for each register. Needs to be freed when reg_list is freed. */
+	char *reg_names;
+
+	/* It's possible that each core has a different supported ISA set. */
+	int xlen[RISCV_MAX_HARTS];
+	riscv_reg_t misa[RISCV_MAX_HARTS];
+
+	/* The number of triggers per hart. */
+	unsigned trigger_count[RISCV_MAX_HARTS];
+
+	/* For each physical trigger, contains -1 if the hwbp is available, or the
+	 * unique_id of the breakpoint/watchpoint that is using it.
+	 * Note that in RTOS mode the triggers are the same across all harts the
+	 * target controls, while otherwise only a single hart is controlled. */
+	int trigger_unique_id[RISCV_MAX_HWBPS];
+
+	/* The number of entries in the debug buffer. */
+	int debug_buffer_size[RISCV_MAX_HARTS];
+
+	/* This avoids invalidating the register cache too often. */
+	bool registers_initialized;
+
+	/* This hart contains an implicit ebreak at the end of the program buffer. */
+	bool impebreak;
+
+	bool triggers_enumerated;
+
+	/* Helper functions that target the various RISC-V debug spec
+	 * implementations. */
+	int (*get_register)(struct target *target,
+		riscv_reg_t *value, int hid, int rid);
+	int (*set_register)(struct target *, int hartid, int regid,
+			uint64_t value);
+	int (*select_current_hart)(struct target *);
+	bool (*is_halted)(struct target *target);
+	int (*halt_current_hart)(struct target *);
+	int (*resume_current_hart)(struct target *target);
+	int (*step_current_hart)(struct target *target);
+	int (*on_halt)(struct target *target);
+	int (*on_resume)(struct target *target);
+	int (*on_step)(struct target *target);
+	enum riscv_halt_reason (*halt_reason)(struct target *target);
+	int (*write_debug_buffer)(struct target *target, unsigned index,
+			riscv_insn_t d);
+	riscv_insn_t (*read_debug_buffer)(struct target *target, unsigned index);
+	int (*execute_debug_buffer)(struct target *target);
+	int (*dmi_write_u64_bits)(struct target *target);
+	void (*fill_dmi_write_u64)(struct target *target, char *buf, int a, uint64_t d);
+	void (*fill_dmi_read_u64)(struct target *target, char *buf, int a);
+	void (*fill_dmi_nop_u64)(struct target *target, char *buf);
+
+	int (*authdata_read)(struct target *target, uint32_t *value);
+	int (*authdata_write)(struct target *target, uint32_t value);
+
+	int (*dmi_read)(struct target *target, uint32_t *value, uint32_t address);
+	int (*dmi_write)(struct target *target, uint32_t address, uint32_t value);
+} riscv_info_t;
+
+/* Wall-clock timeout for a command/access. Settable via RISC-V Target commands.*/
+extern int riscv_command_timeout_sec;
+
+/* Wall-clock timeout after reset. Settable via RISC-V Target commands.*/
+extern int riscv_reset_timeout_sec;
+
+extern bool riscv_prefer_sba;
+
+/* Everything needs the RISC-V specific info structure, so here's a nice macro
+ * that provides that. Note: the macro expansion references a variable named
+ * `target`, which must be in scope at the point of use. */
+static inline riscv_info_t *riscv_info(const struct target *target) __attribute__((unused));
+static inline riscv_info_t *riscv_info(const struct target *target)
+{ return target->arch_info; }
+#define RISCV_INFO(R) riscv_info_t *R = riscv_info(target);
+
+extern uint8_t ir_dtmcontrol[1];
+extern struct scan_field select_dtmcontrol;
+extern uint8_t ir_dbus[1];
+extern struct scan_field select_dbus;
+extern uint8_t ir_idcode[1];
+extern struct scan_field select_idcode;
+
+/*** OpenOCD Interface */
+int riscv_openocd_poll(struct target *target);
+
+int riscv_openocd_halt(struct target *target);
+
+int riscv_openocd_resume(
+	struct target *target,
+	int current,
+	target_addr_t address,
+	int handle_breakpoints,
+	int debug_execution
+);
+
+int riscv_openocd_step(
+	struct target *target,
+	int current,
+	target_addr_t address,
+	int handle_breakpoints
+);
+
+int riscv_openocd_assert_reset(struct target *target);
+int riscv_openocd_deassert_reset(struct target *target);
+
+/*** RISC-V Interface ***/
+
+/* Initializes the shared RISC-V structure. */
+void riscv_info_init(struct target *target, riscv_info_t *r);
+
+/* Run control, possibly for multiple harts. The _all_harts versions resume
+ * all the enabled harts, which when running in RTOS mode is all the harts on
+ * the system. */
+int riscv_halt_all_harts(struct target *target);
+int riscv_halt_one_hart(struct target *target, int hartid);
+int riscv_resume_all_harts(struct target *target);
+int riscv_resume_one_hart(struct target *target, int hartid);
+
+/* Steps the hart that's currently selected in the RTOS, or if there is no RTOS
+ * then the only hart. */
+int riscv_step_rtos_hart(struct target *target);
+
+bool riscv_supports_extension(struct target *target, int hartid, char letter);
+
+/* Returns XLEN for the given (or current) hart. */
+int riscv_xlen(const struct target *target);
+int riscv_xlen_of_hart(const struct target *target, int hartid);
+
+bool riscv_rtos_enabled(const struct target *target);
+
+/* Sets the current hart, which is the hart that will actually be used when
+ * issuing debug commands. */
+int riscv_set_current_hartid(struct target *target, int hartid);
+int riscv_current_hartid(const struct target *target);
+
+/*** Support functions for the RISC-V 'RTOS', which provides multihart support
+ * without requiring multiple targets. */
+
+/* When using the RTOS to debug, this selects the hart that is currently being
+ * debugged. This doesn't propagate to the hardware. */
+void riscv_set_all_rtos_harts(struct target *target);
+void riscv_set_rtos_hartid(struct target *target, int hartid);
+
+/* Lists the number of harts in the system, which are assumed to be
+ * consecutive and start with mhartid=0. */
+int riscv_count_harts(struct target *target);
+
+/* Returns TRUE if the target has the given register on the given hart. */
+bool riscv_has_register(struct target *target, int hartid, int regid);
+
+/* Set or get the value of the given register on the given hart. 32-bit
+ * registers are zero extended to 64 bits. */
+int riscv_set_register(struct target *target, enum gdb_regno i, riscv_reg_t v);
+int riscv_set_register_on_hart(struct target *target, int hid, enum gdb_regno rid, uint64_t v);
+int riscv_get_register(struct target *target, riscv_reg_t *value,
+		enum gdb_regno r);
+int riscv_get_register_on_hart(struct target *target, riscv_reg_t *value,
+		int hartid, enum gdb_regno regid);
+
+/* Checks the state of the current hart -- "is_halted" checks the actual
+ * on-device register. */
+bool riscv_is_halted(struct target *target);
+enum riscv_halt_reason riscv_halt_reason(struct target *target, int hartid);
+
+/* These helper functions let the generic program interface get target-specific
+ * information. */
+size_t riscv_debug_buffer_size(struct target *target);
+
+riscv_insn_t riscv_read_debug_buffer(struct target *target, int index);
+int riscv_write_debug_buffer(struct target *target, int index, riscv_insn_t insn);
+int riscv_execute_debug_buffer(struct target *target);
+
+void riscv_fill_dmi_nop_u64(struct target *target, char *buf);
+void riscv_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d);
+void riscv_fill_dmi_read_u64(struct target *target, char *buf, int a);
+int riscv_dmi_write_u64_bits(struct target *target);
+
+/* Invalidates the register cache. */
+void riscv_invalidate_register_cache(struct target *target);
+
+/* Returns TRUE when a hart is enabled in this target. */
+bool riscv_hart_enabled(struct target *target, int hartid);
+
+int riscv_enumerate_triggers(struct target *target);
+
+int riscv_add_breakpoint(struct target *target, struct breakpoint *breakpoint);
+int riscv_remove_breakpoint(struct target *target,
+		struct breakpoint *breakpoint);
+int riscv_add_watchpoint(struct target *target, struct watchpoint *watchpoint);
+int riscv_remove_watchpoint(struct target *target,
+		struct watchpoint *watchpoint);
+
+int riscv_init_registers(struct target *target);
+
+void riscv_semihosting_init(struct target *target);
+int riscv_semihosting(struct target *target, int *retval);
+
+#endif
--- /dev/null
+/***************************************************************************
+ * Copyright (C) 2018 by Liviu Ionescu *
+ * ilg@livius.net *
+ * *
+ * Copyright (C) 2009 by Marvell Technology Group Ltd. *
+ * Written by Nicolas Pitre <nico@marvell.com> *
+ * *
+ * Copyright (C) 2010 by Spencer Oliver *
+ * spen@spen-soft.co.uk *
+ * *
+ * Copyright (C) 2016 by Square, Inc. *
+ * Steven Stallion <stallion@squareup.com> *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program. If not, see <http://www.gnu.org/licenses/>. *
+ ***************************************************************************/
+
+/**
+ * @file
+ * Hold RISC-V semihosting support.
+ *
+ * The RISC-V code is inspired from ARM semihosting.
+ *
+ * Details can be found in chapter 8 of DUI0203I_rvct_developer_guide.pdf
+ * from ARM Ltd.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "log.h"
+
+#include "target/target.h"
+#include "target/semihosting_common.h"
+#include "riscv.h"
+
+static int riscv_semihosting_setup(struct target *target, int enable);
+static int riscv_semihosting_post_result(struct target *target);
+
+/**
+ * Initialize RISC-V semihosting. Use common ARM code.
+ *
+ * Registers the RISC-V setup and post-result callbacks with the shared
+ * semihosting implementation.
+ */
+void riscv_semihosting_init(struct target *target)
+{
+	semihosting_common_init(target, riscv_semihosting_setup,
+		riscv_semihosting_post_result);
+}
+
+/**
+ * Check for and process a semihosting request using the ARM protocol. This
+ * is meant to be called when the target is stopped due to a debug mode entry.
+ * If the value 0 is returned then there was nothing to process. A non-zero
+ * return value signifies that a request was processed and the target resumed,
+ * or an error was encountered, in which case the caller must return
+ * immediately.
+ *
+ * @param target Pointer to the target to process.
+ * @param retval Pointer to a location where the return code will be stored
+ * @return non-zero value if a request was processed or an error encountered
+ */
+int riscv_semihosting(struct target *target, int *retval)
+{
+	struct semihosting *semihosting = target->semihosting;
+	if (!semihosting)
+		return 0;
+
+	if (!semihosting->is_active)
+		return 0;
+
+	riscv_reg_t dpc;
+	int result = riscv_get_register(target, &dpc, GDB_REGNO_DPC);
+	if (result != ERROR_OK)
+		return 0;
+
+	uint8_t tmp[12];
+
+	/* Read the current instruction, including the bracketing.
+	 * NOTE(review): this reads 6 halfwords starting at dpc - 4, so it
+	 * assumes dpc >= 4 and that the bytes around the ebreak are readable
+	 * memory -- TODO confirm for targets that trap near address 0. */
+	*retval = target_read_memory(target, dpc - 4, 2, 6, tmp);
+	if (*retval != ERROR_OK)
+		return 0;
+
+	/*
+	 * The instructions that trigger a semihosting call,
+	 * always uncompressed, should look like:
+	 *
+	 * 01f01013 slli zero,zero,0x1f
+	 * 00100073 ebreak
+	 * 40705013 srai zero,zero,0x7
+	 */
+	uint32_t pre = target_buffer_get_u32(target, tmp);
+	uint32_t ebreak = target_buffer_get_u32(target, tmp + 4);
+	uint32_t post = target_buffer_get_u32(target, tmp + 8);
+	LOG_DEBUG("check %08x %08x %08x from 0x%" PRIx64 "-4", pre, ebreak, post, dpc);
+
+	if (pre != 0x01f01013 || ebreak != 0x00100073 || post != 0x40705013) {
+
+		/* Not the magic sequence defining semihosting. */
+		return 0;
+	}
+
+	/*
+	 * Perform semihosting call if we are not waiting on a fileio
+	 * operation to complete.
+	 */
+	if (!semihosting->hit_fileio) {
+
+		/* RISC-V uses A0 and A1 to pass function arguments */
+		riscv_reg_t r0;
+		riscv_reg_t r1;
+
+		result = riscv_get_register(target, &r0, GDB_REGNO_A0);
+		if (result != ERROR_OK)
+			return 0;
+
+		result = riscv_get_register(target, &r1, GDB_REGNO_A1);
+		if (result != ERROR_OK)
+			return 0;
+
+		/* a0 = operation number, a1 = parameter block address. */
+		semihosting->op = r0;
+		semihosting->param = r1;
+		semihosting->word_size_bytes = riscv_xlen(target) / 8;
+
+		/* Check for ARM operation numbers. */
+		if (0 <= semihosting->op && semihosting->op <= 0x31) {
+			*retval = semihosting_common(target);
+			if (*retval != ERROR_OK) {
+				LOG_ERROR("Failed semihosting operation");
+				return 0;
+			}
+		} else {
+			/* Unknown operation number, not a semihosting call. */
+			return 0;
+		}
+	}
+
+	/*
+	 * Resume target if we are not waiting on a fileio
+	 * operation to complete.
+	 */
+	if (semihosting->is_resumable && !semihosting->hit_fileio) {
+		/* Resume right after the EBREAK 4 bytes instruction. */
+		*retval = target_resume(target, 0, dpc+4, 0, 0);
+		if (*retval != ERROR_OK) {
+			LOG_ERROR("Failed to resume target");
+			return 0;
+		}
+
+		return 1;
+	}
+
+	return 0;
+}
+
+/* -------------------------------------------------------------------------
+ * Local functions. */
+
+/**
+ * Called via semihosting->setup() later, after the target is known,
+ * usually on the first semihosting command.
+ *
+ * Only records the setup time; no target-specific configuration is needed
+ * for RISC-V. Always returns ERROR_OK.
+ */
+static int riscv_semihosting_setup(struct target *target, int enable)
+{
+	LOG_DEBUG("enable=%d", enable);
+
+	struct semihosting *semihosting = target->semihosting;
+	if (semihosting)
+		semihosting->setup_time = clock();
+
+	return ERROR_OK;
+}
+
+/* semihosting->post_result() callback: deliver the operation result back to
+ * the program in A0, per the RISC-V semihosting calling convention. */
+static int riscv_semihosting_post_result(struct target *target)
+{
+	struct semihosting *semihosting = target->semihosting;
+	if (!semihosting) {
+		/* If not enabled, silently ignored. */
+		return 0;
+	}
+
+	LOG_DEBUG("0x%" PRIx64, semihosting->result);
+	/* NOTE(review): the return value of riscv_set_register is ignored, so a
+	 * failed write is not reported to the caller. */
+	riscv_set_register(target, GDB_REGNO_A0, semihosting->result);
+	return 0;
+}
extern struct target_type quark_x10xx_target;
extern struct target_type quark_d20xx_target;
extern struct target_type stm8_target;
+extern struct target_type riscv_target;
static struct target_type *target_types[] = {
&arm7tdmi_target,
&quark_x10xx_target,
&quark_d20xx_target,
&stm8_target,
+ &riscv_target,
#if BUILD_TARGET64
&aarch64_target,
#endif
--- /dev/null
+#
+# Be sure you include the speed and interface before this file
+# Example:
+# -c "adapter_khz 5000" -f "interface/ftdi/olimex-arm-usb-tiny-h.cfg" -f "board/sifive-e31arty.cfg"
+
+set _CHIPNAME riscv
+jtag newtap $_CHIPNAME cpu -irlen 5 -expected-id 0x20000001
+
+set _TARGETNAME $_CHIPNAME.cpu
+
+# Note: the target name carries a ".0" suffix, but -chain-position refers
+# to the tap declared above ($_CHIPNAME.cpu).
+target create $_TARGETNAME.0 riscv -chain-position $_TARGETNAME
+$_TARGETNAME.0 configure -work-area-phys 0x80000000 -work-area-size 10000 -work-area-backup 1
+
+# SPI flash controller (fespi driver) mapped at 0x40000000; controller
+# registers at 0x20004000.
+flash bank spi0 fespi 0x40000000 0 0 0 $_TARGETNAME.0 0x20004000
+init
+# Optionally pulse nSRST if the user set pulse_srst before sourcing this file.
+if {[ info exists pulse_srst]} {
+	ftdi_set_signal nSRST 0
+	ftdi_set_signal nSRST z
+}
+halt
+# Unprotect sectors 64 through the last one on flash bank 0.
+flash protect 0 64 last off
+echo "Ready for Remote Connections"
--- /dev/null
+#
+# Be sure you include the speed and interface before this file
+# Example:
+# -c "adapter_khz 5000" -f "interface/ftdi/olimex-arm-usb-tiny-h.cfg" -f "board/sifive-e51arty.cfg"
+
+set _CHIPNAME riscv
+jtag newtap $_CHIPNAME cpu -irlen 5 -expected-id 0x20000001
+
+set _TARGETNAME $_CHIPNAME.cpu
+
+# Note: the target name carries a ".0" suffix, but -chain-position refers
+# to the tap declared above ($_CHIPNAME.cpu).
+target create $_TARGETNAME.0 riscv -chain-position $_TARGETNAME
+$_TARGETNAME.0 configure -work-area-phys 0x80000000 -work-area-size 10000 -work-area-backup 1
+
+# SPI flash controller (fespi driver) mapped at 0x40000000; controller
+# registers at 0x20004000.
+flash bank spi0 fespi 0x40000000 0 0 0 $_TARGETNAME.0 0x20004000
+init
+# Optionally pulse nSRST if the user set pulse_srst before sourcing this file.
+if {[ info exists pulse_srst]} {
+	ftdi_set_signal nSRST 0
+	ftdi_set_signal nSRST z
+}
+halt
+# Unprotect sectors 64 through the last one on flash bank 0.
+flash protect 0 64 last off
+echo "Ready for Remote Connections"
--- /dev/null
+adapter_khz 10000
+
+# FTDI-based debug adapter ("Dual RS232-HS").
+interface ftdi
+ftdi_device_desc "Dual RS232-HS"
+ftdi_vid_pid 0x0403 0x6010
+
+ftdi_layout_init 0x0008 0x001b
+ftdi_layout_signal nSRST -oe 0x0020 -data 0x0020
+
+#Reset Stretcher logic on FE310 is ~1 second long
+#This doesn't apply if you use
+# ftdi_set_signal, but still good to document
+#adapter_nsrst_delay 1500
+
+set _CHIPNAME riscv
+jtag newtap $_CHIPNAME cpu -irlen 5 -expected-id 0x10e31913
+
+set _TARGETNAME $_CHIPNAME.cpu
+target create $_TARGETNAME riscv -chain-position $_TARGETNAME
+$_TARGETNAME configure -work-area-phys 0x80000000 -work-area-size 10000 -work-area-backup 1
+
+# On-board SPI flash via the fespi driver, mapped at 0x20000000.
+flash bank onboard_spi_flash fespi 0x20000000 0 0 0 $_TARGETNAME
+init
+#reset -- This type of reset is not implemented yet
+if {[ info exists pulse_srst]} {
+	ftdi_set_signal nSRST 0
+	ftdi_set_signal nSRST z
+	#Wait for the reset stretcher
+	#It will work without this, but
+	#will incur lots of delays for later commands.
+	sleep 1500
+}
+halt
+# Unprotect sectors 64 through the last one on flash bank 0.
+flash protect 0 64 last off