1 /***************************************************************************
2 * Copyright (C) 2013 Andes Technology *
3 * Hsiangkai Wang <hkwang@andestech.com> *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
19 ***************************************************************************/
25 #include <helper/log.h>
26 #include <helper/binarybuffer.h>
28 #include "nds32_aice.h"
29 #include "nds32_tlb.h"
30 #include "nds32_disassembler.h"
/* BREAK instruction opcodes planted for software breakpoints; the values in
 * the trailing comments are the byte-swapped encodings of the same opcode. */
32 const int NDS32_BREAK_16 = 0x00EA; /* 0xEA00 */
33 const int NDS32_BREAK_32 = 0x0A000064; /* 0x6400000A */
/* Table of user-requested EDM register operations and the number recorded.
 * NOTE(review): presumably filled by a configuration command and replayed
 * against the EDM — confirm at the call sites, which are not in this view. */
35 struct nds32_edm_operation nds32_edm_ops[NDS32_EDM_OPERATION_MAX_NUM];
36 uint32_t nds32_edm_ops_num;
/* Human-readable names for the NDS32 debug/stop reasons, indexed by the
 * hardware-reported debug-type code and used for log output.
 * NOTE(review): declared with 11 entries — confirm the initializer list is
 * complete and matches the debug-type enumeration order. */
38 const char *nds32_debug_type_name[11] = {
41 "HARDWARE BREAKPOINT",
42 "DATA ADDR WATCHPOINT PRECISE",
43 "DATA VALUE WATCHPOINT PRECISE",
44 "DATA VALUE WATCHPOINT IMPRECISE",
46 "HARDWARE SINGLE STEP",
47 "DATA ADDR WATCHPOINT NEXT PRECISE",
48 "DATA VALUE WATCHPOINT NEXT PRECISE",
49 "LOAD STORE GLOBAL STOP",
52 static const int NDS32_LM_SIZE_TABLE[16] = {
66 static const int NDS32_LINE_SIZE_TABLE[6] = {
/* reg->type->get handler for 32-bit registers.
 * Requires a halted target.  Registers that are disabled, or belong to an
 * FPU/audio extension that is switched off, are not read from hardware;
 * they are given a fake value instead (see the FUCPR note above
 * nds32_check_extension).  Otherwise the value is fetched through the AICE
 * debug port and stored back into the register cache. */
75 static int nds32_get_core_reg(struct reg *reg)
78 struct nds32_reg *reg_arch_info = reg->arch_info;
79 struct target *target = reg_arch_info->target;
80 struct nds32 *nds32 = target_to_nds32(target);
81 struct aice_port_s *aice = target_to_aice(target);
83 if (target->state != TARGET_HALTED) {
84 LOG_ERROR("Target not halted");
85 return ERROR_TARGET_NOT_HALTED;
/* Cached-value path: report the value already held in the cache. */
89 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
90 LOG_DEBUG("reading register(cached) %" PRIi32 "(%s), value: 0x%8.8" PRIx32,
91 reg_arch_info->num, reg->name, val);
/* Translate the cache index to the target's register numbering. */
95 int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);
97 if (reg_arch_info->enable == false) {
98 buf_set_u32(reg_arch_info->value, 0, 32, NDS32_REGISTER_DISABLE);
102 if ((nds32->fpu_enable == false)
103 && (NDS32_REG_TYPE_FPU == nds32_reg_type(mapped_regnum))) {
105 } else if ((nds32->audio_enable == false)
106 && (NDS32_REG_TYPE_AUMR == nds32_reg_type(mapped_regnum))) {
/* Normal path: read through the AICE port and refresh the cache. */
109 retval = aice_read_register(aice, mapped_regnum, &val);
111 buf_set_u32(reg_arch_info->value, 0, 32, val);
113 LOG_DEBUG("reading register %" PRIi32 "(%s), value: 0x%8.8" PRIx32,
114 reg_arch_info->num, reg->name, val);
117 if (retval == ERROR_OK) {
/* reg->type->get handler for 64-bit registers (the FD0..FD31 FPU pairs).
 * Requires a halted target.  Disabled registers, or FD registers while the
 * FPU extension is off, receive a fake value; otherwise the value is read
 * through the AICE port with the 64-bit access primitive. */
125 static int nds32_get_core_reg_64(struct reg *reg)
128 struct nds32_reg *reg_arch_info = reg->arch_info;
129 struct target *target = reg_arch_info->target;
130 struct nds32 *nds32 = target_to_nds32(target);
131 struct aice_port_s *aice = target_to_aice(target);
133 if (target->state != TARGET_HALTED) {
134 LOG_ERROR("Target not halted");
135 return ERROR_TARGET_NOT_HALTED;
141 if (reg_arch_info->enable == false) {
142 buf_set_u64(reg_arch_info->value, 0, 64, NDS32_REGISTER_DISABLE);
146 if ((nds32->fpu_enable == false)
147 && ((FD0 <= reg_arch_info->num) && (reg_arch_info->num <= FD31))) {
150 retval = aice_read_reg_64(aice, reg_arch_info->num, &val);
152 buf_set_u64(reg_arch_info->value, 0, 64, val);
155 if (retval == ERROR_OK) {
/* Refresh cached state derived from $ir0 (PSW): data-memory endianness
 * (bit 5) and MMU address-translation status (bit 7).  The AICE port is
 * told the endianness so subsequent memory accesses are byte-ordered
 * correctly. */
163 static int nds32_update_psw(struct nds32 *nds32)
166 struct aice_port_s *aice = target_to_aice(nds32->target);
168 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
170 /* Save data memory endian */
171 if ((value_ir0 >> 5) & 0x1) {
172 nds32->data_endian = TARGET_BIG_ENDIAN;
173 aice_set_data_endian(aice, AICE_BIG_ENDIAN);
175 nds32->data_endian = TARGET_LITTLE_ENDIAN;
176 aice_set_data_endian(aice, AICE_LITTLE_ENDIAN);
179 /* Save translation status */
180 nds32->memory.address_translation = ((value_ir0 >> 7) & 0x1) ? true : false;
/* Re-read $mr0 (MMU_CTL) and cache the MMU control bits:
 * bit 0 = default minimum page size, bit 10 = multiple page sizes in use. */
185 static int nds32_update_mmu_info(struct nds32 *nds32)
189 /* Update MMU control status */
190 nds32_get_mapped_reg(nds32, MR0, &value);
191 nds32->mmu_config.default_min_page_size = value & 0x1;
192 nds32->mmu_config.multiple_page_size_in_use = (value >> 10) & 0x1;
/* Re-read $mr8 (CACHE_CTL) to learn whether the I-cache and D-cache are
 * currently enabled.  If the register cannot be read, both caches are
 * conservatively marked disabled. */
197 static int nds32_update_cache_info(struct nds32 *nds32)
201 if (ERROR_OK == nds32_get_mapped_reg(nds32, MR8, &value)) {
203 nds32->memory.icache.enable = true;
205 nds32->memory.icache.enable = false;
208 nds32->memory.dcache.enable = true;
210 nds32->memory.dcache.enable = false;
212 nds32->memory.icache.enable = false;
213 nds32->memory.dcache.enable = false;
/* Refresh the cached ILM/DLM (local memory) windows from $mr6 and $mr7.
 * The base-address mask depends on the local-memory alignment version:
 * version 0 aligns to 1 MB, version 1 aligns to the local memory size
 * (1 KB granularity mask).  Unknown versions leave the window invalid
 * (start/end = -1). */
219 static int nds32_update_lm_info(struct nds32 *nds32)
221 struct nds32_memory *memory = &(nds32->memory);
225 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
227 memory->ilm_enable = true;
229 memory->ilm_enable = false;
231 if (memory->ilm_align_ver == 0) { /* 1MB aligned */
232 memory->ilm_start = value_mr6 & 0xFFF00000;
233 memory->ilm_end = memory->ilm_start + memory->ilm_size;
234 } else if (memory->ilm_align_ver == 1) { /* aligned to local memory size */
235 memory->ilm_start = value_mr6 & 0xFFFFFC00;
236 memory->ilm_end = memory->ilm_start + memory->ilm_size;
238 memory->ilm_start = -1;
239 memory->ilm_end = -1;
/* Same decoding for the data local memory window ($mr7). */
242 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
244 memory->dlm_enable = true;
246 memory->dlm_enable = false;
248 if (memory->dlm_align_ver == 0) { /* 1MB aligned */
249 memory->dlm_start = value_mr7 & 0xFFF00000;
250 memory->dlm_end = memory->dlm_start + memory->dlm_size;
251 } else if (memory->dlm_align_ver == 1) { /* aligned to local memory size */
252 memory->dlm_start = value_mr7 & 0xFFFFFC00;
253 memory->dlm_end = memory->dlm_start + memory->dlm_size;
255 memory->dlm_start = -1;
256 memory->dlm_end = -1;
263 * If fpu/audio is disabled, to access fpu/audio registers will cause
264 * exceptions. So, we need to check if fpu/audio is enabled or not as
265 * target is halted. If fpu/audio is disabled, as users access fpu/audio
266 * registers, OpenOCD will return fake value 0 instead of accessing
267 * registers through DIM.
/* Probe $fucpr to record whether the FPU (bit 0 region) and audio
 * (bit 31) extensions are enabled; see the comment above for why.  If
 * $fucpr itself is unreadable (NDS32_REGISTER_DISABLE), both extensions
 * are treated as absent. */
269 static int nds32_check_extension(struct nds32 *nds32)
273 nds32_get_mapped_reg(nds32, FUCPR, &value);
274 if (value == NDS32_REGISTER_DISABLE) {
275 nds32->fpu_enable = false;
276 nds32->audio_enable = false;
281 nds32->fpu_enable = true;
283 nds32->fpu_enable = false;
285 if (value & 0x80000000)
286 nds32->audio_enable = true;
288 nds32->audio_enable = false;
/* reg->type->set handler for 32-bit registers.
 * Requires a halted target.  Values that would raise an exception on the
 * target are rejected; writes to FPU/audio registers while the extension
 * is off only update the cache with 0.  After a real write the register
 * is read back so write-1-to-clear bits do not leave the cache stale, and
 * registers that shadow cached target state (PSW, MMU, local memory,
 * cache control, FUCPR) trigger a refresh of the derived state. */
293 static int nds32_set_core_reg(struct reg *reg, uint8_t *buf)
295 struct nds32_reg *reg_arch_info = reg->arch_info;
296 struct target *target = reg_arch_info->target;
297 struct nds32 *nds32 = target_to_nds32(target);
298 struct aice_port_s *aice = target_to_aice(target);
299 uint32_t value = buf_get_u32(buf, 0, 32);
301 if (target->state != TARGET_HALTED) {
302 LOG_ERROR("Target not halted");
303 return ERROR_TARGET_NOT_HALTED;
306 int mapped_regnum = nds32->register_map(nds32, reg_arch_info->num);
308 /* ignore values that will generate exception */
309 if (nds32_reg_exception(mapped_regnum, value))
312 LOG_DEBUG("writing register %" PRIi32 "(%s) with value 0x%8.8" PRIx32,
313 reg_arch_info->num, reg->name, value);
315 if ((nds32->fpu_enable == false) &&
316 (NDS32_REG_TYPE_FPU == nds32_reg_type(mapped_regnum))) {
318 buf_set_u32(reg->value, 0, 32, 0);
319 } else if ((nds32->audio_enable == false) &&
320 (NDS32_REG_TYPE_AUMR == nds32_reg_type(mapped_regnum))) {
322 buf_set_u32(reg->value, 0, 32, 0);
324 buf_set_u32(reg->value, 0, 32, value);
325 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
326 aice_write_register(aice, mapped_regnum, val);
328 /* After set value to registers, read the value from target
329 * to avoid W1C inconsistency. */
330 aice_read_register(aice, mapped_regnum, &val);
331 buf_set_u32(reg_arch_info->value, 0, 32, val);
337 /* update registers to take effect right now */
338 if (IR0 == mapped_regnum) {
339 nds32_update_psw(nds32);
340 } else if (MR0 == mapped_regnum) {
341 nds32_update_mmu_info(nds32);
342 } else if ((MR6 == mapped_regnum) || (MR7 == mapped_regnum)) {
343 /* update lm information */
344 nds32_update_lm_info(nds32);
345 } else if (MR8 == mapped_regnum) {
346 nds32_update_cache_info(nds32);
347 } else if (FUCPR == mapped_regnum) {
348 /* update audio/fpu setting */
349 nds32_check_extension(nds32);
/* reg->type->set handler for 64-bit registers (FD0..FD31).
 * Requires a halted target.  When the FPU extension is off the cache is
 * zeroed instead of writing hardware; otherwise the two 32-bit halves are
 * stored into the cache (the hardware write path is elsewhere). */
355 static int nds32_set_core_reg_64(struct reg *reg, uint8_t *buf)
357 struct nds32_reg *reg_arch_info = reg->arch_info;
358 struct target *target = reg_arch_info->target;
359 struct nds32 *nds32 = target_to_nds32(target);
360 uint32_t low_part = buf_get_u32(buf, 0, 32);
361 uint32_t high_part = buf_get_u32(buf, 32, 32);
363 if (target->state != TARGET_HALTED) {
364 LOG_ERROR("Target not halted");
365 return ERROR_TARGET_NOT_HALTED;
368 if ((nds32->fpu_enable == false) &&
369 ((FD0 <= reg_arch_info->num) && (reg_arch_info->num <= FD31))) {
371 buf_set_u32(reg->value, 0, 32, 0);
372 buf_set_u32(reg->value, 32, 32, 0);
377 buf_set_u32(reg->value, 0, 32, low_part);
378 buf_set_u32(reg->value, 32, 32, high_part);
/* Accessor vtables wired into each cache entry by nds32_build_reg_cache:
 * one for 32-bit registers, one for the 64-bit FD register pairs. */
387 static const struct reg_arch_type nds32_reg_access_type = {
388 .get = nds32_get_core_reg,
389 .set = nds32_set_core_reg,
392 static const struct reg_arch_type nds32_reg_access_type_64 = {
393 .get = nds32_get_core_reg_64,
394 .set = nds32_set_core_reg_64,
/* Allocate and populate the register cache for this target: one struct reg
 * per NDS32 register, each wired to its nds32_reg arch_info, the proper
 * 32/64-bit accessor vtable, a GDB data type, and the GDB target-description
 * feature it belongs to.  All registers start disabled; the init routines
 * below enable the subsets the core actually has.  Returns the cache (also
 * stored in nds32->core_cache). */
397 static struct reg_cache *nds32_build_reg_cache(struct target *target,
400 struct reg_cache *cache = calloc(sizeof(struct reg_cache), 1);
401 struct reg *reg_list = calloc(TOTAL_REG_NUM, sizeof(struct reg));
402 struct nds32_reg *reg_arch_info = calloc(TOTAL_REG_NUM, sizeof(struct nds32_reg));
405 if (!cache || !reg_list || !reg_arch_info) {
412 cache->name = "Andes registers";
414 cache->reg_list = reg_list;
417 for (i = 0; i < TOTAL_REG_NUM; i++) {
418 reg_arch_info[i].num = i;
419 reg_arch_info[i].target = target;
420 reg_arch_info[i].nds32 = nds32;
421 reg_arch_info[i].enable = false;
423 reg_list[i].name = nds32_reg_simple_name(i);
424 reg_list[i].number = reg_arch_info[i].num;
425 reg_list[i].size = nds32_reg_size(i);
/* Fixed mis-encoded '&reg_arch_info[i]' (was the mojibake '®_...'). */
426 reg_list[i].arch_info = &reg_arch_info[i];
428 reg_list[i].reg_data_type = calloc(sizeof(struct reg_data_type), 1);
/* FD0..FD31 are 64-bit IEEE doubles and use the 64-bit accessors. */
430 if (FD0 <= reg_arch_info[i].num && reg_arch_info[i].num <= FD31) {
431 reg_list[i].value = reg_arch_info[i].value;
432 reg_list[i].type = &nds32_reg_access_type_64;
434 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_DOUBLE;
435 reg_list[i].reg_data_type->id = "ieee_double";
436 reg_list[i].group = "float";
438 reg_list[i].value = reg_arch_info[i].value;
439 reg_list[i].type = &nds32_reg_access_type;
440 reg_list[i].group = "general";
442 if ((FS0 <= reg_arch_info[i].num) && (reg_arch_info[i].num <= FS31)) {
443 reg_list[i].reg_data_type->type = REG_TYPE_IEEE_SINGLE;
444 reg_list[i].reg_data_type->id = "ieee_single";
445 reg_list[i].group = "float";
446 } else if ((reg_arch_info[i].num == FPCSR) ||
447 (reg_arch_info[i].num == FPCFG)) {
448 reg_list[i].group = "float";
449 } else if ((reg_arch_info[i].num == R28) ||
450 (reg_arch_info[i].num == R29) ||
451 (reg_arch_info[i].num == R31)) {
452 reg_list[i].reg_data_type->type = REG_TYPE_DATA_PTR;
453 reg_list[i].reg_data_type->id = "data_ptr";
454 } else if ((reg_arch_info[i].num == R30) ||
455 (reg_arch_info[i].num == PC)) {
456 reg_list[i].reg_data_type->type = REG_TYPE_CODE_PTR;
457 reg_list[i].reg_data_type->id = "code_ptr";
459 reg_list[i].reg_data_type->type = REG_TYPE_UINT32;
460 reg_list[i].reg_data_type->id = "uint32";
/* R16..R25 are caller-saved per the NDS32 calling convention. */
464 if (R16 <= reg_arch_info[i].num && reg_arch_info[i].num <= R25)
465 reg_list[i].caller_save = true;
467 reg_list[i].caller_save = false;
469 reg_list[i].feature = malloc(sizeof(struct reg_feature));
471 if (R0 <= reg_arch_info[i].num && reg_arch_info[i].num <= IFC_LP)
472 reg_list[i].feature->name = "org.gnu.gdb.nds32.core";
473 else if (CR0 <= reg_arch_info[i].num && reg_arch_info[i].num <= SECUR0)
474 reg_list[i].feature->name = "org.gnu.gdb.nds32.system";
475 else if (D0L24 <= reg_arch_info[i].num && reg_arch_info[i].num <= CBE3)
476 reg_list[i].feature->name = "org.gnu.gdb.nds32.audio";
477 else if (FPCSR <= reg_arch_info[i].num && reg_arch_info[i].num <= FD31)
478 reg_list[i].feature->name = "org.gnu.gdb.nds32.fpu";
483 nds32->core_cache = cache;
/* Build the register cache and append it to the target's cache chain. */
488 static int nds32_reg_cache_init(struct target *target, struct nds32 *nds32)
490 struct reg_cache *cache;
492 cache = nds32_build_reg_cache(target, nds32);
496 *register_get_last_cache_p(&target->reg_cache) = cache;
/* Return the cache entry for regnum; no bounds check here — callers must
 * validate regnum against core_cache->num_regs first. */
501 static struct reg *nds32_reg_current(struct nds32 *nds32, unsigned regnum)
505 r = nds32->core_cache->reg_list + regnum;
/* Capture the full target context after a halt: fetch $pc and $psw, then
 * refresh all state derived from control registers (endianness/translation,
 * MMU, caches, local memory windows, FPU/audio availability). */
510 int nds32_full_context(struct nds32 *nds32)
512 uint32_t value, value_ir0;
514 /* save $pc & $psw */
515 nds32_get_mapped_reg(nds32, PC, &value);
516 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
518 nds32_update_psw(nds32);
519 nds32_update_mmu_info(nds32);
520 nds32_update_cache_info(nds32);
521 nds32_update_lm_info(nds32);
523 nds32_check_extension(nds32);
528 /* get register value internally */
/* Read a register by cache index through its get handler.
 * Fixed off-by-one bounds check: valid indices are 0..num_regs-1, so
 * regnum == num_regs must be rejected ('>=' instead of '>') before
 * nds32_reg_current indexes reg_list with it. */
529 int nds32_get_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t *value)
531 struct reg_cache *reg_cache = nds32->core_cache;
534 if (regnum >= reg_cache->num_regs)
537 r = nds32_reg_current(nds32, regnum);
539 if (ERROR_OK != r->type->get(r))
542 *value = buf_get_u32(r->value, 0, 32);
547 /** set register internally */
/* Write a register by cache index through its set handler.
 * Fixed off-by-one bounds check: valid indices are 0..num_regs-1, so
 * regnum == num_regs must be rejected ('>=' instead of '>') before
 * nds32_reg_current indexes reg_list with it. */
548 int nds32_set_mapped_reg(struct nds32 *nds32, unsigned regnum, uint32_t value)
550 struct reg_cache *reg_cache = nds32->core_cache;
552 uint8_t set_value[4];
554 if (regnum >= reg_cache->num_regs)
557 r = nds32_reg_current(nds32, regnum);
559 buf_set_u32(set_value, 0, 32, value);
561 return r->type->set(r, set_value);
564 /** get general register list */
/* Collect the enabled registers in the R0..IFC_LP range into a freshly
 * allocated array for GDB; disabled registers are skipped, so the returned
 * size may be smaller than the allocation. */
565 static int nds32_get_general_reg_list(struct nds32 *nds32,
566 struct reg **reg_list[], int *reg_list_size)
568 struct reg *reg_current;
572 /** freed in gdb_server.c */
573 *reg_list = malloc(sizeof(struct reg *) * (IFC_LP - R0 + 1));
576 for (i = R0; i < IFC_LP + 1; i++) {
577 reg_current = nds32_reg_current(nds32, i);
578 if (((struct nds32_reg *)reg_current->arch_info)->enable) {
579 (*reg_list)[current_idx] = reg_current;
583 *reg_list_size = current_idx;
588 /** get all register list */
/* Return every register in the cache for GDB; each entry's 'exist' flag
 * mirrors its enable state so GDB hides unavailable registers. */
589 static int nds32_get_all_reg_list(struct nds32 *nds32,
590 struct reg **reg_list[], int *reg_list_size)
592 struct reg_cache *reg_cache = nds32->core_cache;
593 struct reg *reg_current;
596 *reg_list_size = reg_cache->num_regs;
598 /** freed in gdb_server.c */
599 *reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));
601 for (i = 0; i < reg_cache->num_regs; i++) {
602 reg_current = nds32_reg_current(nds32, i);
603 reg_current->exist = ((struct nds32_reg *)
604 reg_current->arch_info)->enable;
605 (*reg_list)[i] = reg_current;
611 /** get all register list */
/* target_type.get_gdb_reg_list hook: dispatch on the requested register
 * class (all registers vs. the general subset). */
612 int nds32_get_gdb_reg_list(struct target *target,
613 struct reg **reg_list[], int *reg_list_size,
614 enum target_register_class reg_class)
616 struct nds32 *nds32 = target_to_nds32(target);
620 return nds32_get_all_reg_list(nds32, reg_list, reg_list_size);
621 case REG_CLASS_GENERAL:
622 return nds32_get_general_reg_list(nds32, reg_list, reg_list_size);
/* Choose the AICE memory-select mode (MEM / ILM / DLM) for an access of
 * 'length' bytes starting at 'address', and clamp *end_address so a single
 * access never straddles an ILM/DLM boundary — the caller is expected to
 * issue a follow-up access for the remainder.  Only applies when the access
 * channel bypasses the CPU, the EDM supports access control and direct
 * local-memory access, and the memory mode is AUTO. */
630 static int nds32_select_memory_mode(struct target *target, uint32_t address,
631 uint32_t length, uint32_t *end_address)
633 struct nds32 *nds32 = target_to_nds32(target);
634 struct aice_port_s *aice = target_to_aice(target);
635 struct nds32_memory *memory = &(nds32->memory);
636 struct nds32_edm *edm = &(nds32->edm);
637 uint32_t dlm_start, dlm_end;
638 uint32_t ilm_start, ilm_end;
639 uint32_t address_end = address + length;
641 /* init end_address */
642 *end_address = address_end;
644 if (NDS_MEMORY_ACC_CPU == memory->access_channel)
647 if (edm->access_control == false) {
648 LOG_DEBUG("EDM does not support ACC_CTL");
652 if (edm->direct_access_local_memory == false) {
653 LOG_DEBUG("EDM does not support DALM");
654 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
658 if (NDS_MEMORY_SELECT_AUTO != memory->mode) {
659 LOG_DEBUG("Memory mode is not AUTO");
663 /* set default mode */
664 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
666 if ((memory->ilm_base != 0) && (memory->ilm_enable == true)) {
667 ilm_start = memory->ilm_start;
668 ilm_end = memory->ilm_end;
670 /* case 1, address < ilm_start */
671 if (address < ilm_start) {
672 if (ilm_start < address_end) {
673 /* update end_address to split non-ILM from ILM */
674 *end_address = ilm_start;
677 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
678 } else if ((ilm_start <= address) && (address < ilm_end)) {
679 /* case 2, ilm_start <= address < ilm_end */
680 if (ilm_end < address_end) {
681 /* update end_address to split non-ILM from ILM */
682 *end_address = ilm_end;
685 aice_memory_mode(aice, NDS_MEMORY_SELECT_ILM);
686 } else { /* case 3, ilm_end <= address */
688 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
693 LOG_DEBUG("ILM is not enabled");
/* Same three-way split against the DLM window. */
696 if ((memory->dlm_base != 0) && (memory->dlm_enable == true)) {
697 dlm_start = memory->dlm_start;
698 dlm_end = memory->dlm_end;
700 /* case 1, address < dlm_start */
701 if (address < dlm_start) {
702 if (dlm_start < address_end) {
703 /* update end_address to split non-DLM from DLM */
704 *end_address = dlm_start;
707 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
708 } else if ((dlm_start <= address) && (address < dlm_end)) {
709 /* case 2, dlm_start <= address < dlm_end */
710 if (dlm_end < address_end) {
711 /* update end_address to split non-DLM from DLM */
712 *end_address = dlm_end;
715 aice_memory_mode(aice, NDS_MEMORY_SELECT_DLM);
716 } else { /* case 3, dlm_end <= address */
718 aice_memory_mode(aice, NDS_MEMORY_SELECT_MEM);
723 LOG_DEBUG("DLM is not enabled");
/* Read 'size' bytes from target memory into 'buffer', breaking the transfer
 * into unaligned head bytes, bulk/word-aligned middle, halfword, and tail
 * byte accesses.  Each segment re-selects the memory mode so a transfer
 * never crosses an ILM/DLM boundary in one AICE operation.  CPU-channel
 * access requires a halted target. */
729 int nds32_read_buffer(struct target *target, uint32_t address,
730 uint32_t size, uint8_t *buffer)
732 struct nds32 *nds32 = target_to_nds32(target);
733 struct nds32_memory *memory = &(nds32->memory);
735 if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
736 (target->state != TARGET_HALTED)) {
737 LOG_WARNING("target was not halted");
738 return ERROR_TARGET_NOT_HALTED;
741 LOG_DEBUG("READ BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
745 int retval = ERROR_OK;
746 struct aice_port_s *aice = target_to_aice(target);
747 uint32_t end_address;
/* Fast path: a single aligned halfword. */
749 if (((address % 2) == 0) && (size == 2)) {
750 nds32_select_memory_mode(target, address, 2, &end_address);
751 return aice_read_mem_unit(aice, address, 2, 1, buffer);
754 /* handle unaligned head bytes */
756 uint32_t unaligned = 4 - (address % 4);
758 if (unaligned > size)
761 nds32_select_memory_mode(target, address, unaligned, &end_address);
762 retval = aice_read_mem_unit(aice, address, 1, unaligned, buffer);
763 if (retval != ERROR_OK)
767 address += unaligned;
771 /* handle aligned words */
773 int aligned = size - (size % 4);
777 nds32_select_memory_mode(target, address, aligned, &end_address);
/* Clamp each chunk to the boundary nds32_select_memory_mode reported. */
779 read_len = end_address - address;
782 retval = aice_read_mem_bulk(aice, address, read_len, buffer);
784 retval = aice_read_mem_unit(aice, address, 4, read_len / 4, buffer);
786 if (retval != ERROR_OK)
794 } while (aligned != 0);
797 /*prevent byte access when possible (avoid AHB access limitations in some cases)*/
799 int aligned = size - (size % 2);
800 nds32_select_memory_mode(target, address, aligned, &end_address);
801 retval = aice_read_mem_unit(aice, address, 2, aligned / 2, buffer);
802 if (retval != ERROR_OK)
809 /* handle tail reads of less than 4 bytes */
811 nds32_select_memory_mode(target, address, size, &end_address);
812 retval = aice_read_mem_unit(aice, address, 1, size, buffer);
813 if (retval != ERROR_OK)
/* Thin target_type.read_memory hook: forward straight to the AICE unit
 * read without any alignment splitting (see nds32_read_buffer for that). */
820 int nds32_read_memory(struct target *target, uint32_t address,
821 uint32_t size, uint32_t count, uint8_t *buffer)
823 struct aice_port_s *aice = target_to_aice(target);
825 return aice_read_mem_unit(aice, address, size, count, buffer);
/* Read physical memory: temporarily force the BUS access channel so the
 * MMU is bypassed, perform the read, then restore the original channel. */
828 int nds32_read_phys_memory(struct target *target, uint32_t address,
829 uint32_t size, uint32_t count, uint8_t *buffer)
831 struct aice_port_s *aice = target_to_aice(target);
832 struct nds32 *nds32 = target_to_nds32(target);
833 struct nds32_memory *memory = &(nds32->memory);
834 enum nds_memory_access orig_channel;
837 /* switch to BUS access mode to skip MMU */
838 orig_channel = memory->access_channel;
839 memory->access_channel = NDS_MEMORY_ACC_BUS;
840 aice_memory_access(aice, memory->access_channel);
842 /* The input address is physical address. No need to do address translation. */
843 result = aice_read_mem_unit(aice, address, size, count, buffer);
845 /* restore to origin access mode */
846 memory->access_channel = orig_channel;
847 aice_memory_access(aice, memory->access_channel);
/* Write 'size' bytes from 'buffer' to target memory — mirror image of
 * nds32_read_buffer: unaligned head bytes, bulk/word-aligned middle split
 * at ILM/DLM boundaries, then tail bytes.  CPU-channel access requires a
 * halted target. */
852 int nds32_write_buffer(struct target *target, uint32_t address,
853 uint32_t size, const uint8_t *buffer)
855 struct nds32 *nds32 = target_to_nds32(target);
856 struct nds32_memory *memory = &(nds32->memory);
858 if ((NDS_MEMORY_ACC_CPU == memory->access_channel) &&
859 (target->state != TARGET_HALTED)) {
860 LOG_WARNING("target was not halted");
861 return ERROR_TARGET_NOT_HALTED;
864 LOG_DEBUG("WRITE BUFFER: ADDR %08" PRIx32 " SIZE %08" PRIx32,
868 struct aice_port_s *aice = target_to_aice(target);
869 int retval = ERROR_OK;
870 uint32_t end_address;
/* Fast path: a single aligned halfword. */
872 if (((address % 2) == 0) && (size == 2)) {
873 nds32_select_memory_mode(target, address, 2, &end_address);
874 return aice_write_mem_unit(aice, address, 2, 1, buffer);
877 /* handle unaligned head bytes */
879 uint32_t unaligned = 4 - (address % 4);
881 if (unaligned > size)
884 nds32_select_memory_mode(target, address, unaligned, &end_address);
885 retval = aice_write_mem_unit(aice, address, 1, unaligned, buffer);
886 if (retval != ERROR_OK)
890 address += unaligned;
894 /* handle aligned words */
896 int aligned = size - (size % 4);
900 nds32_select_memory_mode(target, address, aligned, &end_address);
/* Clamp each chunk to the boundary nds32_select_memory_mode reported. */
902 write_len = end_address - address;
904 retval = aice_write_mem_bulk(aice, address, write_len, buffer);
906 retval = aice_write_mem_unit(aice, address, 4, write_len / 4, buffer);
907 if (retval != ERROR_OK)
911 address += write_len;
913 aligned -= write_len;
915 } while (aligned != 0);
918 /* handle tail writes of less than 4 bytes */
920 nds32_select_memory_mode(target, address, size, &end_address);
921 retval = aice_write_mem_unit(aice, address, 1, size, buffer);
922 if (retval != ERROR_OK)
/* Thin target_type.write_memory hook: forward straight to the AICE unit
 * write without any alignment splitting (see nds32_write_buffer for that). */
929 int nds32_write_memory(struct target *target, uint32_t address,
930 uint32_t size, uint32_t count, const uint8_t *buffer)
932 struct aice_port_s *aice = target_to_aice(target);
934 return aice_write_mem_unit(aice, address, size, count, buffer);
/* Write physical memory: temporarily force the BUS access channel so the
 * MMU is bypassed, perform the write, then restore the original channel. */
937 int nds32_write_phys_memory(struct target *target, uint32_t address,
938 uint32_t size, uint32_t count, const uint8_t *buffer)
940 struct aice_port_s *aice = target_to_aice(target);
941 struct nds32 *nds32 = target_to_nds32(target);
942 struct nds32_memory *memory = &(nds32->memory);
943 enum nds_memory_access orig_channel;
946 /* switch to BUS access mode to skip MMU */
947 orig_channel = memory->access_channel;
948 memory->access_channel = NDS_MEMORY_ACC_BUS;
949 aice_memory_access(aice, memory->access_channel);
951 /* The input address is physical address. No need to do address translation. */
952 result = aice_write_mem_unit(aice, address, size, count, buffer);
954 /* restore to origin access mode */
955 memory->access_channel = orig_channel;
956 aice_memory_access(aice, memory->access_channel);
/* target_type.mmu hook: report whether address translation is in effect —
 * true only when the core implements a full MMU (memory_protection == 2)
 * and PSW says translation is enabled.  Requires a halted target. */
961 int nds32_mmu(struct target *target, int *enabled)
963 if (target->state != TARGET_HALTED) {
964 LOG_ERROR("%s: target not halted", __func__);
965 return ERROR_TARGET_INVALID;
968 struct nds32 *nds32 = target_to_nds32(target);
969 struct nds32_memory *memory = &(nds32->memory);
970 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
972 if ((mmu_config->memory_protection == 2) && (memory->address_translation == true))
/* target_type.arch_state hook: print halt reason plus $psw/$pc for the
 * user, and mirror $pc into the pseudo "pc" register so generic code sees
 * the current value.  Sanity-checks the magic to catch wrong-target calls. */
980 int nds32_arch_state(struct target *target)
982 struct nds32 *nds32 = target_to_nds32(target);
984 if (nds32->common_magic != NDS32_COMMON_MAGIC) {
985 LOG_ERROR("BUG: called for a non-Andes target");
989 uint32_t value_pc, value_psw;
991 nds32_get_mapped_reg(nds32, PC, &value_pc);
992 nds32_get_mapped_reg(nds32, IR0, &value_psw);
994 LOG_USER("target halted due to %s\n"
995 "psw: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
996 debug_reason_name(target),
999 nds32->virtual_hosting ? ", virtual hosting" : "");
1001 /* save pc value to pseudo register pc */
1002 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
1003 buf_set_u32(reg->value, 0, 32, value_pc);
/* Enable the registers every NDS32 core is guaranteed to implement; the
 * optional registers are enabled later based on the probed configuration
 * (see nds32_init_option_registers). */
1008 static void nds32_init_must_have_registers(struct nds32 *nds32)
1010 struct reg_cache *reg_cache = nds32->core_cache;
1012 /** MUST have general registers */
1013 ((struct nds32_reg *)reg_cache->reg_list[R0].arch_info)->enable = true;
1014 ((struct nds32_reg *)reg_cache->reg_list[R1].arch_info)->enable = true;
1015 ((struct nds32_reg *)reg_cache->reg_list[R2].arch_info)->enable = true;
1016 ((struct nds32_reg *)reg_cache->reg_list[R3].arch_info)->enable = true;
1017 ((struct nds32_reg *)reg_cache->reg_list[R4].arch_info)->enable = true;
1018 ((struct nds32_reg *)reg_cache->reg_list[R5].arch_info)->enable = true;
1019 ((struct nds32_reg *)reg_cache->reg_list[R6].arch_info)->enable = true;
1020 ((struct nds32_reg *)reg_cache->reg_list[R7].arch_info)->enable = true;
1021 ((struct nds32_reg *)reg_cache->reg_list[R8].arch_info)->enable = true;
1022 ((struct nds32_reg *)reg_cache->reg_list[R9].arch_info)->enable = true;
1023 ((struct nds32_reg *)reg_cache->reg_list[R10].arch_info)->enable = true;
1024 ((struct nds32_reg *)reg_cache->reg_list[R15].arch_info)->enable = true;
1025 ((struct nds32_reg *)reg_cache->reg_list[R28].arch_info)->enable = true;
1026 ((struct nds32_reg *)reg_cache->reg_list[R29].arch_info)->enable = true;
1027 ((struct nds32_reg *)reg_cache->reg_list[R30].arch_info)->enable = true;
1028 ((struct nds32_reg *)reg_cache->reg_list[R31].arch_info)->enable = true;
1029 ((struct nds32_reg *)reg_cache->reg_list[PC].arch_info)->enable = true;
1031 /** MUST have configuration system registers */
1032 ((struct nds32_reg *)reg_cache->reg_list[CR0].arch_info)->enable = true;
1033 ((struct nds32_reg *)reg_cache->reg_list[CR1].arch_info)->enable = true;
1034 ((struct nds32_reg *)reg_cache->reg_list[CR2].arch_info)->enable = true;
1035 ((struct nds32_reg *)reg_cache->reg_list[CR3].arch_info)->enable = true;
1036 ((struct nds32_reg *)reg_cache->reg_list[CR4].arch_info)->enable = true;
1038 /** MUST have interrupt system registers */
1039 ((struct nds32_reg *)reg_cache->reg_list[IR0].arch_info)->enable = true;
1040 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = true;
1041 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = true;
1042 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = true;
1043 ((struct nds32_reg *)reg_cache->reg_list[IR6].arch_info)->enable = true;
1044 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = true;
1045 ((struct nds32_reg *)reg_cache->reg_list[IR11].arch_info)->enable = true;
1046 ((struct nds32_reg *)reg_cache->reg_list[IR14].arch_info)->enable = true;
1047 ((struct nds32_reg *)reg_cache->reg_list[IR15].arch_info)->enable = true;
1049 /** MUST have MMU system registers */
1050 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = true;
1052 /** MUST have EDM system registers */
1053 ((struct nds32_reg *)reg_cache->reg_list[DR40].arch_info)->enable = true;
1054 ((struct nds32_reg *)reg_cache->reg_list[DR42].arch_info)->enable = true;
/* Decode the memory-configuration registers $cr1 (ICM_CFG) and $cr2
 * (DCM_CFG) into the cached cache/local-memory geometry fields.  Field
 * layout (both registers): [2:0] set, [5:3] way, [8:6] line size,
 * [9] lock support, [12:10] LM base field, [14:13] LM alignment version. */
1057 static int nds32_init_memory_config(struct nds32 *nds32)
1059 uint32_t value_cr1; /* ICM_CFG */
1060 uint32_t value_cr2; /* DCM_CFG */
1061 struct nds32_memory *memory = &(nds32->memory);
1063 /* read $cr1 to init instruction memory information */
1064 nds32_get_mapped_reg(nds32, CR1, &value_cr1);
1065 memory->icache.set = value_cr1 & 0x7;
1066 memory->icache.way = (value_cr1 >> 3) & 0x7;
1067 memory->icache.line_size = (value_cr1 >> 6) & 0x7;
1068 memory->icache.lock_support = (value_cr1 >> 9) & 0x1;
1070 memory->ilm_base = (value_cr1 >> 10) & 0x7;
1071 memory->ilm_align_ver = (value_cr1 >> 13) & 0x3;
1073 /* read $cr2 to init data memory information */
1074 nds32_get_mapped_reg(nds32, CR2, &value_cr2);
1075 memory->dcache.set = value_cr2 & 0x7;
1076 memory->dcache.way = (value_cr2 >> 3) & 0x7;
1077 memory->dcache.line_size = (value_cr2 >> 6) & 0x7;
1078 memory->dcache.lock_support = (value_cr2 >> 9) & 0x1;
1080 memory->dlm_base = (value_cr2 >> 10) & 0x7;
1081 memory->dlm_align_ver = (value_cr2 >> 13) & 0x3;
/* Probe the core's capability registers ($cr0 CPU_VER, $cr3 MMU_CFG,
 * $cr4 MSC_CFG) once and unpack every field into the cached cpu_version /
 * mmu_config / misc_config structs, then decode the memory configuration.
 * All later feature decisions read these cached fields instead of
 * re-touching hardware. */
1086 static void nds32_init_config(struct nds32 *nds32)
1091 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1092 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1093 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1095 nds32_get_mapped_reg(nds32, CR0, &value_cr0);
1096 nds32_get_mapped_reg(nds32, CR3, &value_cr3);
1097 nds32_get_mapped_reg(nds32, CR4, &value_cr4);
1099 /* config cpu version */
1100 cpu_version->performance_extension = value_cr0 & 0x1;
1101 cpu_version->_16bit_extension = (value_cr0 >> 1) & 0x1;
1102 cpu_version->performance_extension_2 = (value_cr0 >> 2) & 0x1;
1103 cpu_version->cop_fpu_extension = (value_cr0 >> 3) & 0x1;
1104 cpu_version->string_extension = (value_cr0 >> 4) & 0x1;
1105 cpu_version->revision = (value_cr0 >> 16) & 0xFF;
1106 cpu_version->cpu_id_family = (value_cr0 >> 24) & 0xF;
1107 cpu_version->cpu_id_version = (value_cr0 >> 28) & 0xF;
/* $cr3: MMU configuration.  TLB geometry fields overlap depending on
 * whether the TLB is fully associative. */
1110 mmu_config->memory_protection = value_cr3 & 0x3;
1111 mmu_config->memory_protection_version = (value_cr3 >> 2) & 0x1F;
1112 mmu_config->fully_associative_tlb = (value_cr3 >> 7) & 0x1;
1113 if (mmu_config->fully_associative_tlb) {
1114 mmu_config->tlb_size = (value_cr3 >> 8) & 0x7F;
1116 mmu_config->tlb_ways = (value_cr3 >> 8) & 0x7;
1117 mmu_config->tlb_sets = (value_cr3 >> 11) & 0x7;
1119 mmu_config->_8k_page_support = (value_cr3 >> 15) & 0x1;
1120 mmu_config->extra_page_size_support = (value_cr3 >> 16) & 0xFF;
1121 mmu_config->tlb_lock = (value_cr3 >> 24) & 0x1;
1122 mmu_config->hardware_page_table_walker = (value_cr3 >> 25) & 0x1;
1123 mmu_config->default_endian = (value_cr3 >> 26) & 0x1;
1124 mmu_config->partition_num = (value_cr3 >> 27) & 0x1;
1125 mmu_config->invisible_tlb = (value_cr3 >> 28) & 0x1;
1126 mmu_config->vlpt = (value_cr3 >> 29) & 0x1;
1127 mmu_config->ntme = (value_cr3 >> 30) & 0x1;
1128 mmu_config->drde = (value_cr3 >> 31) & 0x1;
/* $cr4: miscellaneous capability bits. */
1131 misc_config->edm = value_cr4 & 0x1;
1132 misc_config->local_memory_dma = (value_cr4 >> 1) & 0x1;
1133 misc_config->performance_monitor = (value_cr4 >> 2) & 0x1;
1134 misc_config->high_speed_memory_port = (value_cr4 >> 3) & 0x1;
1135 misc_config->debug_tracer = (value_cr4 >> 4) & 0x1;
1136 misc_config->div_instruction = (value_cr4 >> 5) & 0x1;
1137 misc_config->mac_instruction = (value_cr4 >> 6) & 0x1;
1138 misc_config->audio_isa = (value_cr4 >> 7) & 0x3;
1139 misc_config->L2_cache = (value_cr4 >> 9) & 0x1;
1140 misc_config->reduce_register = (value_cr4 >> 10) & 0x1;
1141 misc_config->addr_24 = (value_cr4 >> 11) & 0x1;
1142 misc_config->interruption_level = (value_cr4 >> 12) & 0x1;
1143 misc_config->baseline_instruction = (value_cr4 >> 13) & 0x7;
1144 misc_config->no_dx_register = (value_cr4 >> 16) & 0x1;
1145 misc_config->implement_dependant_register = (value_cr4 >> 17) & 0x1;
1146 misc_config->implement_dependant_sr_encoding = (value_cr4 >> 18) & 0x1;
1147 misc_config->ifc = (value_cr4 >> 19) & 0x1;
1148 misc_config->mcu = (value_cr4 >> 20) & 0x1;
1149 misc_config->shadow = (value_cr4 >> 21) & 0x7;
1150 misc_config->ex9 = (value_cr4 >> 24) & 0x1;
1152 nds32_init_memory_config(nds32);
/*
 * Enable the optional (configuration-dependent) registers in the target's
 * register cache, based on the CPU version, MMU/MPU configuration and misc
 * configuration fields probed from CR0..CR4 earlier in this file.
 * Registers that the core does not implement stay disabled so they are never
 * presented to the debugger.
 *
 * NOTE(review): this paste is missing several original lines (braces, the
 * declarations/assignments of no_cr5/no_racr0 in the 0xC/0xD-family quirk
 * branch, and the trailing return) — confirm against the full file.
 */
1155 static int nds32_init_option_registers(struct nds32 *nds32)
1157 struct reg_cache *reg_cache = nds32->core_cache;
1158 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
1159 struct nds32_mmu_config *mmu_config = &(nds32->mmu_config);
1160 struct nds32_misc_config *misc_config = &(nds32->misc_config);
1161 struct nds32_memory *memory_config = &(nds32->memory);
/* Early revisions of the 0xC/0xD CPU families lack CR5/RACR0; the body that
 * records that (setting no_cr5/no_racr0) appears stripped from this paste. */
1167 if (((cpu_version->cpu_id_family == 0xC) || (cpu_version->cpu_id_family == 0xD)) &&
1168 ((cpu_version->revision & 0xFC) == 0)) {
/* Full 32-GPR file: enable the registers absent from the 16-register
 * "reduced register" configuration. */
1178 if (misc_config->reduce_register == false) {
1179 ((struct nds32_reg *)reg_cache->reg_list[R11].arch_info)->enable = true;
1180 ((struct nds32_reg *)reg_cache->reg_list[R12].arch_info)->enable = true;
1181 ((struct nds32_reg *)reg_cache->reg_list[R13].arch_info)->enable = true;
1182 ((struct nds32_reg *)reg_cache->reg_list[R14].arch_info)->enable = true;
1183 ((struct nds32_reg *)reg_cache->reg_list[R16].arch_info)->enable = true;
1184 ((struct nds32_reg *)reg_cache->reg_list[R17].arch_info)->enable = true;
1185 ((struct nds32_reg *)reg_cache->reg_list[R18].arch_info)->enable = true;
1186 ((struct nds32_reg *)reg_cache->reg_list[R19].arch_info)->enable = true;
1187 ((struct nds32_reg *)reg_cache->reg_list[R20].arch_info)->enable = true;
1188 ((struct nds32_reg *)reg_cache->reg_list[R21].arch_info)->enable = true;
1189 ((struct nds32_reg *)reg_cache->reg_list[R22].arch_info)->enable = true;
1190 ((struct nds32_reg *)reg_cache->reg_list[R23].arch_info)->enable = true;
1191 ((struct nds32_reg *)reg_cache->reg_list[R24].arch_info)->enable = true;
1192 ((struct nds32_reg *)reg_cache->reg_list[R25].arch_info)->enable = true;
1193 ((struct nds32_reg *)reg_cache->reg_list[R26].arch_info)->enable = true;
1194 ((struct nds32_reg *)reg_cache->reg_list[R27].arch_info)->enable = true;
/* D0/D1 accumulator pairs exist unless the "no DX register" option is set. */
1197 if (misc_config->no_dx_register == false) {
1198 ((struct nds32_reg *)reg_cache->reg_list[D0LO].arch_info)->enable = true;
1199 ((struct nds32_reg *)reg_cache->reg_list[D0HI].arch_info)->enable = true;
1200 ((struct nds32_reg *)reg_cache->reg_list[D1LO].arch_info)->enable = true;
1201 ((struct nds32_reg *)reg_cache->reg_list[D1HI].arch_info)->enable = true;
/* Instruction table base register, present with the EX9 (code-density)
 * extension. */
1204 if (misc_config->ex9)
1205 ((struct nds32_reg *)reg_cache->reg_list[ITB].arch_info)->enable = true;
1207 if (no_cr5 == false)
1208 ((struct nds32_reg *)reg_cache->reg_list[CR5].arch_info)->enable = true;
/* FPU coprocessor present: expose its configuration/status registers. */
1210 if (cpu_version->cop_fpu_extension) {
1211 ((struct nds32_reg *)reg_cache->reg_list[CR6].arch_info)->enable = true;
1212 ((struct nds32_reg *)reg_cache->reg_list[FPCSR].arch_info)->enable = true;
1213 ((struct nds32_reg *)reg_cache->reg_list[FPCFG].arch_info)->enable = true;
1216 if (mmu_config->memory_protection == 1) {
1217 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1218 ((struct nds32_reg *)reg_cache->reg_list[IR1].arch_info)->enable = false;
1219 ((struct nds32_reg *)reg_cache->reg_list[IR9].arch_info)->enable = false;
/* Some interruption registers are inaccessible unless running at the
 * highest privilege level (0). */
1222 if (nds32->privilege_level != 0)
1223 ((struct nds32_reg *)reg_cache->reg_list[IR3].arch_info)->enable = false;
1225 if (misc_config->mcu == true)
1226 ((struct nds32_reg *)reg_cache->reg_list[IR4].arch_info)->enable = false;
/* interruption_level == false means the 3-level interrupt scheme, which
 * carries the extra saved-state registers below. */
1228 if (misc_config->interruption_level == false) {
1229 ((struct nds32_reg *)reg_cache->reg_list[IR2].arch_info)->enable = true;
1230 ((struct nds32_reg *)reg_cache->reg_list[IR5].arch_info)->enable = true;
1231 ((struct nds32_reg *)reg_cache->reg_list[IR10].arch_info)->enable = true;
1232 ((struct nds32_reg *)reg_cache->reg_list[IR12].arch_info)->enable = true;
1233 ((struct nds32_reg *)reg_cache->reg_list[IR13].arch_info)->enable = true;
1235 /* Secure MPU has no IPC, IPSW, P_ITYPE */
1236 if (mmu_config->memory_protection != 1)
1237 ((struct nds32_reg *)reg_cache->reg_list[IR7].arch_info)->enable = true;
1240 if ((cpu_version->cpu_id_family == 0x9) ||
1241 (cpu_version->cpu_id_family == 0xA) ||
1242 (cpu_version->cpu_id_family == 0xC) ||
1243 (cpu_version->cpu_id_family == 0xD))
1244 ((struct nds32_reg *)reg_cache->reg_list[IR8].arch_info)->enable = true;
/* Shadow register bank version 1 adds IR16/IR17. */
1246 if (misc_config->shadow == 1) {
1247 ((struct nds32_reg *)reg_cache->reg_list[IR16].arch_info)->enable = true;
1248 ((struct nds32_reg *)reg_cache->reg_list[IR17].arch_info)->enable = true;
1251 if (misc_config->ifc)
1252 ((struct nds32_reg *)reg_cache->reg_list[IFC_LP].arch_info)->enable = true;
1254 if (nds32->privilege_level != 0)
1255 ((struct nds32_reg *)reg_cache->reg_list[MR0].arch_info)->enable = false;
/* memory_protection == 1: MPU; additional MR/IR registers depend on the
 * MPU version and on running at privilege level 0. */
1257 if (mmu_config->memory_protection == 1) {
1258 if (mmu_config->memory_protection_version == 24)
1259 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1261 if (nds32->privilege_level == 0) {
1262 if ((mmu_config->memory_protection_version == 16) ||
1263 (mmu_config->memory_protection_version == 24)) {
1264 ((struct nds32_reg *)reg_cache->reg_list[MR11].arch_info)->enable = true;
1265 ((struct nds32_reg *)reg_cache->reg_list[SECUR0].arch_info)->enable = true;
1266 ((struct nds32_reg *)reg_cache->reg_list[IR20].arch_info)->enable = true;
1267 ((struct nds32_reg *)reg_cache->reg_list[IR22].arch_info)->enable = true;
1268 ((struct nds32_reg *)reg_cache->reg_list[IR24].arch_info)->enable = true;
1269 ((struct nds32_reg *)reg_cache->reg_list[IR30].arch_info)->enable = true;
1271 if (misc_config->shadow == 1) {
1272 ((struct nds32_reg *)reg_cache->reg_list[IR21].arch_info)->enable = true;
1273 ((struct nds32_reg *)reg_cache->reg_list[IR23].arch_info)->enable = true;
1274 ((struct nds32_reg *)reg_cache->reg_list[IR25].arch_info)->enable = true;
/* memory_protection == 2: full MMU with TLB-related MR registers. */
1278 } else if (mmu_config->memory_protection == 2) {
1279 ((struct nds32_reg *)reg_cache->reg_list[MR1].arch_info)->enable = true;
1280 ((struct nds32_reg *)reg_cache->reg_list[MR4].arch_info)->enable = true;
1282 if ((cpu_version->cpu_id_family != 0xA) && (cpu_version->cpu_id_family != 0xC) &&
1283 (cpu_version->cpu_id_family != 0xD))
1284 ((struct nds32_reg *)reg_cache->reg_list[MR5].arch_info)->enable = true;
1287 if (mmu_config->memory_protection > 0) {
1288 ((struct nds32_reg *)reg_cache->reg_list[MR2].arch_info)->enable = true;
1289 ((struct nds32_reg *)reg_cache->reg_list[MR3].arch_info)->enable = true;
/* Local memory control registers only when ILM/DLM exist (non-zero base)
 * and we are privileged enough to touch them. */
1292 if (memory_config->ilm_base != 0)
1293 if (nds32->privilege_level == 0)
1294 ((struct nds32_reg *)reg_cache->reg_list[MR6].arch_info)->enable = true;
1296 if (memory_config->dlm_base != 0)
1297 if (nds32->privilege_level == 0)
1298 ((struct nds32_reg *)reg_cache->reg_list[MR7].arch_info)->enable = true;
1300 if ((memory_config->icache.line_size != 0) && (memory_config->dcache.line_size != 0))
1301 ((struct nds32_reg *)reg_cache->reg_list[MR8].arch_info)->enable = true;
1303 if (misc_config->high_speed_memory_port)
1304 ((struct nds32_reg *)reg_cache->reg_list[MR9].arch_info)->enable = true;
1307 ((struct nds32_reg *)reg_cache->reg_list[MR10].arch_info)->enable = true;
/* EDM present: 5 debug registers per hardware breakpoint, plus the fixed
 * EDM control/status registers DR41/43/44/45. */
1309 if (misc_config->edm) {
1310 int dr_reg_n = nds32->edm.breakpoint_num * 5;
1312 for (int i = 0 ; i < dr_reg_n ; i++)
1313 ((struct nds32_reg *)reg_cache->reg_list[DR0 + i].arch_info)->enable = true;
1315 ((struct nds32_reg *)reg_cache->reg_list[DR41].arch_info)->enable = true;
1316 ((struct nds32_reg *)reg_cache->reg_list[DR43].arch_info)->enable = true;
1317 ((struct nds32_reg *)reg_cache->reg_list[DR44].arch_info)->enable = true;
1318 ((struct nds32_reg *)reg_cache->reg_list[DR45].arch_info)->enable = true;
1321 if (misc_config->debug_tracer) {
1322 ((struct nds32_reg *)reg_cache->reg_list[DR46].arch_info)->enable = true;
1323 ((struct nds32_reg *)reg_cache->reg_list[DR47].arch_info)->enable = true;
1326 if (misc_config->performance_monitor) {
1327 ((struct nds32_reg *)reg_cache->reg_list[PFR0].arch_info)->enable = true;
1328 ((struct nds32_reg *)reg_cache->reg_list[PFR1].arch_info)->enable = true;
1329 ((struct nds32_reg *)reg_cache->reg_list[PFR2].arch_info)->enable = true;
1330 ((struct nds32_reg *)reg_cache->reg_list[PFR3].arch_info)->enable = true;
1333 if (misc_config->local_memory_dma) {
1334 ((struct nds32_reg *)reg_cache->reg_list[DMAR0].arch_info)->enable = true;
1335 ((struct nds32_reg *)reg_cache->reg_list[DMAR1].arch_info)->enable = true;
1336 ((struct nds32_reg *)reg_cache->reg_list[DMAR2].arch_info)->enable = true;
1337 ((struct nds32_reg *)reg_cache->reg_list[DMAR3].arch_info)->enable = true;
1338 ((struct nds32_reg *)reg_cache->reg_list[DMAR4].arch_info)->enable = true;
1339 ((struct nds32_reg *)reg_cache->reg_list[DMAR5].arch_info)->enable = true;
1340 ((struct nds32_reg *)reg_cache->reg_list[DMAR6].arch_info)->enable = true;
1341 ((struct nds32_reg *)reg_cache->reg_list[DMAR7].arch_info)->enable = true;
1342 ((struct nds32_reg *)reg_cache->reg_list[DMAR8].arch_info)->enable = true;
1343 ((struct nds32_reg *)reg_cache->reg_list[DMAR9].arch_info)->enable = true;
1344 ((struct nds32_reg *)reg_cache->reg_list[DMAR10].arch_info)->enable = true;
1347 if ((misc_config->local_memory_dma || misc_config->performance_monitor) &&
1348 (no_racr0 == false))
1349 ((struct nds32_reg *)reg_cache->reg_list[RACR].arch_info)->enable = true;
1351 if (cpu_version->cop_fpu_extension || (misc_config->audio_isa != 0))
1352 ((struct nds32_reg *)reg_cache->reg_list[FUCPR].arch_info)->enable = true;
/* Audio (DSP) ISA: enable the audio register file; the 24-bit extensions
 * D0L24/D1L24 only exist for audio_isa > 1. */
1354 if (misc_config->audio_isa != 0) {
1355 if (misc_config->audio_isa > 1) {
1356 ((struct nds32_reg *)reg_cache->reg_list[D0L24].arch_info)->enable = true;
1357 ((struct nds32_reg *)reg_cache->reg_list[D1L24].arch_info)->enable = true;
1360 ((struct nds32_reg *)reg_cache->reg_list[I0].arch_info)->enable = true;
1361 ((struct nds32_reg *)reg_cache->reg_list[I1].arch_info)->enable = true;
1362 ((struct nds32_reg *)reg_cache->reg_list[I2].arch_info)->enable = true;
1363 ((struct nds32_reg *)reg_cache->reg_list[I3].arch_info)->enable = true;
1364 ((struct nds32_reg *)reg_cache->reg_list[I4].arch_info)->enable = true;
1365 ((struct nds32_reg *)reg_cache->reg_list[I5].arch_info)->enable = true;
1366 ((struct nds32_reg *)reg_cache->reg_list[I6].arch_info)->enable = true;
1367 ((struct nds32_reg *)reg_cache->reg_list[I7].arch_info)->enable = true;
1368 ((struct nds32_reg *)reg_cache->reg_list[M1].arch_info)->enable = true;
1369 ((struct nds32_reg *)reg_cache->reg_list[M2].arch_info)->enable = true;
1370 ((struct nds32_reg *)reg_cache->reg_list[M3].arch_info)->enable = true;
1371 ((struct nds32_reg *)reg_cache->reg_list[M5].arch_info)->enable = true;
1372 ((struct nds32_reg *)reg_cache->reg_list[M6].arch_info)->enable = true;
1373 ((struct nds32_reg *)reg_cache->reg_list[M7].arch_info)->enable = true;
1374 ((struct nds32_reg *)reg_cache->reg_list[MOD].arch_info)->enable = true;
1375 ((struct nds32_reg *)reg_cache->reg_list[LBE].arch_info)->enable = true;
1376 ((struct nds32_reg *)reg_cache->reg_list[LE].arch_info)->enable = true;
1377 ((struct nds32_reg *)reg_cache->reg_list[LC].arch_info)->enable = true;
1378 ((struct nds32_reg *)reg_cache->reg_list[ADM_VBASE].arch_info)->enable = true;
1379 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL0].arch_info)->enable = true;
1380 ((struct nds32_reg *)reg_cache->reg_list[SHFT_CTL1].arch_info)->enable = true;
1383 uint32_t fucpr_backup;
1384 /* enable fpu and get configuration */
/* Temporarily set FUCPR bit 31 (audio/FPU access enable) so MOD can be
 * read, then restore the original FUCPR value. */
1385 nds32_get_mapped_reg(nds32, FUCPR, &fucpr_backup);
1386 if ((fucpr_backup & 0x80000000) == 0)
1387 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup | 0x80000000);
1388 nds32_get_mapped_reg(nds32, MOD, &value_mod);
1389 /* restore origin fucpr value */
1390 if ((fucpr_backup & 0x80000000) == 0)
1391 nds32_set_mapped_reg(nds32, FUCPR, fucpr_backup);
/* MOD bit 6 indicates the circular-buffer feature; enable its registers. */
1393 if ((value_mod >> 6) & 0x1) {
1394 ((struct nds32_reg *)reg_cache->reg_list[CB_CTL].arch_info)->enable = true;
1395 ((struct nds32_reg *)reg_cache->reg_list[CBB0].arch_info)->enable = true;
1396 ((struct nds32_reg *)reg_cache->reg_list[CBB1].arch_info)->enable = true;
1397 ((struct nds32_reg *)reg_cache->reg_list[CBB2].arch_info)->enable = true;
1398 ((struct nds32_reg *)reg_cache->reg_list[CBB3].arch_info)->enable = true;
1399 ((struct nds32_reg *)reg_cache->reg_list[CBE0].arch_info)->enable = true;
1400 ((struct nds32_reg *)reg_cache->reg_list[CBE1].arch_info)->enable = true;
1401 ((struct nds32_reg *)reg_cache->reg_list[CBE2].arch_info)->enable = true;
1402 ((struct nds32_reg *)reg_cache->reg_list[CBE3].arch_info)->enable = true;
1406 if ((cpu_version->cpu_id_family == 0x9) ||
1407 (cpu_version->cpu_id_family == 0xA) ||
1408 (cpu_version->cpu_id_family == 0xC)) {
1410 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = true;
1411 ((struct nds32_reg *)reg_cache->reg_list[IDR1].arch_info)->enable = true;
/* Specific 0xC-family revision lacks IDR0 despite the family rule above. */
1413 if ((cpu_version->cpu_id_family == 0xC) && (cpu_version->revision == 0x0C))
1414 ((struct nds32_reg *)reg_cache->reg_list[IDR0].arch_info)->enable = false;
/* IR3 (IVB) fields decide whether programmable-priority / newer IVIC
 * interrupt registers exist. */
1418 uint32_t ivb_prog_pri_lvl;
1419 uint32_t ivb_ivic_ver;
1421 nds32_get_mapped_reg(nds32, IR3, &ir3_value);
1422 ivb_prog_pri_lvl = ir3_value & 0x1;
1423 ivb_ivic_ver = (ir3_value >> 11) & 0x3;
1425 if ((ivb_prog_pri_lvl == 1) || (ivb_ivic_ver >= 1)) {
1426 ((struct nds32_reg *)reg_cache->reg_list[IR18].arch_info)->enable = true;
1427 ((struct nds32_reg *)reg_cache->reg_list[IR19].arch_info)->enable = true;
1430 if (ivb_ivic_ver >= 1) {
1431 ((struct nds32_reg *)reg_cache->reg_list[IR26].arch_info)->enable = true;
1432 ((struct nds32_reg *)reg_cache->reg_list[IR27].arch_info)->enable = true;
1433 ((struct nds32_reg *)reg_cache->reg_list[IR28].arch_info)->enable = true;
1434 ((struct nds32_reg *)reg_cache->reg_list[IR29].arch_info)->enable = true;
/*
 * Build the register table for this target: populate the mandatory
 * (always-present) registers. Optional registers are enabled separately by
 * nds32_init_option_registers() once the configuration registers have been
 * probed. NOTE(review): the trailing return appears stripped from this paste.
 */
1440 int nds32_init_register_table(struct nds32 *nds32)
1442 nds32_init_must_have_registers(nds32);
/*
 * Plant a software breakpoint: read the instruction at the breakpoint
 * address, pick the 16-bit or 32-bit BREAK opcode to match the original
 * instruction's width, save the original bytes in breakpoint->orig_instr,
 * write the BREAK opcode, sync the caches, and read back to verify.
 *
 * NOTE(review): the declaration of the local `data` buffer and the final
 * return statements appear stripped from this paste — confirm against the
 * full file. The `if (...) free(...)` guard below is redundant (free(NULL)
 * is a no-op) but harmless.
 */
1447 int nds32_add_software_breakpoint(struct target *target,
1448 struct breakpoint *breakpoint)
1451 uint32_t check_data;
1452 uint32_t break_insn;
1454 /* check the breakpoint size */
1455 target->type->read_buffer(target, breakpoint->address, 4, (uint8_t *)&data);
1457 /* backup origin instruction
1458 * instruction is big-endian */
/* Top bit of the first byte distinguishes 16-bit from 32-bit encodings. */
1459 if (*(char *)&data & 0x80) { /* 16-bits instruction */
1460 breakpoint->length = 2;
1461 break_insn = NDS32_BREAK_16;
1462 } else { /* 32-bits instruction */
1463 breakpoint->length = 4;
1464 break_insn = NDS32_BREAK_32;
1467 if (breakpoint->orig_instr != NULL)
1468 free(breakpoint->orig_instr);
1470 breakpoint->orig_instr = malloc(breakpoint->length);
1471 memcpy(breakpoint->orig_instr, &data, breakpoint->length);
1473 /* self-modified code */
1474 target->type->write_buffer(target, breakpoint->address, breakpoint->length, (const uint8_t *)&break_insn);
1475 /* write_back & invalidate dcache & invalidate icache */
1476 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1478 /* read back to check */
1479 target->type->read_buffer(target, breakpoint->address, breakpoint->length, (uint8_t *)&check_data);
1480 if (memcmp(&check_data, &break_insn, breakpoint->length) == 0)
/*
 * Remove a software breakpoint: verify that the BREAK opcode of the
 * recorded length is still present at the address (bail out otherwise),
 * restore the saved original instruction bytes, and sync the caches.
 * NOTE(review): the error-return branches (bad length, memcmp mismatch,
 * final return) appear stripped from this paste.
 */
1486 int nds32_remove_software_breakpoint(struct target *target,
1487 struct breakpoint *breakpoint)
1489 uint32_t check_data;
1490 uint32_t break_insn;
/* Reconstruct the BREAK opcode that was planted, by saved length. */
1492 if (breakpoint->length == 2)
1493 break_insn = NDS32_BREAK_16;
1494 else if (breakpoint->length == 4)
1495 break_insn = NDS32_BREAK_32;
1499 target->type->read_buffer(target, breakpoint->address, breakpoint->length,
1500 (uint8_t *)&check_data);
1502 /* break instruction is modified */
1503 if (memcmp(&check_data, &break_insn, breakpoint->length) != 0)
1506 /* self-modified code */
1507 target->type->write_buffer(target, breakpoint->address, breakpoint->length,
1508 breakpoint->orig_instr);
1510 /* write_back & invalidate dcache & invalidate icache */
1511 nds32_cache_sync(target, breakpoint->address, breakpoint->length);
1517 * Restore the processor context on an Andes target. The full processor
1518 * context is analyzed to see if any of the registers are dirty on this end, but
1519 * have a valid new value. If this is the case, the processor is changed to the
1520 * appropriate mode and the new register values are written out to the
1521 * processor. If there happens to be a dirty register with an invalid value, an
1522 * error will be logged.
1524 * @param target Pointer to the Andes target to have its context restored
1525 * @return Error status if the target is not halted.
1527 int nds32_restore_context(struct target *target)
1529 struct nds32 *nds32 = target_to_nds32(target);
1530 struct aice_port_s *aice = target_to_aice(target);
1531 struct reg_cache *reg_cache = nds32->core_cache;
1533 struct nds32_reg *reg_arch_info;
/* Only a halted target's register cache may be written back. */
1538 if (target->state != TARGET_HALTED) {
1539 LOG_WARNING("target not halted");
1540 return ERROR_TARGET_NOT_HALTED;
1543 /* check if there are dirty registers */
/* Walk the whole cache; each dirty+valid register is written back through
 * the AICE adapter. NOTE(review): the dirty-but-invalid error branch
 * appears stripped from this paste. */
1544 for (i = 0; i < reg_cache->num_regs; i++) {
1545 reg = &(reg_cache->reg_list[i]);
1546 if (reg->dirty == true) {
1547 if (reg->valid == true) {
1549 LOG_DEBUG("examining dirty reg: %s", reg->name);
1550 LOG_DEBUG("writing register %d with value 0x%8.8" PRIx32,
1551 i, buf_get_u32(reg->value, 0, 32));
1553 reg_arch_info = reg->arch_info;
/* FPU double registers (FD0..FD31) are 64 bits wide and need the 64-bit
 * write path; everything else is written as a 32-bit value. */
1554 if (FD0 <= reg_arch_info->num && reg_arch_info->num <= FD31) {
1555 uint64_t val = buf_get_u64(reg_arch_info->value, 0, 64);
1556 aice_write_reg_64(aice, reg_arch_info->num, val);
1558 uint32_t val = buf_get_u32(reg_arch_info->value, 0, 32);
1559 aice_write_register(aice, reg_arch_info->num, val);
/*
 * Probe the Embedded Debug Module (EDM) configuration through the AICE
 * adapter and fill in nds32->edm: version, number of hardware breakpoints,
 * access-control support, direct local-memory access, and MAX_STOP support.
 */
1571 int nds32_edm_config(struct nds32 *nds32)
1573 struct target *target = nds32->target;
1574 struct aice_port_s *aice = target_to_aice(target);
1578 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CFG, &edm_cfg);
/* Upper halfword of EDM_CFG carries the EDM version. */
1580 nds32->edm.version = (edm_cfg >> 16) & 0xFFFF;
1581 LOG_INFO("EDM version 0x%04x", nds32->edm.version);
/* Low 3 bits encode (breakpoint count - 1). */
1583 nds32->edm.breakpoint_num = (edm_cfg & 0x7) + 1;
1585 if ((nds32->edm.version & 0x1000) || (0x60 <= nds32->edm.version))
1586 nds32->edm.access_control = true;
1588 nds32->edm.access_control = false;
1590 if ((edm_cfg >> 4) & 0x1)
1591 nds32->edm.direct_access_local_memory = true;
1593 nds32->edm.direct_access_local_memory = false;
/* Old EDMs (<= 0x20) cannot access local memory directly regardless of
 * the capability bit. */
1595 if (nds32->edm.version <= 0x20)
1596 nds32->edm.direct_access_local_memory = false;
1598 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &edm_ctl);
/* EDM_CTL bit 29 indicates MAX_STOP support. */
1599 if (edm_ctl & (0x1 << 29))
1600 nds32->edm.support_max_stop = true;
1602 nds32->edm.support_max_stop = false;
1604 /* set passcode for secure MCU */
/*
 * Top-level target configuration: probe the mandatory config registers,
 * enable the optional registers they imply, derive the maximum interrupt
 * level, and read the ILM/DLM sizes out of MR6/MR7.
 */
1610 int nds32_config(struct nds32 *nds32)
1612 nds32_init_config(nds32);
1614 /* init optional system registers according to config registers */
1615 nds32_init_option_registers(nds32);
1617 /* get max interrupt level */
1618 if (nds32->misc_config.interruption_level)
1619 nds32->max_interrupt_level = 2;
1621 nds32->max_interrupt_level = 3;
1623 /* get ILM/DLM size from MR6/MR7 */
1624 uint32_t value_mr6, value_mr7;
1625 uint32_t size_index;
/* Bits [4:1] of MR6/MR7 index into the local-memory size table. */
1626 nds32_get_mapped_reg(nds32, MR6, &value_mr6);
1627 size_index = (value_mr6 >> 1) & 0xF;
1628 nds32->memory.ilm_size = NDS32_LM_SIZE_TABLE[size_index];
1630 nds32_get_mapped_reg(nds32, MR7, &value_mr7);
1631 size_index = (value_mr7 >> 1) & 0xF;
1632 nds32->memory.dlm_size = NDS32_LM_SIZE_TABLE[size_index];
/*
 * Initialize the common NDS32 architecture state for a freshly created
 * target: link target <-> nds32, set default policy flags, set up the
 * virtual-hosting syscall breakpoint descriptor, and build the register
 * cache and register table.
 */
1637 int nds32_init_arch_info(struct target *target, struct nds32 *nds32)
1639 target->arch_info = nds32;
1640 nds32->target = target;
1642 nds32->common_magic = NDS32_COMMON_MAGIC;
1643 nds32->init_arch_info_after_halted = false;
1644 nds32->auto_convert_hw_bp = true;
1645 nds32->global_stop = false;
1646 nds32->soft_reset_halt = false;
1647 nds32->edm_passcode = NULL;
1648 nds32->privilege_level = 0;
/* Default reset boot time in ms; overridable by configuration elsewhere. */
1649 nds32->boot_time = 1500;
1650 nds32->reset_halt_as_examine = false;
1651 nds32->keep_target_edm_ctl = false;
1652 nds32->word_access_mem = false;
1653 nds32->virtual_hosting = true;
1654 nds32->hit_syscall = false;
1655 nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
1656 nds32->virtual_hosting_errno = 0;
1657 nds32->virtual_hosting_ctrl_c = false;
1658 nds32->attached = false;
/* Breakpoint descriptor used to trap virtual-hosting syscalls. */
1660 nds32->syscall_break.asid = 0;
1661 nds32->syscall_break.length = 4;
1662 nds32->syscall_break.set = 0;
1663 nds32->syscall_break.orig_instr = NULL;
1664 nds32->syscall_break.next = NULL;
/* 0x515CAll: "SYSCALL" pun — hex 0x515CA with an LL suffix, offset by the
 * target number to keep IDs unique across targets. */
1665 nds32->syscall_break.unique_id = 0x515CAll + target->target_number;
1666 nds32->syscall_break.linked_BRP = 0;
1670 if (ERROR_FAIL == nds32_reg_cache_init(target, nds32))
1673 if (ERROR_OK != nds32_init_register_table(nds32))
/*
 * Translate a virtual address to a physical one. If address translation is
 * disabled the address is returned unchanged; otherwise try the TLB probe
 * first and fall back to a software page-table walk.
 */
1679 int nds32_virtual_to_physical(struct target *target, uint32_t address, uint32_t *physical)
1681 struct nds32 *nds32 = target_to_nds32(target);
1683 if (nds32->memory.address_translation == false) {
1684 *physical = address;
1688 if (ERROR_OK == nds32_probe_tlb(nds32, address, physical))
1691 if (ERROR_OK == nds32_walk_page_table(nds32, address, physical))
/*
 * Make [address, address + length) coherent after code has been modified:
 * write back and invalidate the covered D-cache lines, then invalidate the
 * covered I-cache lines. Line size fields are log2-encoded; the table maps
 * them to byte sizes, and (line_size + 2) is the shift for a line index.
 */
1697 int nds32_cache_sync(struct target *target, uint32_t address, uint32_t length)
1699 struct aice_port_s *aice = target_to_aice(target);
1700 struct nds32 *nds32 = target_to_nds32(target);
1701 struct nds32_cache *dcache = &(nds32->memory.dcache);
1702 struct nds32_cache *icache = &(nds32->memory.icache);
1703 uint32_t dcache_line_size = NDS32_LINE_SIZE_TABLE[dcache->line_size];
1704 uint32_t icache_line_size = NDS32_LINE_SIZE_TABLE[icache->line_size];
1705 uint32_t cur_address;
1707 uint32_t start_line, end_line;
/* D-cache: write back then invalidate every line covering the range. */
1710 if ((dcache->line_size != 0) && (dcache->enable == true)) {
1711 /* address / dcache_line_size */
1712 start_line = address >> (dcache->line_size + 2);
1713 /* (address + length - 1) / dcache_line_size */
1714 end_line = (address + length - 1) >> (dcache->line_size + 2);
1716 for (cur_address = address, cur_line = start_line ;
1717 cur_line <= end_line ;
1718 cur_address += dcache_line_size, cur_line++) {
1720 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_WB, cur_address);
1721 if (result != ERROR_OK)
1725 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1D_VA_INVAL, cur_address);
1726 if (result != ERROR_OK)
/* I-cache: invalidate every line covering the range, using physical
 * addresses (see comment below). */
1731 if ((icache->line_size != 0) && (icache->enable == true)) {
1732 /* address / icache_line_size */
1733 start_line = address >> (icache->line_size + 2);
1734 /* (address + length - 1) / icache_line_size */
1735 end_line = (address + length - 1) >> (icache->line_size + 2);
1737 for (cur_address = address, cur_line = start_line ;
1738 cur_line <= end_line ;
1739 cur_address += icache_line_size, cur_line++) {
1740 /* Because PSW.IT is turned off under debug exception, address MUST
1741 * be physical address. L1I_VA_INVALIDATE uses PSW.IT to decide
1742 * address translation or not. */
1743 uint32_t physical_addr;
1744 if (ERROR_FAIL == target->type->virt2phys(target, cur_address,
1749 result = aice_cache_ctl(aice, AICE_CACHE_CTL_L1I_VA_INVAL, physical_addr);
1750 if (result != ERROR_OK)
/*
 * Resolve the PC to resume/step from: either write the caller-supplied
 * address to PC or read the current PC back into `address`.
 * NOTE(review): the `if (!current) ... else ...` lines selecting between the
 * two calls, and the final `return address;`, appear stripped from this
 * paste — confirm against the full file.
 */
1758 uint32_t nds32_nextpc(struct nds32 *nds32, int current, uint32_t address)
1761 nds32_set_mapped_reg(nds32, PC, address);
1763 nds32_get_mapped_reg(nds32, PC, &address);
/*
 * Single-step the target: resolve the PC, program IR14 bit 31 according to
 * the step-over-ISR policy, leave debug state, issue one AICE step (unless
 * a just-hit syscall already consumed the step), re-enter debug state,
 * restore IR14, and report the HALTED event.
 */
1768 int nds32_step(struct target *target, int current,
1769 uint32_t address, int handle_breakpoints)
1771 LOG_DEBUG("target->state: %s",
1772 target_state_name(target));
1774 if (target->state != TARGET_HALTED) {
1775 LOG_WARNING("target was not halted");
1776 return ERROR_TARGET_NOT_HALTED;
1779 struct nds32 *nds32 = target_to_nds32(target);
1781 address = nds32_nextpc(nds32, current, address);
1783 LOG_DEBUG("STEP PC %08" PRIx32 "%s", address, !current ? "!" : "");
/* IR14 bit 31 controls whether interrupts are taken while stepping;
 * honor the user's step_isr_enable policy for this one step. */
1786 uint32_t ir14_value;
1787 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1788 if (nds32->step_isr_enable)
1789 ir14_value |= (0x1 << 31);
1791 ir14_value &= ~(0x1 << 31);
1792 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1794 /* check hit_syscall before leave_debug_state() because
1795 * leave_debug_state() may clear hit_syscall flag */
1796 bool no_step = false;
/* NOTE(review): the `no_step = true;` assignment in the hit_syscall branch
 * appears stripped from this paste. */
1797 if (nds32->hit_syscall)
1798 /* step after hit_syscall should be ignored because
1799 * leave_debug_state will step implicitly to skip the
1803 /********* TODO: maybe create another function to handle this part */
1804 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
1805 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
1807 if (no_step == false) {
1808 struct aice_port_s *aice = target_to_aice(target);
1809 if (ERROR_OK != aice_step(aice))
1814 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
1815 /********* TODO: maybe create another function to handle this part */
/* Restore IR14 bit 31 to its default after an ISR-enabled step. */
1818 if (nds32->step_isr_enable) {
1819 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1820 ir14_value &= ~(0x1 << 31);
1821 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1824 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
/*
 * Internal single-step that bypasses watchpoint handling (passes `false`
 * to leave/enter_debug_state). Used by the debug-reason examination code
 * to skip over the instruction that triggered a precise watchpoint.
 * Same IR14 bit-31 (step-over-ISR) handling as nds32_step().
 */
1829 static int nds32_step_without_watchpoint(struct nds32 *nds32)
1831 struct target *target = nds32->target;
1833 if (target->state != TARGET_HALTED) {
1834 LOG_WARNING("target was not halted");
1835 return ERROR_TARGET_NOT_HALTED;
1839 uint32_t ir14_value;
1840 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1841 if (nds32->step_isr_enable)
1842 ir14_value |= (0x1 << 31);
1844 ir14_value &= ~(0x1 << 31);
1845 nds32_set_mapped_reg(nds32, IR14, ir14_value);
1847 /********* TODO: maybe create another function to handle this part */
1848 CHECK_RETVAL(nds32->leave_debug_state(nds32, false));
1850 struct aice_port_s *aice = target_to_aice(target);
1852 if (ERROR_OK != aice_step(aice))
1856 CHECK_RETVAL(nds32->enter_debug_state(nds32, false));
1857 /********* TODO: maybe create another function to handle this part */
1860 if (nds32->step_isr_enable) {
1861 nds32_get_mapped_reg(nds32, IR14, &ir14_value);
1862 ir14_value &= ~(0x1 << 31);
1863 nds32_set_mapped_reg(nds32, IR14, ir14_value);
/*
 * Query the AICE adapter for the hardware state and map it onto OpenOCD's
 * enum target_state through *state. Disconnect conditions are logged.
 * NOTE(review): the per-case return/break statements appear stripped from
 * this paste — confirm against the full file.
 */
1869 int nds32_target_state(struct nds32 *nds32, enum target_state *state)
1871 struct aice_port_s *aice = target_to_aice(nds32->target);
1872 enum aice_target_state_s nds32_state;
1874 if (aice_state(aice, &nds32_state) != ERROR_OK)
1877 switch (nds32_state) {
1878 case AICE_DISCONNECT:
1879 LOG_INFO("USB is disconnected");
1881 case AICE_TARGET_DETACH:
1882 LOG_INFO("Target is disconnected");
1884 case AICE_TARGET_UNKNOWN:
1885 *state = TARGET_UNKNOWN;
1887 case AICE_TARGET_RUNNING:
1888 *state = TARGET_RUNNING;
1890 case AICE_TARGET_HALTED:
1891 *state = TARGET_HALTED;
1893 case AICE_TARGET_RESET:
1894 *state = TARGET_RESET;
1896 case AICE_TARGET_DEBUG_RUNNING:
1897 *state = TARGET_DEBUG_RUNNING;
/*
 * Determine why the target halted and set target->debug_reason accordingly:
 * syscall hit, BREAK instruction (distinguishing the 'break 0x7FFF' exit
 * convention), precise/imprecise watchpoints (recording the watched address
 * and, for precise ones, stepping past the faulting instruction), debug
 * interrupt, or hardware single step.
 */
1906 int nds32_examine_debug_reason(struct nds32 *nds32)
1909 struct target *target = nds32->target;
/* A pending virtual-hosting syscall is reported as a breakpoint. */
1911 if (nds32->hit_syscall == true) {
1912 LOG_DEBUG("Hit syscall breakpoint");
1913 target->debug_reason = DBG_REASON_BREAKPOINT;
1917 nds32->get_debug_reason(nds32, &reason);
1919 LOG_DEBUG("nds32 examines debug reason: %s", nds32_debug_type_name[reason]);
1921 /* Examine debug reason */
1923 case NDS32_DEBUG_BREAK:
1924 case NDS32_DEBUG_BREAK_16:
1925 case NDS32_DEBUG_INST_BREAK:
/* Decode the instruction at PC to see whether it is the magic
 * 'break 0x7FFF' used to signal program exit. */
1929 struct nds32_instruction instruction;
1931 nds32_get_mapped_reg(nds32, PC, &value_pc);
1933 if (ERROR_OK != nds32_read_opcode(nds32, value_pc, &opcode))
1935 if (ERROR_OK != nds32_evaluate_opcode(nds32, opcode, value_pc,
1939 /* hit 'break 0x7FFF' */
1940 if ((instruction.info.opc_6 == 0x32) &&
1941 (instruction.info.sub_opc == 0xA) &&
1942 (instruction.info.imm == 0x7FFF)) {
1943 target->debug_reason = DBG_REASON_EXIT;
1945 target->debug_reason = DBG_REASON_BREAKPOINT;
1948 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_PRECISE:
1949 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_PRECISE:
1950 case NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP: /* GLOBAL_STOP is precise exception */
/* Precise watchpoint: capture the watched address, then single-step
 * (without watchpoints armed) to get past the faulting instruction. */
1954 result = nds32->get_watched_address(nds32,
1955 &(nds32->watched_address), reason);
1956 /* do single step(without watchpoints) to skip the "watched" instruction */
1957 nds32_step_without_watchpoint(nds32);
1959 /* before single_step, save exception address */
1960 if (ERROR_OK != result)
1963 target->debug_reason = DBG_REASON_WATCHPOINT;
1966 case NDS32_DEBUG_DEBUG_INTERRUPT:
1967 target->debug_reason = DBG_REASON_DBGRQ;
1969 case NDS32_DEBUG_HARDWARE_SINGLE_STEP:
1970 target->debug_reason = DBG_REASON_SINGLESTEP;
1972 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_IMPRECISE:
1973 case NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE:
1974 case NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE:
1975 if (ERROR_OK != nds32->get_watched_address(nds32,
1976 &(nds32->watched_address), reason))
1979 target->debug_reason = DBG_REASON_WATCHPOINT;
1982 target->debug_reason = DBG_REASON_UNDEFINED;
/*
 * Authenticate with a secure MCU's EDM: convert the configured passcode
 * into a sequence of "write_misc gen_port0 <value>;" EDM commands (8 hex
 * chars per write), program them via the AICE adapter, read back the
 * resulting privilege level from EDMSW, and finally replay any queued
 * user-defined EDM operations (nds32_edm_ops).
 * NOTE(review): the declarations of `code_str`/`code` and the `else`
 * setting copy_length to 8 appear stripped from this paste.
 */
1989 int nds32_login(struct nds32 *nds32)
1991 struct target *target = nds32->target;
1992 struct aice_port_s *aice = target_to_aice(target);
1993 uint32_t passcode_length;
1994 char command_sequence[129];
1995 char command_str[33];
1997 uint32_t copy_length;
2001 LOG_DEBUG("nds32_login");
2003 if (nds32->edm_passcode != NULL) {
2004 /* convert EDM passcode to command sequences */
2005 passcode_length = strlen(nds32->edm_passcode);
2006 command_sequence[0] = '\0';
/* Consume the passcode 8 hex digits at a time; a short final chunk
 * is copied at its actual length. */
2007 for (i = 0; i < passcode_length; i += 8) {
2008 if (passcode_length - i < 8)
2009 copy_length = passcode_length - i;
2013 strncpy(code_str, nds32->edm_passcode + i, copy_length);
2014 code_str[copy_length] = '\0';
2015 code = strtoul(code_str, NULL, 16);
2017 sprintf(command_str, "write_misc gen_port0 0x%" PRIx32 ";", code);
2018 strcat(command_sequence, command_str);
2021 if (ERROR_OK != aice_program_edm(aice, command_sequence))
2024 /* get current privilege level */
/* EDMSW bits [17:16] report the privilege level granted after login. */
2025 uint32_t value_edmsw;
2026 aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &value_edmsw);
2027 nds32->privilege_level = (value_edmsw >> 16) & 0x3;
2028 LOG_INFO("Current privilege level: %d", nds32->privilege_level);
/* Replay user-queued EDM writes (reg_no 6 -> gen_port0, 7 -> gen_port1). */
2031 if (nds32_edm_ops_num > 0) {
2032 const char *reg_name;
2033 for (i = 0 ; i < nds32_edm_ops_num ; i++) {
2034 code = nds32_edm_ops[i].value;
2035 if (nds32_edm_ops[i].reg_no == 6)
2036 reg_name = "gen_port0";
2037 else if (nds32_edm_ops[i].reg_no == 7)
2038 reg_name = "gen_port1";
2042 sprintf(command_str, "write_misc %s 0x%" PRIx32 ";", reg_name, code);
2043 if (ERROR_OK != aice_program_edm(aice, command_str))
/*
 * Halt the target: if the hardware is not already halted, request a halt
 * through the AICE adapter, then enter debug state and fire the HALTED
 * event. A target already in TARGET_HALTED is reported and left alone.
 */
2051 int nds32_halt(struct target *target)
2053 struct nds32 *nds32 = target_to_nds32(target);
2054 struct aice_port_s *aice = target_to_aice(target);
2055 enum target_state state;
2057 LOG_DEBUG("target->state: %s",
2058 target_state_name(target));
2060 if (target->state == TARGET_HALTED) {
2061 LOG_DEBUG("target was already halted");
2065 if (nds32_target_state(nds32, &state) != ERROR_OK)
2068 if (TARGET_HALTED != state)
2069 /* TODO: if state == TARGET_HALTED, check ETYPE is DBGI or not */
2070 if (ERROR_OK != aice_halt(aice))
2073 CHECK_RETVAL(nds32->enter_debug_state(nds32, true));
2075 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
2080 /* poll current target status */
/*
 * Periodic poll: read the hardware state via nds32_target_state() and
 * reconcile OpenOCD's target->state with it — entering debug state and
 * firing TARGET_EVENT_HALTED on a fresh halt, invalidating the register
 * cache on reset, and falling back to TARGET_RUNNING otherwise.
 */
2081 int nds32_poll(struct target *target)
2083 struct nds32 *nds32 = target_to_nds32(target);
2084 enum target_state state;
2086 if (nds32_target_state(nds32, &state) != ERROR_OK)
2089 if (state == TARGET_HALTED) {
2090 if (target->state != TARGET_HALTED) {
2091 /* if false_hit, continue free_run */
/* enter_debug_state failing here is treated as a spurious halt:
 * resume free-running instead of reporting HALTED. */
2092 if (ERROR_OK != nds32->enter_debug_state(nds32, true)) {
2093 struct aice_port_s *aice = target_to_aice(target);
2098 LOG_DEBUG("Change target state to TARGET_HALTED.");
2100 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
2102 } else if (state == TARGET_RESET) {
2103 if (target->state == TARGET_HALTED) {
2104 /* similar to assert srst */
/* Registers are stale after a reset observed while halted. */
2105 register_cache_invalidate(nds32->core_cache);
2106 target->state = TARGET_RESET;
2108 /* TODO: deassert srst */
2109 } else if (target->state == TARGET_RUNNING) {
2110 /* reset as running */
2111 LOG_WARNING("<-- TARGET WARNING! The debug target has been reset. -->");
2114 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2115 LOG_DEBUG("Change target state to TARGET_RUNNING.");
2116 target->state = TARGET_RUNNING;
2117 target->debug_reason = DBG_REASON_NOTHALTED;
/*
 * Resume execution: resolve the PC, free working areas (unless this is a
 * debug_execution resume), disable hardware single-step in IR0 so a stale
 * HSS bit cannot stop the core, leave debug state, and issue the AICE run
 * command (skipped when a virtual-hosting Ctrl-C is pending). Finally
 * update target->state/debug_reason for OpenOCD's bookkeeping.
 */
2124 int nds32_resume(struct target *target, int current,
2125 uint32_t address, int handle_breakpoints, int debug_execution)
2127 LOG_DEBUG("current %d address %08" PRIx32
2128 " handle_breakpoints %d"
2129 " debug_execution %d",
2130 current, address, handle_breakpoints, debug_execution);
2132 struct nds32 *nds32 = target_to_nds32(target);
2134 if (target->state != TARGET_HALTED) {
2135 LOG_ERROR("Target not halted");
2136 return ERROR_TARGET_NOT_HALTED;
2139 address = nds32_nextpc(nds32, current, address);
2141 LOG_DEBUG("RESUME PC %08" PRIx32 "%s", address, !current ? "!" : "");
2143 if (!debug_execution)
2144 target_free_all_working_areas(target);
2146 /* Disable HSS to avoid users misuse HSS */
/* IR0 bit 11 is cleared only when not already at the maximum interrupt
 * level (where IR0 would not be writable in the usual way). */
2147 if (nds32_reach_max_interrupt_level(nds32) == false) {
2149 nds32_get_mapped_reg(nds32, IR0, &value_ir0);
2150 value_ir0 &= ~(0x1 << 11);
2151 nds32_set_mapped_reg(nds32, IR0, value_ir0);
2154 CHECK_RETVAL(nds32->leave_debug_state(nds32, true));
2155 CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
/* If Ctrl-C was requested during virtual hosting, skip the run command
 * and clear the flag; the target stays stopped. NOTE(review): the
 * aice_run() call inside this branch appears stripped from this paste. */
2157 if (nds32->virtual_hosting_ctrl_c == false) {
2158 struct aice_port_s *aice = target_to_aice(target);
2161 nds32->virtual_hosting_ctrl_c = false;
2163 target->debug_reason = DBG_REASON_NOTHALTED;
2164 if (!debug_execution)
2165 target->state = TARGET_RUNNING;
2167 target->state = TARGET_DEBUG_RUNNING;
2169 LOG_DEBUG("target->state: %s",
2170 target_state_name(target));
2175 static int nds32_soft_reset_halt(struct target *target)
2178 struct nds32 *nds32 = target_to_nds32(target);
2179 struct aice_port_s *aice = target_to_aice(target);
2181 aice_assert_srst(aice, AICE_SRST);
2183 /* halt core and set pc to 0x0 */
2184 int retval = target_halt(target);
2185 if (retval != ERROR_OK)
2188 /* start fetching from IVB */
2190 nds32_get_mapped_reg(nds32, IR3, &value_ir3);
2191 nds32_set_mapped_reg(nds32, PC, value_ir3 & 0xFFFF0000);
2196 int nds32_assert_reset(struct target *target)
2198 struct nds32 *nds32 = target_to_nds32(target);
2199 struct aice_port_s *aice = target_to_aice(target);
2200 struct nds32_cpu_version *cpu_version = &(nds32->cpu_version);
2202 if (target->reset_halt) {
2203 if ((nds32->soft_reset_halt)
2204 || (nds32->edm.version < 0x51)
2205 || ((nds32->edm.version == 0x51)
2206 && (cpu_version->revision == 0x1C)
2207 && (cpu_version->cpu_id_family == 0xC)
2208 && (cpu_version->cpu_id_version == 0x0)))
2209 nds32_soft_reset_halt(target);
2211 aice_assert_srst(aice, AICE_RESET_HOLD);
2213 aice_assert_srst(aice, AICE_SRST);
2214 alive_sleep(nds32->boot_time);
2217 /* set passcode for secure MCU after core reset */
2220 /* registers are now invalid */
2221 register_cache_invalidate(nds32->core_cache);
2223 target->state = TARGET_RESET;
2228 static int nds32_gdb_attach(struct nds32 *nds32)
2230 LOG_DEBUG("nds32_gdb_attach, target coreid: %" PRId32, nds32->target->coreid);
2232 if (nds32->attached == false) {
2234 if (nds32->keep_target_edm_ctl) {
2235 /* backup target EDM_CTL */
2236 struct aice_port_s *aice = target_to_aice(nds32->target);
2237 aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CTL, &nds32->backup_edm_ctl);
2240 target_halt(nds32->target);
2242 nds32->attached = true;
2248 static int nds32_gdb_detach(struct nds32 *nds32)
2250 LOG_DEBUG("nds32_gdb_detach");
2251 bool backup_virtual_hosting_setting;
2253 if (nds32->attached) {
2255 backup_virtual_hosting_setting = nds32->virtual_hosting;
2256 /* turn off virtual hosting before resume as gdb-detach */
2257 nds32->virtual_hosting = false;
2258 target_resume(nds32->target, 1, 0, 0, 0);
2259 nds32->virtual_hosting = backup_virtual_hosting_setting;
2261 if (nds32->keep_target_edm_ctl) {
2262 /* restore target EDM_CTL */
2263 struct aice_port_s *aice = target_to_aice(nds32->target);
2264 aice_write_debug_reg(aice, NDS_EDM_SR_EDM_CTL, nds32->backup_edm_ctl);
2267 nds32->attached = false;
2273 static int nds32_callback_event_handler(struct target *target,
2274 enum target_event event, void *priv)
2276 int retval = ERROR_OK;
2277 int target_number = *(int *)priv;
2279 if (target_number != target->target_number)
2282 struct nds32 *nds32 = target_to_nds32(target);
2285 case TARGET_EVENT_GDB_ATTACH:
2286 retval = nds32_gdb_attach(nds32);
2288 case TARGET_EVENT_GDB_DETACH:
2289 retval = nds32_gdb_detach(nds32);
2298 int nds32_init(struct nds32 *nds32)
2300 /* Initialize anything we can set up without talking to the target */
2301 nds32->memory.access_channel = NDS_MEMORY_ACC_CPU;
2303 /* register event callback */
2304 target_register_event_callback(nds32_callback_event_handler,
2305 &(nds32->target->target_number));
2310 int nds32_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
2312 /* fill syscall parameters to file-I/O info */
2313 if (NULL == fileio_info) {
2314 LOG_ERROR("Target has not initial file-I/O data structure");
2318 struct nds32 *nds32 = target_to_nds32(target);
2320 uint32_t syscall_id;
2322 if (nds32->hit_syscall == false)
2325 nds32_get_mapped_reg(nds32, IR6, &value_ir6);
2326 syscall_id = (value_ir6 >> 16) & 0x7FFF;
2327 nds32->active_syscall_id = syscall_id;
2329 LOG_DEBUG("hit syscall ID: 0x%" PRIx32, syscall_id);
2331 /* free previous identifier storage */
2332 if (NULL != fileio_info->identifier) {
2333 free(fileio_info->identifier);
2334 fileio_info->identifier = NULL;
2337 switch (syscall_id) {
2338 case NDS32_SYSCALL_EXIT:
2339 fileio_info->identifier = malloc(5);
2340 sprintf(fileio_info->identifier, "exit");
2341 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2343 case NDS32_SYSCALL_OPEN:
2345 uint8_t filename[256];
2346 fileio_info->identifier = malloc(5);
2347 sprintf(fileio_info->identifier, "open");
2348 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2349 /* reserve fileio_info->param_2 for length of path */
2350 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_3));
2351 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_4));
2353 target->type->read_buffer(target, fileio_info->param_1,
2355 fileio_info->param_2 = strlen((char *)filename) + 1;
2358 case NDS32_SYSCALL_CLOSE:
2359 fileio_info->identifier = malloc(6);
2360 sprintf(fileio_info->identifier, "close");
2361 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2363 case NDS32_SYSCALL_READ:
2364 fileio_info->identifier = malloc(5);
2365 sprintf(fileio_info->identifier, "read");
2366 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2367 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2368 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_3));
2370 case NDS32_SYSCALL_WRITE:
2371 fileio_info->identifier = malloc(6);
2372 sprintf(fileio_info->identifier, "write");
2373 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2374 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2375 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_3));
2377 case NDS32_SYSCALL_LSEEK:
2378 fileio_info->identifier = malloc(6);
2379 sprintf(fileio_info->identifier, "lseek");
2380 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2381 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2382 nds32_get_mapped_reg(nds32, R2, &(fileio_info->param_3));
2384 case NDS32_SYSCALL_UNLINK:
2386 uint8_t filename[256];
2387 fileio_info->identifier = malloc(7);
2388 sprintf(fileio_info->identifier, "unlink");
2389 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2390 /* reserve fileio_info->param_2 for length of path */
2392 target->type->read_buffer(target, fileio_info->param_1,
2394 fileio_info->param_2 = strlen((char *)filename) + 1;
2397 case NDS32_SYSCALL_RENAME:
2399 uint8_t filename[256];
2400 fileio_info->identifier = malloc(7);
2401 sprintf(fileio_info->identifier, "rename");
2402 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2403 /* reserve fileio_info->param_2 for length of old path */
2404 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_3));
2405 /* reserve fileio_info->param_4 for length of new path */
2407 target->type->read_buffer(target, fileio_info->param_1,
2409 fileio_info->param_2 = strlen((char *)filename) + 1;
2411 target->type->read_buffer(target, fileio_info->param_3,
2413 fileio_info->param_4 = strlen((char *)filename) + 1;
2416 case NDS32_SYSCALL_FSTAT:
2417 fileio_info->identifier = malloc(6);
2418 sprintf(fileio_info->identifier, "fstat");
2419 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2420 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2422 case NDS32_SYSCALL_STAT:
2424 uint8_t filename[256];
2425 fileio_info->identifier = malloc(5);
2426 sprintf(fileio_info->identifier, "stat");
2427 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2428 /* reserve fileio_info->param_2 for length of old path */
2429 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_3));
2431 target->type->read_buffer(target, fileio_info->param_1,
2433 fileio_info->param_2 = strlen((char *)filename) + 1;
2436 case NDS32_SYSCALL_GETTIMEOFDAY:
2437 fileio_info->identifier = malloc(13);
2438 sprintf(fileio_info->identifier, "gettimeofday");
2439 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2440 nds32_get_mapped_reg(nds32, R1, &(fileio_info->param_2));
2442 case NDS32_SYSCALL_ISATTY:
2443 fileio_info->identifier = malloc(7);
2444 sprintf(fileio_info->identifier, "isatty");
2445 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2447 case NDS32_SYSCALL_SYSTEM:
2449 uint8_t command[256];
2450 fileio_info->identifier = malloc(7);
2451 sprintf(fileio_info->identifier, "system");
2452 nds32_get_mapped_reg(nds32, R0, &(fileio_info->param_1));
2453 /* reserve fileio_info->param_2 for length of old path */
2455 target->type->read_buffer(target, fileio_info->param_1,
2457 fileio_info->param_2 = strlen((char *)command) + 1;
2460 case NDS32_SYSCALL_ERRNO:
2461 fileio_info->identifier = malloc(6);
2462 sprintf(fileio_info->identifier, "errno");
2463 nds32_set_mapped_reg(nds32, R0, nds32->virtual_hosting_errno);
2466 fileio_info->identifier = malloc(8);
2467 sprintf(fileio_info->identifier, "unknown");
2474 int nds32_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
2476 LOG_DEBUG("syscall return code: 0x%x, errno: 0x%x , ctrl_c: %s",
2477 retcode, fileio_errno, ctrl_c ? "true" : "false");
2479 struct nds32 *nds32 = target_to_nds32(target);
2481 nds32_set_mapped_reg(nds32, R0, (uint32_t)retcode);
2483 nds32->virtual_hosting_errno = fileio_errno;
2484 nds32->virtual_hosting_ctrl_c = ctrl_c;
2485 nds32->active_syscall_id = NDS32_SYSCALL_UNDEFINED;
2490 int nds32_profiling(struct target *target, uint32_t *samples,
2491 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2493 /* sample $PC every 10 milliseconds */
2494 uint32_t iteration = seconds * 100;
2495 struct aice_port_s *aice = target_to_aice(target);
2496 struct nds32 *nds32 = target_to_nds32(target);
2498 if (max_num_samples < iteration)
2499 iteration = max_num_samples;
2501 int pc_regnum = nds32->register_map(nds32, PC);
2502 aice_profiling(aice, 10, iteration, pc_regnum, samples, num_samples);
2504 register_cache_invalidate(nds32->core_cache);
2509 int nds32_gdb_fileio_write_memory(struct nds32 *nds32, uint32_t address,
2510 uint32_t size, const uint8_t *buffer)
2512 if ((NDS32_SYSCALL_FSTAT == nds32->active_syscall_id) ||
2513 (NDS32_SYSCALL_STAT == nds32->active_syscall_id)) {
2514 /* If doing GDB file-I/O, target should convert 'struct stat'
2515 * from gdb-format to target-format */
2516 uint8_t stat_buffer[NDS32_STRUCT_STAT_SIZE];
2518 stat_buffer[0] = buffer[3];
2519 stat_buffer[1] = buffer[2];
2521 stat_buffer[2] = buffer[7];
2522 stat_buffer[3] = buffer[6];
2524 stat_buffer[4] = buffer[11];
2525 stat_buffer[5] = buffer[10];
2526 stat_buffer[6] = buffer[9];
2527 stat_buffer[7] = buffer[8];
2529 stat_buffer[8] = buffer[15];
2530 stat_buffer[9] = buffer[16];
2532 stat_buffer[10] = buffer[19];
2533 stat_buffer[11] = buffer[18];
2535 stat_buffer[12] = buffer[23];
2536 stat_buffer[13] = buffer[22];
2538 stat_buffer[14] = buffer[27];
2539 stat_buffer[15] = buffer[26];
2541 stat_buffer[16] = buffer[35];
2542 stat_buffer[17] = buffer[34];
2543 stat_buffer[18] = buffer[33];
2544 stat_buffer[19] = buffer[32];
2546 stat_buffer[20] = buffer[55];
2547 stat_buffer[21] = buffer[54];
2548 stat_buffer[22] = buffer[53];
2549 stat_buffer[23] = buffer[52];
2551 stat_buffer[24] = 0;
2552 stat_buffer[25] = 0;
2553 stat_buffer[26] = 0;
2554 stat_buffer[27] = 0;
2556 stat_buffer[28] = buffer[59];
2557 stat_buffer[29] = buffer[58];
2558 stat_buffer[30] = buffer[57];
2559 stat_buffer[31] = buffer[56];
2561 stat_buffer[32] = 0;
2562 stat_buffer[33] = 0;
2563 stat_buffer[34] = 0;
2564 stat_buffer[35] = 0;
2566 stat_buffer[36] = buffer[63];
2567 stat_buffer[37] = buffer[62];
2568 stat_buffer[38] = buffer[61];
2569 stat_buffer[39] = buffer[60];
2571 stat_buffer[40] = 0;
2572 stat_buffer[41] = 0;
2573 stat_buffer[42] = 0;
2574 stat_buffer[43] = 0;
2576 stat_buffer[44] = buffer[43];
2577 stat_buffer[45] = buffer[42];
2578 stat_buffer[46] = buffer[41];
2579 stat_buffer[47] = buffer[40];
2581 stat_buffer[48] = buffer[51];
2582 stat_buffer[49] = buffer[50];
2583 stat_buffer[50] = buffer[49];
2584 stat_buffer[51] = buffer[48];
2586 stat_buffer[52] = 0;
2587 stat_buffer[53] = 0;
2588 stat_buffer[54] = 0;
2589 stat_buffer[55] = 0;
2590 stat_buffer[56] = 0;
2591 stat_buffer[57] = 0;
2592 stat_buffer[58] = 0;
2593 stat_buffer[59] = 0;
2595 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_STAT_SIZE, stat_buffer);
2596 } else if (NDS32_SYSCALL_GETTIMEOFDAY == nds32->active_syscall_id) {
2597 /* If doing GDB file-I/O, target should convert 'struct timeval'
2598 * from gdb-format to target-format */
2599 uint8_t timeval_buffer[NDS32_STRUCT_TIMEVAL_SIZE];
2600 timeval_buffer[0] = buffer[3];
2601 timeval_buffer[1] = buffer[2];
2602 timeval_buffer[2] = buffer[1];
2603 timeval_buffer[3] = buffer[0];
2604 timeval_buffer[4] = buffer[11];
2605 timeval_buffer[5] = buffer[10];
2606 timeval_buffer[6] = buffer[9];
2607 timeval_buffer[7] = buffer[8];
2609 return nds32_write_buffer(nds32->target, address, NDS32_STRUCT_TIMEVAL_SIZE, timeval_buffer);
2612 return nds32_write_buffer(nds32->target, address, size, buffer);
2615 int nds32_reset_halt(struct nds32 *nds32)
2617 LOG_INFO("reset halt as init");
2619 struct aice_port_s *aice = target_to_aice(nds32->target);
2620 aice_assert_srst(aice, AICE_RESET_HOLD);