/***************************************************************************
 *   Copyright (C) 2013 Andes Technology                                   *
 *   Hsiangkai Wang <hkwang@andestech.com>                                 *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program. If not, see <http://www.gnu.org/licenses/>.  *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "nds32_reg.h"
#include "nds32_disassembler.h"
#include "nds32.h"
#include "nds32_aice.h"
#include "nds32_v3_common.h"

static struct nds32_v3_common_callback *v3_common_callback;

static int nds32_v3_register_mapping(struct nds32 *nds32, int reg_no)
{
	if (reg_no == PC)
		return IR11;

	return reg_no;
}

static int nds32_v3_get_debug_reason(struct nds32 *nds32, uint32_t *reason)
{
	uint32_t edmsw;
	struct aice_port_s *aice = target_to_aice(nds32->target);
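
	/* EDMSW[15:12] reports the cause of the latest debug-mode entry;
	 * read it out as the debug reason. */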
	aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &edmsw);
	*reason = (edmsw >> 12) & 0x0F;

	return ERROR_OK;
}

/**
 * Save processor state. This is called after a HALT instruction
 * succeeds, and on other occasions when the processor enters debug mode
 * (breakpoint, watchpoint, etc).
 */
static int nds32_v3_debug_entry(struct nds32 *nds32, bool enable_watchpoint)
{
	LOG_DEBUG("nds32_v3_debug_entry");

	enum target_state backup_state = nds32->target->state;
	nds32->target->state = TARGET_HALTED;

	if (nds32->init_arch_info_after_halted == false) {
		/* init architecture info according to config registers */
		CHECK_RETVAL(nds32_config(nds32));

		nds32->init_arch_info_after_halted = true;
	}

	/* REVISIT entire cache should already be invalid !!! */
	register_cache_invalidate(nds32->core_cache);

	/* deactivate all hardware breakpoints */
	CHECK_RETVAL(v3_common_callback->deactivate_hardware_breakpoint(nds32->target));

	if (enable_watchpoint)
		CHECK_RETVAL(v3_common_callback->deactivate_hardware_watchpoint(nds32->target));

	struct breakpoint *syscall_break = &(nds32->syscall_break);
	if (nds32->virtual_hosting) {
		if (syscall_break->set) {
			/** disable virtual hosting */

			/* remove breakpoint at syscall entry */
			target_remove_breakpoint(nds32->target, syscall_break);
			syscall_break->set = 0;

			uint32_t value_pc;
			nds32_get_mapped_reg(nds32, PC, &value_pc);
			if (value_pc == syscall_break->address)
				/** process syscall for virtual hosting */
				nds32->hit_syscall = true;
		}
	}

	if (nds32_examine_debug_reason(nds32) != ERROR_OK) {
		nds32->target->state = backup_state;

		/* re-activate all hardware breakpoints & watchpoints */
		CHECK_RETVAL(v3_common_callback->activate_hardware_breakpoint(nds32->target));

		if (enable_watchpoint)
			CHECK_RETVAL(v3_common_callback->activate_hardware_watchpoint(nds32->target));

		return ERROR_FAIL;
	}

	/* Save registers. */
	nds32_full_context(nds32);

	/* check interrupt level */
	v3_common_callback->check_interrupt_stack(nds32);

	return ERROR_OK;
}

/**
 * Restore processor state.
 */
static int nds32_v3_leave_debug_state(struct nds32 *nds32, bool enable_watchpoint)
{
	LOG_DEBUG("nds32_v3_leave_debug_state");

	struct target *target = nds32->target;

	/* activate all hardware breakpoints */
	CHECK_RETVAL(v3_common_callback->activate_hardware_breakpoint(target));

	if (enable_watchpoint) {
		/* activate all watchpoints */
		CHECK_RETVAL(v3_common_callback->activate_hardware_watchpoint(target));
	}

	/* restore interrupt stack */
	v3_common_callback->restore_interrupt_stack(nds32);

	/* REVISIT once we start caring about MMU and cache state,
	 * address it here ...
	 */

	/* restore PSW, PC, and R0 ... after flushing any modified
	 * registers.
	 */
	CHECK_RETVAL(nds32_restore_context(target));

	if (nds32->virtual_hosting) {
		/** enable virtual hosting */
		uint32_t value_ir3;
		uint32_t entry_size;
		uint32_t syscall_address;

		/* get syscall entry address */
		nds32_get_mapped_reg(nds32, IR3, &value_ir3);
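		/* IR3 is the interrupt vector base register: bits [31:16] hold the
		 * vector table base address and bits [15:14] select the per-entry
		 * size, i.e. 4 << (2 * size_field) bytes. The syscall handler sits
		 * at vector 8. */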
		entry_size = 0x4 << (((value_ir3 >> 14) & 0x3) << 1);
		syscall_address = (value_ir3 & 0xFFFF0000) + entry_size * 8; /* The index of SYSCALL is 8 */

		if (nds32->hit_syscall) {
			/* single step to skip syscall entry */
			/* use IRET to skip syscall */
			struct aice_port_s *aice = target_to_aice(target);
			uint32_t value_ir9;
			uint32_t value_ir6;
			uint32_t syscall_id;

			nds32_get_mapped_reg(nds32, IR6, &value_ir6);
			syscall_id = (value_ir6 >> 16) & 0x7FFF;

			if (syscall_id == NDS32_SYSCALL_EXIT) {
				/* If target hits exit syscall, do not use IRET to skip handler. */
				aice_step(aice);
			} else {
				/* use api->read/write_reg to skip nds32 register cache */
				uint32_t value_dimbr;
				aice_read_debug_reg(aice, NDS_EDM_SR_DIMBR, &value_dimbr);
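				/* Make IR11 (the PC that the first IRET below returns to)
				 * point at offset 0xC of the debug instruction memory, i.e.
				 * the second IRET of the four-word sequence executed below. */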
				aice_write_register(aice, IR11, value_dimbr + 0xC);

				aice_read_register(aice, IR9, &value_ir9);
				value_ir9 += 4; /* syscall is always 4 bytes */
				aice_write_register(aice, IR9, value_ir9);

				/* backup hardware breakpoint 0 */
				uint32_t backup_bpa, backup_bpam, backup_bpc;
				aice_read_debug_reg(aice, NDS_EDM_SR_BPA0, &backup_bpa);
				aice_read_debug_reg(aice, NDS_EDM_SR_BPAM0, &backup_bpam);
				aice_read_debug_reg(aice, NDS_EDM_SR_BPC0, &backup_bpc);

				/* use hardware breakpoint 0 to stop cpu after skipping syscall */
				aice_write_debug_reg(aice, NDS_EDM_SR_BPA0, value_ir9);
				aice_write_debug_reg(aice, NDS_EDM_SR_BPAM0, 0);
				aice_write_debug_reg(aice, NDS_EDM_SR_BPC0, 0xA);

				/* Execute two IRETs.
				 * The first IRET is used to quit debug mode.
				 * The second IRET is used to quit the current syscall. */
				uint32_t dim_inst[4] = {NOP, NOP, IRET, IRET};
				aice_execute(aice, dim_inst, 4);

				/* restore original hardware breakpoint 0 */
				aice_write_debug_reg(aice, NDS_EDM_SR_BPA0, backup_bpa);
				aice_write_debug_reg(aice, NDS_EDM_SR_BPAM0, backup_bpam);
				aice_write_debug_reg(aice, NDS_EDM_SR_BPC0, backup_bpc);
			}

			nds32->hit_syscall = false;
		}

		/* insert breakpoint at syscall entry */
		struct breakpoint *syscall_break = &(nds32->syscall_break);
		syscall_break->address = syscall_address;
		syscall_break->type = BKPT_SOFT;
		syscall_break->set = 1;
		target_add_breakpoint(target, syscall_break);
	}

	return ERROR_OK;
}

static int nds32_v3_get_exception_address(struct nds32 *nds32,
		uint32_t *address, uint32_t reason)
{
	LOG_DEBUG("nds32_v3_get_exception_address");

	struct aice_port_s *aice = target_to_aice(nds32->target);
	struct target *target = nds32->target;
	uint32_t edmsw;
	uint32_t edm_cfg;
	uint32_t match_bits;
	uint32_t match_count;
	int32_t i;
	static int32_t number_of_hard_break;
	uint32_t bp_control;
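
	/* Query the number of hardware breakpoint channels only once;
	 * EDM_CFG[2:0] encodes the channel count minus one. */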
	if (number_of_hard_break == 0) {
		aice_read_debug_reg(aice, NDS_EDM_SR_EDM_CFG, &edm_cfg);
		number_of_hard_break = (edm_cfg & 0x7) + 1;
	}

	aice_read_debug_reg(aice, NDS_EDM_SR_EDMSW, &edmsw);
	/* clear matching bits (write-one-clear) */
	aice_write_debug_reg(aice, NDS_EDM_SR_EDMSW, edmsw);
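
	/* EDMSW[11:4] has one match flag per breakpoint/watchpoint channel;
	 * scan the flags to find which channel(s) fired and fetch the matched
	 * address from the corresponding BPAn register. */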
	match_bits = (edmsw >> 4) & 0xFF;
	match_count = 0;
	for (i = 0; i < number_of_hard_break; i++) {
		if (match_bits & (1 << i)) {
			aice_read_debug_reg(aice, NDS_EDM_SR_BPA0 + i, address);
			match_count++;

			/* If the target hits multiple read/access watchpoints,
			 * select the first one. */
			aice_read_debug_reg(aice, NDS_EDM_SR_BPC0 + i, &bp_control);
			if (0x3 == (bp_control & 0x3)) {
				match_count = 1;
				break;
			}
		}
	}

	if (match_count > 1) { /* multiple hits */
		*address = 0;
		return ERROR_OK;
	} else if (match_count == 1) {
		uint32_t val_pc;
		uint32_t opcode;
		struct nds32_instruction instruction;
		struct watchpoint *wp;
		bool hit;

		nds32_get_mapped_reg(nds32, PC, &val_pc);
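
		/* For *_NEXT_PRECISE watchpoints the core halts after the access
		 * instruction has completed, so step the PC back by one instruction
		 * (2 or 4 bytes, depending on EDMSW.IS_16BIT) before decoding the
		 * instruction that triggered the hit. */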
		if ((reason == NDS32_DEBUG_DATA_ADDR_WATCHPOINT_NEXT_PRECISE) ||
				(reason == NDS32_DEBUG_DATA_VALUE_WATCHPOINT_NEXT_PRECISE)) {
			if (edmsw & 0x4) /* check EDMSW.IS_16BIT */
				val_pc -= 2;
			else
				val_pc -= 4;
		}

		nds32_read_opcode(nds32, val_pc, &opcode);
		nds32_evaluate_opcode(nds32, opcode, val_pc, &instruction);

		LOG_DEBUG("PC: 0x%08" PRIx32 ", access start: 0x%08" PRIx32 ", end: 0x%08" PRIx32,
				val_pc, instruction.access_start, instruction.access_end);

		/* check whether multiple watchpoints fall within the access range */
		uint32_t in_range_watch_count = 0;
		for (wp = target->watchpoints; wp; wp = wp->next) {
			if ((instruction.access_start <= wp->address) &&
					(wp->address < instruction.access_end))
				in_range_watch_count++;
		}
		if (in_range_watch_count > 1) {
			/* Hit LSMW instruction. */
			*address = 0;
			return ERROR_OK;
		}

		/* dispel false matches: ignore watchpoints whose watched range does
		 * not overlap the actual access range */
		hit = false;
		for (wp = target->watchpoints; wp; wp = wp->next) {
			if (((*address ^ wp->address) & (~wp->mask)) == 0) {
				uint32_t watch_start;
				uint32_t watch_end;

				watch_start = wp->address;
				watch_end = wp->address + wp->length;

				if ((watch_end <= instruction.access_start) ||
						(instruction.access_end <= watch_start))
					continue;

				hit = true;
				break;
			}
		}

		if (hit)
			return ERROR_OK;
		else
			return ERROR_FAIL;
	} else if (match_count == 0) {
		/* global stop is a precise exception */
		if ((reason == NDS32_DEBUG_LOAD_STORE_GLOBAL_STOP) && nds32->global_stop) {
			/* parse the instruction to get the correct access address */
			uint32_t val_pc;
			uint32_t opcode;
			struct nds32_instruction instruction;

			nds32_get_mapped_reg(nds32, PC, &val_pc);
			nds32_read_opcode(nds32, val_pc, &opcode);
			nds32_evaluate_opcode(nds32, opcode, val_pc, &instruction);

			*address = instruction.access_start;
			return ERROR_OK;
		}
	}

	*address = 0xFFFFFFFF;
	return ERROR_FAIL;
}

void nds32_v3_common_register_callback(struct nds32_v3_common_callback *callback)
{
	v3_common_callback = callback;
}

/** target_type functions: */

/* target request support */
int nds32_v3_target_request_data(struct target *target,
		uint32_t size, uint8_t *buffer)
{
	/* The AndesCore can use the DTR register to communicate with OpenOCD
	 * and output messages.
	 * Target data will be put in the buffer.
	 * The format of DTR is as follows:
	 * DTR[31:16] => length, DTR[15:8] => size, DTR[7:0] => target_req_cmd
	 * target_req_cmd has three possible values:
	 *   TARGET_REQ_TRACEMSG
	 *   TARGET_REQ_DEBUGMSG
	 *   TARGET_REQ_DEBUGCHAR
	 * If size == 0, the target will call target_asciimsg();
	 * otherwise it calls target_hexmsg().
	 */
	LOG_WARNING("Not implemented: %s", __func__);

	return ERROR_OK;
}

int nds32_v3_checksum_memory(struct target *target,
		target_addr_t address, uint32_t count, uint32_t *checksum)
{
	LOG_WARNING("Not implemented: %s", __func__);

	return ERROR_FAIL;
}

/**
 * Find out which watchpoint was hit: get the exception address and
 * compare it against the registered watchpoints.
 */
int nds32_v3_hit_watchpoint(struct target *target,
		struct watchpoint **hit_watchpoint)
{
	static struct watchpoint scan_all_watchpoint;

	uint32_t exception_address;
	struct watchpoint *wp;
	struct nds32 *nds32 = target_to_nds32(target);

	exception_address = nds32->watched_address;

	if (exception_address == 0xFFFFFFFF)
		return ERROR_FAIL;
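
	/* An exception address of 0 means more than one channel matched (or an
	 * LSMW access spanned several watchpoints), so no single watchpoint can
	 * be identified; report a placeholder watchpoint so the caller still
	 * sees a hit. */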
	if (exception_address == 0) {
		scan_all_watchpoint.address = 0;
		scan_all_watchpoint.rw = WPT_WRITE;
		scan_all_watchpoint.next = 0;
		scan_all_watchpoint.unique_id = 0x5CA8;

		*hit_watchpoint = &scan_all_watchpoint;
		return ERROR_OK;
	}

	for (wp = target->watchpoints; wp; wp = wp->next) {
		if (((exception_address ^ wp->address) & (~wp->mask)) == 0) {
			*hit_watchpoint = wp;
			return ERROR_OK;
		}
	}

	return ERROR_FAIL;
}

int nds32_v3_target_create_common(struct target *target, struct nds32 *nds32)
{
	nds32->register_map = nds32_v3_register_mapping;
	nds32->get_debug_reason = nds32_v3_get_debug_reason;
	nds32->enter_debug_state = nds32_v3_debug_entry;
	nds32->leave_debug_state = nds32_v3_leave_debug_state;
	nds32->get_watched_address = nds32_v3_get_exception_address;

	/* Initialize target->arch_info in nds32_init_arch_info().
	 * After this, callers can use target_to_nds32() to get the nds32 object. */
	nds32_init_arch_info(target, nds32);

	return ERROR_OK;
}

int nds32_v3_run_algorithm(struct target *target,
		int num_mem_params,
		struct mem_param *mem_params,
		int num_reg_params,
		struct reg_param *reg_params,
		target_addr_t entry_point,
		target_addr_t exit_point,
		int timeout_ms,
		void *arch_info)
{
	LOG_WARNING("Not implemented: %s", __func__);

	return ERROR_FAIL;
}

int nds32_v3_read_buffer(struct target *target, target_addr_t address,
		uint32_t size, uint8_t *buffer)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_memory *memory = &(nds32->memory);

	if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
			(target->state != TARGET_HALTED)) {
		LOG_WARNING("target was not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	target_addr_t physical_address;
	/* BUG: If the access range crosses multiple pages, the translation will not
	 * be correct for the second and subsequent pages. */

	/* When DEX is set to one, hardware will enforce the following behavior without
	 * modifying the corresponding control bits in PSW.
	 *
	 * Disable all interrupts
	 * Enter superuser mode
	 * Turn off IT/DT
	 * Use MMU_CFG.DE as the data access endian
	 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
	 * Disable audio special features
	 * Disable inline function call
	 *
	 * Because the hardware turns off IT/DT by default, OpenOCD MUST translate
	 * virtual addresses to physical addresses before accessing memory.
	 */
	if (target->type->virt2phys(target, address, &physical_address) == ERROR_OK)
		address = physical_address;
	else
		return ERROR_FAIL;

	int result;
	struct aice_port_s *aice = target_to_aice(target);
	/* give an arbitrary initial value to avoid compiler warnings */
	enum nds_memory_access origin_access_channel = NDS_MEMORY_ACC_CPU;

	if (nds32->hit_syscall) {
		/* Use bus mode to access memory during virtual hosting */
		origin_access_channel = memory->access_channel;
		memory->access_channel = NDS_MEMORY_ACC_BUS;
		aice_memory_access(aice, NDS_MEMORY_ACC_BUS);
	}

	result = nds32_read_buffer(target, address, size, buffer);

	if (nds32->hit_syscall) {
		/* Restore access_channel after virtual hosting */
		memory->access_channel = origin_access_channel;
		aice_memory_access(aice, origin_access_channel);
	}

	return result;
}

int nds32_v3_write_buffer(struct target *target, target_addr_t address,
		uint32_t size, const uint8_t *buffer)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_memory *memory = &(nds32->memory);

	if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
			(target->state != TARGET_HALTED)) {
		LOG_WARNING("target was not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	target_addr_t physical_address;
	/* BUG: If the access range crosses multiple pages, the translation will not
	 * be correct for the second and subsequent pages. */

	/* When DEX is set to one, hardware will enforce the following behavior without
	 * modifying the corresponding control bits in PSW.
	 *
	 * Disable all interrupts
	 * Enter superuser mode
	 * Turn off IT/DT
	 * Use MMU_CFG.DE as the data access endian
	 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
	 * Disable audio special features
	 * Disable inline function call
	 *
	 * Because the hardware turns off IT/DT by default, OpenOCD MUST translate
	 * virtual addresses to physical addresses before accessing memory.
	 */
	if (target->type->virt2phys(target, address, &physical_address) == ERROR_OK)
		address = physical_address;
	else
		return ERROR_FAIL;

	if (nds32->hit_syscall) {
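		/* Memory writes issued while a syscall is being serviced come from the
		 * GDB File-I/O side, so route them through the gdb fileio write path;
		 * without a usable D-cache, fall back to bus-mode access. */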
		struct aice_port_s *aice = target_to_aice(target);
		enum nds_memory_access origin_access_channel;
		origin_access_channel = memory->access_channel;

		/* If the target has no cache, use BUS mode to access memory. */
		if ((memory->dcache.line_size == 0)
				|| (memory->dcache.enable == false)) {
			/* There is no D-cache, or the D-cache is disabled. */
			memory->access_channel = NDS_MEMORY_ACC_BUS;
			aice_memory_access(aice, NDS_MEMORY_ACC_BUS);
		}

		int result;
		result = nds32_gdb_fileio_write_memory(nds32, address, size, buffer);

		if (origin_access_channel == NDS_MEMORY_ACC_CPU) {
			memory->access_channel = NDS_MEMORY_ACC_CPU;
			aice_memory_access(aice, NDS_MEMORY_ACC_CPU);
		}

		return result;
	}

	return nds32_write_buffer(target, address, size, buffer);
}

int nds32_v3_read_memory(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_memory *memory = &(nds32->memory);

	if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
			(target->state != TARGET_HALTED)) {
		LOG_WARNING("target was not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	target_addr_t physical_address;
	/* BUG: If the access range crosses multiple pages, the translation will not
	 * be correct for the second and subsequent pages. */

	/* When DEX is set to one, hardware will enforce the following behavior without
	 * modifying the corresponding control bits in PSW.
	 *
	 * Disable all interrupts
	 * Enter superuser mode
	 * Turn off IT/DT
	 * Use MMU_CFG.DE as the data access endian
	 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
	 * Disable audio special features
	 * Disable inline function call
	 *
	 * Because the hardware turns off IT/DT by default, OpenOCD MUST translate
	 * virtual addresses to physical addresses before accessing memory.
	 */
	if (target->type->virt2phys(target, address, &physical_address) == ERROR_OK)
		address = physical_address;
	else
		return ERROR_FAIL;

	struct aice_port_s *aice = target_to_aice(target);
	/* give an arbitrary initial value to avoid compiler warnings */
	enum nds_memory_access origin_access_channel = NDS_MEMORY_ACC_CPU;
	int result;

	if (nds32->hit_syscall) {
		/* Use bus mode to access memory during virtual hosting */
		origin_access_channel = memory->access_channel;
		memory->access_channel = NDS_MEMORY_ACC_BUS;
		aice_memory_access(aice, NDS_MEMORY_ACC_BUS);
	}

	result = nds32_read_memory(target, address, size, count, buffer);

	if (nds32->hit_syscall) {
		/* Restore access_channel after virtual hosting */
		memory->access_channel = origin_access_channel;
		aice_memory_access(aice, origin_access_channel);
	}

	return result;
}

int nds32_v3_write_memory(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	struct nds32 *nds32 = target_to_nds32(target);
	struct nds32_memory *memory = &(nds32->memory);

	if ((memory->access_channel == NDS_MEMORY_ACC_CPU) &&
			(target->state != TARGET_HALTED)) {
		LOG_WARNING("target was not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	target_addr_t physical_address;
	/* BUG: If the access range crosses multiple pages, the translation will not
	 * be correct for the second and subsequent pages. */

	/* When DEX is set to one, hardware will enforce the following behavior without
	 * modifying the corresponding control bits in PSW.
	 *
	 * Disable all interrupts
	 * Enter superuser mode
	 * Turn off IT/DT
	 * Use MMU_CFG.DE as the data access endian
	 * Use MMU_CFG.DRDE as the device register access endian if MMU_CTL.DREE is asserted
	 * Disable audio special features
	 * Disable inline function call
	 *
	 * Because the hardware turns off IT/DT by default, OpenOCD MUST translate
	 * virtual addresses to physical addresses before accessing memory.
	 */
	if (target->type->virt2phys(target, address, &physical_address) == ERROR_OK)
		address = physical_address;
	else
		return ERROR_FAIL;

	return nds32_write_memory(target, address, size, count, buffer);
}

int nds32_v3_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	/* Initialize anything we can set up without talking to the target */
	struct nds32 *nds32 = target_to_nds32(target);

	nds32_init(nds32);

	target->fileio_info = malloc(sizeof(struct gdb_fileio_info));
	target->fileio_info->identifier = NULL;

	return ERROR_OK;
}