You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 
 
 

4877 lines
153 KiB

  1. /* SPDX-License-Identifier: GPL-2.0-or-later */
  2. /*
  3. * Support for RISC-V, debug version 0.13, which is currently (2/4/17) the
  4. * latest draft.
  5. */
  6. #include <assert.h>
  7. #include <stdlib.h>
  8. #include <time.h>
  9. #ifdef HAVE_CONFIG_H
  10. #include "config.h"
  11. #endif
  12. #include "target/target.h"
  13. #include "target/algorithm.h"
  14. #include "target/target_type.h"
  15. #include "log.h"
  16. #include "jtag/jtag.h"
  17. #include "target/register.h"
  18. #include "target/breakpoints.h"
  19. #include "helper/time_support.h"
  20. #include "helper/list.h"
  21. #include "riscv.h"
  22. #include "debug_defines.h"
  23. #include "rtos/rtos.h"
  24. #include "program.h"
  25. #include "asm.h"
  26. #include "batch.h"
  27. #define DM_DATA1 (DM_DATA0 + 1)
  28. #define DM_PROGBUF1 (DM_PROGBUF0 + 1)
  29. static int riscv013_on_step_or_resume(struct target *target, bool step);
  30. static int riscv013_step_or_resume_current_hart(struct target *target,
  31. bool step, bool use_hasel);
  32. static void riscv013_clear_abstract_error(struct target *target);
  33. /* Implementations of the functions in riscv_info_t. */
  34. static int riscv013_get_register(struct target *target,
  35. riscv_reg_t *value, int hid, int rid);
  36. static int riscv013_set_register(struct target *target, int hartid, int regid, uint64_t value);
  37. static int riscv013_select_current_hart(struct target *target);
  38. static int riscv013_halt_prep(struct target *target);
  39. static int riscv013_halt_go(struct target *target);
  40. static int riscv013_resume_go(struct target *target);
  41. static int riscv013_step_current_hart(struct target *target);
  42. static int riscv013_on_halt(struct target *target);
  43. static int riscv013_on_step(struct target *target);
  44. static int riscv013_resume_prep(struct target *target);
  45. static bool riscv013_is_halted(struct target *target);
  46. static enum riscv_halt_reason riscv013_halt_reason(struct target *target);
  47. static int riscv013_write_debug_buffer(struct target *target, unsigned index,
  48. riscv_insn_t d);
  49. static riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned
  50. index);
  51. static int riscv013_execute_debug_buffer(struct target *target);
  52. static void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d);
  53. static void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a);
  54. static int riscv013_dmi_write_u64_bits(struct target *target);
  55. static void riscv013_fill_dmi_nop_u64(struct target *target, char *buf);
  56. static int register_read(struct target *target, uint64_t *value, uint32_t number);
  57. static int register_read_direct(struct target *target, uint64_t *value, uint32_t number);
  58. static int register_write_direct(struct target *target, unsigned number,
  59. uint64_t value);
  60. static int read_memory(struct target *target, target_addr_t address,
  61. uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment);
  62. static int write_memory(struct target *target, target_addr_t address,
  63. uint32_t size, uint32_t count, const uint8_t *buffer);
  64. static int riscv013_test_sba_config_reg(struct target *target, target_addr_t legal_address,
  65. uint32_t num_words, target_addr_t illegal_address, bool run_sbbusyerror_test);
  66. void write_memory_sba_simple(struct target *target, target_addr_t addr, uint32_t *write_data,
  67. uint32_t write_size, uint32_t sbcs);
  68. void read_memory_sba_simple(struct target *target, target_addr_t addr,
  69. uint32_t *rd_buf, uint32_t read_size, uint32_t sbcs);
  70. static int riscv013_test_compliance(struct target *target);
  71. /**
  72. * Since almost everything can be accomplish by scanning the dbus register, all
  73. * functions here assume dbus is already selected. The exception are functions
  74. * called directly by OpenOCD, which can't assume anything about what's
  75. * currently in IR. They should set IR to dbus explicitly.
  76. */
  77. #define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
  78. #define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))
  79. #define CSR_DCSR_CAUSE_SWBP 1
  80. #define CSR_DCSR_CAUSE_TRIGGER 2
  81. #define CSR_DCSR_CAUSE_DEBUGINT 3
  82. #define CSR_DCSR_CAUSE_STEP 4
  83. #define CSR_DCSR_CAUSE_HALT 5
  84. #define CSR_DCSR_CAUSE_GROUP 6
  85. #define RISCV013_INFO(r) riscv013_info_t *r = get_info(target)
/*** JTAG registers. ***/

/* Operation encodings written to the "op" field of the DTM's dmi register. */
typedef enum {
	DMI_OP_NOP = 0,
	DMI_OP_READ = 1,
	DMI_OP_WRITE = 2
} dmi_op_t;

/* Status encodings read back from the dmi register's "op" field after a
 * scan. Value 1 is reserved by the spec and deliberately absent here. */
typedef enum {
	DMI_STATUS_SUCCESS = 0,
	DMI_STATUS_FAILED = 2,
	DMI_STATUS_BUSY = 3
} dmi_status_t;

/* Generic scratch-slot indices; their concrete use is not visible in this
 * part of the file — presumably debug-buffer staging slots (verify at the
 * use sites). */
typedef enum slot {
	SLOT0,
	SLOT1,
	SLOT_LAST,
} slot_t;
  102. /*** Debug Bus registers. ***/
  103. #define CMDERR_NONE 0
  104. #define CMDERR_BUSY 1
  105. #define CMDERR_NOT_SUPPORTED 2
  106. #define CMDERR_EXCEPTION 3
  107. #define CMDERR_HALT_RESUME 4
  108. #define CMDERR_OTHER 7
/*** Info about the core being debugged. ***/

/* Parameters describing one hardware trigger (breakpoint/watchpoint):
 * the matched address/value, the access kinds it fires on, and a unique
 * id used to correlate it with OpenOCD breakpoint structures. */
struct trigger {
	uint64_t address;
	uint32_t length;
	uint64_t mask;
	uint64_t value;
	bool read, write, execute;
	int unique_id;
};

/* Tri-state for capabilities we discover lazily: unknown until probed,
 * then definitively yes or no. */
typedef enum {
	YNM_MAYBE,
	YNM_YES,
	YNM_NO
} yes_no_maybe_t;
/* Per-Debug-Module state. One instance exists for each distinct DM found on
 * the scan chain; all targets (harts) behind the same DM share it. Instances
 * live on the global dm_list and are looked up by TAP position. */
typedef struct {
	struct list_head list;
	/* Absolute position of this DM's TAP on the scan chain; the key used to
	 * find an existing entry in dm_list. */
	int abs_chain_position;
	/* The number of harts connected to this DM. */
	int hart_count;
	/* Indicates we already reset this DM, so don't need to do it again. */
	bool was_reset;
	/* Targets that are connected to this DM. */
	struct list_head target_list;
	/* The currently selected hartid on this DM. */
	int current_hartid;
	/* Whether the DM supports selecting several harts at once (hasel). */
	bool hasel_supported;
	/* The program buffer stores executable code. 0 is an illegal instruction,
	 * so we use 0 to mean the cached value is invalid. */
	uint32_t progbuf_cache[16];
} dm013_info_t;

/* List node linking one target into a dm013_info_t's target_list. */
typedef struct {
	struct list_head list;
	struct target *target;
} target_list_t;
/* Per-target (per-hart) state for the 0.13 debug implementation. Hung off
 * riscv_info_t::version_specific; retrieved with get_info(). */
typedef struct {
	/* The index used to address this hart in its DM. */
	unsigned index;
	/* Number of address bits in the dbus register. */
	unsigned abits;
	/* Number of abstract command data registers. */
	unsigned datacount;
	/* Number of words in the Program Buffer. */
	unsigned progbufsize;

	/* We cache the read-only bits of sbcs here. */
	uint32_t sbcs;

	/* Whether the program buffer can be written; discovered lazily. */
	yes_no_maybe_t progbuf_writable;
	/* We only need the address so that we know the alignment of the buffer. */
	riscv_addr_t progbuf_address;

	/* Number of run-test/idle cycles the target requests we do after each dbus
	 * access. */
	unsigned int dtmcs_idle;

	/* This value is incremented every time a dbus access comes back as "busy".
	 * It's used to determine how many run-test/idle cycles to feed the target
	 * in between accesses. */
	unsigned int dmi_busy_delay;

	/* Number of run-test/idle cycles to add between consecutive bus master
	 * reads/writes respectively. */
	unsigned int bus_master_write_delay, bus_master_read_delay;

	/* This value is increased every time we tried to execute two commands
	 * consecutively, and the second one failed because the previous hadn't
	 * completed yet. It's used to add extra run-test/idle cycles after
	 * starting a command, so we don't have to waste time checking for busy to
	 * go low. */
	unsigned int ac_busy_delay;

	/* Which abstract-command register accesses the target supports; each
	 * starts true and is cleared on the first "not supported" cmderr. */
	bool abstract_read_csr_supported;
	bool abstract_write_csr_supported;
	bool abstract_read_fpr_supported;
	bool abstract_write_fpr_supported;

	/* When a function returns some error due to a failure indicated by the
	 * target in cmderr, the caller can look here to see what that error was.
	 * (Compare with errno.) */
	uint8_t cmderr;

	/* Some fields from hartinfo. */
	uint8_t datasize;
	uint8_t dataaccess;
	int16_t dataaddr;

	/* The width of the hartsel field. */
	unsigned hartsellen;

	/* DM that provides access to this target. */
	dm013_info_t *dm;
} riscv013_info_t;
  190. LIST_HEAD(dm_list);
  191. static riscv013_info_t *get_info(const struct target *target)
  192. {
  193. riscv_info_t *info = (riscv_info_t *) target->arch_info;
  194. return (riscv013_info_t *) info->version_specific;
  195. }
  196. /**
  197. * Return the DM structure for this target. If there isn't one, find it in the
  198. * global list of DMs. If it's not in there, then create one and initialize it
  199. * to 0.
  200. */
dm013_info_t *get_dm(struct target *target)
{
	RISCV013_INFO(info);
	/* Fast path: this target already resolved its DM. */
	if (info->dm)
		return info->dm;

	/* DMs are keyed by the TAP's absolute position on the scan chain. */
	int abs_chain_position = target->tap->abs_chain_position;
	dm013_info_t *entry;
	dm013_info_t *dm = NULL;
	list_for_each_entry(entry, &dm_list, list) {
		if (entry->abs_chain_position == abs_chain_position) {
			dm = entry;
			break;
		}
	}
	if (!dm) {
		/* First target seen behind this TAP: allocate a new DM record.
		 * -1 marks current_hartid/hart_count as not yet discovered. */
		LOG_DEBUG("[%d] Allocating new DM", target->coreid);
		dm = calloc(1, sizeof(dm013_info_t));
		if (!dm)
			return NULL;
		dm->abs_chain_position = abs_chain_position;
		dm->current_hartid = -1;
		dm->hart_count = -1;
		INIT_LIST_HEAD(&dm->target_list);
		list_add(&dm->list, &dm_list);
	}
	info->dm = dm;

	/* Make sure this target appears exactly once on the DM's target list. */
	target_list_t *target_entry;
	list_for_each_entry(target_entry, &dm->target_list, list) {
		if (target_entry->target == target)
			return dm;
	}
	target_entry = calloc(1, sizeof(*target_entry));
	if (!target_entry) {
		/* Drop the cached pointer so a later call retries cleanly. */
		info->dm = NULL;
		return NULL;
	}
	target_entry->target = target;
	list_add(&target_entry->list, &dm->target_list);
	return dm;
}
  241. static uint32_t set_hartsel(uint32_t initial, uint32_t index)
  242. {
  243. initial &= ~DM_DMCONTROL_HARTSELLO;
  244. initial &= ~DM_DMCONTROL_HARTSELHI;
  245. uint32_t index_lo = index & ((1 << DM_DMCONTROL_HARTSELLO_LENGTH) - 1);
  246. initial |= index_lo << DM_DMCONTROL_HARTSELLO_OFFSET;
  247. uint32_t index_hi = index >> DM_DMCONTROL_HARTSELLO_LENGTH;
  248. assert(index_hi < 1 << DM_DMCONTROL_HARTSELHI_LENGTH);
  249. initial |= index_hi << DM_DMCONTROL_HARTSELHI_OFFSET;
  250. return initial;
  251. }
  252. static void decode_dmi(char *text, unsigned address, unsigned data)
  253. {
  254. static const struct {
  255. unsigned address;
  256. uint64_t mask;
  257. const char *name;
  258. } description[] = {
  259. { DM_DMCONTROL, DM_DMCONTROL_HALTREQ, "haltreq" },
  260. { DM_DMCONTROL, DM_DMCONTROL_RESUMEREQ, "resumereq" },
  261. { DM_DMCONTROL, DM_DMCONTROL_HARTRESET, "hartreset" },
  262. { DM_DMCONTROL, DM_DMCONTROL_HASEL, "hasel" },
  263. { DM_DMCONTROL, DM_DMCONTROL_HARTSELHI, "hartselhi" },
  264. { DM_DMCONTROL, DM_DMCONTROL_HARTSELLO, "hartsello" },
  265. { DM_DMCONTROL, DM_DMCONTROL_NDMRESET, "ndmreset" },
  266. { DM_DMCONTROL, DM_DMCONTROL_DMACTIVE, "dmactive" },
  267. { DM_DMCONTROL, DM_DMCONTROL_ACKHAVERESET, "ackhavereset" },
  268. { DM_DMSTATUS, DM_DMSTATUS_IMPEBREAK, "impebreak" },
  269. { DM_DMSTATUS, DM_DMSTATUS_ALLHAVERESET, "allhavereset" },
  270. { DM_DMSTATUS, DM_DMSTATUS_ANYHAVERESET, "anyhavereset" },
  271. { DM_DMSTATUS, DM_DMSTATUS_ALLRESUMEACK, "allresumeack" },
  272. { DM_DMSTATUS, DM_DMSTATUS_ANYRESUMEACK, "anyresumeack" },
  273. { DM_DMSTATUS, DM_DMSTATUS_ALLNONEXISTENT, "allnonexistent" },
  274. { DM_DMSTATUS, DM_DMSTATUS_ANYNONEXISTENT, "anynonexistent" },
  275. { DM_DMSTATUS, DM_DMSTATUS_ALLUNAVAIL, "allunavail" },
  276. { DM_DMSTATUS, DM_DMSTATUS_ANYUNAVAIL, "anyunavail" },
  277. { DM_DMSTATUS, DM_DMSTATUS_ALLRUNNING, "allrunning" },
  278. { DM_DMSTATUS, DM_DMSTATUS_ANYRUNNING, "anyrunning" },
  279. { DM_DMSTATUS, DM_DMSTATUS_ALLHALTED, "allhalted" },
  280. { DM_DMSTATUS, DM_DMSTATUS_ANYHALTED, "anyhalted" },
  281. { DM_DMSTATUS, DM_DMSTATUS_AUTHENTICATED, "authenticated" },
  282. { DM_DMSTATUS, DM_DMSTATUS_AUTHBUSY, "authbusy" },
  283. { DM_DMSTATUS, DM_DMSTATUS_HASRESETHALTREQ, "hasresethaltreq" },
  284. { DM_DMSTATUS, DM_DMSTATUS_CONFSTRPTRVALID, "confstrptrvalid" },
  285. { DM_DMSTATUS, DM_DMSTATUS_VERSION, "version" },
  286. { DM_ABSTRACTCS, DM_ABSTRACTCS_PROGBUFSIZE, "progbufsize" },
  287. { DM_ABSTRACTCS, DM_ABSTRACTCS_BUSY, "busy" },
  288. { DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR, "cmderr" },
  289. { DM_ABSTRACTCS, DM_ABSTRACTCS_DATACOUNT, "datacount" },
  290. { DM_COMMAND, DM_COMMAND_CMDTYPE, "cmdtype" },
  291. { DM_SBCS, DM_SBCS_SBVERSION, "sbversion" },
  292. { DM_SBCS, DM_SBCS_SBBUSYERROR, "sbbusyerror" },
  293. { DM_SBCS, DM_SBCS_SBBUSY, "sbbusy" },
  294. { DM_SBCS, DM_SBCS_SBREADONADDR, "sbreadonaddr" },
  295. { DM_SBCS, DM_SBCS_SBACCESS, "sbaccess" },
  296. { DM_SBCS, DM_SBCS_SBAUTOINCREMENT, "sbautoincrement" },
  297. { DM_SBCS, DM_SBCS_SBREADONDATA, "sbreadondata" },
  298. { DM_SBCS, DM_SBCS_SBERROR, "sberror" },
  299. { DM_SBCS, DM_SBCS_SBASIZE, "sbasize" },
  300. { DM_SBCS, DM_SBCS_SBACCESS128, "sbaccess128" },
  301. { DM_SBCS, DM_SBCS_SBACCESS64, "sbaccess64" },
  302. { DM_SBCS, DM_SBCS_SBACCESS32, "sbaccess32" },
  303. { DM_SBCS, DM_SBCS_SBACCESS16, "sbaccess16" },
  304. { DM_SBCS, DM_SBCS_SBACCESS8, "sbaccess8" },
  305. };
  306. text[0] = 0;
  307. for (unsigned i = 0; i < ARRAY_SIZE(description); i++) {
  308. if (description[i].address == address) {
  309. uint64_t mask = description[i].mask;
  310. unsigned value = get_field(data, mask);
  311. if (value) {
  312. if (i > 0)
  313. *(text++) = ' ';
  314. if (mask & (mask >> 1)) {
  315. /* If the field is more than 1 bit wide. */
  316. sprintf(text, "%s=%d", description[i].name, value);
  317. } else {
  318. strcpy(text, description[i].name);
  319. }
  320. text += strlen(text);
  321. }
  322. }
  323. }
  324. }
/* Log one DMI scan at debug level: the raw out/in op, data, and address,
 * followed by a decoded field listing when any known field was set.
 * @param idle Number of run-test/idle cycles that followed the scan.
 * @param field The completed scan (out_value and in_value both valid). */
static void dump_field(int idle, const struct scan_field *field)
{
	/* Op/status mnemonics indexed by the 2-bit dmi op field:
	 * out: nop/read/write/?; in: success/?/failed/busy. */
	static const char * const op_string[] = {"-", "r", "w", "?"};
	static const char * const status_string[] = {"+", "?", "F", "b"};

	if (debug_level < LOG_LVL_DEBUG)
		return;

	uint64_t out = buf_get_u64(field->out_value, 0, field->num_bits);
	unsigned int out_op = get_field(out, DTM_DMI_OP);
	unsigned int out_data = get_field(out, DTM_DMI_DATA);
	unsigned int out_address = out >> DTM_DMI_ADDRESS_OFFSET;

	uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
	unsigned int in_op = get_field(in, DTM_DMI_OP);
	unsigned int in_data = get_field(in, DTM_DMI_DATA);
	unsigned int in_address = in >> DTM_DMI_ADDRESS_OFFSET;

	log_printf_lf(LOG_LVL_DEBUG,
			__FILE__, __LINE__, "scan",
			"%db %s %08x @%02x -> %s %08x @%02x; %di",
			field->num_bits, op_string[out_op], out_data, out_address,
			status_string[in_op], in_data, in_address, idle);

	/* Second line: symbolic names of the register fields involved, if any. */
	char out_text[500];
	char in_text[500];
	decode_dmi(out_text, out_address, out_data);
	decode_dmi(in_text, in_address, in_data);
	if (in_text[0] || out_text[0]) {
		log_printf_lf(LOG_LVL_DEBUG, __FILE__, __LINE__, "scan", "%s -> %s",
				out_text, in_text);
	}
}
  353. /*** Utility functions. ***/
  354. static void select_dmi(struct target *target)
  355. {
  356. if (bscan_tunnel_ir_width != 0) {
  357. select_dmi_via_bscan(target);
  358. return;
  359. }
  360. jtag_add_ir_scan(target->tap, &select_dbus, TAP_IDLE);
  361. }
/* Scan @p out through the DTMCS (dtmcontrol) register and return the value
 * read back. Leaves dbus selected in IR afterwards.
 * NOTE(review): on a failed JTAG queue execution this returns the OpenOCD
 * error code cast to uint32_t, which is indistinguishable from a register
 * value to the caller — confirm callers tolerate this. */
static uint32_t dtmcontrol_scan(struct target *target, uint32_t out)
{
	struct scan_field field;
	uint8_t in_value[4];
	uint8_t out_value[4] = { 0 };

	if (bscan_tunnel_ir_width != 0)
		return dtmcontrol_scan_via_bscan(target, out);

	buf_set_u32(out_value, 0, 32, out);

	jtag_add_ir_scan(target->tap, &select_dtmcontrol, TAP_IDLE);

	field.num_bits = 32;
	field.out_value = out_value;
	field.in_value = in_value;
	jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);

	/* Always return to dmi. */
	select_dmi(target);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("failed jtag scan: %d", retval);
		return retval;
	}

	uint32_t in = buf_get_u32(field.in_value, 0, 32);
	LOG_DEBUG("DTMCS: 0x%x -> 0x%x", out, in);

	return in;
}
  386. static void increase_dmi_busy_delay(struct target *target)
  387. {
  388. riscv013_info_t *info = get_info(target);
  389. info->dmi_busy_delay += info->dmi_busy_delay / 10 + 1;
  390. LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
  391. info->dtmcs_idle, info->dmi_busy_delay,
  392. info->ac_busy_delay);
  393. dtmcontrol_scan(target, DTM_DTMCS_DMIRESET);
  394. }
  395. /**
  396. * exec: If this is set, assume the scan results in an execution, so more
  397. * run-test/idle cycles may be required.
  398. */
/* Perform one raw DMI scan (op/address/data out, status/address/data in) and
 * return the DMI status field from the response. Assumes dbus is already
 * selected in IR. @p exec adds extra run-test/idle cycles because the access
 * is expected to start execution on the target. */
static dmi_status_t dmi_scan(struct target *target, uint32_t *address_in,
		uint32_t *data_in, dmi_op_t op, uint32_t address_out, uint32_t data_out,
		bool exec)
{
	riscv013_info_t *info = get_info(target);
	RISCV_INFO(r);
	unsigned num_bits = info->abits + DTM_DMI_OP_LENGTH + DTM_DMI_DATA_LENGTH;
	size_t num_bytes = (num_bits + 7) / 8;
	uint8_t in[num_bytes];
	uint8_t out[num_bytes];
	struct scan_field field = {
		.num_bits = num_bits,
		.out_value = out,
		.in_value = in
	};
	riscv_bscan_tunneled_scan_context_t bscan_ctxt;

	/* Count down the post-reset grace period; when it expires, drop the
	 * accumulated busy delays back to zero. */
	if (r->reset_delays_wait >= 0) {
		r->reset_delays_wait--;
		if (r->reset_delays_wait < 0) {
			info->dmi_busy_delay = 0;
			info->ac_busy_delay = 0;
		}
	}

	memset(in, 0, num_bytes);
	memset(out, 0, num_bytes);

	/* abits == 0 would mean the DTM was never examined. */
	assert(info->abits != 0);

	buf_set_u32(out, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, op);
	buf_set_u32(out, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, data_out);
	buf_set_u32(out, DTM_DMI_ADDRESS_OFFSET, info->abits, address_out);

	/* I wanted to place this code in a different function, but the way JTAG command
	   queueing works in the jtag handling functions, the scan fields either have to be
	   heap allocated, global/static, or else they need to stay on the stack until
	   the jtag_execute_queue() call. Heap or static fields in this case doesn't seem
	   the best fit. Declaring stack based field values in a subsidiary function call wouldn't
	   work. */
	if (bscan_tunnel_ir_width != 0) {
		riscv_add_bscan_tunneled_scan(target, &field, &bscan_ctxt);
	} else {
		/* Assume dbus is already selected. */
		jtag_add_dr_scan(target->tap, 1, &field, TAP_IDLE);
	}

	/* Feed the target idle cycles so the access can complete before the
	 * next one starts. */
	int idle_count = info->dmi_busy_delay;
	if (exec)
		idle_count += info->ac_busy_delay;

	if (idle_count)
		jtag_add_runtest(idle_count, TAP_IDLE);

	int retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("dmi_scan failed jtag scan");
		if (data_in)
			*data_in = ~0;
		return DMI_STATUS_FAILED;
	}

	if (bscan_tunnel_ir_width != 0) {
		/* need to right-shift "in" by one bit, because of clock skew between BSCAN TAP and DM TAP */
		buffer_shr(in, num_bytes, 1);
	}

	if (data_in)
		*data_in = buf_get_u32(in, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);

	if (address_in)
		*address_in = buf_get_u32(in, DTM_DMI_ADDRESS_OFFSET, info->abits);
	dump_field(idle_count, &field);
	return buf_get_u32(in, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
}
  463. /**
  464. * @param target
  465. * @param data_in The data we received from the target.
  466. * @param dmi_busy_encountered
  467. * If non-NULL, will be updated to reflect whether DMI busy was
  468. * encountered while executing this operation or not.
  469. * @param dmi_op The operation to perform (read/write/nop).
  470. * @param address The address argument to that operation.
  471. * @param data_out The data to send to the target.
  472. * @param timeout_sec
  473. * @param exec When true, this scan will execute something, so extra RTI
  474. * cycles may be added.
  475. * @param ensure_success
  476. * Scan a nop after the requested operation, ensuring the
  477. * DMI operation succeeded.
  478. */
  479. static int dmi_op_timeout(struct target *target, uint32_t *data_in,
  480. bool *dmi_busy_encountered, int dmi_op, uint32_t address,
  481. uint32_t data_out, int timeout_sec, bool exec, bool ensure_success)
  482. {
  483. select_dmi(target);
  484. dmi_status_t status;
  485. uint32_t address_in;
  486. if (dmi_busy_encountered)
  487. *dmi_busy_encountered = false;
  488. const char *op_name;
  489. switch (dmi_op) {
  490. case DMI_OP_NOP:
  491. op_name = "nop";
  492. break;
  493. case DMI_OP_READ:
  494. op_name = "read";
  495. break;
  496. case DMI_OP_WRITE:
  497. op_name = "write";
  498. break;
  499. default:
  500. LOG_ERROR("Invalid DMI operation: %d", dmi_op);
  501. return ERROR_FAIL;
  502. }
  503. time_t start = time(NULL);
  504. /* This first loop performs the request. Note that if for some reason this
  505. * stays busy, it is actually due to the previous access. */
  506. while (1) {
  507. status = dmi_scan(target, NULL, NULL, dmi_op, address, data_out,
  508. exec);
  509. if (status == DMI_STATUS_BUSY) {
  510. increase_dmi_busy_delay(target);
  511. if (dmi_busy_encountered)
  512. *dmi_busy_encountered = true;
  513. } else if (status == DMI_STATUS_SUCCESS) {
  514. break;
  515. } else {
  516. LOG_ERROR("failed %s at 0x%x, status=%d", op_name, address, status);
  517. return ERROR_FAIL;
  518. }
  519. if (time(NULL) - start > timeout_sec)
  520. return ERROR_TIMEOUT_REACHED;
  521. }
  522. if (status != DMI_STATUS_SUCCESS) {
  523. LOG_ERROR("Failed %s at 0x%x; status=%d", op_name, address, status);
  524. return ERROR_FAIL;
  525. }
  526. if (ensure_success) {
  527. /* This second loop ensures the request succeeded, and gets back data.
  528. * Note that NOP can result in a 'busy' result as well, but that would be
  529. * noticed on the next DMI access we do. */
  530. while (1) {
  531. status = dmi_scan(target, &address_in, data_in, DMI_OP_NOP, address, 0,
  532. false);
  533. if (status == DMI_STATUS_BUSY) {
  534. increase_dmi_busy_delay(target);
  535. if (dmi_busy_encountered)
  536. *dmi_busy_encountered = true;
  537. } else if (status == DMI_STATUS_SUCCESS) {
  538. break;
  539. } else {
  540. if (data_in) {
  541. LOG_ERROR("Failed %s (NOP) at 0x%x; value=0x%x, status=%d",
  542. op_name, address, *data_in, status);
  543. } else {
  544. LOG_ERROR("Failed %s (NOP) at 0x%x; status=%d", op_name, address,
  545. status);
  546. }
  547. return ERROR_FAIL;
  548. }
  549. if (time(NULL) - start > timeout_sec)
  550. return ERROR_TIMEOUT_REACHED;
  551. }
  552. }
  553. return ERROR_OK;
  554. }
  555. static int dmi_op(struct target *target, uint32_t *data_in,
  556. bool *dmi_busy_encountered, int dmi_op, uint32_t address,
  557. uint32_t data_out, bool exec, bool ensure_success)
  558. {
  559. int result = dmi_op_timeout(target, data_in, dmi_busy_encountered, dmi_op,
  560. address, data_out, riscv_command_timeout_sec, exec, ensure_success);
  561. if (result == ERROR_TIMEOUT_REACHED) {
  562. LOG_ERROR("DMI operation didn't complete in %d seconds. The target is "
  563. "either really slow or broken. You could increase the "
  564. "timeout with riscv set_command_timeout_sec.",
  565. riscv_command_timeout_sec);
  566. return ERROR_FAIL;
  567. }
  568. return result;
  569. }
/* Read @p address via DMI; a trailing nop confirms the read succeeded. */
static int dmi_read(struct target *target, uint32_t *value, uint32_t address)
{
	return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, false, true);
}

/* Like dmi_read(), but hints that the access starts execution on the target,
 * so extra run-test/idle cycles may be inserted. */
static int dmi_read_exec(struct target *target, uint32_t *value, uint32_t address)
{
	return dmi_op(target, value, NULL, DMI_OP_READ, address, 0, true, true);
}

/* Write @p value to @p address via DMI; a trailing nop confirms success. */
static int dmi_write(struct target *target, uint32_t address, uint32_t value)
{
	return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, false, true);
}

/* Write that may start execution; the confirming nop is optional. */
static int dmi_write_exec(struct target *target, uint32_t address,
		uint32_t value, bool ensure_success)
{
	return dmi_op(target, NULL, NULL, DMI_OP_WRITE, address, value, true, ensure_success);
}
/* Read dmstatus with the given timeout, sanity-checking the version field
 * and (optionally) that the debugger is authenticated.
 * NOTE(review): an unsupported version is only logged — the function still
 * returns ERROR_OK in that case, while an authentication failure returns
 * ERROR_FAIL. Confirm this asymmetry is intentional. */
int dmstatus_read_timeout(struct target *target, uint32_t *dmstatus,
		bool authenticated, unsigned timeout_sec)
{
	int result = dmi_op_timeout(target, dmstatus, NULL, DMI_OP_READ,
			DM_DMSTATUS, 0, timeout_sec, false, true);
	if (result != ERROR_OK)
		return result;
	int dmstatus_version = get_field(*dmstatus, DM_DMSTATUS_VERSION);
	if (dmstatus_version != 2 && dmstatus_version != 3) {
		/* Version 2 is debug spec 0.13; version 3 is 0.14. */
		LOG_ERROR("OpenOCD only supports Debug Module version 2 (0.13) and 3 (0.14), not "
				"%d (dmstatus=0x%x). This error might be caused by a JTAG "
				"signal issue. Try reducing the JTAG clock speed.",
				get_field(*dmstatus, DM_DMSTATUS_VERSION), *dmstatus);
	} else if (authenticated && !get_field(*dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_ERROR("Debugger is not authenticated to target Debug Module. "
				"(dmstatus=0x%x). Use `riscv authdata_read` and "
				"`riscv authdata_write` commands to authenticate.", *dmstatus);
		return ERROR_FAIL;
	}
	return ERROR_OK;
}
  608. int dmstatus_read(struct target *target, uint32_t *dmstatus,
  609. bool authenticated)
  610. {
  611. return dmstatus_read_timeout(target, dmstatus, authenticated,
  612. riscv_command_timeout_sec);
  613. }
  614. static void increase_ac_busy_delay(struct target *target)
  615. {
  616. riscv013_info_t *info = get_info(target);
  617. info->ac_busy_delay += info->ac_busy_delay / 10 + 1;
  618. LOG_DEBUG("dtmcs_idle=%d, dmi_busy_delay=%d, ac_busy_delay=%d",
  619. info->dtmcs_idle, info->dmi_busy_delay,
  620. info->ac_busy_delay);
  621. }
  622. uint32_t abstract_register_size(unsigned width)
  623. {
  624. switch (width) {
  625. case 32:
  626. return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 2);
  627. case 64:
  628. return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 3);
  629. case 128:
  630. return set_field(0, AC_ACCESS_REGISTER_AARSIZE, 4);
  631. default:
  632. LOG_ERROR("Unsupported register width: %d", width);
  633. return 0;
  634. }
  635. }
/* Poll abstractcs until its busy bit clears, leaving the final register value
 * in @p abstractcs. Returns ERROR_FAIL (after logging any cmderr) if busy
 * stays set past riscv_command_timeout_sec. */
static int wait_for_idle(struct target *target, uint32_t *abstractcs)
{
	RISCV013_INFO(info);
	time_t start = time(NULL);
	while (1) {
		if (dmi_read(target, abstractcs, DM_ABSTRACTCS) != ERROR_OK)
			return ERROR_FAIL;

		if (get_field(*abstractcs, DM_ABSTRACTCS_BUSY) == 0)
			return ERROR_OK;

		if (time(NULL) - start > riscv_command_timeout_sec) {
			/* On timeout, decode cmderr for the log before failing. */
			info->cmderr = get_field(*abstractcs, DM_ABSTRACTCS_CMDERR);
			if (info->cmderr != CMDERR_NONE) {
				/* Human-readable names for the 3-bit cmderr field. */
				const char *errors[8] = {
					"none",
					"busy",
					"not supported",
					"exception",
					"halt/resume",
					"reserved",
					"reserved",
					"other" };

				LOG_ERROR("Abstract command ended in error '%s' (abstractcs=0x%x)",
						errors[info->cmderr], *abstractcs);
			}

			LOG_ERROR("Timed out after %ds waiting for busy to go low (abstractcs=0x%x). "
					"Increase the timeout with riscv set_command_timeout_sec.",
					riscv_command_timeout_sec,
					*abstractcs);
			return ERROR_FAIL;
		}
	}
}
/* Write @p command to the DM's command register, wait for it to finish, and
 * record the resulting cmderr in info->cmderr. On any failure the sticky
 * cmderr bits are cleared so the next command can run. */
static int execute_abstract_command(struct target *target, uint32_t command)
{
	RISCV013_INFO(info);
	if (debug_level >= LOG_LVL_DEBUG) {
		switch (get_field(command, DM_COMMAND_CMDTYPE)) {
			case 0:
				/* cmdtype 0 is Access Register; decode its fields. */
				LOG_DEBUG("command=0x%x; access register, size=%d, postexec=%d, "
						"transfer=%d, write=%d, regno=0x%x",
						command,
						8 << get_field(command, AC_ACCESS_REGISTER_AARSIZE),
						get_field(command, AC_ACCESS_REGISTER_POSTEXEC),
						get_field(command, AC_ACCESS_REGISTER_TRANSFER),
						get_field(command, AC_ACCESS_REGISTER_WRITE),
						get_field(command, AC_ACCESS_REGISTER_REGNO));
				break;
			default:
				LOG_DEBUG("command=0x%x", command);
				break;
		}
	}

	/* ensure_success=false: completion is checked by wait_for_idle() below,
	 * not by a confirming nop. */
	if (dmi_write_exec(target, DM_COMMAND, command, false) != ERROR_OK)
		return ERROR_FAIL;

	uint32_t abstractcs = 0;
	int result = wait_for_idle(target, &abstractcs);

	info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
	if (info->cmderr != 0 || result != ERROR_OK) {
		LOG_DEBUG("command 0x%x failed; abstractcs=0x%x", command, abstractcs);
		/* Clear the error. */
		dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
		return ERROR_FAIL;
	}

	return ERROR_OK;
}
  701. static riscv_reg_t read_abstract_arg(struct target *target, unsigned index,
  702. unsigned size_bits)
  703. {
  704. riscv_reg_t value = 0;
  705. uint32_t v;
  706. unsigned offset = index * size_bits / 32;
  707. switch (size_bits) {
  708. default:
  709. LOG_ERROR("Unsupported size: %d bits", size_bits);
  710. return ~0;
  711. case 64:
  712. dmi_read(target, &v, DM_DATA0 + offset + 1);
  713. value |= ((uint64_t) v) << 32;
  714. /* falls through */
  715. case 32:
  716. dmi_read(target, &v, DM_DATA0 + offset);
  717. value |= v;
  718. }
  719. return value;
  720. }
  721. static int write_abstract_arg(struct target *target, unsigned index,
  722. riscv_reg_t value, unsigned size_bits)
  723. {
  724. unsigned offset = index * size_bits / 32;
  725. switch (size_bits) {
  726. default:
  727. LOG_ERROR("Unsupported size: %d bits", size_bits);
  728. return ERROR_FAIL;
  729. case 64:
  730. dmi_write(target, DM_DATA0 + offset + 1, value >> 32);
  731. /* falls through */
  732. case 32:
  733. dmi_write(target, DM_DATA0 + offset, value);
  734. }
  735. return ERROR_OK;
  736. }
  737. /**
  738. * @par size in bits
  739. */
/* Build an Access Register abstract command for GDB register @p number with
 * the given @p size in bits, OR-ing in @p flags (transfer/write/postexec).
 * Maps GDB register numbers onto the spec's abstract regno space:
 * 0x1000+ for XPRs, 0x1020+ for FPRs, raw numbers for CSRs, 0xc000+ for
 * target-custom registers. Asserts on anything unmappable. */
static uint32_t access_register_command(struct target *target, uint32_t number,
		unsigned size, uint32_t flags)
{
	uint32_t command = set_field(0, DM_COMMAND_CMDTYPE, 0);
	switch (size) {
		case 32:
			command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 2);
			break;
		case 64:
			command = set_field(command, AC_ACCESS_REGISTER_AARSIZE, 3);
			break;
		default:
			LOG_ERROR("%d-bit register %s not supported.", size,
					gdb_regno_name(number));
			assert(0);
	}

	if (number <= GDB_REGNO_XPR31) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0x1000 + number - GDB_REGNO_ZERO);
	} else if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0x1020 + number - GDB_REGNO_FPR0);
	} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				number - GDB_REGNO_CSR0);
	} else if (number >= GDB_REGNO_COUNT) {
		/* Custom register. */
		assert(target->reg_cache->reg_list[number].arch_info);
		riscv_reg_info_t *reg_info = target->reg_cache->reg_list[number].arch_info;
		assert(reg_info);
		command = set_field(command, AC_ACCESS_REGISTER_REGNO,
				0xc000 + reg_info->custom_number);
	} else {
		/* Register numbers between CSR4095 and GDB_REGNO_COUNT (e.g. virtual
		 * registers) have no abstract-command encoding. */
		assert(0);
	}

	command |= flags;

	return command;
}
  778. static int register_read_abstract(struct target *target, uint64_t *value,
  779. uint32_t number, unsigned size)
  780. {
  781. RISCV013_INFO(info);
  782. if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
  783. !info->abstract_read_fpr_supported)
  784. return ERROR_FAIL;
  785. if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
  786. !info->abstract_read_csr_supported)
  787. return ERROR_FAIL;
  788. /* The spec doesn't define abstract register numbers for vector registers. */
  789. if (number >= GDB_REGNO_V0 && number <= GDB_REGNO_V31)
  790. return ERROR_FAIL;
  791. uint32_t command = access_register_command(target, number, size,
  792. AC_ACCESS_REGISTER_TRANSFER);
  793. int result = execute_abstract_command(target, command);
  794. if (result != ERROR_OK) {
  795. if (info->cmderr == CMDERR_NOT_SUPPORTED) {
  796. if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
  797. info->abstract_read_fpr_supported = false;
  798. LOG_INFO("Disabling abstract command reads from FPRs.");
  799. } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
  800. info->abstract_read_csr_supported = false;
  801. LOG_INFO("Disabling abstract command reads from CSRs.");
  802. }
  803. }
  804. return result;
  805. }
  806. if (value)
  807. *value = read_abstract_arg(target, 0, size);
  808. return ERROR_OK;
  809. }
  810. static int register_write_abstract(struct target *target, uint32_t number,
  811. uint64_t value, unsigned size)
  812. {
  813. RISCV013_INFO(info);
  814. if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
  815. !info->abstract_write_fpr_supported)
  816. return ERROR_FAIL;
  817. if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095 &&
  818. !info->abstract_write_csr_supported)
  819. return ERROR_FAIL;
  820. uint32_t command = access_register_command(target, number, size,
  821. AC_ACCESS_REGISTER_TRANSFER |
  822. AC_ACCESS_REGISTER_WRITE);
  823. if (write_abstract_arg(target, 0, value, size) != ERROR_OK)
  824. return ERROR_FAIL;
  825. int result = execute_abstract_command(target, command);
  826. if (result != ERROR_OK) {
  827. if (info->cmderr == CMDERR_NOT_SUPPORTED) {
  828. if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
  829. info->abstract_write_fpr_supported = false;
  830. LOG_INFO("Disabling abstract command writes to FPRs.");
  831. } else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
  832. info->abstract_write_csr_supported = false;
  833. LOG_INFO("Disabling abstract command writes to CSRs.");
  834. }
  835. }
  836. return result;
  837. }
  838. return ERROR_OK;
  839. }
  840. /*
  841. * Sets the AAMSIZE field of a memory access abstract command based on
  842. * the width (bits).
  843. */
  844. static uint32_t abstract_memory_size(unsigned width)
  845. {
  846. switch (width) {
  847. case 8:
  848. return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 0);
  849. case 16:
  850. return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 1);
  851. case 32:
  852. return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 2);
  853. case 64:
  854. return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 3);
  855. case 128:
  856. return set_field(0, AC_ACCESS_MEMORY_AAMSIZE, 4);
  857. default:
  858. LOG_ERROR("Unsupported memory width: %d", width);
  859. return 0;
  860. }
  861. }
  862. /*
  863. * Creates a memory access abstract command.
  864. */
  865. static uint32_t access_memory_command(struct target *target, bool virtual,
  866. unsigned width, bool postincrement, bool write)
  867. {
  868. uint32_t command = set_field(0, AC_ACCESS_MEMORY_CMDTYPE, 2);
  869. command = set_field(command, AC_ACCESS_MEMORY_AAMVIRTUAL, virtual);
  870. command |= abstract_memory_size(width);
  871. command = set_field(command, AC_ACCESS_MEMORY_AAMPOSTINCREMENT,
  872. postincrement);
  873. command = set_field(command, AC_ACCESS_MEMORY_WRITE, write);
  874. return command;
  875. }
/* Determine whether the program buffer is also writable by the hart itself
 * (i.e. usable as scratch RAM), caching the answer and the buffer's execution
 * address in info->progbuf_writable / info->progbuf_address. */
static int examine_progbuf(struct target *target)
{
	riscv013_info_t *info = get_info(target);

	/* Only probe once; the cached result is reused on later calls. */
	if (info->progbuf_writable != YNM_MAYBE)
		return ERROR_OK;

	/* Figure out if progbuf is writable. */

	if (info->progbufsize < 1) {
		info->progbuf_writable = YNM_NO;
		LOG_INFO("No program buffer present.");
		return ERROR_OK;
	}

	/* Save S0; it's clobbered by the probe programs below. */
	uint64_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* Execute auipc from the progbuf to learn the address it executes at. */
	struct riscv_program program;
	riscv_program_init(&program, target);
	riscv_program_insert(&program, auipc(S0));
	if (riscv_program_exec(&program, target) != ERROR_OK)
		return ERROR_FAIL;

	if (register_read_direct(target, &info->progbuf_address, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* Have the hart store S0 into the progbuf itself; if the store lands,
	 * the buffer is hart-writable memory. */
	riscv_program_init(&program, target);
	riscv_program_insert(&program, sw(S0, S0, 0));
	int result = riscv_program_exec(&program, target);

	/* Restore S0 before inspecting the probe's outcome. */
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	if (result != ERROR_OK) {
		/* This program might have failed if the program buffer is not
		 * writable. */
		info->progbuf_writable = YNM_NO;
		return ERROR_OK;
	}

	/* Read back PROGBUF0 through the DM; the store wrote the buffer's own
	 * address there, so a match confirms writability. */
	uint32_t written;
	if (dmi_read(target, &written, DM_PROGBUF0) != ERROR_OK)
		return ERROR_FAIL;
	if (written == (uint32_t) info->progbuf_address) {
		LOG_INFO("progbuf is writable at 0x%" PRIx64,
				info->progbuf_address);
		info->progbuf_writable = YNM_YES;
	} else {
		LOG_INFO("progbuf is not writeable at 0x%" PRIx64,
				info->progbuf_address);
		info->progbuf_writable = YNM_NO;
	}

	return ERROR_OK;
}
  922. static int is_fpu_reg(uint32_t gdb_regno)
  923. {
  924. return (gdb_regno >= GDB_REGNO_FPR0 && gdb_regno <= GDB_REGNO_FPR31) ||
  925. (gdb_regno == GDB_REGNO_CSR0 + CSR_FFLAGS) ||
  926. (gdb_regno == GDB_REGNO_CSR0 + CSR_FRM) ||
  927. (gdb_regno == GDB_REGNO_CSR0 + CSR_FCSR);
  928. }
  929. static int is_vector_reg(uint32_t gdb_regno)
  930. {
  931. return (gdb_regno >= GDB_REGNO_V0 && gdb_regno <= GDB_REGNO_V31) ||
  932. gdb_regno == GDB_REGNO_VSTART ||
  933. gdb_regno == GDB_REGNO_VXSAT ||
  934. gdb_regno == GDB_REGNO_VXRM ||
  935. gdb_regno == GDB_REGNO_VL ||
  936. gdb_regno == GDB_REGNO_VTYPE ||
  937. gdb_regno == GDB_REGNO_VLENB;
  938. }
  939. static int prep_for_register_access(struct target *target, uint64_t *mstatus,
  940. int regno)
  941. {
  942. if (is_fpu_reg(regno) || is_vector_reg(regno)) {
  943. if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
  944. return ERROR_FAIL;
  945. if (is_fpu_reg(regno) && (*mstatus & MSTATUS_FS) == 0) {
  946. if (register_write_direct(target, GDB_REGNO_MSTATUS,
  947. set_field(*mstatus, MSTATUS_FS, 1)) != ERROR_OK)
  948. return ERROR_FAIL;
  949. } else if (is_vector_reg(regno) && (*mstatus & MSTATUS_VS) == 0) {
  950. if (register_write_direct(target, GDB_REGNO_MSTATUS,
  951. set_field(*mstatus, MSTATUS_VS, 1)) != ERROR_OK)
  952. return ERROR_FAIL;
  953. }
  954. } else {
  955. *mstatus = 0;
  956. }
  957. return ERROR_OK;
  958. }
  959. static int cleanup_after_register_access(struct target *target,
  960. uint64_t mstatus, int regno)
  961. {
  962. if ((is_fpu_reg(regno) && (mstatus & MSTATUS_FS) == 0) ||
  963. (is_vector_reg(regno) && (mstatus & MSTATUS_VS) == 0))
  964. if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus) != ERROR_OK)
  965. return ERROR_FAIL;
  966. return ERROR_OK;
  967. }
/* Which mechanism backs a piece of scratch memory. */
typedef enum {
	SPACE_DM_DATA,		/* The DM's memory-mapped data0..dataN registers. */
	SPACE_DMI_PROGBUF,	/* Spare words at the end of the program buffer. */
	SPACE_DMI_RAM		/* A target working area in regular RAM. */
} memory_space_t;

/* A chunk of scratch memory shared between the hart and the debugger,
 * reserved by scratch_reserve() and freed by scratch_release(). */
typedef struct {
	/* How can the debugger access this memory? */
	memory_space_t memory_space;
	/* Memory address to access the scratch memory from the hart. */
	riscv_addr_t hart_address;
	/* Memory address to access the scratch memory from the debugger. */
	riscv_addr_t debug_address;
	/* Non-NULL only for SPACE_DMI_RAM, where it must be freed again. */
	struct working_area *area;
} scratch_mem_t;
  982. /**
  983. * Find some scratch memory to be used with the given program.
  984. */
  985. static int scratch_reserve(struct target *target,
  986. scratch_mem_t *scratch,
  987. struct riscv_program *program,
  988. unsigned size_bytes)
  989. {
  990. riscv_addr_t alignment = 1;
  991. while (alignment < size_bytes)
  992. alignment *= 2;
  993. scratch->area = NULL;
  994. riscv013_info_t *info = get_info(target);
  995. /* Option 1: See if data# registers can be used as the scratch memory */
  996. if (info->dataaccess == 1) {
  997. /* Sign extend dataaddr. */
  998. scratch->hart_address = info->dataaddr;
  999. if (info->dataaddr & (1<<11))
  1000. scratch->hart_address |= 0xfffffffffffff000ULL;
  1001. /* Align. */
  1002. scratch->hart_address = (scratch->hart_address + alignment - 1) & ~(alignment - 1);
  1003. if ((size_bytes + scratch->hart_address - info->dataaddr + 3) / 4 >=
  1004. info->datasize) {
  1005. scratch->memory_space = SPACE_DM_DATA;
  1006. scratch->debug_address = (scratch->hart_address - info->dataaddr) / 4;
  1007. return ERROR_OK;
  1008. }
  1009. }
  1010. /* Option 2: See if progbuf can be used as the scratch memory */
  1011. if (examine_progbuf(target) != ERROR_OK)
  1012. return ERROR_FAIL;
  1013. /* Allow for ebreak at the end of the program. */
  1014. unsigned program_size = (program->instruction_count + 1) * 4;
  1015. scratch->hart_address = (info->progbuf_address + program_size + alignment - 1) &
  1016. ~(alignment - 1);
  1017. if ((info->progbuf_writable == YNM_YES) &&
  1018. ((size_bytes + scratch->hart_address - info->progbuf_address + 3) / 4 >=
  1019. info->progbufsize)) {
  1020. scratch->memory_space = SPACE_DMI_PROGBUF;
  1021. scratch->debug_address = (scratch->hart_address - info->progbuf_address) / 4;
  1022. return ERROR_OK;
  1023. }
  1024. /* Option 3: User-configured memory area as scratch RAM */
  1025. if (target_alloc_working_area(target, size_bytes + alignment - 1,
  1026. &scratch->area) == ERROR_OK) {
  1027. scratch->hart_address = (scratch->area->address + alignment - 1) &
  1028. ~(alignment - 1);
  1029. scratch->memory_space = SPACE_DMI_RAM;
  1030. scratch->debug_address = scratch->hart_address;
  1031. return ERROR_OK;
  1032. }
  1033. LOG_ERROR("Couldn't find %d bytes of scratch RAM to use. Please configure "
  1034. "a work area with 'configure -work-area-phys'.", size_bytes);
  1035. return ERROR_FAIL;
  1036. }
  1037. static int scratch_release(struct target *target,
  1038. scratch_mem_t *scratch)
  1039. {
  1040. if (scratch->area)
  1041. return target_free_working_area(target, scratch->area);
  1042. return ERROR_OK;
  1043. }
  1044. static int scratch_read64(struct target *target, scratch_mem_t *scratch,
  1045. uint64_t *value)
  1046. {
  1047. uint32_t v;
  1048. switch (scratch->memory_space) {
  1049. case SPACE_DM_DATA:
  1050. if (dmi_read(target, &v, DM_DATA0 + scratch->debug_address) != ERROR_OK)
  1051. return ERROR_FAIL;
  1052. *value = v;
  1053. if (dmi_read(target, &v, DM_DATA1 + scratch->debug_address) != ERROR_OK)
  1054. return ERROR_FAIL;
  1055. *value |= ((uint64_t) v) << 32;
  1056. break;
  1057. case SPACE_DMI_PROGBUF:
  1058. if (dmi_read(target, &v, DM_PROGBUF0 + scratch->debug_address) != ERROR_OK)
  1059. return ERROR_FAIL;
  1060. *value = v;
  1061. if (dmi_read(target, &v, DM_PROGBUF1 + scratch->debug_address) != ERROR_OK)
  1062. return ERROR_FAIL;
  1063. *value |= ((uint64_t) v) << 32;
  1064. break;
  1065. case SPACE_DMI_RAM:
  1066. {
  1067. uint8_t buffer[8] = {0};
  1068. if (read_memory(target, scratch->debug_address, 4, 2, buffer, 4) != ERROR_OK)
  1069. return ERROR_FAIL;
  1070. *value = buffer[0] |
  1071. (((uint64_t) buffer[1]) << 8) |
  1072. (((uint64_t) buffer[2]) << 16) |
  1073. (((uint64_t) buffer[3]) << 24) |
  1074. (((uint64_t) buffer[4]) << 32) |
  1075. (((uint64_t) buffer[5]) << 40) |
  1076. (((uint64_t) buffer[6]) << 48) |
  1077. (((uint64_t) buffer[7]) << 56);
  1078. }
  1079. break;
  1080. }
  1081. return ERROR_OK;
  1082. }
  1083. static int scratch_write64(struct target *target, scratch_mem_t *scratch,
  1084. uint64_t value)
  1085. {
  1086. switch (scratch->memory_space) {
  1087. case SPACE_DM_DATA:
  1088. dmi_write(target, DM_DATA0 + scratch->debug_address, value);
  1089. dmi_write(target, DM_DATA1 + scratch->debug_address, value >> 32);
  1090. break;
  1091. case SPACE_DMI_PROGBUF:
  1092. dmi_write(target, DM_PROGBUF0 + scratch->debug_address, value);
  1093. dmi_write(target, DM_PROGBUF1 + scratch->debug_address, value >> 32);
  1094. break;
  1095. case SPACE_DMI_RAM:
  1096. {
  1097. uint8_t buffer[8] = {
  1098. value,
  1099. value >> 8,
  1100. value >> 16,
  1101. value >> 24,
  1102. value >> 32,
  1103. value >> 40,
  1104. value >> 48,
  1105. value >> 56
  1106. };
  1107. if (write_memory(target, scratch->debug_address, 4, 2, buffer) != ERROR_OK)
  1108. return ERROR_FAIL;
  1109. }
  1110. break;
  1111. }
  1112. return ERROR_OK;
  1113. }
  1114. /** Return register size in bits. */
  1115. static unsigned register_size(struct target *target, unsigned number)
  1116. {
  1117. /* If reg_cache hasn't been initialized yet, make a guess. We need this for
  1118. * when this function is called during examine(). */
  1119. if (target->reg_cache)
  1120. return target->reg_cache->reg_list[number].size;
  1121. else
  1122. return riscv_xlen(target);
  1123. }
  1124. static bool has_sufficient_progbuf(struct target *target, unsigned size)
  1125. {
  1126. RISCV013_INFO(info);
  1127. RISCV_INFO(r);
  1128. return info->progbufsize + r->impebreak >= size;
  1129. }
/**
 * Immediately write the new value to the requested register. This mechanism
 * bypasses any caches.
 */
static int register_write_direct(struct target *target, unsigned number,
		uint64_t value)
{
	LOG_DEBUG("{%d} %s <- 0x%" PRIx64, riscv_current_hartid(target),
			gdb_regno_name(number), value);

	/* First try the cheap path: an abstract register-access command. Only
	 * fall back to a progbuf program when that fails AND we have enough
	 * progbuf AND the hart is halted. */
	int result = register_write_abstract(target, number, value,
			register_size(target, number));
	if (result == ERROR_OK || !has_sufficient_progbuf(target, 2) ||
			!riscv_is_halted(target))
		return result;

	struct riscv_program program;
	riscv_program_init(&program, target);

	/* S0 is used as a staging register by all the programs below; save it. */
	uint64_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	/* Enable the FPU/vector unit in mstatus if the target register needs it. */
	uint64_t mstatus;
	if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
		return ERROR_FAIL;

	scratch_mem_t scratch;
	bool use_scratch = false;
	if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31 &&
			riscv_supports_extension(target, riscv_current_hartid(target), 'D') &&
			riscv_xlen(target) < 64) {
		/* There are no instructions to move all the bits from a register, so
		 * we need to use some scratch RAM. */
		use_scratch = true;
		/* fld loads the 64-bit value from scratch (address in S0). */
		riscv_program_insert(&program, fld(number - GDB_REGNO_FPR0, S0, 0));

		if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
			return ERROR_FAIL;

		/* Note: recursive call, but only ever for S0 (a GPR), so it takes
		 * the abstract-command path and doesn't recurse again. */
		if (register_write_direct(target, GDB_REGNO_S0, scratch.hart_address)
				!= ERROR_OK) {
			scratch_release(target, &scratch);
			return ERROR_FAIL;
		}

		if (scratch_write64(target, &scratch, value) != ERROR_OK) {
			scratch_release(target, &scratch);
			return ERROR_FAIL;
		}

	} else if (number == GDB_REGNO_VTYPE) {
		/* vtype can only be set via vsetvli; preserve vl by reloading it. */
		riscv_program_insert(&program, csrr(S0, CSR_VL));
		riscv_program_insert(&program, vsetvli(ZERO, S0, value));

	} else {
		/* Stage the value in S0 and move it to the destination. */
		if (register_write_direct(target, GDB_REGNO_S0, value) != ERROR_OK)
			return ERROR_FAIL;

		if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
			if (riscv_supports_extension(target, riscv_current_hartid(target), 'D'))
				riscv_program_insert(&program, fmv_d_x(number - GDB_REGNO_FPR0, S0));
			else
				riscv_program_insert(&program, fmv_w_x(number - GDB_REGNO_FPR0, S0));
		} else if (number == GDB_REGNO_VL) {
			/* "The XLEN-bit-wide read-only vl CSR can only be updated by the
			 * vsetvli and vsetvl instructions, and the fault-only-rst vector
			 * load instruction variants." */
			riscv_reg_t vtype;
			if (register_read(target, &vtype, GDB_REGNO_VTYPE) != ERROR_OK)
				return ERROR_FAIL;
			if (riscv_program_insert(&program, vsetvli(ZERO, S0, vtype)) != ERROR_OK)
				return ERROR_FAIL;
		} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
			riscv_program_csrw(&program, S0, number);
		} else {
			LOG_ERROR("Unsupported register (enum gdb_regno)(%d)", number);
			return ERROR_FAIL;
		}
	}

	int exec_out = riscv_program_exec(&program, target);
	/* Don't message on error. Probably the register doesn't exist. */
	if (exec_out == ERROR_OK && target->reg_cache) {
		/* Keep the register cache coherent with the value just written. */
		struct reg *reg = &target->reg_cache->reg_list[number];
		buf_set_u64(reg->value, 0, reg->size, value);
	}

	if (use_scratch)
		scratch_release(target, &scratch);

	/* Restore mstatus if prep_for_register_access() changed it. */
	if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
		return ERROR_FAIL;

	/* Restore S0. */
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	return exec_out;
}
  1214. /** Return the cached value, or read from the target if necessary. */
  1215. static int register_read(struct target *target, uint64_t *value, uint32_t number)
  1216. {
  1217. if (number == GDB_REGNO_ZERO) {
  1218. *value = 0;
  1219. return ERROR_OK;
  1220. }
  1221. int result = register_read_direct(target, value, number);
  1222. if (result != ERROR_OK)
  1223. return ERROR_FAIL;
  1224. if (target->reg_cache) {
  1225. struct reg *reg = &target->reg_cache->reg_list[number];
  1226. buf_set_u64(reg->value, 0, reg->size, *value);
  1227. }
  1228. return ERROR_OK;
  1229. }
/** Actually read registers from the target right now. */
static int register_read_direct(struct target *target, uint64_t *value, uint32_t number)
{
	/* First try the cheap path: an abstract register-access command. */
	int result = register_read_abstract(target, value, number,
			register_size(target, number));

	/* Fall back to a progbuf program for non-GPR registers. (GPRs can always
	 * be read abstractly, so no fallback is attempted for them.) */
	if (result != ERROR_OK &&
			has_sufficient_progbuf(target, 2) &&
			number > GDB_REGNO_XPR31) {
		struct riscv_program program;
		riscv_program_init(&program, target);

		scratch_mem_t scratch;
		bool use_scratch = false;

		/* S0 is used as a staging register by the programs below; save it. */
		riscv_reg_t s0;
		if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
			return ERROR_FAIL;

		/* Write program to move data into s0. */

		/* Enable the FPU/vector unit in mstatus if this register needs it. */
		uint64_t mstatus;
		if (prep_for_register_access(target, &mstatus, number) != ERROR_OK)
			return ERROR_FAIL;

		if (number >= GDB_REGNO_FPR0 && number <= GDB_REGNO_FPR31) {
			if (riscv_supports_extension(target, riscv_current_hartid(target), 'D')
					&& riscv_xlen(target) < 64) {
				/* There are no instructions to move all the bits from a
				 * register, so we need to use some scratch RAM. */
				riscv_program_insert(&program, fsd(number - GDB_REGNO_FPR0, S0,
							0));

				if (scratch_reserve(target, &scratch, &program, 8) != ERROR_OK)
					return ERROR_FAIL;
				use_scratch = true;

				/* Point S0 at the scratch buffer for the fsd above. */
				if (register_write_direct(target, GDB_REGNO_S0,
							scratch.hart_address) != ERROR_OK) {
					scratch_release(target, &scratch);
					return ERROR_FAIL;
				}
			} else if (riscv_supports_extension(target,
						riscv_current_hartid(target), 'D')) {
				riscv_program_insert(&program, fmv_x_d(S0, number - GDB_REGNO_FPR0));
			} else {
				riscv_program_insert(&program, fmv_x_w(S0, number - GDB_REGNO_FPR0));
			}
		} else if (number >= GDB_REGNO_CSR0 && number <= GDB_REGNO_CSR4095) {
			riscv_program_csrr(&program, S0, number);
		} else {
			LOG_ERROR("Unsupported register: %s", gdb_regno_name(number));
			return ERROR_FAIL;
		}

		/* Execute program. */
		result = riscv_program_exec(&program, target);
		/* Don't message on error. Probably the register doesn't exist. */

		if (use_scratch) {
			/* The value was stored to scratch RAM; fetch it from there. */
			result = scratch_read64(target, &scratch, value);
			scratch_release(target, &scratch);
			if (result != ERROR_OK)
				return result;
		} else {
			/* Read S0 */
			if (register_read_direct(target, value, GDB_REGNO_S0) != ERROR_OK)
				return ERROR_FAIL;
		}

		/* Restore mstatus if prep_for_register_access() changed it. */
		if (cleanup_after_register_access(target, mstatus, number) != ERROR_OK)
			return ERROR_FAIL;

		/* Restore S0. */
		if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
			return ERROR_FAIL;
	}

	if (result == ERROR_OK) {
		LOG_DEBUG("{%d} %s = 0x%" PRIx64, riscv_current_hartid(target),
				gdb_regno_name(number), *value);
	}

	return result;
}
  1301. int wait_for_authbusy(struct target *target, uint32_t *dmstatus)
  1302. {
  1303. time_t start = time(NULL);
  1304. while (1) {
  1305. uint32_t value;
  1306. if (dmstatus_read(target, &value, false) != ERROR_OK)
  1307. return ERROR_FAIL;
  1308. if (dmstatus)
  1309. *dmstatus = value;
  1310. if (!get_field(value, DM_DMSTATUS_AUTHBUSY))
  1311. break;
  1312. if (time(NULL) - start > riscv_command_timeout_sec) {
  1313. LOG_ERROR("Timed out after %ds waiting for authbusy to go low (dmstatus=0x%x). "
  1314. "Increase the timeout with riscv set_command_timeout_sec.",
  1315. riscv_command_timeout_sec,
  1316. value);
  1317. return ERROR_FAIL;
  1318. }
  1319. }
  1320. return ERROR_OK;
  1321. }
  1322. /*** OpenOCD target functions. ***/
  1323. static void deinit_target(struct target *target)
  1324. {
  1325. LOG_DEBUG("riscv_deinit_target()");
  1326. riscv_info_t *info = (riscv_info_t *) target->arch_info;
  1327. free(info->version_specific);
  1328. /* TODO: free register arch_info */
  1329. info->version_specific = NULL;
  1330. }
  1331. static int set_haltgroup(struct target *target, bool *supported)
  1332. {
  1333. uint32_t write = set_field(DM_DMCS2_HGWRITE, DM_DMCS2_GROUP, target->smp);
  1334. if (dmi_write(target, DM_DMCS2, write) != ERROR_OK)
  1335. return ERROR_FAIL;
  1336. uint32_t read;
  1337. if (dmi_read(target, &read, DM_DMCS2) != ERROR_OK)
  1338. return ERROR_FAIL;
  1339. *supported = get_field(read, DM_DMCS2_GROUP) == (unsigned)target->smp;
  1340. return ERROR_OK;
  1341. }
  1342. static int discover_vlenb(struct target *target, int hartid)
  1343. {
  1344. RISCV_INFO(r);
  1345. riscv_reg_t vlenb;
  1346. if (register_read(target, &vlenb, GDB_REGNO_VLENB) != ERROR_OK) {
  1347. LOG_WARNING("Couldn't read vlenb for %s; vector register access won't work.",
  1348. target_name(target));
  1349. r->vlenb[hartid] = 0;
  1350. return ERROR_OK;
  1351. }
  1352. r->vlenb[hartid] = vlenb;
  1353. LOG_INFO("hart %d: Vector support with vlenb=%d", hartid, r->vlenb[hartid]);
  1354. return ERROR_OK;
  1355. }
/* Examine the target: probe the DTM and DM, enumerate and characterize the
 * harts (XLEN, misa, vlenb), and set up SMP halt groups. Called on first
 * connect and again after successful authentication. */
static int examine(struct target *target)
{
	/* Don't need to select dbus, since the first thing we do is read dtmcontrol. */
	uint32_t dtmcontrol = dtmcontrol_scan(target, 0);
	LOG_DEBUG("dtmcontrol=0x%x", dtmcontrol);
	LOG_DEBUG(" dmireset=%d", get_field(dtmcontrol, DTM_DTMCS_DMIRESET));
	LOG_DEBUG(" idle=%d", get_field(dtmcontrol, DTM_DTMCS_IDLE));
	LOG_DEBUG(" dmistat=%d", get_field(dtmcontrol, DTM_DTMCS_DMISTAT));
	LOG_DEBUG(" abits=%d", get_field(dtmcontrol, DTM_DTMCS_ABITS));
	LOG_DEBUG(" version=%d", get_field(dtmcontrol, DTM_DTMCS_VERSION));
	if (dtmcontrol == 0) {
		LOG_ERROR("dtmcontrol is 0. Check JTAG connectivity/board power.");
		return ERROR_FAIL;
	}
	/* This driver only speaks DTM version 1 (debug spec 0.13). */
	if (get_field(dtmcontrol, DTM_DTMCS_VERSION) != 1) {
		LOG_ERROR("Unsupported DTM version %d. (dtmcontrol=0x%x)",
				get_field(dtmcontrol, DTM_DTMCS_VERSION), dtmcontrol);
		return ERROR_FAIL;
	}
	riscv013_info_t *info = get_info(target);
	/* TODO: This won't be true if there are multiple DMs. */
	info->index = target->coreid;
	info->abits = get_field(dtmcontrol, DTM_DTMCS_ABITS);
	info->dtmcs_idle = get_field(dtmcontrol, DTM_DTMCS_IDLE);
	/* Reset the Debug Module. */
	dm013_info_t *dm = get_dm(target);
	if (!dm)
		return ERROR_FAIL;
	if (!dm->was_reset) {
		dmi_write(target, DM_DMCONTROL, 0);
		dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_DMACTIVE);
		dm->was_reset = true;
	}
	/* Write all-ones to hartsel (and HASEL) so we can discover HARTSELLEN
	 * and hasel support from what reads back. */
	dmi_write(target, DM_DMCONTROL, DM_DMCONTROL_HARTSELLO |
			DM_DMCONTROL_HARTSELHI | DM_DMCONTROL_DMACTIVE |
			DM_DMCONTROL_HASEL);
	uint32_t dmcontrol;
	if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
		return ERROR_FAIL;
	if (!get_field(dmcontrol, DM_DMCONTROL_DMACTIVE)) {
		LOG_ERROR("Debug Module did not become active. dmcontrol=0x%x",
				dmcontrol);
		return ERROR_FAIL;
	}
	/* HASEL reads back as 1 only if hart array mask selection exists. */
	dm->hasel_supported = get_field(dmcontrol, DM_DMCONTROL_HASEL);
	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, false) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("dmstatus: 0x%08x", dmstatus);
	int dmstatus_version = get_field(dmstatus, DM_DMSTATUS_VERSION);
	if (dmstatus_version != 2 && dmstatus_version != 3) {
		/* Error was already printed out in dmstatus_read(). */
		return ERROR_FAIL;
	}
	/* Count how many hartsel bits stuck at 1 to determine HARTSELLEN. */
	uint32_t hartsel =
		(get_field(dmcontrol, DM_DMCONTROL_HARTSELHI) <<
		 DM_DMCONTROL_HARTSELLO_LENGTH) |
		get_field(dmcontrol, DM_DMCONTROL_HARTSELLO);
	info->hartsellen = 0;
	while (hartsel & 1) {
		info->hartsellen++;
		hartsel >>= 1;
	}
	LOG_DEBUG("hartsellen=%d", info->hartsellen);
	uint32_t hartinfo;
	if (dmi_read(target, &hartinfo, DM_HARTINFO) != ERROR_OK)
		return ERROR_FAIL;
	/* Record the data-register shadowing info used by scratch_reserve(). */
	info->datasize = get_field(hartinfo, DM_HARTINFO_DATASIZE);
	info->dataaccess = get_field(hartinfo, DM_HARTINFO_DATAACCESS);
	info->dataaddr = get_field(hartinfo, DM_HARTINFO_DATAADDR);
	if (!get_field(dmstatus, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_ERROR("Debugger is not authenticated to target Debug Module. "
				"(dmstatus=0x%x). Use `riscv authdata_read` and "
				"`riscv authdata_write` commands to authenticate.", dmstatus);
		/* If we return ERROR_FAIL here, then in a multicore setup the next
		 * core won't be examined, which means we won't set up the
		 * authentication commands for them, which means the config script
		 * needs to be a lot more complex. */
		return ERROR_OK;
	}
	if (dmi_read(target, &info->sbcs, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;
	/* Check that abstract data registers are accessible. */
	uint32_t abstractcs;
	if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
		return ERROR_FAIL;
	info->datacount = get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT);
	info->progbufsize = get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE);
	LOG_INFO("datacount=%d progbufsize=%d", info->datacount, info->progbufsize);
	RISCV_INFO(r);
	r->impebreak = get_field(dmstatus, DM_DMSTATUS_IMPEBREAK);
	if (!has_sufficient_progbuf(target, 2)) {
		LOG_WARNING("We won't be able to execute fence instructions on this "
				"target. Memory may not always appear consistent. "
				"(progbufsize=%d, impebreak=%d)", info->progbufsize,
				r->impebreak);
	}
	if (info->progbufsize < 4 && riscv_enable_virtual) {
		LOG_ERROR("set_enable_virtual is not available on this target. It "
				"requires a program buffer size of at least 4. (progbufsize=%d) "
				"Use `riscv set_enable_virtual off` to continue."
				, info->progbufsize);
	}
	/* Before doing anything else we must first enumerate the harts. */
	if (dm->hart_count < 0) {
		for (int i = 0; i < MIN(RISCV_MAX_HARTS, 1 << info->hartsellen); ++i) {
			r->current_hartid = i;
			if (riscv013_select_current_hart(target) != ERROR_OK)
				return ERROR_FAIL;
			uint32_t s;
			if (dmstatus_read(target, &s, true) != ERROR_OK)
				return ERROR_FAIL;
			/* The first nonexistent hart index ends the enumeration. */
			if (get_field(s, DM_DMSTATUS_ANYNONEXISTENT))
				break;
			dm->hart_count = i + 1;
			/* Acknowledge any reset this hart has seen. */
			if (get_field(s, DM_DMSTATUS_ANYHAVERESET))
				dmi_write(target, DM_DMCONTROL,
						set_hartsel(DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_ACKHAVERESET, i));
		}
		LOG_DEBUG("Detected %d harts.", dm->hart_count);
	}
	if (dm->hart_count == 0) {
		LOG_ERROR("No harts found!");
		return ERROR_FAIL;
	}
	/* Don't call any riscv_* functions until after we've counted the number of
	 * cores and initialized registers. */
	for (int i = 0; i < dm->hart_count; ++i) {
		if (!riscv_rtos_enabled(target) && i != target->coreid)
			continue;
		r->current_hartid = i;
		if (riscv013_select_current_hart(target) != ERROR_OK)
			return ERROR_FAIL;
		/* Harts must be halted to be characterized; remember whether we did
		 * the halting so we can resume them afterwards. */
		bool halted = riscv_is_halted(target);
		if (!halted) {
			if (riscv013_halt_go(target) != ERROR_OK) {
				LOG_ERROR("Fatal: Hart %d failed to halt during examine()", i);
				return ERROR_FAIL;
			}
		}
		/* Without knowing anything else we can at least mess with the
		 * program buffer. */
		r->debug_buffer_size[i] = info->progbufsize;
		/* Probe XLEN: if a 64-bit abstract read of s0 works, XLEN is 64. */
		int result = register_read_abstract(target, NULL, GDB_REGNO_S0, 64);
		if (result == ERROR_OK)
			r->xlen[i] = 64;
		else
			r->xlen[i] = 32;
		if (register_read(target, &r->misa[i], GDB_REGNO_MISA)) {
			LOG_ERROR("Fatal: Failed to read MISA from hart %d.", i);
			return ERROR_FAIL;
		}
		if (riscv_supports_extension(target, i, 'V')) {
			if (discover_vlenb(target, i) != ERROR_OK)
				return ERROR_FAIL;
		}
		/* Now init registers based on what we discovered. */
		if (riscv_init_registers(target) != ERROR_OK)
			return ERROR_FAIL;
		/* Display this as early as possible to help people who are using
		 * really slow simulators. */
		LOG_DEBUG(" hart %d: XLEN=%d, misa=0x%" PRIx64, i, r->xlen[i],
				r->misa[i]);
		/* Resume harts that were only halted for examination. */
		if (!halted)
			riscv013_step_or_resume_current_hart(target, false, false);
	}
	target_set_examined(target);
	if (target->smp) {
		bool haltgroup_supported;
		if (set_haltgroup(target, &haltgroup_supported) != ERROR_OK)
			return ERROR_FAIL;
		if (haltgroup_supported)
			LOG_INFO("Core %d made part of halt group %d.", target->coreid,
					target->smp);
		else
			LOG_INFO("Core %d could not be made part of halt group %d.",
					target->coreid, target->smp);
	}
	/* Some regression suites rely on seeing 'Examined RISC-V core' to know
	 * when they can connect with gdb/telnet.
	 * We will need to update those suites if we want to change that text. */
	LOG_INFO("Examined RISC-V core; found %d harts",
			riscv_count_harts(target));
	for (int i = 0; i < riscv_count_harts(target); ++i) {
		if (riscv_hart_enabled(target, i)) {
			LOG_INFO(" hart %d: XLEN=%d, misa=0x%" PRIx64, i, r->xlen[i],
					r->misa[i]);
		} else {
			LOG_INFO(" hart %d: currently disabled", i);
		}
	}
	return ERROR_OK;
}
  1549. int riscv013_authdata_read(struct target *target, uint32_t *value)
  1550. {
  1551. if (wait_for_authbusy(target, NULL) != ERROR_OK)
  1552. return ERROR_FAIL;
  1553. return dmi_read(target, value, DM_AUTHDATA);
  1554. }
/* Write one word of authentication data to the DM.
 *
 * Waits for authbusy to clear before and after the write, sampling dmstatus
 * each time.  If the write flipped dmstatus.authenticated from 0 to 1, the
 * debugger has just gained access to the DM, so every target sharing this DM
 * is (re-)examined; the result is ERROR_FAIL if any examination fails. */
int riscv013_authdata_write(struct target *target, uint32_t value)
{
	uint32_t before, after;
	/* The DM ignores authdata accesses while authbusy is set. */
	if (wait_for_authbusy(target, &before) != ERROR_OK)
		return ERROR_FAIL;

	dmi_write(target, DM_AUTHDATA, value);

	if (wait_for_authbusy(target, &after) != ERROR_OK)
		return ERROR_FAIL;

	/* Authentication just succeeded: examine everything behind this DM. */
	if (!get_field(before, DM_DMSTATUS_AUTHENTICATED) &&
			get_field(after, DM_DMSTATUS_AUTHENTICATED)) {
		LOG_INFO("authdata_write resulted in successful authentication");
		int result = ERROR_OK;
		dm013_info_t *dm = get_dm(target);
		if (!dm)
			return ERROR_FAIL;
		target_list_t *entry;
		list_for_each_entry(entry, &dm->target_list, list) {
			if (examine(entry->target) != ERROR_OK)
				result = ERROR_FAIL;
		}
		return result;
	}

	return ERROR_OK;
}
  1579. static int riscv013_hart_count(struct target *target)
  1580. {
  1581. dm013_info_t *dm = get_dm(target);
  1582. assert(dm);
  1583. return dm->hart_count;
  1584. }
  1585. static unsigned riscv013_data_bits(struct target *target)
  1586. {
  1587. RISCV013_INFO(info);
  1588. /* TODO: Once there is a spec for discovering abstract commands, we can
  1589. * take those into account as well. For now we assume abstract commands
  1590. * support XLEN-wide accesses. */
  1591. if (has_sufficient_progbuf(target, 3) && !riscv_prefer_sba)
  1592. return riscv_xlen(target);
  1593. if (get_field(info->sbcs, DM_SBCS_SBACCESS128))
  1594. return 128;
  1595. if (get_field(info->sbcs, DM_SBCS_SBACCESS64))
  1596. return 64;
  1597. if (get_field(info->sbcs, DM_SBCS_SBACCESS32))
  1598. return 32;
  1599. if (get_field(info->sbcs, DM_SBCS_SBACCESS16))
  1600. return 16;
  1601. if (get_field(info->sbcs, DM_SBCS_SBACCESS8))
  1602. return 8;
  1603. return riscv_xlen(target);
  1604. }
/* Prepare the hart for transferring a vector register one XLEN-sized chunk
 * at a time through a scalar register: save vtype and vl, then set vtype so
 * SEW == XLEN and set vl to the number of XLEN chunks per vector register.
 *
 * On success, *vtype and *vl hold the saved CSR values (pass them to
 * cleanup_after_vector_access() afterwards) and *debug_vl the chunk count. */
static int prep_for_vector_access(struct target *target, uint64_t *vtype,
		uint64_t *vl, unsigned *debug_vl)
{
	RISCV_INFO(r);
	/* TODO: this continuous save/restore is terrible for performance. */
	/* Write vtype and vl. */
	unsigned encoded_vsew;
	switch (riscv_xlen(target)) {
		case 32:
			encoded_vsew = 2;	/* SEW = 32 */
			break;
		case 64:
			encoded_vsew = 3;	/* SEW = 64 */
			break;
		default:
			LOG_ERROR("Unsupported xlen: %d", riscv_xlen(target));
			return ERROR_FAIL;
	}

	/* Save vtype and vl. */
	if (register_read(target, vtype, GDB_REGNO_VTYPE) != ERROR_OK)
		return ERROR_FAIL;
	if (register_read(target, vl, GDB_REGNO_VL) != ERROR_OK)
		return ERROR_FAIL;

	/* encoded_vsew << 3 positions the value in vtype's vsew field
	 * (assumes the RVV layout with vsew at bits [5:3] — confirm against
	 * the vector spec version the hart implements). */
	if (register_write_direct(target, GDB_REGNO_VTYPE, encoded_vsew << 3) != ERROR_OK)
		return ERROR_FAIL;
	/* vlenb is in bytes; *8/xlen yields the number of XLEN-bit chunks. */
	*debug_vl = DIV_ROUND_UP(r->vlenb[r->current_hartid] * 8,
			riscv_xlen(target));
	if (register_write_direct(target, GDB_REGNO_VL, *debug_vl) != ERROR_OK)
		return ERROR_FAIL;

	return ERROR_OK;
}
  1636. static int cleanup_after_vector_access(struct target *target, uint64_t vtype,
  1637. uint64_t vl)
  1638. {
  1639. /* Restore vtype and vl. */
  1640. if (register_write_direct(target, GDB_REGNO_VTYPE, vtype) != ERROR_OK)
  1641. return ERROR_FAIL;
  1642. if (register_write_direct(target, GDB_REGNO_VL, vl) != ERROR_OK)
  1643. return ERROR_FAIL;
  1644. return ERROR_OK;
  1645. }
/* Read vector register V<regno - GDB_REGNO_V0> into 'value'.
 *
 * There is no direct debug access to vector registers, so this executes
 * "vmv.x.s s0, vN; vslide1down.vx vN, vN, s0" debug_vl times: each pass
 * exposes the lowest XLEN bits of the register in S0 (read back through
 * abstract access) while rotating the register down, so after debug_vl
 * passes the register should be back to its original value.
 *
 * S0, mstatus (via prep_for_register_access) and vtype/vl are saved and
 * restored.  'value' receives debug_vl XLEN-bit little-endian slices. */
static int riscv013_get_register_buf(struct target *target,
		uint8_t *value, int regno)
{
	assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);

	/* S0 is the transfer register; save it. */
	riscv_reg_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	uint64_t mstatus;
	if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;

	uint64_t vtype, vl;
	unsigned debug_vl;
	if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
		return ERROR_FAIL;

	unsigned vnum = regno - GDB_REGNO_V0;
	unsigned xlen = riscv_xlen(target);

	struct riscv_program program;
	riscv_program_init(&program, target);
	riscv_program_insert(&program, vmv_x_s(S0, vnum));
	riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));

	int result = ERROR_OK;
	for (unsigned i = 0; i < debug_vl; i++) {
		/* Executing the program might result in an exception if there is some
		 * issue with the vector implementation/instructions we're using. If that
		 * happens, attempt to restore as usual. We may have clobbered the
		 * vector register we tried to read already.
		 * For other failures, we just return error because things are probably
		 * so messed up that attempting to restore isn't going to help. */
		result = riscv_program_exec(&program, target);
		if (result == ERROR_OK) {
			uint64_t v;
			if (register_read_direct(target, &v, GDB_REGNO_S0) != ERROR_OK)
				return ERROR_FAIL;
			buf_set_u64(value, xlen * i, xlen, v);
		} else {
			break;
		}
	}

	/* Restore the saved context even if the program failed part-way. */
	if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
		return ERROR_FAIL;
	if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	return result;
}
/* Write vector register V<regno - GDB_REGNO_V0> from 'value'.
 *
 * Mirror of riscv013_get_register_buf(): each iteration loads the next
 * XLEN-bit slice into S0 and executes "vslide1down.vx vN, vN, s0", which
 * shifts the register down one element and inserts S0 at the top; after
 * debug_vl iterations the whole register has been replaced.
 *
 * S0, mstatus and vtype/vl are saved and restored around the operation. */
static int riscv013_set_register_buf(struct target *target,
		int regno, const uint8_t *value)
{
	assert(regno >= GDB_REGNO_V0 && regno <= GDB_REGNO_V31);

	/* S0 is the staging register; save it. */
	riscv_reg_t s0;
	if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
		return ERROR_FAIL;

	uint64_t mstatus;
	if (prep_for_register_access(target, &mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;

	uint64_t vtype, vl;
	unsigned debug_vl;
	if (prep_for_vector_access(target, &vtype, &vl, &debug_vl) != ERROR_OK)
		return ERROR_FAIL;

	unsigned vnum = regno - GDB_REGNO_V0;
	unsigned xlen = riscv_xlen(target);

	struct riscv_program program;
	riscv_program_init(&program, target);
	riscv_program_insert(&program, vslide1down_vx(vnum, vnum, S0, true));

	int result = ERROR_OK;
	for (unsigned i = 0; i < debug_vl; i++) {
		if (register_write_direct(target, GDB_REGNO_S0,
					buf_get_u64(value, xlen * i, xlen)) != ERROR_OK)
			return ERROR_FAIL;
		result = riscv_program_exec(&program, target);
		if (result != ERROR_OK)
			break;
	}

	/* Restore the saved context even if program execution failed. */
	if (cleanup_after_vector_access(target, vtype, vl) != ERROR_OK)
		return ERROR_FAIL;
	if (cleanup_after_register_access(target, mstatus, regno) != ERROR_OK)
		return ERROR_FAIL;
	if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
		return ERROR_FAIL;

	return result;
}
  1728. static int init_target(struct command_context *cmd_ctx,
  1729. struct target *target)
  1730. {
  1731. LOG_DEBUG("init");
  1732. riscv_info_t *generic_info = (riscv_info_t *) target->arch_info;
  1733. generic_info->get_register = &riscv013_get_register;
  1734. generic_info->set_register = &riscv013_set_register;
  1735. generic_info->get_register_buf = &riscv013_get_register_buf;
  1736. generic_info->set_register_buf = &riscv013_set_register_buf;
  1737. generic_info->select_current_hart = &riscv013_select_current_hart;
  1738. generic_info->is_halted = &riscv013_is_halted;
  1739. generic_info->resume_go = &riscv013_resume_go;
  1740. generic_info->step_current_hart = &riscv013_step_current_hart;
  1741. generic_info->on_halt = &riscv013_on_halt;
  1742. generic_info->resume_prep = &riscv013_resume_prep;
  1743. generic_info->halt_prep = &riscv013_halt_prep;
  1744. generic_info->halt_go = &riscv013_halt_go;
  1745. generic_info->on_step = &riscv013_on_step;
  1746. generic_info->halt_reason = &riscv013_halt_reason;
  1747. generic_info->read_debug_buffer = &riscv013_read_debug_buffer;
  1748. generic_info->write_debug_buffer = &riscv013_write_debug_buffer;
  1749. generic_info->execute_debug_buffer = &riscv013_execute_debug_buffer;
  1750. generic_info->fill_dmi_write_u64 = &riscv013_fill_dmi_write_u64;
  1751. generic_info->fill_dmi_read_u64 = &riscv013_fill_dmi_read_u64;
  1752. generic_info->fill_dmi_nop_u64 = &riscv013_fill_dmi_nop_u64;
  1753. generic_info->dmi_write_u64_bits = &riscv013_dmi_write_u64_bits;
  1754. generic_info->authdata_read = &riscv013_authdata_read;
  1755. generic_info->authdata_write = &riscv013_authdata_write;
  1756. generic_info->dmi_read = &dmi_read;
  1757. generic_info->dmi_write = &dmi_write;
  1758. generic_info->read_memory = read_memory;
  1759. generic_info->test_sba_config_reg = &riscv013_test_sba_config_reg;
  1760. generic_info->test_compliance = &riscv013_test_compliance;
  1761. generic_info->hart_count = &riscv013_hart_count;
  1762. generic_info->data_bits = &riscv013_data_bits;
  1763. generic_info->version_specific = calloc(1, sizeof(riscv013_info_t));
  1764. if (!generic_info->version_specific)
  1765. return ERROR_FAIL;
  1766. riscv013_info_t *info = get_info(target);
  1767. info->progbufsize = -1;
  1768. info->dmi_busy_delay = 0;
  1769. info->bus_master_read_delay = 0;
  1770. info->bus_master_write_delay = 0;
  1771. info->ac_busy_delay = 0;
  1772. /* Assume all these abstract commands are supported until we learn
  1773. * otherwise.
  1774. * TODO: The spec allows eg. one CSR to be able to be accessed abstractly
  1775. * while another one isn't. We don't track that this closely here, but in
  1776. * the future we probably should. */
  1777. info->abstract_read_csr_supported = true;
  1778. info->abstract_write_csr_supported = true;
  1779. info->abstract_read_fpr_supported = true;
  1780. info->abstract_write_fpr_supported = true;
  1781. return ERROR_OK;
  1782. }
/* Assert reset (ndmreset) on the target.
 *
 * For RTOS/SMP configurations, haltreq is first set (or cleared) on every
 * enabled hart so that reset_halt applies to all of them, then ndmreset is
 * asserted; otherwise only the current hart is selected.  Also clears the
 * cached program buffer contents since the DM may itself be reset. */
static int assert_reset(struct target *target)
{
	RISCV_INFO(r);

	select_dmi(target);

	uint32_t control_base = set_field(0, DM_DMCONTROL_DMACTIVE, 1);

	if (target->rtos) {
		/* There's only one target, and OpenOCD thinks each hart is a thread.
		 * We must reset them all. */

		/* TODO: Try to use hasel in dmcontrol */

		/* Set haltreq for each hart. */
		uint32_t control = control_base;
		for (int i = 0; i < riscv_count_harts(target); ++i) {
			if (!riscv_hart_enabled(target, i))
				continue;

			control = set_hartsel(control_base, i);
			control = set_field(control, DM_DMCONTROL_HALTREQ,
					target->reset_halt ? 1 : 0);
			dmi_write(target, DM_DMCONTROL, control);
		}
		/* Assert ndmreset */
		control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
		dmi_write(target, DM_DMCONTROL, control);
	} else {
		/* Reset just this hart. */
		uint32_t control = set_hartsel(control_base, r->current_hartid);
		control = set_field(control, DM_DMCONTROL_HALTREQ,
				target->reset_halt ? 1 : 0);
		control = set_field(control, DM_DMCONTROL_NDMRESET, 1);
		dmi_write(target, DM_DMCONTROL, control);
	}

	target->state = TARGET_RESET;

	dm013_info_t *dm = get_dm(target);
	if (!dm)
		return ERROR_FAIL;

	/* The DM might have gotten reset if OpenOCD called us in some reset that
	 * involves SRST being toggled. So clear our cache which may be out of
	 * date. */
	memset(dm->progbuf_cache, 0, sizeof(dm->progbuf_cache));

	return ERROR_OK;
}
  1823. static int deassert_reset(struct target *target)
  1824. {
  1825. RISCV_INFO(r);
  1826. RISCV013_INFO(info);
  1827. select_dmi(target);
  1828. /* Clear the reset, but make sure haltreq is still set */
  1829. uint32_t control = 0;
  1830. control = set_field(control, DM_DMCONTROL_HALTREQ, target->reset_halt ? 1 : 0);
  1831. control = set_field(control, DM_DMCONTROL_DMACTIVE, 1);
  1832. dmi_write(target, DM_DMCONTROL,
  1833. set_hartsel(control, r->current_hartid));
  1834. uint32_t dmstatus;
  1835. int dmi_busy_delay = info->dmi_busy_delay;
  1836. time_t start = time(NULL);
  1837. for (int i = 0; i < riscv_count_harts(target); ++i) {
  1838. int index = i;
  1839. if (target->rtos) {
  1840. if (!riscv_hart_enabled(target, index))
  1841. continue;
  1842. dmi_write(target, DM_DMCONTROL,
  1843. set_hartsel(control, index));
  1844. } else {
  1845. index = r->current_hartid;
  1846. }
  1847. char *operation;
  1848. uint32_t expected_field;
  1849. if (target->reset_halt) {
  1850. operation = "halt";
  1851. expected_field = DM_DMSTATUS_ALLHALTED;
  1852. } else {
  1853. operation = "run";
  1854. expected_field = DM_DMSTATUS_ALLRUNNING;
  1855. }
  1856. LOG_DEBUG("Waiting for hart %d to %s out of reset.", index, operation);
  1857. while (1) {
  1858. int result = dmstatus_read_timeout(target, &dmstatus, true,
  1859. riscv_reset_timeout_sec);
  1860. if (result == ERROR_TIMEOUT_REACHED)
  1861. LOG_ERROR("Hart %d didn't complete a DMI read coming out of "
  1862. "reset in %ds; Increase the timeout with riscv "
  1863. "set_reset_timeout_sec.",
  1864. index, riscv_reset_timeout_sec);
  1865. if (result != ERROR_OK)
  1866. return result;
  1867. if (get_field(dmstatus, expected_field))
  1868. break;
  1869. if (time(NULL) - start > riscv_reset_timeout_sec) {
  1870. LOG_ERROR("Hart %d didn't %s coming out of reset in %ds; "
  1871. "dmstatus=0x%x; "
  1872. "Increase the timeout with riscv set_reset_timeout_sec.",
  1873. index, operation, riscv_reset_timeout_sec, dmstatus);
  1874. return ERROR_FAIL;
  1875. }
  1876. }
  1877. target->state = TARGET_HALTED;
  1878. if (get_field(dmstatus, DM_DMSTATUS_ALLHAVERESET)) {
  1879. /* Ack reset. */
  1880. dmi_write(target, DM_DMCONTROL,
  1881. set_hartsel(control, index) |
  1882. DM_DMCONTROL_ACKHAVERESET);
  1883. }
  1884. if (!target->rtos)
  1885. break;
  1886. }
  1887. info->dmi_busy_delay = dmi_busy_delay;
  1888. return ERROR_OK;
  1889. }
  1890. static int execute_fence(struct target *target)
  1891. {
  1892. int old_hartid = riscv_current_hartid(target);
  1893. /* FIXME: For non-coherent systems we need to flush the caches right
  1894. * here, but there's no ISA-defined way of doing that. */
  1895. {
  1896. struct riscv_program program;
  1897. riscv_program_init(&program, target);
  1898. riscv_program_fence_i(&program);
  1899. riscv_program_fence(&program);
  1900. int result = riscv_program_exec(&program, target);
  1901. if (result != ERROR_OK)
  1902. LOG_DEBUG("Unable to execute pre-fence");
  1903. }
  1904. for (int i = 0; i < riscv_count_harts(target); ++i) {
  1905. if (!riscv_hart_enabled(target, i))
  1906. continue;
  1907. if (i == old_hartid)
  1908. /* Fence already executed for this hart */
  1909. continue;
  1910. riscv_set_current_hartid(target, i);
  1911. struct riscv_program program;
  1912. riscv_program_init(&program, target);
  1913. riscv_program_fence_i(&program);
  1914. riscv_program_fence(&program);
  1915. int result = riscv_program_exec(&program, target);
  1916. if (result != ERROR_OK)
  1917. LOG_DEBUG("Unable to execute fence on hart %d", i);
  1918. }
  1919. riscv_set_current_hartid(target, old_hartid);
  1920. return ERROR_OK;
  1921. }
  1922. static void log_memory_access(target_addr_t address, uint64_t value,
  1923. unsigned size_bytes, bool read)
  1924. {
  1925. if (debug_level < LOG_LVL_DEBUG)
  1926. return;
  1927. char fmt[80];
  1928. sprintf(fmt, "M[0x%" TARGET_PRIxADDR "] %ss 0x%%0%d" PRIx64,
  1929. address, read ? "read" : "write", size_bytes * 2);
  1930. switch (size_bytes) {
  1931. case 1:
  1932. value &= 0xff;
  1933. break;
  1934. case 2:
  1935. value &= 0xffff;
  1936. break;
  1937. case 4:
  1938. value &= 0xffffffffUL;
  1939. break;
  1940. case 8:
  1941. break;
  1942. default:
  1943. assert(false);
  1944. }
  1945. LOG_DEBUG(fmt, value);
  1946. }
/* Read the relevant sbdata regs depending on size, and put the results into
 * buffer. */
static int read_memory_bus_word(struct target *target, target_addr_t address,
		uint32_t size, uint8_t *buffer)
{
	uint32_t value;
	int result;
	static int sbdata[4] = { DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3 };
	assert(size <= 16);
	/* Read from the highest sbdata register down to sbdata0.  The order
	 * matters: per the debug spec, a read of sbdata0 can trigger the next
	 * system bus access when sbreadondata is set, so it must come last. */
	for (int i = (size - 1) / 4; i >= 0; i--) {
		result = dmi_op(target, &value, NULL, DMI_OP_READ, sbdata[i], 0, false, true);
		if (result != ERROR_OK)
			return result;
		buf_set_u32(buffer + i * 4, 0, 8 * MIN(size, 4), value);
		log_memory_access(address + i * 4, value, MIN(size, 4), true);
	}
	return ERROR_OK;
}
  1965. static uint32_t sb_sbaccess(unsigned size_bytes)
  1966. {
  1967. switch (size_bytes) {
  1968. case 1:
  1969. return set_field(0, DM_SBCS_SBACCESS, 0);
  1970. case 2:
  1971. return set_field(0, DM_SBCS_SBACCESS, 1);
  1972. case 4:
  1973. return set_field(0, DM_SBCS_SBACCESS, 2);
  1974. case 8:
  1975. return set_field(0, DM_SBCS_SBACCESS, 3);
  1976. case 16:
  1977. return set_field(0, DM_SBCS_SBACCESS, 4);
  1978. }
  1979. assert(0);
  1980. return 0; /* Make mingw happy. */
  1981. }
  1982. static target_addr_t sb_read_address(struct target *target)
  1983. {
  1984. RISCV013_INFO(info);
  1985. unsigned sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
  1986. target_addr_t address = 0;
  1987. uint32_t v;
  1988. if (sbasize > 32) {
  1989. dmi_read(target, &v, DM_SBADDRESS1);
  1990. address |= v;
  1991. address <<= 32;
  1992. }
  1993. dmi_read(target, &v, DM_SBADDRESS0);
  1994. address |= v;
  1995. return address;
  1996. }
  1997. static int sb_write_address(struct target *target, target_addr_t address)
  1998. {
  1999. RISCV013_INFO(info);
  2000. unsigned sbasize = get_field(info->sbcs, DM_SBCS_SBASIZE);
  2001. /* There currently is no support for >64-bit addresses in OpenOCD. */
  2002. if (sbasize > 96)
  2003. dmi_write(target, DM_SBADDRESS3, 0);
  2004. if (sbasize > 64)
  2005. dmi_write(target, DM_SBADDRESS2, 0);
  2006. if (sbasize > 32)
  2007. dmi_write(target, DM_SBADDRESS1, address >> 32);
  2008. return dmi_write(target, DM_SBADDRESS0, address);
  2009. }
  2010. static int read_sbcs_nonbusy(struct target *target, uint32_t *sbcs)
  2011. {
  2012. time_t start = time(NULL);
  2013. while (1) {
  2014. if (dmi_read(target, sbcs, DM_SBCS) != ERROR_OK)
  2015. return ERROR_FAIL;
  2016. if (!get_field(*sbcs, DM_SBCS_SBBUSY))
  2017. return ERROR_OK;
  2018. if (time(NULL) - start > riscv_command_timeout_sec) {
  2019. LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
  2020. "Increase the timeout with riscv set_command_timeout_sec.",
  2021. riscv_command_timeout_sec, *sbcs);
  2022. return ERROR_FAIL;
  2023. }
  2024. }
  2025. }
/* When riscv_enable_virtual is set (and there are at least 5 progbuf words),
 * arrange for program-buffer memory accesses to use the privilege level and
 * address translation of the interrupted context: copy dcsr.prv into
 * mstatus.MPP and set mstatus.MPRV.
 *
 * *mstatus_old receives the original mstatus so the caller can restore it.
 * NOTE: when the feature is disabled, *mstatus/*mstatus_old are left
 * untouched — callers must not rely on them in that case. */
static int modify_privilege(struct target *target, uint64_t *mstatus, uint64_t *mstatus_old)
{
	if (riscv_enable_virtual && has_sufficient_progbuf(target, 5)) {
		/* Read DCSR */
		uint64_t dcsr;
		if (register_read(target, &dcsr, GDB_REGNO_DCSR) != ERROR_OK)
			return ERROR_FAIL;

		/* Read and save MSTATUS */
		if (register_read(target, mstatus, GDB_REGNO_MSTATUS) != ERROR_OK)
			return ERROR_FAIL;
		*mstatus_old = *mstatus;

		/* If we come from m-mode with mprv set, we want to keep mpp */
		if (get_field(dcsr, DCSR_PRV) < 3) {
			/* MPP = PRIV */
			*mstatus = set_field(*mstatus, MSTATUS_MPP, get_field(dcsr, DCSR_PRV));

			/* MPRV = 1 */
			*mstatus = set_field(*mstatus, MSTATUS_MPRV, 1);

			/* Write MSTATUS only if it actually changed. */
			if (*mstatus != *mstatus_old)
				if (register_write_direct(target, GDB_REGNO_MSTATUS, *mstatus) != ERROR_OK)
					return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}
/* Read memory through the legacy (pre-ratification, sbversion==0) system bus
 * interface.  Single words use sbsingleread; larger blocks enable
 * sbautoread + sbautoincrement so each sbdata0 read fetches the next word.
 * Only size == increment is supported. */
static int read_memory_bus_v0(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	if (size != increment) {
		LOG_ERROR("sba v0 reads only support size==increment");
		return ERROR_NOT_IMPLEMENTED;
	}

	LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
			TARGET_PRIxADDR, size, count, address);
	uint8_t *t_buffer = buffer;
	riscv_addr_t cur_addr = address;
	riscv_addr_t fin_addr = address + (count * size);
	uint32_t access = 0;

	/* These sbcs bits only exist in the v0 layout, so they're defined
	 * locally rather than in debug_defines.h. */
	const int DM_SBCS_SBSINGLEREAD_OFFSET = 20;
	const uint32_t DM_SBCS_SBSINGLEREAD = (0x1U << DM_SBCS_SBSINGLEREAD_OFFSET);

	const int DM_SBCS_SBAUTOREAD_OFFSET = 15;
	const uint32_t DM_SBCS_SBAUTOREAD = (0x1U << DM_SBCS_SBAUTOREAD_OFFSET);

	/* ww favorise one off reading if there is an issue */
	if (count == 1) {
		for (uint32_t i = 0; i < count; i++) {
			if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
				return ERROR_FAIL;
			dmi_write(target, DM_SBADDRESS0, cur_addr);
			/* size/2 matching the bit access of the spec 0.13 */
			access = set_field(access, DM_SBCS_SBACCESS, size/2);
			access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
			LOG_DEBUG("\r\nread_memory: sab: access: 0x%08x", access);
			dmi_write(target, DM_SBCS, access);
			/* 3) read */
			uint32_t value;
			if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("\r\nread_memory: sab: value: 0x%08x", value);
			buf_set_u32(t_buffer, 0, 8 * size, value);
			t_buffer += size;
			cur_addr += size;
		}
		return ERROR_OK;
	}

	/* has to be the same size if we want to read a block */
	LOG_DEBUG("reading block until final address 0x%" PRIx64, fin_addr);
	if (dmi_read(target, &access, DM_SBCS) != ERROR_OK)
		return ERROR_FAIL;
	/* set current address */
	dmi_write(target, DM_SBADDRESS0, cur_addr);
	/* 2) write sbaccess=2, sbsingleread,sbautoread,sbautoincrement
	 * size/2 matching the bit access of the spec 0.13 */
	access = set_field(access, DM_SBCS_SBACCESS, size/2);
	access = set_field(access, DM_SBCS_SBAUTOREAD, 1);
	access = set_field(access, DM_SBCS_SBSINGLEREAD, 1);
	access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
	LOG_DEBUG("\r\naccess: 0x%08x", access);
	dmi_write(target, DM_SBCS, access);

	while (cur_addr < fin_addr) {
		LOG_DEBUG("\r\nsab:autoincrement: \r\n size: %d\tcount:%d\taddress: 0x%08"
				PRIx64, size, count, cur_addr);
		/* Each sbdata0 read returns one word and kicks off the next. */
		uint32_t value;
		if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
			return ERROR_FAIL;
		buf_set_u32(t_buffer, 0, 8 * size, value);
		cur_addr += size;
		t_buffer += size;

		/* if we are reaching last address, we must clear autoread */
		if (cur_addr == fin_addr && count != 1) {
			dmi_write(target, DM_SBCS, 0);
			if (dmi_read(target, &value, DM_SBDATA0) != ERROR_OK)
				return ERROR_FAIL;
			buf_set_u32(t_buffer, 0, 8 * size, value);
		}
	}
	return ERROR_OK;
}
/**
 * Read the requested memory using the system bus interface.
 *
 * Strategy: program sbcs with sbreadonaddr (+sbautoincrement when
 * increment == size, +sbreadondata when count > 1), then write the start
 * address, which triggers the first bus read.  Values are streamed out of
 * the sbdata registers; with sbreadondata set, each sbdata0 read triggers
 * the next bus access, so the value returned by a DMI read belongs to the
 * *previous* trigger — 'next_read' tracks that one-step lag (address - 1 is
 * the "nothing pending yet" sentinel).  On sbbusyerror the loop slows down
 * and restarts from the address the DM stopped at.
 */
static int read_memory_bus_v1(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	if (increment != size && increment != 0) {
		LOG_ERROR("sba v1 reads only support increment of size or 0");
		return ERROR_NOT_IMPLEMENTED;
	}

	RISCV013_INFO(info);
	target_addr_t next_address = address;
	target_addr_t end_address = address + count * size;

	while (next_address < end_address) {
		uint32_t sbcs_write = set_field(0, DM_SBCS_SBREADONADDR, 1);
		sbcs_write |= sb_sbaccess(size);
		if (increment == size)
			sbcs_write = set_field(sbcs_write, DM_SBCS_SBAUTOINCREMENT, 1);
		if (count > 1)
			sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, count > 1);
		if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
			return ERROR_FAIL;

		/* This address write will trigger the first read. */
		if (sb_write_address(target, next_address) != ERROR_OK)
			return ERROR_FAIL;

		/* Optionally give the bus master time to complete each access. */
		if (info->bus_master_read_delay) {
			jtag_add_runtest(info->bus_master_read_delay, TAP_IDLE);
			if (jtag_execute_queue() != ERROR_OK) {
				LOG_ERROR("Failed to scan idle sequence");
				return ERROR_FAIL;
			}
		}

		/* First value has been read, and is waiting for us to issue a DMI read
		 * to get it. */
		static int sbdata[4] = {DM_SBDATA0, DM_SBDATA1, DM_SBDATA2, DM_SBDATA3};
		assert(size <= 16);
		/* address - 1 is a sentinel: no completed word pending yet. */
		target_addr_t next_read = address - 1;
		for (uint32_t i = (next_address - address) / size; i < count - 1; i++) {
			for (int j = (size - 1) / 4; j >= 0; j--) {
				uint32_t value;
				unsigned attempt = 0;
				while (1) {
					if (attempt++ > 100) {
						LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
								next_read);
						return ERROR_FAIL;
					}
					dmi_status_t status = dmi_scan(target, NULL, &value,
							DMI_OP_READ, sbdata[j], 0, false);
					if (status == DMI_STATUS_BUSY)
						increase_dmi_busy_delay(target);
					else if (status == DMI_STATUS_SUCCESS)
						break;
					else
						return ERROR_FAIL;
				}
				/* The value just scanned out corresponds to the previous
				 * DMI read request (next_read), not this one. */
				if (next_read != address - 1) {
					buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
					log_memory_access(next_read, value, MIN(size, 4), true);
				}
				next_read = address + i * size + j * 4;
			}
		}

		uint32_t sbcs_read = 0;
		if (count > 1) {
			/* Flush out the final pipelined value with a NOP scan. */
			uint32_t value;
			unsigned attempt = 0;
			while (1) {
				if (attempt++ > 100) {
					LOG_ERROR("DMI keeps being busy in while reading memory just past " TARGET_ADDR_FMT,
							next_read);
					return ERROR_FAIL;
				}
				dmi_status_t status = dmi_scan(target, NULL, &value, DMI_OP_NOP, 0, 0, false);
				if (status == DMI_STATUS_BUSY)
					increase_dmi_busy_delay(target);
				else if (status == DMI_STATUS_SUCCESS)
					break;
				else
					return ERROR_FAIL;
			}
			buf_set_u32(buffer + next_read - address, 0, 8 * MIN(size, 4), value);
			log_memory_access(next_read, value, MIN(size, 4), true);

			/* "Writes to sbcs while sbbusy is high result in undefined behavior.
			 * A debugger must not write to sbcs until it reads sbbusy as 0." */
			if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
				return ERROR_FAIL;

			sbcs_write = set_field(sbcs_write, DM_SBCS_SBREADONDATA, 0);
			if (dmi_write(target, DM_SBCS, sbcs_write) != ERROR_OK)
				return ERROR_FAIL;
		}

		/* Read the last word, after we disabled sbreadondata if necessary. */
		if (!get_field(sbcs_read, DM_SBCS_SBERROR) &&
				!get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
			if (read_memory_bus_word(target, address + (count - 1) * size, size,
					buffer + (count - 1) * size) != ERROR_OK)
				return ERROR_FAIL;

			if (read_sbcs_nonbusy(target, &sbcs_read) != ERROR_OK)
				return ERROR_FAIL;
		}

		if (get_field(sbcs_read, DM_SBCS_SBBUSYERROR)) {
			/* We read while the target was busy. Slow down and try again. */
			if (dmi_write(target, DM_SBCS, DM_SBCS_SBBUSYERROR) != ERROR_OK)
				return ERROR_FAIL;
			/* Resume from wherever the DM got to before the error. */
			next_address = sb_read_address(target);
			info->bus_master_read_delay += info->bus_master_read_delay / 10 + 1;
			continue;
		}

		unsigned error = get_field(sbcs_read, DM_SBCS_SBERROR);
		if (error == 0) {
			next_address = end_address;
		} else {
			/* Some error indicating the bus access failed, but not because of
			 * something we did wrong. */
			if (dmi_write(target, DM_SBCS, DM_SBCS_SBERROR) != ERROR_OK)
				return ERROR_FAIL;
			return ERROR_FAIL;
		}
	}
	return ERROR_OK;
}
  2245. static int batch_run(const struct target *target, struct riscv_batch *batch)
  2246. {
  2247. RISCV013_INFO(info);
  2248. RISCV_INFO(r);
  2249. if (r->reset_delays_wait >= 0) {
  2250. r->reset_delays_wait -= batch->used_scans;
  2251. if (r->reset_delays_wait <= 0) {
  2252. batch->idle_count = 0;
  2253. info->dmi_busy_delay = 0;
  2254. info->ac_busy_delay = 0;
  2255. }
  2256. }
  2257. return riscv_batch_run(batch);
  2258. }
  2259. /*
  2260. * Performs a memory read using memory access abstract commands. The read sizes
  2261. * supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16 byte
  2262. * aamsize fields in the memory access abstract command.
  2263. */
  2264. static int read_memory_abstract(struct target *target, target_addr_t address,
  2265. uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
  2266. {
  2267. if (size != increment) {
  2268. LOG_ERROR("abstract command reads only support size==increment");
  2269. return ERROR_NOT_IMPLEMENTED;
  2270. }
  2271. int result = ERROR_OK;
  2272. LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
  2273. size, address);
  2274. memset(buffer, 0, count * size);
  2275. /* Convert the size (bytes) to width (bits) */
  2276. unsigned width = size << 3;
  2277. if (width > 64) {
  2278. /* TODO: Add 128b support if it's ever used. Involves modifying
  2279. read/write_abstract_arg() to work on two 64b values. */
  2280. LOG_ERROR("Unsupported size: %d bits", size);
  2281. return ERROR_FAIL;
  2282. }
  2283. /* Create the command (physical address, postincrement, read) */
  2284. uint32_t command = access_memory_command(target, false, width, true, false);
  2285. /* Execute the reads */
  2286. uint8_t *p = buffer;
  2287. bool updateaddr = true;
  2288. unsigned width32 = (width + 31) / 32 * 32;
  2289. for (uint32_t c = 0; c < count; c++) {
  2290. /* Only update the address initially and let postincrement update it */
  2291. if (updateaddr) {
  2292. /* Set arg1 to the address: address + c * size */
  2293. result = write_abstract_arg(target, 1, address, riscv_xlen(target));
  2294. if (result != ERROR_OK) {
  2295. LOG_ERROR("Failed to write arg1 during read_memory_abstract().");
  2296. return result;
  2297. }
  2298. }
  2299. /* Execute the command */
  2300. result = execute_abstract_command(target, command);
  2301. if (result != ERROR_OK) {
  2302. LOG_ERROR("Failed to execute command read_memory_abstract().");
  2303. return result;
  2304. }
  2305. /* Copy arg0 to buffer (rounded width up to nearest 32) */
  2306. riscv_reg_t value = read_abstract_arg(target, 0, width32);
  2307. buf_set_u64(p, 0, 8 * size, value);
  2308. updateaddr = false;
  2309. p += size;
  2310. }
  2311. return result;
  2312. }
/*
 * Performs a memory write using memory access abstract commands. The write
 * sizes supported are 1, 2, and 4 bytes despite the spec's support of 8 and 16
 * byte aamsize fields in the memory access abstract command.
 *
 * The address is written to arg1 once; aampostincrement advances it for each
 * subsequent access.  Data for each word goes through arg0.
 */
static int write_memory_abstract(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	int result = ERROR_OK;

	LOG_DEBUG("writing %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
			size, address);

	/* Convert the size (bytes) to width (bits) */
	unsigned width = size << 3;
	if (width > 64) {
		/* TODO: Add 128b support if it's ever used. Involves modifying
				 read/write_abstract_arg() to work on two 64b values. */
		LOG_ERROR("Unsupported size: %d bits", width);
		return ERROR_FAIL;
	}

	/* Create the command (physical address, postincrement, write) */
	uint32_t command = access_memory_command(target, false, width, true, true);

	/* Execute the writes */
	const uint8_t *p = buffer;
	bool updateaddr = true;
	for (uint32_t c = 0; c < count; c++) {
		/* Move data to arg0 */
		riscv_reg_t value = buf_get_u64(p, 0, 8 * size);
		result = write_abstract_arg(target, 0, value, riscv_xlen(target));
		if (result != ERROR_OK) {
			LOG_ERROR("Failed to write arg0 during write_memory_abstract().");
			return result;
		}

		/* Only update the address initially and let postincrement update it */
		if (updateaddr) {
			/* Set arg1 to the address: address + c * size */
			result = write_abstract_arg(target, 1, address, riscv_xlen(target));
			if (result != ERROR_OK) {
				LOG_ERROR("Failed to write arg1 during write_memory_abstract().");
				return result;
			}
		}

		/* Execute the command */
		result = execute_abstract_command(target, command);
		if (result != ERROR_OK) {
			LOG_ERROR("Failed to execute command write_memory_abstract().");
			return result;
		}

		updateaddr = false;
		p += size;
	}

	return result;
}
  2365. /**
  2366. * Read the requested memory, taking care to execute every read exactly once,
  2367. * even if cmderr=busy is encountered.
  2368. */
static int read_memory_progbuf_inner(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
{
	RISCV013_INFO(info);

	int result = ERROR_OK;

	/* Write address to S0. */
	result = register_write_direct(target, GDB_REGNO_S0, address);
	if (result != ERROR_OK)
		return result;

	/* With increment==0 the progbuf program counts iterations in S2 instead
	 * of advancing S0, so start the counter at zero. */
	if (increment == 0 &&
			register_write_direct(target, GDB_REGNO_S2, 0) != ERROR_OK)
		return ERROR_FAIL;

	/* Transfer S1 to data0 and then run the program buffer (which performs
	 * the next load). */
	uint32_t command = access_register_command(target, GDB_REGNO_S1,
			riscv_xlen(target),
			AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
	if (execute_abstract_command(target, command) != ERROR_OK)
		return ERROR_FAIL;

	/* First read has just triggered. Result is in s1. */
	if (count == 1) {
		uint64_t value;
		if (register_read_direct(target, &value, GDB_REGNO_S1) != ERROR_OK)
			return ERROR_FAIL;
		buf_set_u64(buffer, 0, 8 * size, value);
		log_memory_access(address, value, size, true);
		return ERROR_OK;
	}

	/* Turn on autoexec: every read of data0 re-runs the abstract command,
	 * advancing the pipeline one stage. */
	if (dmi_write(target, DM_ABSTRACTAUTO,
			1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET) != ERROR_OK)
		goto error;

	/* Read garbage from dmi_data0, which triggers another execution of the
	 * program. Now dmi_data0 contains the first good result, and s1 the next
	 * memory value. */
	if (dmi_read_exec(target, NULL, DM_DATA0) != ERROR_OK)
		goto error;

	/* read_addr is the next address that the hart will read from, which is the
	 * value in s0. */
	unsigned index = 2;
	while (index < count) {
		riscv_addr_t read_addr = address + index * increment;
		LOG_DEBUG("i=%d, count=%d, read_addr=0x%" PRIx64, index, count, read_addr);
		/* The pipeline looks like this:
		 * memory -> s1 -> dm_data0 -> debugger
		 * Right now:
		 * s0 contains read_addr
		 * s1 contains mem[read_addr-size]
		 * dm_data0 contains[read_addr-size*2]
		 */

		struct riscv_batch *batch = riscv_batch_alloc(target, 32,
				info->dmi_busy_delay + info->ac_busy_delay);
		if (!batch)
			return ERROR_FAIL;

		/* Queue as many data0 (and, for >32-bit reads, data1) reads as fit
		 * in one batch; each data0 read autoexecs the next load. */
		unsigned reads = 0;
		for (unsigned j = index; j < count; j++) {
			if (size > 4)
				riscv_batch_add_dmi_read(batch, DM_DATA1);
			riscv_batch_add_dmi_read(batch, DM_DATA0);

			reads++;
			if (riscv_batch_full(batch))
				break;
		}

		batch_run(target, batch);

		/* Wait for the target to finish performing the last abstract command,
		 * and update our copy of cmderr. If we see that DMI is busy here,
		 * dmi_busy_delay will be incremented. */
		uint32_t abstractcs;
		if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
			return ERROR_FAIL;
		while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
			if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
				return ERROR_FAIL;
		info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);

		/* next_index: first element that was NOT successfully read by this
		 * batch. ignore_last: set when the busy-recovery path below already
		 * consumed one value out of sequence. */
		unsigned next_index;
		unsigned ignore_last = 0;
		switch (info->cmderr) {
			case CMDERR_NONE:
				LOG_DEBUG("successful (partial?) memory read");
				next_index = index + reads;
				break;
			case CMDERR_BUSY:
				/* The target couldn't keep up; slow down and recover the one
				 * value that is known-good, then restart the pipeline. */
				LOG_DEBUG("memory read resulted in busy response");
				increase_ac_busy_delay(target);
				riscv013_clear_abstract_error(target);

				dmi_write(target, DM_ABSTRACTAUTO, 0);

				uint32_t dmi_data0, dmi_data1 = 0;
				/* This is definitely a good version of the value that we
				 * attempted to read when we discovered that the target was
				 * busy. */
				if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK) {
					riscv_batch_free(batch);
					goto error;
				}
				if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK) {
					riscv_batch_free(batch);
					goto error;
				}

				/* See how far we got, clobbering dmi_data0. */
				if (increment == 0) {
					uint64_t counter;
					result = register_read_direct(target, &counter, GDB_REGNO_S2);
					next_index = counter;
				} else {
					uint64_t next_read_addr;
					result = register_read_direct(target, &next_read_addr,
							GDB_REGNO_S0);
					next_index = (next_read_addr - address) / increment;
				}
				if (result != ERROR_OK) {
					riscv_batch_free(batch);
					goto error;
				}

				/* The recovered value is two pipeline stages behind S0. */
				uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
				buf_set_u64(buffer + (next_index - 2) * size, 0, 8 * size, value64);
				log_memory_access(address + (next_index - 2) * size, value64, size, true);

				/* Restore the command, and execute it.
				 * Now DM_DATA0 contains the next value just as it would if no
				 * error had occurred. */
				dmi_write_exec(target, DM_COMMAND, command, true);
				next_index++;

				dmi_write(target, DM_ABSTRACTAUTO,
						1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);

				ignore_last = 1;

				break;
			default:
				LOG_DEBUG("error when reading memory, abstractcs=0x%08lx", (long)abstractcs);
				riscv013_clear_abstract_error(target);
				riscv_batch_free(batch);
				result = ERROR_FAIL;
				goto error;
		}

		/* Now read whatever we got out of the batch. Values scanned out lag
		 * the read index by the two pipeline stages (hence the j-2 start and
		 * the j + 3 + ignore_last bound against next_index — TODO confirm
		 * exact off-by constants against the pipeline comment above). */
		dmi_status_t status = DMI_STATUS_SUCCESS;
		unsigned read = 0;
		assert(index >= 2);
		for (unsigned j = index - 2; j < index + reads; j++) {
			assert(j < count);
			LOG_DEBUG("index=%d, reads=%d, next_index=%d, ignore_last=%d, j=%d",
					index, reads, next_index, ignore_last, j);
			if (j + 3 + ignore_last > next_index)
				break;

			status = riscv_batch_get_dmi_read_op(batch, read);
			uint64_t value = riscv_batch_get_dmi_read_data(batch, read);
			read++;
			if (status != DMI_STATUS_SUCCESS) {
				/* If we're here because of busy count, dmi_busy_delay will
				 * already have been increased and busy state will have been
				 * cleared in dmi_read(). */
				/* In at least some implementations, we issue a read, and then
				 * can get busy back when we try to scan out the read result,
				 * and the actual read value is lost forever. Since this is
				 * rare in any case, we return error here and rely on our
				 * caller to reread the entire block. */
				LOG_WARNING("Batch memory read encountered DMI error %d. "
						"Falling back on slower reads.", status);
				riscv_batch_free(batch);
				result = ERROR_FAIL;
				goto error;
			}
			if (size > 4) {
				/* For >32-bit reads, data1 was queued first; combine halves. */
				status = riscv_batch_get_dmi_read_op(batch, read);
				if (status != DMI_STATUS_SUCCESS) {
					LOG_WARNING("Batch memory read encountered DMI error %d. "
							"Falling back on slower reads.", status);
					riscv_batch_free(batch);
					result = ERROR_FAIL;
					goto error;
				}
				value <<= 32;
				value |= riscv_batch_get_dmi_read_data(batch, read);
				read++;
			}
			riscv_addr_t offset = j * size;
			buf_set_u64(buffer + offset, 0, 8 * size, value);
			log_memory_access(address + j * increment, value, size, true);
		}

		index = next_index;

		riscv_batch_free(batch);
	}

	dmi_write(target, DM_ABSTRACTAUTO, 0);

	if (count > 1) {
		/* Read the penultimate word. */
		uint32_t dmi_data0, dmi_data1 = 0;
		if (dmi_read(target, &dmi_data0, DM_DATA0) != ERROR_OK)
			return ERROR_FAIL;
		if (size > 4 && dmi_read(target, &dmi_data1, DM_DATA1) != ERROR_OK)
			return ERROR_FAIL;
		uint64_t value64 = (((uint64_t)dmi_data1) << 32) | dmi_data0;
		buf_set_u64(buffer + size * (count - 2), 0, 8 * size, value64);
		log_memory_access(address + size * (count - 2), value64, size, true);
	}

	/* Read the last word. */
	uint64_t value;
	result = register_read_direct(target, &value, GDB_REGNO_S1);
	if (result != ERROR_OK)
		goto error;
	buf_set_u64(buffer + size * (count - 1), 0, 8 * size, value);
	log_memory_access(address + size * (count - 1), value, size, true);
	return ERROR_OK;

error:
	dmi_write(target, DM_ABSTRACTAUTO, 0);

	return result;
}
  2570. /* Only need to save/restore one GPR to read a single word, and the progbuf
  2571. * program doesn't need to increment. */
  2572. static int read_memory_progbuf_one(struct target *target, target_addr_t address,
  2573. uint32_t size, uint8_t *buffer)
  2574. {
  2575. uint64_t mstatus = 0;
  2576. uint64_t mstatus_old = 0;
  2577. if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
  2578. return ERROR_FAIL;
  2579. uint64_t s0;
  2580. if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
  2581. return ERROR_FAIL;
  2582. /* Write the program (load, increment) */
  2583. struct riscv_program program;
  2584. riscv_program_init(&program, target);
  2585. if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
  2586. riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
  2587. switch (size) {
  2588. case 1:
  2589. riscv_program_lbr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
  2590. break;
  2591. case 2:
  2592. riscv_program_lhr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
  2593. break;
  2594. case 4:
  2595. riscv_program_lwr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
  2596. break;
  2597. case 8:
  2598. riscv_program_ldr(&program, GDB_REGNO_S0, GDB_REGNO_S0, 0);
  2599. break;
  2600. default:
  2601. LOG_ERROR("Unsupported size: %d", size);
  2602. return ERROR_FAIL;
  2603. }
  2604. if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
  2605. riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
  2606. if (riscv_program_ebreak(&program) != ERROR_OK)
  2607. return ERROR_FAIL;
  2608. if (riscv_program_write(&program) != ERROR_OK)
  2609. return ERROR_FAIL;
  2610. /* Write address to S0, and execute buffer. */
  2611. if (write_abstract_arg(target, 0, address, riscv_xlen(target)) != ERROR_OK)
  2612. return ERROR_FAIL;
  2613. uint32_t command = access_register_command(target, GDB_REGNO_S0,
  2614. riscv_xlen(target), AC_ACCESS_REGISTER_WRITE |
  2615. AC_ACCESS_REGISTER_TRANSFER | AC_ACCESS_REGISTER_POSTEXEC);
  2616. if (execute_abstract_command(target, command) != ERROR_OK)
  2617. return ERROR_FAIL;
  2618. uint64_t value;
  2619. if (register_read(target, &value, GDB_REGNO_S0) != ERROR_OK)
  2620. return ERROR_FAIL;
  2621. buf_set_u64(buffer, 0, 8 * size, value);
  2622. log_memory_access(address, value, size, true);
  2623. if (riscv_set_register(target, GDB_REGNO_S0, s0) != ERROR_OK)
  2624. return ERROR_FAIL;
  2625. /* Restore MSTATUS */
  2626. if (mstatus != mstatus_old)
  2627. if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
  2628. return ERROR_FAIL;
  2629. return ERROR_OK;
  2630. }
  2631. /**
  2632. * Read the requested memory, silently handling memory access errors.
  2633. */
  2634. static int read_memory_progbuf(struct target *target, target_addr_t address,
  2635. uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
  2636. {
  2637. if (riscv_xlen(target) < size * 8) {
  2638. LOG_ERROR("XLEN (%d) is too short for %d-bit memory read.",
  2639. riscv_xlen(target), size * 8);
  2640. return ERROR_FAIL;
  2641. }
  2642. int result = ERROR_OK;
  2643. LOG_DEBUG("reading %d words of %d bytes from 0x%" TARGET_PRIxADDR, count,
  2644. size, address);
  2645. select_dmi(target);
  2646. memset(buffer, 0, count*size);
  2647. if (execute_fence(target) != ERROR_OK)
  2648. return ERROR_FAIL;
  2649. if (count == 1)
  2650. return read_memory_progbuf_one(target, address, size, buffer);
  2651. uint64_t mstatus = 0;
  2652. uint64_t mstatus_old = 0;
  2653. if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
  2654. return ERROR_FAIL;
  2655. /* s0 holds the next address to write to
  2656. * s1 holds the next data value to write
  2657. * s2 is a counter in case increment is 0
  2658. */
  2659. uint64_t s0, s1, s2;
  2660. if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
  2661. return ERROR_FAIL;
  2662. if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
  2663. return ERROR_FAIL;
  2664. if (increment == 0 && register_read(target, &s2, GDB_REGNO_S1) != ERROR_OK)
  2665. return ERROR_FAIL;
  2666. /* Write the program (load, increment) */
  2667. struct riscv_program program;
  2668. riscv_program_init(&program, target);
  2669. if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
  2670. riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
  2671. switch (size) {
  2672. case 1:
  2673. riscv_program_lbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
  2674. break;
  2675. case 2:
  2676. riscv_program_lhr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
  2677. break;
  2678. case 4:
  2679. riscv_program_lwr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
  2680. break;
  2681. case 8:
  2682. riscv_program_ldr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
  2683. break;
  2684. default:
  2685. LOG_ERROR("Unsupported size: %d", size);
  2686. return ERROR_FAIL;
  2687. }
  2688. if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
  2689. riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
  2690. if (increment == 0)
  2691. riscv_program_addi(&program, GDB_REGNO_S2, GDB_REGNO_S2, 1);
  2692. else
  2693. riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, increment);
  2694. if (riscv_program_ebreak(&program) != ERROR_OK)
  2695. return ERROR_FAIL;
  2696. if (riscv_program_write(&program) != ERROR_OK)
  2697. return ERROR_FAIL;
  2698. result = read_memory_progbuf_inner(target, address, size, count, buffer, increment);
  2699. if (result != ERROR_OK) {
  2700. /* The full read did not succeed, so we will try to read each word individually. */
  2701. /* This will not be fast, but reading outside actual memory is a special case anyway. */
  2702. /* It will make the toolchain happier, especially Eclipse Memory View as it reads ahead. */
  2703. target_addr_t address_i = address;
  2704. uint32_t count_i = 1;
  2705. uint8_t *buffer_i = buffer;
  2706. for (uint32_t i = 0; i < count; i++, address_i += increment, buffer_i += size) {
  2707. keep_alive();
  2708. /* TODO: This is much slower than it needs to be because we end up
  2709. * writing the address to read for every word we read. */
  2710. result = read_memory_progbuf_inner(target, address_i, size, count_i, buffer_i, increment);
  2711. /* The read of a single word failed, so we will just return 0 for that instead */
  2712. if (result != ERROR_OK) {
  2713. LOG_DEBUG("error reading single word of %d bytes from 0x%" TARGET_PRIxADDR,
  2714. size, address_i);
  2715. buf_set_u64(buffer_i, 0, 8 * size, 0);
  2716. }
  2717. }
  2718. result = ERROR_OK;
  2719. }
  2720. riscv_set_register(target, GDB_REGNO_S0, s0);
  2721. riscv_set_register(target, GDB_REGNO_S1, s1);
  2722. if (increment == 0)
  2723. riscv_set_register(target, GDB_REGNO_S2, s2);
  2724. /* Restore MSTATUS */
  2725. if (mstatus != mstatus_old)
  2726. if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
  2727. return ERROR_FAIL;
  2728. return result;
  2729. }
  2730. static int read_memory(struct target *target, target_addr_t address,
  2731. uint32_t size, uint32_t count, uint8_t *buffer, uint32_t increment)
  2732. {
  2733. if (count == 0)
  2734. return ERROR_OK;
  2735. RISCV013_INFO(info);
  2736. if (has_sufficient_progbuf(target, 3) && !riscv_prefer_sba)
  2737. return read_memory_progbuf(target, address, size, count, buffer,
  2738. increment);
  2739. if ((get_field(info->sbcs, DM_SBCS_SBACCESS8) && size == 1) ||
  2740. (get_field(info->sbcs, DM_SBCS_SBACCESS16) && size == 2) ||
  2741. (get_field(info->sbcs, DM_SBCS_SBACCESS32) && size == 4) ||
  2742. (get_field(info->sbcs, DM_SBCS_SBACCESS64) && size == 8) ||
  2743. (get_field(info->sbcs, DM_SBCS_SBACCESS128) && size == 16)) {
  2744. if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
  2745. return read_memory_bus_v0(target, address, size, count, buffer,
  2746. increment);
  2747. else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
  2748. return read_memory_bus_v1(target, address, size, count, buffer,
  2749. increment);
  2750. }
  2751. if (has_sufficient_progbuf(target, 3))
  2752. return read_memory_progbuf(target, address, size, count, buffer,
  2753. increment);
  2754. return read_memory_abstract(target, address, size, count, buffer,
  2755. increment);
  2756. }
  2757. static int write_memory_bus_v0(struct target *target, target_addr_t address,
  2758. uint32_t size, uint32_t count, const uint8_t *buffer)
  2759. {
  2760. /*1) write sbaddress: for singlewrite and autoincrement, we need to write the address once*/
  2761. LOG_DEBUG("System Bus Access: size: %d\tcount:%d\tstart address: 0x%08"
  2762. TARGET_PRIxADDR, size, count, address);
  2763. dmi_write(target, DM_SBADDRESS0, address);
  2764. int64_t value = 0;
  2765. int64_t access = 0;
  2766. riscv_addr_t offset = 0;
  2767. riscv_addr_t t_addr = 0;
  2768. const uint8_t *t_buffer = buffer + offset;
  2769. /* B.8 Writing Memory, single write check if we write in one go */
  2770. if (count == 1) { /* count is in bytes here */
  2771. value = buf_get_u64(t_buffer, 0, 8 * size);
  2772. access = 0;
  2773. access = set_field(access, DM_SBCS_SBACCESS, size/2);
  2774. dmi_write(target, DM_SBCS, access);
  2775. LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
  2776. LOG_DEBUG("\r\nwrite_memory:SAB: ONE OFF: value 0x%08" PRIx64, value);
  2777. dmi_write(target, DM_SBDATA0, value);
  2778. return ERROR_OK;
  2779. }
  2780. /*B.8 Writing Memory, using autoincrement*/
  2781. access = 0;
  2782. access = set_field(access, DM_SBCS_SBACCESS, size/2);
  2783. access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 1);
  2784. LOG_DEBUG("\r\naccess: 0x%08" PRIx64, access);
  2785. dmi_write(target, DM_SBCS, access);
  2786. /*2)set the value according to the size required and write*/
  2787. for (riscv_addr_t i = 0; i < count; ++i) {
  2788. offset = size*i;
  2789. /* for monitoring only */
  2790. t_addr = address + offset;
  2791. t_buffer = buffer + offset;
  2792. value = buf_get_u64(t_buffer, 0, 8 * size);
  2793. LOG_DEBUG("SAB:autoincrement: expected address: 0x%08x value: 0x%08x"
  2794. PRIx64, (uint32_t)t_addr, (uint32_t)value);
  2795. dmi_write(target, DM_SBDATA0, value);
  2796. }
  2797. /*reset the autoincrement when finished (something weird is happening if this is not done at the end*/
  2798. access = set_field(access, DM_SBCS_SBAUTOINCREMENT, 0);
  2799. dmi_write(target, DM_SBCS, access);
  2800. return ERROR_OK;
  2801. }
static int write_memory_bus_v1(struct target *target, target_addr_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	RISCV013_INFO(info);
	/* Configure the system bus for this access width, with autoincrement so
	 * the address only has to be programmed at the start of each burst. */
	uint32_t sbcs = sb_sbaccess(size);
	sbcs = set_field(sbcs, DM_SBCS_SBAUTOINCREMENT, 1);
	dmi_write(target, DM_SBCS, sbcs);

	target_addr_t next_address = address;
	target_addr_t end_address = address + count * size;
	int result;

	sb_write_address(target, next_address);
	while (next_address < end_address) {
		LOG_DEBUG("transferring burst starting at address 0x%" TARGET_PRIxADDR,
				next_address);

		struct riscv_batch *batch = riscv_batch_alloc(
				target,
				32,
				info->dmi_busy_delay + info->bus_master_write_delay);
		if (!batch)
			return ERROR_FAIL;

		/* Queue sbdata writes for as many elements as fit in the batch.
		 * For each element the upper data registers are written first;
		 * DM_SBDATA0 is written last (per the debug spec, that is the write
		 * that triggers the bus transfer). */
		for (uint32_t i = (next_address - address) / size; i < count; i++) {
			const uint8_t *p = buffer + i * size;

			if (riscv_batch_available_scans(batch) < (size + 3) / 4)
				break;

			if (size > 12)
				riscv_batch_add_dmi_write(batch, DM_SBDATA3,
						((uint32_t) p[12]) |
						(((uint32_t) p[13]) << 8) |
						(((uint32_t) p[14]) << 16) |
						(((uint32_t) p[15]) << 24));

			if (size > 8)
				riscv_batch_add_dmi_write(batch, DM_SBDATA2,
						((uint32_t) p[8]) |
						(((uint32_t) p[9]) << 8) |
						(((uint32_t) p[10]) << 16) |
						(((uint32_t) p[11]) << 24));
			if (size > 4)
				riscv_batch_add_dmi_write(batch, DM_SBDATA1,
						((uint32_t) p[4]) |
						(((uint32_t) p[5]) << 8) |
						(((uint32_t) p[6]) << 16) |
						(((uint32_t) p[7]) << 24));
			/* Assemble the low 32 bits little-endian from the buffer. */
			uint32_t value = p[0];
			if (size > 2) {
				value |= ((uint32_t) p[2]) << 16;
				value |= ((uint32_t) p[3]) << 24;
			}
			if (size > 1)
				value |= ((uint32_t) p[1]) << 8;
			riscv_batch_add_dmi_write(batch, DM_SBDATA0, value);

			log_memory_access(address + i * size, value, size, false);
			next_address += size;
		}

		result = batch_run(target, batch);
		riscv_batch_free(batch);
		if (result != ERROR_OK)
			return result;

		/* Read sbcs to learn how the burst went. A DMI-busy here means some
		 * queued writes may not have taken effect. */
		bool dmi_busy_encountered;
		if (dmi_op(target, &sbcs, &dmi_busy_encountered, DMI_OP_READ,
				DM_SBCS, 0, false, false) != ERROR_OK)
			return ERROR_FAIL;

		/* Wait (with timeout) for the bus master to finish. */
		time_t start = time(NULL);
		bool dmi_busy = dmi_busy_encountered;
		while (get_field(sbcs, DM_SBCS_SBBUSY) || dmi_busy) {
			if (time(NULL) - start > riscv_command_timeout_sec) {
				LOG_ERROR("Timed out after %ds waiting for sbbusy to go low (sbcs=0x%x). "
						"Increase the timeout with riscv set_command_timeout_sec.",
						riscv_command_timeout_sec, sbcs);
				return ERROR_FAIL;
			}

			if (dmi_op(target, &sbcs, &dmi_busy, DMI_OP_READ,
					DM_SBCS, 0, false, true) != ERROR_OK)
				return ERROR_FAIL;
		}

		if (get_field(sbcs, DM_SBCS_SBBUSYERROR)) {
			/* We wrote while the target was busy. Slow down and try again. */
			dmi_write(target, DM_SBCS, DM_SBCS_SBBUSYERROR);
			info->bus_master_write_delay += info->bus_master_write_delay / 10 + 1;
		}

		if (get_field(sbcs, DM_SBCS_SBBUSYERROR) || dmi_busy_encountered) {
			/* Some writes were dropped; ask the hardware how far it got and
			 * retry the burst from there. */
			next_address = sb_read_address(target);
			if (next_address < address) {
				/* This should never happen, probably buggy hardware. */
				LOG_DEBUG("unexpected system bus address 0x%" TARGET_PRIxADDR,
						next_address);
				return ERROR_FAIL;
			}

			continue;
		}

		unsigned error = get_field(sbcs, DM_SBCS_SBERROR);
		if (error != 0) {
			/* Some error indicating the bus access failed, but not because of
			 * something we did wrong. */
			dmi_write(target, DM_SBCS, DM_SBCS_SBERROR);
			return ERROR_FAIL;
		}
	}

	return ERROR_OK;
}
  2901. static int write_memory_progbuf(struct target *target, target_addr_t address,
  2902. uint32_t size, uint32_t count, const uint8_t *buffer)
  2903. {
  2904. RISCV013_INFO(info);
  2905. if (riscv_xlen(target) < size * 8) {
  2906. LOG_ERROR("XLEN (%d) is too short for %d-bit memory write.",
  2907. riscv_xlen(target), size * 8);
  2908. return ERROR_FAIL;
  2909. }
  2910. LOG_DEBUG("writing %d words of %d bytes to 0x%08lx", count, size, (long)address);
  2911. select_dmi(target);
  2912. uint64_t mstatus = 0;
  2913. uint64_t mstatus_old = 0;
  2914. if (modify_privilege(target, &mstatus, &mstatus_old) != ERROR_OK)
  2915. return ERROR_FAIL;
  2916. /* s0 holds the next address to write to
  2917. * s1 holds the next data value to write
  2918. */
  2919. int result = ERROR_OK;
  2920. uint64_t s0, s1;
  2921. if (register_read(target, &s0, GDB_REGNO_S0) != ERROR_OK)
  2922. return ERROR_FAIL;
  2923. if (register_read(target, &s1, GDB_REGNO_S1) != ERROR_OK)
  2924. return ERROR_FAIL;
  2925. /* Write the program (store, increment) */
  2926. struct riscv_program program;
  2927. riscv_program_init(&program, target);
  2928. if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
  2929. riscv_program_csrrsi(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
  2930. switch (size) {
  2931. case 1:
  2932. riscv_program_sbr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
  2933. break;
  2934. case 2:
  2935. riscv_program_shr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
  2936. break;
  2937. case 4:
  2938. riscv_program_swr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
  2939. break;
  2940. case 8:
  2941. riscv_program_sdr(&program, GDB_REGNO_S1, GDB_REGNO_S0, 0);
  2942. break;
  2943. default:
  2944. LOG_ERROR("write_memory_progbuf(): Unsupported size: %d", size);
  2945. result = ERROR_FAIL;
  2946. goto error;
  2947. }
  2948. if (riscv_enable_virtual && has_sufficient_progbuf(target, 5) && get_field(mstatus, MSTATUS_MPRV))
  2949. riscv_program_csrrci(&program, GDB_REGNO_ZERO, CSR_DCSR_MPRVEN, GDB_REGNO_DCSR);
  2950. riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, size);
  2951. result = riscv_program_ebreak(&program);
  2952. if (result != ERROR_OK)
  2953. goto error;
  2954. riscv_program_write(&program);
  2955. riscv_addr_t cur_addr = address;
  2956. riscv_addr_t fin_addr = address + (count * size);
  2957. bool setup_needed = true;
  2958. LOG_DEBUG("writing until final address 0x%016" PRIx64, fin_addr);
  2959. while (cur_addr < fin_addr) {
  2960. LOG_DEBUG("transferring burst starting at address 0x%016" PRIx64,
  2961. cur_addr);
  2962. struct riscv_batch *batch = riscv_batch_alloc(
  2963. target,
  2964. 32,
  2965. info->dmi_busy_delay + info->ac_busy_delay);
  2966. if (!batch)
  2967. goto error;
  2968. /* To write another word, we put it in S1 and execute the program. */
  2969. unsigned start = (cur_addr - address) / size;
  2970. for (unsigned i = start; i < count; ++i) {
  2971. unsigned offset = size*i;
  2972. const uint8_t *t_buffer = buffer + offset;
  2973. uint64_t value = buf_get_u64(t_buffer, 0, 8 * size);
  2974. log_memory_access(address + offset, value, size, false);
  2975. cur_addr += size;
  2976. if (setup_needed) {
  2977. result = register_write_direct(target, GDB_REGNO_S0,
  2978. address + offset);
  2979. if (result != ERROR_OK) {
  2980. riscv_batch_free(batch);
  2981. goto error;
  2982. }
  2983. /* Write value. */
  2984. if (size > 4)
  2985. dmi_write(target, DM_DATA1, value >> 32);
  2986. dmi_write(target, DM_DATA0, value);
  2987. /* Write and execute command that moves value into S1 and
  2988. * executes program buffer. */
  2989. uint32_t command = access_register_command(target,
  2990. GDB_REGNO_S1, riscv_xlen(target),
  2991. AC_ACCESS_REGISTER_POSTEXEC |
  2992. AC_ACCESS_REGISTER_TRANSFER |
  2993. AC_ACCESS_REGISTER_WRITE);
  2994. result = execute_abstract_command(target, command);
  2995. if (result != ERROR_OK) {
  2996. riscv_batch_free(batch);
  2997. goto error;
  2998. }
  2999. /* Turn on autoexec */
  3000. dmi_write(target, DM_ABSTRACTAUTO,
  3001. 1 << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET);
  3002. setup_needed = false;
  3003. } else {
  3004. if (size > 4)
  3005. riscv_batch_add_dmi_write(batch, DM_DATA1, value >> 32);
  3006. riscv_batch_add_dmi_write(batch, DM_DATA0, value);
  3007. if (riscv_batch_full(batch))
  3008. break;
  3009. }
  3010. }
  3011. result = batch_run(target, batch);
  3012. riscv_batch_free(batch);
  3013. if (result != ERROR_OK)
  3014. goto error;
  3015. /* Note that if the scan resulted in a Busy DMI response, it
  3016. * is this read to abstractcs that will cause the dmi_busy_delay
  3017. * to be incremented if necessary. */
  3018. uint32_t abstractcs;
  3019. bool dmi_busy_encountered;
  3020. result = dmi_op(target, &abstractcs, &dmi_busy_encountered,
  3021. DMI_OP_READ, DM_ABSTRACTCS, 0, false, true);
  3022. if (result != ERROR_OK)
  3023. goto error;
  3024. while (get_field(abstractcs, DM_ABSTRACTCS_BUSY))
  3025. if (dmi_read(target, &abstractcs, DM_ABSTRACTCS) != ERROR_OK)
  3026. return ERROR_FAIL;
  3027. info->cmderr = get_field(abstractcs, DM_ABSTRACTCS_CMDERR);
  3028. if (info->cmderr == CMDERR_NONE && !dmi_busy_encountered) {
  3029. LOG_DEBUG("successful (partial?) memory write");
  3030. } else if (info->cmderr == CMDERR_BUSY || dmi_busy_encountered) {
  3031. if (info->cmderr == CMDERR_BUSY)
  3032. LOG_DEBUG("Memory write resulted in abstract command busy response.");
  3033. else if (dmi_busy_encountered)
  3034. LOG_DEBUG("Memory write resulted in DMI busy response.");
  3035. riscv013_clear_abstract_error(target);
  3036. increase_ac_busy_delay(target);
  3037. dmi_write(target, DM_ABSTRACTAUTO, 0);
  3038. result = register_read_direct(target, &cur_addr, GDB_REGNO_S0);
  3039. if (result != ERROR_OK)
  3040. goto error;
  3041. setup_needed = true;
  3042. } else {
  3043. LOG_ERROR("error when writing memory, abstractcs=0x%08lx", (long)abstractcs);
  3044. riscv013_clear_abstract_error(target);
  3045. result = ERROR_FAIL;
  3046. goto error;
  3047. }
  3048. }
  3049. error:
  3050. dmi_write(target, DM_ABSTRACTAUTO, 0);
  3051. if (register_write_direct(target, GDB_REGNO_S1, s1) != ERROR_OK)
  3052. return ERROR_FAIL;
  3053. if (register_write_direct(target, GDB_REGNO_S0, s0) != ERROR_OK)
  3054. return ERROR_FAIL;
  3055. /* Restore MSTATUS */
  3056. if (mstatus != mstatus_old)
  3057. if (register_write_direct(target, GDB_REGNO_MSTATUS, mstatus_old))
  3058. return ERROR_FAIL;
  3059. if (execute_fence(target) != ERROR_OK)
  3060. return ERROR_FAIL;
  3061. return result;
  3062. }
  3063. static int write_memory(struct target *target, target_addr_t address,
  3064. uint32_t size, uint32_t count, const uint8_t *buffer)
  3065. {
  3066. RISCV013_INFO(info);
  3067. if (has_sufficient_progbuf(target, 3) && !riscv_prefer_sba)
  3068. return write_memory_progbuf(target, address, size, count, buffer);
  3069. if ((get_field(info->sbcs, DM_SBCS_SBACCESS8) && size == 1) ||
  3070. (get_field(info->sbcs, DM_SBCS_SBACCESS16) && size == 2) ||
  3071. (get_field(info->sbcs, DM_SBCS_SBACCESS32) && size == 4) ||
  3072. (get_field(info->sbcs, DM_SBCS_SBACCESS64) && size == 8) ||
  3073. (get_field(info->sbcs, DM_SBCS_SBACCESS128) && size == 16)) {
  3074. if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 0)
  3075. return write_memory_bus_v0(target, address, size, count, buffer);
  3076. else if (get_field(info->sbcs, DM_SBCS_SBVERSION) == 1)
  3077. return write_memory_bus_v1(target, address, size, count, buffer);
  3078. }
  3079. if (has_sufficient_progbuf(target, 3))
  3080. return write_memory_progbuf(target, address, size, count, buffer);
  3081. return write_memory_abstract(target, address, size, count, buffer);
  3082. }
  3083. static int arch_state(struct target *target)
  3084. {
  3085. return ERROR_OK;
  3086. }
  3087. struct target_type riscv013_target = {
  3088. .name = "riscv",
  3089. .init_target = init_target,
  3090. .deinit_target = deinit_target,
  3091. .examine = examine,
  3092. .poll = &riscv_openocd_poll,
  3093. .halt = &riscv_halt,
  3094. .step = &riscv_openocd_step,
  3095. .assert_reset = assert_reset,
  3096. .deassert_reset = deassert_reset,
  3097. .write_memory = write_memory,
  3098. .arch_state = arch_state,
  3099. };
  3100. /*** 0.13-specific implementations of various RISC-V helper functions. ***/
  3101. static int riscv013_get_register(struct target *target,
  3102. riscv_reg_t *value, int hid, int rid)
  3103. {
  3104. LOG_DEBUG("[%d] reading register %s on hart %d", target->coreid,
  3105. gdb_regno_name(rid), hid);
  3106. riscv_set_current_hartid(target, hid);
  3107. int result = ERROR_OK;
  3108. if (rid == GDB_REGNO_PC) {
  3109. /* TODO: move this into riscv.c. */
  3110. result = register_read(target, value, GDB_REGNO_DPC);
  3111. LOG_DEBUG("[%d] read PC from DPC: 0x%" PRIx64, target->coreid, *value);
  3112. } else if (rid == GDB_REGNO_PRIV) {
  3113. uint64_t dcsr;
  3114. /* TODO: move this into riscv.c. */
  3115. result = register_read(target, &dcsr, GDB_REGNO_DCSR);
  3116. *value = get_field(dcsr, CSR_DCSR_PRV);
  3117. } else {
  3118. result = register_read(target, value, rid);
  3119. if (result != ERROR_OK)
  3120. *value = -1;
  3121. }
  3122. return result;
  3123. }
  3124. static int riscv013_set_register(struct target *target, int hid, int rid, uint64_t value)
  3125. {
  3126. LOG_DEBUG("[%d] writing 0x%" PRIx64 " to register %s on hart %d",
  3127. target->coreid, value, gdb_regno_name(rid), hid);
  3128. riscv_set_current_hartid(target, hid);
  3129. if (rid <= GDB_REGNO_XPR31) {
  3130. return register_write_direct(target, rid, value);
  3131. } else if (rid == GDB_REGNO_PC) {
  3132. LOG_DEBUG("[%d] writing PC to DPC: 0x%" PRIx64, target->coreid, value);
  3133. register_write_direct(target, GDB_REGNO_DPC, value);
  3134. uint64_t actual_value;
  3135. register_read_direct(target, &actual_value, GDB_REGNO_DPC);
  3136. LOG_DEBUG("[%d] actual DPC written: 0x%016" PRIx64, target->coreid, actual_value);
  3137. if (value != actual_value) {
  3138. LOG_ERROR("Written PC (0x%" PRIx64 ") does not match read back "
  3139. "value (0x%" PRIx64 ")", value, actual_value);
  3140. return ERROR_FAIL;
  3141. }
  3142. } else if (rid == GDB_REGNO_PRIV) {
  3143. uint64_t dcsr;
  3144. register_read(target, &dcsr, GDB_REGNO_DCSR);
  3145. dcsr = set_field(dcsr, CSR_DCSR_PRV, value);
  3146. return register_write_direct(target, GDB_REGNO_DCSR, dcsr);
  3147. } else {
  3148. return register_write_direct(target, rid, value);
  3149. }
  3150. return ERROR_OK;
  3151. }
  3152. static int riscv013_select_current_hart(struct target *target)
  3153. {
  3154. RISCV_INFO(r);
  3155. dm013_info_t *dm = get_dm(target);
  3156. if (!dm)
  3157. return ERROR_FAIL;
  3158. if (r->current_hartid == dm->current_hartid)
  3159. return ERROR_OK;
  3160. uint32_t dmcontrol;
  3161. /* TODO: can't we just "dmcontrol = DMI_DMACTIVE"? */
  3162. if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
  3163. return ERROR_FAIL;
  3164. dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
  3165. int result = dmi_write(target, DM_DMCONTROL, dmcontrol);
  3166. dm->current_hartid = r->current_hartid;
  3167. return result;
  3168. }
  3169. /* Select all harts that were prepped and that are selectable, clearing the
  3170. * prepped flag on the harts that actually were selected. */
  3171. static int select_prepped_harts(struct target *target, bool *use_hasel)
  3172. {
  3173. dm013_info_t *dm = get_dm(target);
  3174. if (!dm)
  3175. return ERROR_FAIL;
  3176. if (!dm->hasel_supported) {
  3177. RISCV_INFO(r);
  3178. r->prepped = false;
  3179. *use_hasel = false;
  3180. return ERROR_OK;
  3181. }
  3182. assert(dm->hart_count);
  3183. unsigned hawindow_count = (dm->hart_count + 31) / 32;
  3184. uint32_t hawindow[hawindow_count];
  3185. memset(hawindow, 0, sizeof(uint32_t) * hawindow_count);
  3186. target_list_t *entry;
  3187. unsigned total_selected = 0;
  3188. list_for_each_entry(entry, &dm->target_list, list) {
  3189. struct target *t = entry->target;
  3190. riscv_info_t *r = riscv_info(t);
  3191. riscv013_info_t *info = get_info(t);
  3192. unsigned index = info->index;
  3193. LOG_DEBUG("index=%d, coreid=%d, prepped=%d", index, t->coreid, r->prepped);
  3194. r->selected = r->prepped;
  3195. if (r->prepped) {
  3196. hawindow[index / 32] |= 1 << (index % 32);
  3197. r->prepped = false;
  3198. total_selected++;
  3199. }
  3200. index++;
  3201. }
  3202. /* Don't use hasel if we only need to talk to one hart. */
  3203. if (total_selected <= 1) {
  3204. *use_hasel = false;
  3205. return ERROR_OK;
  3206. }
  3207. for (unsigned i = 0; i < hawindow_count; i++) {
  3208. if (dmi_write(target, DM_HAWINDOWSEL, i) != ERROR_OK)
  3209. return ERROR_FAIL;
  3210. if (dmi_write(target, DM_HAWINDOW, hawindow[i]) != ERROR_OK)
  3211. return ERROR_FAIL;
  3212. }
  3213. *use_hasel = true;
  3214. return ERROR_OK;
  3215. }
  3216. static int riscv013_halt_prep(struct target *target)
  3217. {
  3218. return ERROR_OK;
  3219. }
  3220. static int riscv013_halt_go(struct target *target)
  3221. {
  3222. bool use_hasel = false;
  3223. if (!riscv_rtos_enabled(target)) {
  3224. if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
  3225. return ERROR_FAIL;
  3226. }
  3227. RISCV_INFO(r);
  3228. LOG_DEBUG("halting hart %d", r->current_hartid);
  3229. /* Issue the halt command, and then wait for the current hart to halt. */
  3230. uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_HALTREQ;
  3231. if (use_hasel)
  3232. dmcontrol |= DM_DMCONTROL_HASEL;
  3233. dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
  3234. dmi_write(target, DM_DMCONTROL, dmcontrol);
  3235. for (size_t i = 0; i < 256; ++i)
  3236. if (riscv_is_halted(target))
  3237. break;
  3238. if (!riscv_is_halted(target)) {
  3239. uint32_t dmstatus;
  3240. if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
  3241. return ERROR_FAIL;
  3242. if (dmi_read(target, &dmcontrol, DM_DMCONTROL) != ERROR_OK)
  3243. return ERROR_FAIL;
  3244. LOG_ERROR("unable to halt hart %d", r->current_hartid);
  3245. LOG_ERROR(" dmcontrol=0x%08x", dmcontrol);
  3246. LOG_ERROR(" dmstatus =0x%08x", dmstatus);
  3247. return ERROR_FAIL;
  3248. }
  3249. dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HALTREQ, 0);
  3250. dmi_write(target, DM_DMCONTROL, dmcontrol);
  3251. if (use_hasel) {
  3252. target_list_t *entry;
  3253. dm013_info_t *dm = get_dm(target);
  3254. if (!dm)
  3255. return ERROR_FAIL;
  3256. list_for_each_entry(entry, &dm->target_list, list) {
  3257. struct target *t = entry->target;
  3258. t->state = TARGET_HALTED;
  3259. if (t->debug_reason == DBG_REASON_NOTHALTED)
  3260. t->debug_reason = DBG_REASON_DBGRQ;
  3261. }
  3262. }
  3263. /* The "else" case is handled in halt_go(). */
  3264. return ERROR_OK;
  3265. }
  3266. static int riscv013_resume_go(struct target *target)
  3267. {
  3268. bool use_hasel = false;
  3269. if (!riscv_rtos_enabled(target)) {
  3270. if (select_prepped_harts(target, &use_hasel) != ERROR_OK)
  3271. return ERROR_FAIL;
  3272. }
  3273. return riscv013_step_or_resume_current_hart(target, false, use_hasel);
  3274. }
  3275. static int riscv013_step_current_hart(struct target *target)
  3276. {
  3277. return riscv013_step_or_resume_current_hart(target, true, false);
  3278. }
  3279. static int riscv013_resume_prep(struct target *target)
  3280. {
  3281. return riscv013_on_step_or_resume(target, false);
  3282. }
  3283. static int riscv013_on_step(struct target *target)
  3284. {
  3285. return riscv013_on_step_or_resume(target, true);
  3286. }
  3287. static int riscv013_on_halt(struct target *target)
  3288. {
  3289. return ERROR_OK;
  3290. }
/* Return true iff all currently selected harts are halted, based on
 * dmstatus.  Also reports unavailable/nonexistent harts and, if a hart
 * unexpectedly came out of reset, acknowledges the reset (and re-requests a
 * halt when we believed the target was halted). */
static bool riscv013_is_halted(struct target *target)
{
	uint32_t dmstatus;
	if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
		return false;
	if (get_field(dmstatus, DM_DMSTATUS_ANYUNAVAIL))
		LOG_ERROR("Hart %d is unavailable.", riscv_current_hartid(target));
	if (get_field(dmstatus, DM_DMSTATUS_ANYNONEXISTENT))
		LOG_ERROR("Hart %d doesn't exist.", riscv_current_hartid(target));
	if (get_field(dmstatus, DM_DMSTATUS_ANYHAVERESET)) {
		int hartid = riscv_current_hartid(target);
		LOG_INFO("Hart %d unexpectedly reset!", hartid);
		/* TODO: Can we make this more obvious to eg. a gdb user? */
		/* Acknowledge the reset so havereset clears. */
		uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE |
			DM_DMCONTROL_ACKHAVERESET;
		dmcontrol = set_hartsel(dmcontrol, hartid);
		/* If we had been halted when we reset, request another halt. If we
		 * ended up running out of reset, then the user will (hopefully) get a
		 * message that a reset happened, that the target is running, and then
		 * that it is halted again once the request goes through.
		 */
		if (target->state == TARGET_HALTED)
			dmcontrol |= DM_DMCONTROL_HALTREQ;
		/* NOTE(review): this dmi_write result is ignored — best effort. */
		dmi_write(target, DM_DMCONTROL, dmcontrol);
	}
	return get_field(dmstatus, DM_DMSTATUS_ALLHALTED);
}
/* Translate dcsr.cause into the generic riscv halt reason enum.
 * Returns RISCV_HALT_UNKNOWN if dcsr can't be read or the cause value is
 * unrecognized. */
static enum riscv_halt_reason riscv013_halt_reason(struct target *target)
{
	riscv_reg_t dcsr;
	int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
	if (result != ERROR_OK)
		return RISCV_HALT_UNKNOWN;

	switch (get_field(dcsr, CSR_DCSR_CAUSE)) {
	case CSR_DCSR_CAUSE_SWBP:
		return RISCV_HALT_BREAKPOINT;
	case CSR_DCSR_CAUSE_TRIGGER:
		/* We could get here before triggers are enumerated if a trigger was
		 * already set when we connected. Force enumeration now, which has the
		 * side effect of clearing any triggers we did not set. */
		riscv_enumerate_triggers(target);
		LOG_DEBUG("{%d} halted because of trigger", target->coreid);
		return RISCV_HALT_TRIGGER;
	case CSR_DCSR_CAUSE_STEP:
		return RISCV_HALT_SINGLESTEP;
	case CSR_DCSR_CAUSE_DEBUGINT:
	case CSR_DCSR_CAUSE_HALT:
		return RISCV_HALT_INTERRUPT;
	case CSR_DCSR_CAUSE_GROUP:
		return RISCV_HALT_GROUP;
	}

	/* No case matched: report the raw cause and give up. */
	LOG_ERROR("Unknown DCSR cause field: %x", (int)get_field(dcsr, CSR_DCSR_CAUSE));
	LOG_ERROR(" dcsr=0x%016lx", (long)dcsr);
	return RISCV_HALT_UNKNOWN;
}
  3346. int riscv013_write_debug_buffer(struct target *target, unsigned index, riscv_insn_t data)
  3347. {
  3348. dm013_info_t *dm = get_dm(target);
  3349. if (!dm)
  3350. return ERROR_FAIL;
  3351. if (dm->progbuf_cache[index] != data) {
  3352. if (dmi_write(target, DM_PROGBUF0 + index, data) != ERROR_OK)
  3353. return ERROR_FAIL;
  3354. dm->progbuf_cache[index] = data;
  3355. } else {
  3356. LOG_DEBUG("cache hit for 0x%" PRIx32 " @%d", data, index);
  3357. }
  3358. return ERROR_OK;
  3359. }
  3360. riscv_insn_t riscv013_read_debug_buffer(struct target *target, unsigned index)
  3361. {
  3362. uint32_t value;
  3363. dmi_read(target, &value, DM_PROGBUF0 + index);
  3364. return value;
  3365. }
  3366. int riscv013_execute_debug_buffer(struct target *target)
  3367. {
  3368. uint32_t run_program = 0;
  3369. run_program = set_field(run_program, AC_ACCESS_REGISTER_AARSIZE, 2);
  3370. run_program = set_field(run_program, AC_ACCESS_REGISTER_POSTEXEC, 1);
  3371. run_program = set_field(run_program, AC_ACCESS_REGISTER_TRANSFER, 0);
  3372. run_program = set_field(run_program, AC_ACCESS_REGISTER_REGNO, 0x1000);
  3373. return execute_abstract_command(target, run_program);
  3374. }
  3375. void riscv013_fill_dmi_write_u64(struct target *target, char *buf, int a, uint64_t d)
  3376. {
  3377. RISCV013_INFO(info);
  3378. buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_WRITE);
  3379. buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, d);
  3380. buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
  3381. }
  3382. void riscv013_fill_dmi_read_u64(struct target *target, char *buf, int a)
  3383. {
  3384. RISCV013_INFO(info);
  3385. buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_READ);
  3386. buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
  3387. buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, a);
  3388. }
  3389. void riscv013_fill_dmi_nop_u64(struct target *target, char *buf)
  3390. {
  3391. RISCV013_INFO(info);
  3392. buf_set_u64((unsigned char *)buf, DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH, DMI_OP_NOP);
  3393. buf_set_u64((unsigned char *)buf, DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH, 0);
  3394. buf_set_u64((unsigned char *)buf, DTM_DMI_ADDRESS_OFFSET, info->abits, 0);
  3395. }
  3396. /* Helper function for riscv013_test_sba_config_reg */
  3397. static int get_max_sbaccess(struct target *target)
  3398. {
  3399. RISCV013_INFO(info);
  3400. uint32_t sbaccess128 = get_field(info->sbcs, DM_SBCS_SBACCESS128);
  3401. uint32_t sbaccess64 = get_field(info->sbcs, DM_SBCS_SBACCESS64);
  3402. uint32_t sbaccess32 = get_field(info->sbcs, DM_SBCS_SBACCESS32);
  3403. uint32_t sbaccess16 = get_field(info->sbcs, DM_SBCS_SBACCESS16);
  3404. uint32_t sbaccess8 = get_field(info->sbcs, DM_SBCS_SBACCESS8);
  3405. if (sbaccess128)
  3406. return 4;
  3407. else if (sbaccess64)
  3408. return 3;
  3409. else if (sbaccess32)
  3410. return 2;
  3411. else if (sbaccess16)
  3412. return 1;
  3413. else if (sbaccess8)
  3414. return 0;
  3415. else
  3416. return -1;
  3417. }
  3418. static uint32_t get_num_sbdata_regs(struct target *target)
  3419. {
  3420. RISCV013_INFO(info);
  3421. uint32_t sbaccess128 = get_field(info->sbcs, DM_SBCS_SBACCESS128);
  3422. uint32_t sbaccess64 = get_field(info->sbcs, DM_SBCS_SBACCESS64);
  3423. uint32_t sbaccess32 = get_field(info->sbcs, DM_SBCS_SBACCESS32);
  3424. if (sbaccess128)
  3425. return 4;
  3426. else if (sbaccess64)
  3427. return 2;
  3428. else if (sbaccess32)
  3429. return 1;
  3430. else
  3431. return 0;
  3432. }
/* Self-test of the System Bus Access block (RISC-V Debug Spec v0.13).
 * Runs up to seven sub-tests against `legal_address` (num_words worth of
 * scratch RAM) and `illegal_address` (expected to fault):
 *   1. simple write/read, 2. address auto-increment, 3/4. sberror on
 *   illegal read/write, 5. sberror on unsupported access size,
 *   6. sberror on misaligned access, 7. sbbusyerror (simulation only,
 *   gated by run_sbbusyerror_test).
 * Returns ERROR_OK only if every executed sub-test passed.
 * NOTE(review): test 3 logs failures but does not bump tests_failed —
 * presumably intentional leniency; confirm before relying on the count. */
static int riscv013_test_sba_config_reg(struct target *target,
		target_addr_t legal_address, uint32_t num_words,
		target_addr_t illegal_address, bool run_sbbusyerror_test)
{
	LOG_INFO("Testing System Bus Access as defined by RISC-V Debug Spec v0.13");

	uint32_t tests_failed = 0;

	uint32_t rd_val;
	uint32_t sbcs_orig;
	dmi_read(target, &sbcs_orig, DM_SBCS);

	uint32_t sbcs = sbcs_orig;
	bool test_passed;

	int max_sbaccess = get_max_sbaccess(target);

	if (max_sbaccess == -1) {
		LOG_ERROR("System Bus Access not supported in this config.");
		return ERROR_FAIL;
	}

	if (get_field(sbcs, DM_SBCS_SBVERSION) != 1) {
		LOG_ERROR("System Bus Access unsupported SBVERSION (%d). Only version 1 is supported.",
				get_field(sbcs, DM_SBCS_SBVERSION));
		return ERROR_FAIL;
	}

	uint32_t num_sbdata_regs = get_num_sbdata_regs(target);
	assert(num_sbdata_regs);

	uint32_t rd_buf[num_sbdata_regs];

	/* Test 1: Simple write/read test */
	test_passed = true;
	sbcs = set_field(sbcs_orig, DM_SBCS_SBAUTOINCREMENT, 0);
	dmi_write(target, DM_SBCS, sbcs);

	uint32_t test_patterns[4] = {0xdeadbeef, 0xfeedbabe, 0x12345678, 0x08675309};
	/* Exercise every supported access width. */
	for (uint32_t sbaccess = 0; sbaccess <= (uint32_t)max_sbaccess; sbaccess++) {
		sbcs = set_field(sbcs, DM_SBCS_SBACCESS, sbaccess);
		dmi_write(target, DM_SBCS, sbcs);

		/* Only the bytes actually transferred are compared. */
		uint32_t compare_mask = (sbaccess == 0) ? 0xff : (sbaccess == 1) ? 0xffff : 0xffffffff;

		for (uint32_t i = 0; i < num_words; i++) {
			uint32_t addr = legal_address + (i << sbaccess);
			uint32_t wr_data[num_sbdata_regs];
			for (uint32_t j = 0; j < num_sbdata_regs; j++)
				wr_data[j] = test_patterns[j] + i;
			write_memory_sba_simple(target, addr, wr_data, num_sbdata_regs, sbcs);
		}

		for (uint32_t i = 0; i < num_words; i++) {
			uint32_t addr = legal_address + (i << sbaccess);
			read_memory_sba_simple(target, addr, rd_buf, num_sbdata_regs, sbcs);
			for (uint32_t j = 0; j < num_sbdata_regs; j++) {
				if (((test_patterns[j]+i)&compare_mask) != (rd_buf[j]&compare_mask)) {
					LOG_ERROR("System Bus Access Test 1: Error reading non-autoincremented address %x,"
							"expected val = %x, read val = %x", addr, test_patterns[j]+i, rd_buf[j]);
					test_passed = false;
					tests_failed++;
				}
			}
		}
	}
	if (test_passed)
		LOG_INFO("System Bus Access Test 1: Simple write/read test PASSED.");

	/* Test 2: Address autoincrement test */
	target_addr_t curr_addr;
	target_addr_t prev_addr;
	test_passed = true;
	sbcs = set_field(sbcs_orig, DM_SBCS_SBAUTOINCREMENT, 1);
	dmi_write(target, DM_SBCS, sbcs);

	for (uint32_t sbaccess = 0; sbaccess <= (uint32_t)max_sbaccess; sbaccess++) {
		sbcs = set_field(sbcs, DM_SBCS_SBACCESS, sbaccess);
		dmi_write(target, DM_SBCS, sbcs);

		dmi_write(target, DM_SBADDRESS0, legal_address);
		read_sbcs_nonbusy(target, &sbcs);
		curr_addr = legal_address;
		/* Write pass: sbaddress must advance by the access size per write. */
		for (uint32_t i = 0; i < num_words; i++) {
			prev_addr = curr_addr;
			read_sbcs_nonbusy(target, &sbcs);
			curr_addr = sb_read_address(target);
			if ((curr_addr - prev_addr != (uint32_t)(1 << sbaccess)) && (i != 0)) {
				LOG_ERROR("System Bus Access Test 2: Error with address auto-increment, sbaccess = %x.", sbaccess);
				test_passed = false;
				tests_failed++;
			}
			dmi_write(target, DM_SBDATA0, i);
		}

		read_sbcs_nonbusy(target, &sbcs);

		dmi_write(target, DM_SBADDRESS0, legal_address);

		uint32_t val;
		sbcs = set_field(sbcs, DM_SBCS_SBREADONDATA, 1);
		dmi_write(target, DM_SBCS, sbcs);
		dmi_read(target, &val, DM_SBDATA0); /* Dummy read to trigger first system bus read */
		curr_addr = legal_address;
		/* Read pass: verify the auto-incremented reads return what we wrote. */
		for (uint32_t i = 0; i < num_words; i++) {
			prev_addr = curr_addr;
			read_sbcs_nonbusy(target, &sbcs);
			curr_addr = sb_read_address(target);
			if ((curr_addr - prev_addr != (uint32_t)(1 << sbaccess)) && (i != 0)) {
				LOG_ERROR("System Bus Access Test 2: Error with address auto-increment, sbaccess = %x", sbaccess);
				test_passed = false;
				tests_failed++;
			}
			dmi_read(target, &val, DM_SBDATA0);
			read_sbcs_nonbusy(target, &sbcs);
			if (i != val) {
				LOG_ERROR("System Bus Access Test 2: Error reading auto-incremented address,"
						"expected val = %x, read val = %x.", i, val);
				test_passed = false;
				tests_failed++;
			}
		}
	}
	if (test_passed)
		LOG_INFO("System Bus Access Test 2: Address auto-increment test PASSED.");

	/* Test 3: Read from illegal address */
	read_memory_sba_simple(target, illegal_address, rd_buf, 1, sbcs_orig);

	dmi_read(target, &rd_val, DM_SBCS);
	/* sberror == 2 means "bad address"; writing the value back clears it (W1C). */
	if (get_field(rd_val, DM_SBCS_SBERROR) == 2) {
		sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 2);
		dmi_write(target, DM_SBCS, sbcs);
		dmi_read(target, &rd_val, DM_SBCS);
		if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
			LOG_INFO("System Bus Access Test 3: Illegal address read test PASSED.");
		else
			LOG_ERROR("System Bus Access Test 3: Illegal address read test FAILED, unable to clear to 0.");
	} else {
		LOG_ERROR("System Bus Access Test 3: Illegal address read test FAILED, unable to set error code.");
	}

	/* Test 4: Write to illegal address */
	write_memory_sba_simple(target, illegal_address, test_patterns, 1, sbcs_orig);

	dmi_read(target, &rd_val, DM_SBCS);
	if (get_field(rd_val, DM_SBCS_SBERROR) == 2) {
		sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 2);
		dmi_write(target, DM_SBCS, sbcs);
		dmi_read(target, &rd_val, DM_SBCS);
		if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
			LOG_INFO("System Bus Access Test 4: Illegal address write test PASSED.");
		else {
			LOG_ERROR("System Bus Access Test 4: Illegal address write test FAILED, unable to clear to 0.");
			tests_failed++;
		}
	} else {
		LOG_ERROR("System Bus Access Test 4: Illegal address write test FAILED, unable to set error code.");
		tests_failed++;
	}

	/* Test 5: Write with unsupported sbaccess size */
	uint32_t sbaccess128 = get_field(sbcs_orig, DM_SBCS_SBACCESS128);

	if (sbaccess128) {
		/* Nothing to test: every access size is supported. */
		LOG_INFO("System Bus Access Test 5: SBCS sbaccess error test PASSED, all sbaccess sizes supported.");
	} else {
		sbcs = set_field(sbcs_orig, DM_SBCS_SBACCESS, 4);

		write_memory_sba_simple(target, legal_address, test_patterns, 1, sbcs);
		dmi_read(target, &rd_val, DM_SBCS);
		/* sberror == 4 means "unsupported size". */
		if (get_field(rd_val, DM_SBCS_SBERROR) == 4) {
			sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 4);
			dmi_write(target, DM_SBCS, sbcs);
			dmi_read(target, &rd_val, DM_SBCS);
			if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
				LOG_INFO("System Bus Access Test 5: SBCS sbaccess error test PASSED.");
			else {
				LOG_ERROR("System Bus Access Test 5: SBCS sbaccess error test FAILED, unable to clear to 0.");
				tests_failed++;
			}
		} else {
			LOG_ERROR("System Bus Access Test 5: SBCS sbaccess error test FAILED, unable to set error code.");
			tests_failed++;
		}
	}

	/* Test 6: Write to misaligned address */
	sbcs = set_field(sbcs_orig, DM_SBCS_SBACCESS, 1);

	write_memory_sba_simple(target, legal_address+1, test_patterns, 1, sbcs);
	dmi_read(target, &rd_val, DM_SBCS);
	/* sberror == 3 means "alignment error". */
	if (get_field(rd_val, DM_SBCS_SBERROR) == 3) {
		sbcs = set_field(sbcs_orig, DM_SBCS_SBERROR, 3);
		dmi_write(target, DM_SBCS, sbcs);
		dmi_read(target, &rd_val, DM_SBCS);
		if (get_field(rd_val, DM_SBCS_SBERROR) == 0)
			LOG_INFO("System Bus Access Test 6: SBCS address alignment error test PASSED");
		else {
			LOG_ERROR("System Bus Access Test 6: SBCS address alignment error test FAILED, unable to clear to 0.");
			tests_failed++;
		}
	} else {
		LOG_ERROR("System Bus Access Test 6: SBCS address alignment error test FAILED, unable to set error code.");
		tests_failed++;
	}

	/* Test 7: Set sbbusyerror, only run this case in simulation as it is likely
	 * impossible to hit otherwise */
	if (run_sbbusyerror_test) {
		sbcs = set_field(sbcs_orig, DM_SBCS_SBREADONADDR, 1);
		dmi_write(target, DM_SBCS, sbcs);

		/* Flood the bus with back-to-back operations to force sbbusyerror. */
		for (int i = 0; i < 16; i++)
			dmi_write(target, DM_SBDATA0, 0xdeadbeef);

		for (int i = 0; i < 16; i++)
			dmi_write(target, DM_SBADDRESS0, legal_address);

		dmi_read(target, &rd_val, DM_SBCS);
		if (get_field(rd_val, DM_SBCS_SBBUSYERROR)) {
			sbcs = set_field(sbcs_orig, DM_SBCS_SBBUSYERROR, 1);
			dmi_write(target, DM_SBCS, sbcs);
			dmi_read(target, &rd_val, DM_SBCS);
			if (get_field(rd_val, DM_SBCS_SBBUSYERROR) == 0)
				LOG_INFO("System Bus Access Test 7: SBCS sbbusyerror test PASSED.");
			else {
				LOG_ERROR("System Bus Access Test 7: SBCS sbbusyerror test FAILED, unable to clear to 0.");
				tests_failed++;
			}
		} else {
			LOG_ERROR("System Bus Access Test 7: SBCS sbbusyerror test FAILED, unable to set error code.");
			tests_failed++;
		}
	}

	if (tests_failed == 0) {
		LOG_INFO("ALL TESTS PASSED");
		return ERROR_OK;
	} else {
		LOG_ERROR("%d TESTS FAILED", tests_failed);
		return ERROR_FAIL;
	}
}
  3644. void write_memory_sba_simple(struct target *target, target_addr_t addr,
  3645. uint32_t *write_data, uint32_t write_size, uint32_t sbcs)
  3646. {
  3647. RISCV013_INFO(info);
  3648. uint32_t rd_sbcs;
  3649. uint32_t masked_addr;
  3650. uint32_t sba_size = get_field(info->sbcs, DM_SBCS_SBASIZE);
  3651. read_sbcs_nonbusy(target, &rd_sbcs);
  3652. uint32_t sbcs_no_readonaddr = set_field(sbcs, DM_SBCS_SBREADONADDR, 0);
  3653. dmi_write(target, DM_SBCS, sbcs_no_readonaddr);
  3654. for (uint32_t i = 0; i < sba_size/32; i++) {
  3655. masked_addr = (addr >> 32*i) & 0xffffffff;
  3656. if (i != 3)
  3657. dmi_write(target, DM_SBADDRESS0+i, masked_addr);
  3658. else
  3659. dmi_write(target, DM_SBADDRESS3, masked_addr);
  3660. }
  3661. /* Write SBDATA registers starting with highest address, since write to
  3662. * SBDATA0 triggers write */
  3663. for (int i = write_size-1; i >= 0; i--)
  3664. dmi_write(target, DM_SBDATA0+i, write_data[i]);
  3665. }
  3666. void read_memory_sba_simple(struct target *target, target_addr_t addr,
  3667. uint32_t *rd_buf, uint32_t read_size, uint32_t sbcs)
  3668. {
  3669. RISCV013_INFO(info);
  3670. uint32_t rd_sbcs;
  3671. uint32_t masked_addr;
  3672. uint32_t sba_size = get_field(info->sbcs, DM_SBCS_SBASIZE);
  3673. read_sbcs_nonbusy(target, &rd_sbcs);
  3674. uint32_t sbcs_readonaddr = set_field(sbcs, DM_SBCS_SBREADONADDR, 1);
  3675. dmi_write(target, DM_SBCS, sbcs_readonaddr);
  3676. /* Write addresses starting with highest address register */
  3677. for (int i = sba_size/32-1; i >= 0; i--) {
  3678. masked_addr = (addr >> 32*i) & 0xffffffff;
  3679. if (i != 3)
  3680. dmi_write(target, DM_SBADDRESS0+i, masked_addr);
  3681. else
  3682. dmi_write(target, DM_SBADDRESS3, masked_addr);
  3683. }
  3684. read_sbcs_nonbusy(target, &rd_sbcs);
  3685. for (uint32_t i = 0; i < read_size; i++)
  3686. dmi_read(target, &(rd_buf[i]), DM_SBDATA0+i);
  3687. }
  3688. int riscv013_dmi_write_u64_bits(struct target *target)
  3689. {
  3690. RISCV013_INFO(info);
  3691. return info->abits + DTM_DMI_DATA_LENGTH + DTM_DMI_OP_LENGTH;
  3692. }
  3693. static int maybe_execute_fence_i(struct target *target)
  3694. {
  3695. if (has_sufficient_progbuf(target, 3))
  3696. return execute_fence(target);
  3697. return ERROR_OK;
  3698. }
  3699. /* Helper Functions. */
  3700. static int riscv013_on_step_or_resume(struct target *target, bool step)
  3701. {
  3702. if (maybe_execute_fence_i(target) != ERROR_OK)
  3703. return ERROR_FAIL;
  3704. /* We want to twiddle some bits in the debug CSR so debugging works. */
  3705. riscv_reg_t dcsr;
  3706. int result = register_read(target, &dcsr, GDB_REGNO_DCSR);
  3707. if (result != ERROR_OK)
  3708. return result;
  3709. dcsr = set_field(dcsr, CSR_DCSR_STEP, step);
  3710. dcsr = set_field(dcsr, CSR_DCSR_EBREAKM, riscv_ebreakm);
  3711. dcsr = set_field(dcsr, CSR_DCSR_EBREAKS, riscv_ebreaks);
  3712. dcsr = set_field(dcsr, CSR_DCSR_EBREAKU, riscv_ebreaku);
  3713. return riscv_set_register(target, GDB_REGNO_DCSR, dcsr);
  3714. }
/* Issue a resume request for the current hart (with hasel when requested)
 * and poll until it is acknowledged.  For a step, additionally wait for the
 * hart to halt again.  On timeout while stepping, fall back to halting the
 * target; otherwise report failure. */
static int riscv013_step_or_resume_current_hart(struct target *target,
		bool step, bool use_hasel)
{
	RISCV_INFO(r);
	LOG_DEBUG("resuming hart %d (for step?=%d)", r->current_hartid, step);
	if (!riscv_is_halted(target)) {
		LOG_ERROR("Hart %d is not halted!", r->current_hartid);
		return ERROR_FAIL;
	}

	/* Issue the resume command, and then wait for the current hart to resume. */
	uint32_t dmcontrol = DM_DMCONTROL_DMACTIVE | DM_DMCONTROL_RESUMEREQ;
	if (use_hasel)
		dmcontrol |= DM_DMCONTROL_HASEL;
	dmcontrol = set_hartsel(dmcontrol, r->current_hartid);
	dmi_write(target, DM_DMCONTROL, dmcontrol);

	/* Pre-compute the value used to deassert resumereq once acked. */
	dmcontrol = set_field(dmcontrol, DM_DMCONTROL_HASEL, 0);
	dmcontrol = set_field(dmcontrol, DM_DMCONTROL_RESUMEREQ, 0);

	uint32_t dmstatus;
	/* Bounded poll (~256 * 10us) for resume ack (and re-halt when stepping). */
	for (size_t i = 0; i < 256; ++i) {
		usleep(10);
		if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
			return ERROR_FAIL;
		if (get_field(dmstatus, DM_DMSTATUS_ALLRESUMEACK) == 0)
			continue;
		if (step && get_field(dmstatus, DM_DMSTATUS_ALLHALTED) == 0)
			continue;

		dmi_write(target, DM_DMCONTROL, dmcontrol);
		return ERROR_OK;
	}

	/* Timed out: clear the request anyway, then report what we can see. */
	dmi_write(target, DM_DMCONTROL, dmcontrol);

	LOG_ERROR("unable to resume hart %d", r->current_hartid);
	if (dmstatus_read(target, &dmstatus, true) != ERROR_OK)
		return ERROR_FAIL;
	LOG_ERROR(" dmstatus =0x%08x", dmstatus);

	if (step) {
		LOG_ERROR(" was stepping, halting");
		riscv_halt(target);
		return ERROR_OK;
	}

	return ERROR_FAIL;
}
  3756. void riscv013_clear_abstract_error(struct target *target)
  3757. {
  3758. /* Wait for busy to go away. */
  3759. time_t start = time(NULL);
  3760. uint32_t abstractcs;
  3761. dmi_read(target, &abstractcs, DM_ABSTRACTCS);
  3762. while (get_field(abstractcs, DM_ABSTRACTCS_BUSY)) {
  3763. dmi_read(target, &abstractcs, DM_ABSTRACTCS);
  3764. if (time(NULL) - start > riscv_command_timeout_sec) {
  3765. LOG_ERROR("abstractcs.busy is not going low after %d seconds "
  3766. "(abstractcs=0x%x). The target is either really slow or "
  3767. "broken. You could increase the timeout with riscv "
  3768. "set_command_timeout_sec.",
  3769. riscv_command_timeout_sec, abstractcs);
  3770. break;
  3771. }
  3772. }
  3773. /* Clear the error status. */
  3774. dmi_write(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
  3775. }
/* Path separator used only to shorten __FILE__ in compliance-test logs. */
#ifdef _WIN32
#define FILE_SEP '\\'
#else
#define FILE_SEP '/'
#endif
/* Run one compliance check: log file/line/message, count the test, count a
 * pass when `b` is true, and assert() on failure (aborting the run).
 * Relies on `total_tests` and `passed_tests` being in scope at the call
 * site. */
#define COMPLIANCE_TEST(b, message) \
{ \
	const char *last_sep = strrchr(__FILE__, FILE_SEP); \
	const char *fname = (!last_sep ? __FILE__ : last_sep + 1); \
	LOG_INFO("Executing test %d (%s:%d): %s", total_tests, fname, __LINE__, message); \
	int pass = 0; \
	if (b) { \
		pass = 1; \
		passed_tests++; \
	} \
	LOG_INFO(" %s", (pass) ? "PASSED" : "FAILED"); \
	assert(pass); \
	total_tests++; \
}
/* Wrapper: the expression must evaluate to ERROR_OK. */
#define COMPLIANCE_MUST_PASS(b) COMPLIANCE_TEST(ERROR_OK == (b), "Regular calls must return ERROR_OK")
/* DMI read/write that must succeed. */
#define COMPLIANCE_READ(target, addr, value) COMPLIANCE_MUST_PASS(dmi_read(target, addr, value))
#define COMPLIANCE_WRITE(target, addr, value) COMPLIANCE_MUST_PASS(dmi_write(target, addr, value))
/* Verify a register is read-only: writing its complement must not change
 * the value read back. */
#define COMPLIANCE_CHECK_RO(target, addr) \
{ \
	uint32_t orig; \
	uint32_t inverse; \
	COMPLIANCE_READ(target, &orig, addr); \
	COMPLIANCE_WRITE(target, addr, ~orig); \
	COMPLIANCE_READ(target, &inverse, addr); \
	COMPLIANCE_TEST(orig == inverse, "Register must be read-only"); \
}
/*
 * Run a best-effort compliance test of the Debug Module described by the
 * RISC-V Debug Spec v0.13 on `target`.  Exercises dmcontrol (hartreset,
 * hasel, haltreq, resumereq, ndmreset, dmactive), dmstatus, hartinfo,
 * haltsum0/1, the data and program-buffer words, abstract commands,
 * abstractauto, single-stepping, and DCSR/DPC.  Every check goes through
 * COMPLIANCE_TEST, which counts into total_tests/passed_tests and
 * assert()s on the first failure in non-NDEBUG builds.
 *
 * Preconditions: `-rtos riscv` must be configured and the target must have
 * been examined successfully.  This test is destructive: it halts and
 * resets the target and clobbers GPRs and debug CSRs.
 *
 * Returns ERROR_OK when every executed check passed, ERROR_FAIL otherwise.
 */
int riscv013_test_compliance(struct target *target)
{
	LOG_INFO("Basic compliance test against RISC-V Debug Spec v0.13");
	LOG_INFO("This test is not complete, and not well supported.");
	LOG_INFO("Your core might pass this test without being compliant.");
	LOG_INFO("Your core might fail this test while being compliant.");
	LOG_INFO("Use your judgment, and please contribute improvements.");

	if (!riscv_rtos_enabled(target)) {
		LOG_ERROR("Please run with -rtos riscv to run compliance test.");
		return ERROR_FAIL;
	}

	if (!target_was_examined(target)) {
		LOG_ERROR("Cannot run compliance test, because target has not yet "
			"been examined, or the examination failed.\n");
		return ERROR_FAIL;
	}

	/* Counters updated by the COMPLIANCE_* macros. */
	int total_tests = 0;
	int passed_tests = 0;

	uint32_t dmcontrol_orig = DM_DMCONTROL_DMACTIVE;
	uint32_t dmcontrol;
	uint32_t testvar;
	uint32_t testvar_read;
	riscv_reg_t value;
	RISCV013_INFO(info);

	/* All the bits of HARTSEL are covered by the examine sequence. */

	/* hartreset */
	/* This field is optional. Either we can read and write it to 1/0,
	   or it is tied to 0. This check doesn't really do anything, but
	   it does attempt to set the bit to 1 and then back to 0, which needs to
	   work if its implemented. */
	COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HARTRESET, 1));
	COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HARTRESET, 0));
	COMPLIANCE_READ(target, &dmcontrol, DM_DMCONTROL);
	COMPLIANCE_TEST((get_field(dmcontrol, DM_DMCONTROL_HARTRESET) == 0),
			"DMCONTROL.hartreset can be 0 or RW.");

	/* hasel: same optional-field probe as hartreset above. */
	COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HASEL, 1));
	COMPLIANCE_WRITE(target, DM_DMCONTROL, set_field(dmcontrol_orig, DM_DMCONTROL_HASEL, 0));
	COMPLIANCE_READ(target, &dmcontrol, DM_DMCONTROL);
	COMPLIANCE_TEST((get_field(dmcontrol, DM_DMCONTROL_HASEL) == 0),
			"DMCONTROL.hasel can be 0 or RW.");
	/* TODO: test that hamask registers exist if hasel does. */

	/* haltreq */
	COMPLIANCE_MUST_PASS(riscv_halt(target));
	/* This bit is not actually readable according to the spec, so nothing to check.*/

	/* DMSTATUS */
	COMPLIANCE_CHECK_RO(target, DM_DMSTATUS);

	/* resumereq */
	/* This bit is not actually readable according to the spec, so nothing to check.*/
	COMPLIANCE_MUST_PASS(riscv_resume(target, true, 0, false, false, false));

	/* Halt all harts again so the test can continue.*/
	COMPLIANCE_MUST_PASS(riscv_halt(target));

	/* HARTINFO: Read-Only. This is per-hart, so need to adjust hartsel. */
	uint32_t hartinfo;
	COMPLIANCE_READ(target, &hartinfo, DM_HARTINFO);
	for (int hartsel = 0; hartsel < riscv_count_harts(target); hartsel++) {
		COMPLIANCE_MUST_PASS(riscv_set_current_hartid(target, hartsel));

		COMPLIANCE_CHECK_RO(target, DM_HARTINFO);

		/* $dscratch CSRs */
		/* NOTE(review): hartinfo was read once before this loop, so nscratch
		   below reflects the hart selected at that time for every iteration.
		   Confirm whether a per-hart re-read of DM_HARTINFO is intended. */
		uint32_t nscratch = get_field(hartinfo, DM_HARTINFO_NSCRATCH);
		for (unsigned int d = 0; d < nscratch; d++) {
			riscv_reg_t testval, testval_read;
			/* Because DSCRATCH0 is not guaranteed to last across PB executions, need to put
			   this all into one PB execution. Which may not be possible on all implementations.*/
			if (info->progbufsize >= 5) {
				/* testval walks: pattern -> ~pattern -> 0xDEAD (loop terminator). */
				for (testval = 0x0011223300112233;
						testval != 0xDEAD;
						testval = testval == 0x0011223300112233 ? ~testval : 0xDEAD) {
					COMPLIANCE_TEST(register_write_direct(target, GDB_REGNO_S0, testval) == ERROR_OK,
							"Need to be able to write S0 in order to test DSCRATCH0.");
					/* Round-trip testval through dscratch[d]: S0 -> CSR -> S1. */
					struct riscv_program program32;
					riscv_program_init(&program32, target);
					riscv_program_csrw(&program32, GDB_REGNO_S0, GDB_REGNO_DSCRATCH0 + d);
					riscv_program_csrr(&program32, GDB_REGNO_S1, GDB_REGNO_DSCRATCH0 + d);
					riscv_program_fence(&program32);
					riscv_program_ebreak(&program32);
					COMPLIANCE_TEST(riscv_program_exec(&program32, target) == ERROR_OK,
							"Accessing DSCRATCH0 with program buffer should succeed.");
					COMPLIANCE_TEST(register_read_direct(target, &testval_read, GDB_REGNO_S1) == ERROR_OK,
							"Need to be able to read S1 in order to test DSCRATCH0.");
					if (riscv_xlen(target) > 32) {
						COMPLIANCE_TEST(testval == testval_read,
								"All DSCRATCH0 registers in HARTINFO must be R/W.");
					} else {
						/* On RV32 only the low word can survive the round trip. */
						COMPLIANCE_TEST(testval_read == (testval & 0xFFFFFFFF),
								"All DSCRATCH0 registers in HARTINFO must be R/W.");
					}
				}
			}
		}
		/* TODO: dataaccess */
		if (get_field(hartinfo, DM_HARTINFO_DATAACCESS)) {
			/* TODO: Shadowed in memory map. */
			/* TODO: datasize */
			/* TODO: dataaddr */
		} else {
			/* TODO: Shadowed in CSRs. */
			/* TODO: datasize */
			/* TODO: dataaddr */
		}
	}

	/* HALTSUM -- TODO: More than 32 harts. Would need to loop over this to set hartsel */
	/* TODO: HALTSUM2, HALTSUM3 */
	/* HALTSUM0 */
	/* Every hart was halted above, so expect one summary bit per hart. */
	uint32_t expected_haltsum0 = 0;
	for (int i = 0; i < MIN(riscv_count_harts(target), 32); i++)
		expected_haltsum0 |= (1 << i);

	COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM0);
	COMPLIANCE_TEST(testvar_read == expected_haltsum0,
			"HALTSUM0 should report summary of up to 32 halted harts");

	/* Writes of all-ones and all-zeroes must both be ignored. */
	COMPLIANCE_WRITE(target, DM_HALTSUM0, 0xffffffff);
	COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM0);
	COMPLIANCE_TEST(testvar_read == expected_haltsum0, "HALTSUM0 should be R/O");

	COMPLIANCE_WRITE(target, DM_HALTSUM0, 0x0);
	COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM0);
	COMPLIANCE_TEST(testvar_read == expected_haltsum0, "HALTSUM0 should be R/O");

	/* HALTSUM1: one bit per group of 32 harts. */
	uint32_t expected_haltsum1 = 0;
	for (int i = 0; i < MIN(riscv_count_harts(target), 1024); i += 32)
		expected_haltsum1 |= (1 << (i/32));

	COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM1);
	COMPLIANCE_TEST(testvar_read == expected_haltsum1,
			"HALTSUM1 should report summary of up to 1024 halted harts");

	COMPLIANCE_WRITE(target, DM_HALTSUM1, 0xffffffff);
	COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM1);
	COMPLIANCE_TEST(testvar_read == expected_haltsum1, "HALTSUM1 should be R/O");

	COMPLIANCE_WRITE(target, DM_HALTSUM1, 0x0);
	COMPLIANCE_READ(target, &testvar_read, DM_HALTSUM1);
	COMPLIANCE_TEST(testvar_read == expected_haltsum1, "HALTSUM1 should be R/O");

	/* TODO: HAWINDOWSEL */
	/* TODO: HAWINDOW */

	/* ABSTRACTCS */
	uint32_t abstractcs;
	COMPLIANCE_READ(target, &abstractcs, DM_ABSTRACTCS);

	/* Check that all reported Data Words are really R/W */
	/* Two passes: a distinct pattern per word, then its complement. */
	for (int invert = 0; invert < 2; invert++) {
		for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
			testvar = (i + 1) * 0x11111111;
			if (invert)
				testvar = ~testvar;
			COMPLIANCE_WRITE(target, DM_DATA0 + i, testvar);
		}
		for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
			testvar = (i + 1) * 0x11111111;
			if (invert)
				testvar = ~testvar;
			COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
			COMPLIANCE_TEST(testvar_read == testvar, "All reported DATA words must be R/W");
		}
	}

	/* Check that all reported ProgBuf words are really R/W */
	for (int invert = 0; invert < 2; invert++) {
		for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
			testvar = (i + 1) * 0x11111111;
			if (invert)
				testvar = ~testvar;
			COMPLIANCE_WRITE(target, DM_PROGBUF0 + i, testvar);
		}
		for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
			testvar = (i + 1) * 0x11111111;
			if (invert)
				testvar = ~testvar;
			COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
			COMPLIANCE_TEST(testvar_read == testvar, "All reported PROGBUF words must be R/W");
		}
	}

	/* TODO: Cause and clear all error types */

	/* COMMAND
	According to the spec, this register is only W, so can't really check the read result.
	But at any rate, this is not legal and should cause an error. */
	COMPLIANCE_WRITE(target, DM_COMMAND, 0xAAAAAAAA);
	COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
	COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == CMDERR_NOT_SUPPORTED,
			"Illegal COMMAND should result in UNSUPPORTED");
	/* cmderr is W1C: writing the field mask clears it. */
	COMPLIANCE_WRITE(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);

	COMPLIANCE_WRITE(target, DM_COMMAND, 0x55555555);
	COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
	COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == CMDERR_NOT_SUPPORTED,
			"Illegal COMMAND should result in UNSUPPORTED");
	COMPLIANCE_WRITE(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);

	/* Basic Abstract Commands */
	/* Walk a single bit through the GPR index: x1, x2, x4, x8, x16. */
	for (unsigned int i = 1; i < 32; i = i << 1) {
		riscv_reg_t testval = i | ((i + 1ULL) << 32);
		riscv_reg_t testval_read;
		COMPLIANCE_TEST(register_write_direct(target, GDB_REGNO_ZERO + i, testval) == ERROR_OK,
				"GPR Writes should be supported.");
		/* Scribble over arg0 so the read back really comes from the GPR. */
		COMPLIANCE_MUST_PASS(write_abstract_arg(target, 0, 0xDEADBEEFDEADBEEF, 64));
		COMPLIANCE_TEST(register_read_direct(target, &testval_read, GDB_REGNO_ZERO + i) == ERROR_OK,
				"GPR Reads should be supported.");
		if (riscv_xlen(target) > 32) {
			/* Dummy comment to satisfy linter, since removing the branches here doesn't actually compile. */
			COMPLIANCE_TEST(testval == testval_read, "GPR Reads and writes should be supported.");
		} else {
			/* Dummy comment to satisfy linter, since removing the branches here doesn't actually compile. */
			COMPLIANCE_TEST((testval & 0xFFFFFFFF) == testval_read, "GPR Reads and writes should be supported.");
		}
	}

	/* ABSTRACTAUTO
	See which bits are actually writable */
	COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0xFFFFFFFF);
	uint32_t abstractauto;
	uint32_t busy;
	COMPLIANCE_READ(target, &abstractauto, DM_ABSTRACTAUTO);
	COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0x0);
	if (abstractauto > 0) {
		/* This mechanism only works when you have a reasonable sized progbuf, which is not
		a true compliance requirement. */
		if (info->progbufsize >= 3) {

			testvar = 0;
			COMPLIANCE_TEST(register_write_direct(target, GDB_REGNO_S0, 0) == ERROR_OK,
					"Need to be able to write S0 to test ABSTRACTAUTO");
			/* Program that increments S0 each time autoexec re-runs it;
			   testvar mirrors the expected count on the host side. */
			struct riscv_program program;
			COMPLIANCE_MUST_PASS(riscv_program_init(&program, target));
			/* This is also testing that WFI() is a NOP during debug mode. */
			COMPLIANCE_MUST_PASS(riscv_program_insert(&program, wfi()));
			COMPLIANCE_MUST_PASS(riscv_program_addi(&program, GDB_REGNO_S0, GDB_REGNO_S0, 1));
			COMPLIANCE_MUST_PASS(riscv_program_ebreak(&program));
			COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0x0);
			COMPLIANCE_MUST_PASS(riscv_program_exec(&program, target));
			testvar++;
			COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0xFFFFFFFF);
			COMPLIANCE_READ(target, &abstractauto, DM_ABSTRACTAUTO);
			uint32_t autoexec_data = get_field(abstractauto, DM_ABSTRACTAUTO_AUTOEXECDATA);
			uint32_t autoexec_progbuf = get_field(abstractauto, DM_ABSTRACTAUTO_AUTOEXECPROGBUF);
			for (unsigned int i = 0; i < 12; i++) {
				/* Each DATA word read triggers autoexec if its bit stuck;
				   wait for the re-run to finish before the next access. */
				COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
				do {
					COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
					busy = get_field(testvar_read, DM_ABSTRACTCS_BUSY);
				} while (busy);
				if (autoexec_data & (1 << i)) {
					COMPLIANCE_TEST(i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT),
							"AUTOEXEC may be writable up to DATACOUNT bits.");
					testvar++;
				}
			}
			for (unsigned int i = 0; i < 16; i++) {
				COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
				do {
					COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
					busy = get_field(testvar_read, DM_ABSTRACTCS_BUSY);
				} while (busy);
				if (autoexec_progbuf & (1 << i)) {
					COMPLIANCE_TEST(i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE),
							"AUTOEXEC may be writable up to PROGBUFSIZE bits.");
					testvar++;
				}
			}

			COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0);
			COMPLIANCE_TEST(register_read_direct(target, &value, GDB_REGNO_S0) == ERROR_OK,
					"Need to be able to read S0 to test ABSTRACTAUTO");

			COMPLIANCE_TEST(testvar == value,
					"ABSTRACTAUTO should cause COMMAND to run the expected number of times.");
		}
	}

	/* Single-Step each hart. */
	for (int hartsel = 0; hartsel < riscv_count_harts(target); hartsel++) {
		COMPLIANCE_MUST_PASS(riscv_set_current_hartid(target, hartsel));
		COMPLIANCE_MUST_PASS(riscv013_on_step(target));
		COMPLIANCE_MUST_PASS(riscv013_step_current_hart(target));
		COMPLIANCE_TEST(riscv_halt_reason(target, hartsel) == RISCV_HALT_SINGLESTEP,
				"Single Step should result in SINGLESTEP");
	}

	/* Core Register Tests */
	uint64_t bogus_dpc = 0xdeadbeef;
	for (int hartsel = 0; hartsel < riscv_count_harts(target); hartsel++) {
		COMPLIANCE_MUST_PASS(riscv_set_current_hartid(target, hartsel));

		/* DCSR Tests */
		COMPLIANCE_MUST_PASS(register_write_direct(target, GDB_REGNO_DCSR, 0x0));
		COMPLIANCE_MUST_PASS(register_read_direct(target, &value, GDB_REGNO_DCSR));
		COMPLIANCE_TEST(value != 0, "Not all bits in DCSR are writable by Debugger");
		COMPLIANCE_MUST_PASS(register_write_direct(target, GDB_REGNO_DCSR, 0xFFFFFFFF));
		COMPLIANCE_MUST_PASS(register_read_direct(target, &value, GDB_REGNO_DCSR));
		COMPLIANCE_TEST(value != 0, "At least some bits in DCSR must be 1");

		/* DPC. Note that DPC is sign-extended. */
		riscv_reg_t dpcmask = 0xFFFFFFFCUL;
		riscv_reg_t dpc;

		if (riscv_xlen(target) > 32)
			dpcmask |= (0xFFFFFFFFULL << 32);

		/* With the C extension, 2-byte alignment is legal, so bit 1 is writable. */
		if (riscv_supports_extension(target, riscv_current_hartid(target), 'C'))
			dpcmask |= 0x2;

		COMPLIANCE_MUST_PASS(register_write_direct(target, GDB_REGNO_DPC, dpcmask));
		COMPLIANCE_MUST_PASS(register_read_direct(target, &dpc, GDB_REGNO_DPC));
		COMPLIANCE_TEST(dpcmask == dpc,
				"DPC must be sign-extended to XLEN and writable to all-1s (except the least significant bits)");
		COMPLIANCE_MUST_PASS(register_write_direct(target, GDB_REGNO_DPC, 0));
		COMPLIANCE_MUST_PASS(register_read_direct(target, &dpc, GDB_REGNO_DPC));
		COMPLIANCE_TEST(dpc == 0, "DPC must be writable to 0.");
		if (hartsel == 0)
			bogus_dpc = dpc; /* For a later test step */
	}

	/* NDMRESET
	Asserting non-debug module reset should not reset Debug Module state.
	But it should reset Hart State, e.g. DPC should get a different value.
	Also make sure that DCSR reports cause of 'HALT' even though previously we single-stepped.
	*/

	/* Write some registers. They should not be impacted by ndmreset. */
	COMPLIANCE_WRITE(target, DM_COMMAND, 0xFFFFFFFF);

	for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
		testvar = (i + 1) * 0x11111111;
		COMPLIANCE_WRITE(target, DM_PROGBUF0 + i, testvar);
	}

	for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
		testvar = (i + 1) * 0x11111111;
		COMPLIANCE_WRITE(target, DM_DATA0 + i, testvar);
	}

	COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0xFFFFFFFF);
	COMPLIANCE_READ(target, &abstractauto, DM_ABSTRACTAUTO);

	/* Pulse reset. */
	target->reset_halt = true;
	COMPLIANCE_MUST_PASS(riscv_set_current_hartid(target, 0));
	COMPLIANCE_TEST(assert_reset(target) == ERROR_OK, "Must be able to assert NDMRESET");
	COMPLIANCE_TEST(deassert_reset(target) == ERROR_OK, "Must be able to deassert NDMRESET");

	/* Verify that most stuff is not affected by ndmreset. */
	COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
	/* cmderr is still NOT_SUPPORTED from the illegal COMMAND write above. */
	COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == CMDERR_NOT_SUPPORTED,
			"NDMRESET should not affect DM_ABSTRACTCS");
	COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTAUTO);
	COMPLIANCE_TEST(testvar_read == abstractauto, "NDMRESET should not affect DM_ABSTRACTAUTO");

	/* Clean up to avoid future test failures */
	COMPLIANCE_WRITE(target, DM_ABSTRACTCS, DM_ABSTRACTCS_CMDERR);
	COMPLIANCE_WRITE(target, DM_ABSTRACTAUTO, 0);

	for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
		testvar = (i + 1) * 0x11111111;
		COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
		COMPLIANCE_TEST(testvar_read == testvar, "PROGBUF words must not be affected by NDMRESET");
	}

	for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
		testvar = (i + 1) * 0x11111111;
		COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
		COMPLIANCE_TEST(testvar_read == testvar, "DATA words must not be affected by NDMRESET");
	}

	/* Verify that DPC *is* affected by ndmreset. Since we don't know what it *should* be,
	just verify that at least it's not the bogus value anymore. */

	COMPLIANCE_TEST(bogus_dpc != 0xdeadbeef, "BOGUS DPC should have been set somehow (bug in compliance test)");
	COMPLIANCE_MUST_PASS(register_read_direct(target, &value, GDB_REGNO_DPC));
	COMPLIANCE_TEST(bogus_dpc != value, "NDMRESET should move DPC to reset value.");

	COMPLIANCE_TEST(riscv_halt_reason(target, 0) == RISCV_HALT_INTERRUPT,
			"After NDMRESET halt, DCSR should report cause of halt");

	/* DMACTIVE -- deasserting DMACTIVE should reset all the above values. */

	/* Toggle dmactive */
	COMPLIANCE_WRITE(target, DM_DMCONTROL, 0);
	COMPLIANCE_WRITE(target, DM_DMCONTROL, DM_DMCONTROL_DMACTIVE);
	COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTCS);
	COMPLIANCE_TEST(get_field(testvar_read, DM_ABSTRACTCS_CMDERR) == 0, "ABSTRACTCS.cmderr should reset to 0");
	COMPLIANCE_READ(target, &testvar_read, DM_ABSTRACTAUTO);
	COMPLIANCE_TEST(testvar_read == 0, "ABSTRACTAUTO should reset to 0");

	for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_PROGBUFSIZE); i++) {
		COMPLIANCE_READ(target, &testvar_read, DM_PROGBUF0 + i);
		COMPLIANCE_TEST(testvar_read == 0, "PROGBUF words should reset to 0");
	}

	for (unsigned int i = 0; i < get_field(abstractcs, DM_ABSTRACTCS_DATACOUNT); i++) {
		COMPLIANCE_READ(target, &testvar_read, DM_DATA0 + i);
		COMPLIANCE_TEST(testvar_read == 0, "DATA words should reset to 0");
	}

	/*
	* TODO:
	* DCSR.cause priorities
	* DCSR.stoptime/stopcycle
	* DCSR.stepie
	* DCSR.ebreak
	* DCSR.prv
	*/

	/* Halt every hart for any follow-up tests*/
	COMPLIANCE_MUST_PASS(riscv_halt(target));

	/* NOTE(review): failed_tests is uint32_t but printed with %d below;
	   harmless for realistic counts, PRIu32 would be exact. */
	uint32_t failed_tests = total_tests - passed_tests;
	if (total_tests == passed_tests) {
		LOG_INFO("ALL TESTS PASSED\n");
		return ERROR_OK;
	} else {
		LOG_INFO("%d TESTS FAILED\n", failed_tests);
		return ERROR_FAIL;
	}
}