You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 
 
 

6446 lines
168 KiB

  1. /***************************************************************************
  2. * Copyright (C) 2005 by Dominic Rath *
  3. * Dominic.Rath@gmx.de *
  4. * *
  5. * Copyright (C) 2007-2010 Øyvind Harboe *
  6. * oyvind.harboe@zylin.com *
  7. * *
  8. * Copyright (C) 2008, Duane Ellis *
  9. * openocd@duaneeellis.com *
  10. * *
  11. * Copyright (C) 2008 by Spencer Oliver *
  12. * spen@spen-soft.co.uk *
  13. * *
  14. * Copyright (C) 2008 by Rick Altherr *
15. * kc8apf@kc8apf.net *
  16. * *
  17. * Copyright (C) 2011 by Broadcom Corporation *
  18. * Evan Hunter - ehunter@broadcom.com *
  19. * *
  20. * Copyright (C) ST-Ericsson SA 2011 *
  21. * michel.jaouen@stericsson.com : smp minimum support *
  22. * *
  23. * Copyright (C) 2011 Andreas Fritiofson *
  24. * andreas.fritiofson@gmail.com *
  25. * *
  26. * This program is free software; you can redistribute it and/or modify *
  27. * it under the terms of the GNU General Public License as published by *
  28. * the Free Software Foundation; either version 2 of the License, or *
  29. * (at your option) any later version. *
  30. * *
  31. * This program is distributed in the hope that it will be useful, *
  32. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  33. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  34. * GNU General Public License for more details. *
  35. * *
  36. * You should have received a copy of the GNU General Public License *
  37. * along with this program. If not, see <http://www.gnu.org/licenses/>. *
  38. ***************************************************************************/
  39. #ifdef HAVE_CONFIG_H
  40. #include "config.h"
  41. #endif
  42. #include <helper/time_support.h>
  43. #include <jtag/jtag.h>
  44. #include <flash/nor/core.h>
  45. #include "target.h"
  46. #include "target_type.h"
  47. #include "target_request.h"
  48. #include "breakpoints.h"
  49. #include "register.h"
  50. #include "trace.h"
  51. #include "image.h"
  52. #include "rtos/rtos.h"
  53. #include "transport/transport.h"
  54. /* default halt wait timeout (ms) */
  55. #define DEFAULT_HALT_TIMEOUT 5000
  56. static int target_read_buffer_default(struct target *target, target_addr_t address,
  57. uint32_t count, uint8_t *buffer);
  58. static int target_write_buffer_default(struct target *target, target_addr_t address,
  59. uint32_t count, const uint8_t *buffer);
  60. static int target_array2mem(Jim_Interp *interp, struct target *target,
  61. int argc, Jim_Obj * const *argv);
  62. static int target_mem2array(Jim_Interp *interp, struct target *target,
  63. int argc, Jim_Obj * const *argv);
  64. static int target_register_user_commands(struct command_context *cmd_ctx);
  65. static int target_get_gdb_fileio_info_default(struct target *target,
  66. struct gdb_fileio_info *fileio_info);
  67. static int target_gdb_fileio_end_default(struct target *target, int retcode,
  68. int fileio_errno, bool ctrl_c);
  69. static int target_profiling_default(struct target *target, uint32_t *samples,
  70. uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds);
  71. /* targets */
  72. extern struct target_type arm7tdmi_target;
  73. extern struct target_type arm720t_target;
  74. extern struct target_type arm9tdmi_target;
  75. extern struct target_type arm920t_target;
  76. extern struct target_type arm966e_target;
  77. extern struct target_type arm946e_target;
  78. extern struct target_type arm926ejs_target;
  79. extern struct target_type fa526_target;
  80. extern struct target_type feroceon_target;
  81. extern struct target_type dragonite_target;
  82. extern struct target_type xscale_target;
  83. extern struct target_type cortexm_target;
  84. extern struct target_type cortexa_target;
  85. extern struct target_type aarch64_target;
  86. extern struct target_type cortexr4_target;
  87. extern struct target_type arm11_target;
  88. extern struct target_type ls1_sap_target;
  89. extern struct target_type mips_m4k_target;
  90. extern struct target_type avr_target;
  91. extern struct target_type dsp563xx_target;
  92. extern struct target_type dsp5680xx_target;
  93. extern struct target_type testee_target;
  94. extern struct target_type avr32_ap7k_target;
  95. extern struct target_type hla_target;
  96. extern struct target_type nds32_v2_target;
  97. extern struct target_type nds32_v3_target;
  98. extern struct target_type nds32_v3m_target;
  99. extern struct target_type or1k_target;
  100. extern struct target_type quark_x10xx_target;
  101. extern struct target_type quark_d20xx_target;
/* NULL-terminated registry of every target driver compiled into this build;
 * 64-bit targets are included only when BUILD_TARGET64 is set. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
#if BUILD_TARGET64
	&aarch64_target,
#endif
	NULL,
};
/* Head of the singly linked list (->next) of all configured targets. */
struct target *all_targets;
/* Singly linked chains of registered event and timer callbacks. */
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
/* Intrusive lists of registered reset and trace callbacks. */
LIST_HEAD(target_reset_callback_list);
LIST_HEAD(target_trace_callback_list);
/* Polling period, in milliseconds. */
static const int polling_interval = 100;
/* Accepted spellings for a reset-line state argument ("assert"/"deassert"
 * plus boolean shorthands); the NULL-name entry terminates the table. */
static const Jim_Nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};
/* ERROR_TARGET_* code -> short printable name, consumed by
 * target_strerror_safe(); terminated by the NULL-name entry. */
static const Jim_Nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
  166. static const char *target_strerror_safe(int err)
  167. {
  168. const Jim_Nvp *n;
  169. n = Jim_Nvp_value2name_simple(nvp_error_target, err);
  170. if (n->name == NULL)
  171. return "unknown";
  172. else
  173. return n->name;
  174. }
/* target_event enum -> Tcl event-handler name (the strings users give to
 * "$target configure -event <name> {...}"); NULL-name entry terminates. */
static const Jim_Nvp nvp_target_event[] = {
	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_HALT_PRE, .name = "reset-halt-pre" },
	{ .value = TARGET_EVENT_RESET_HALT_POST, .name = "reset-halt-post" },
	{ .value = TARGET_EVENT_RESET_WAIT_PRE, .name = "reset-wait-pre" },
	{ .value = TARGET_EVENT_RESET_WAIT_POST, .name = "reset-wait-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },
	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
	{ .name = NULL, .value = -1 }
};
/* target_state enum -> printable name, used by target_state_name(). */
static const Jim_Nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};
/* debug_reason enum -> printable name, used by debug_reason_name(). */
static const Jim_Nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request" , .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step" , .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit" , .value = DBG_REASON_EXIT },
	{ .name = "undefined" , .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
/* Accepted endianness spellings; "be"/"le" are shorthands for
 * "big"/"little".  Lookups by value return the first match ("big"/"little"). */
static const Jim_Nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};
/* target_reset_mode enum <-> "reset <mode>" argument names. */
static const Jim_Nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run" , .value = RESET_RUN },
	{ .name = "halt" , .value = RESET_HALT },
	{ .name = "init" , .value = RESET_INIT },
	{ .name = NULL , .value = -1 },
};
  241. const char *debug_reason_name(struct target *t)
  242. {
  243. const char *cp;
  244. cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
  245. t->debug_reason)->name;
  246. if (!cp) {
  247. LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
  248. cp = "(*BUG*unknown*BUG*)";
  249. }
  250. return cp;
  251. }
  252. const char *target_state_name(struct target *t)
  253. {
  254. const char *cp;
  255. cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
  256. if (!cp) {
  257. LOG_ERROR("Invalid target state: %d", (int)(t->state));
  258. cp = "(*BUG*unknown*BUG*)";
  259. }
  260. if (!target_was_examined(t) && t->defer_examine)
  261. cp = "examine deferred";
  262. return cp;
  263. }
  264. const char *target_event_name(enum target_event event)
  265. {
  266. const char *cp;
  267. cp = Jim_Nvp_value2name_simple(nvp_target_event, event)->name;
  268. if (!cp) {
  269. LOG_ERROR("Invalid target event: %d", (int)(event));
  270. cp = "(*BUG*unknown*BUG*)";
  271. }
  272. return cp;
  273. }
  274. const char *target_reset_mode_name(enum target_reset_mode reset_mode)
  275. {
  276. const char *cp;
  277. cp = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
  278. if (!cp) {
  279. LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
  280. cp = "(*BUG*unknown*BUG*)";
  281. }
  282. return cp;
  283. }
  284. /* determine the number of the new target */
  285. static int new_target_number(void)
  286. {
  287. struct target *t;
  288. int x;
  289. /* number is 0 based */
  290. x = -1;
  291. t = all_targets;
  292. while (t) {
  293. if (x < t->target_number)
  294. x = t->target_number;
  295. t = t->next;
  296. }
  297. return x + 1;
  298. }
  299. /* read a uint64_t from a buffer in target memory endianness */
  300. uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
  301. {
  302. if (target->endianness == TARGET_LITTLE_ENDIAN)
  303. return le_to_h_u64(buffer);
  304. else
  305. return be_to_h_u64(buffer);
  306. }
  307. /* read a uint32_t from a buffer in target memory endianness */
  308. uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
  309. {
  310. if (target->endianness == TARGET_LITTLE_ENDIAN)
  311. return le_to_h_u32(buffer);
  312. else
  313. return be_to_h_u32(buffer);
  314. }
  315. /* read a uint24_t from a buffer in target memory endianness */
  316. uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
  317. {
  318. if (target->endianness == TARGET_LITTLE_ENDIAN)
  319. return le_to_h_u24(buffer);
  320. else
  321. return be_to_h_u24(buffer);
  322. }
  323. /* read a uint16_t from a buffer in target memory endianness */
  324. uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
  325. {
  326. if (target->endianness == TARGET_LITTLE_ENDIAN)
  327. return le_to_h_u16(buffer);
  328. else
  329. return be_to_h_u16(buffer);
  330. }
  331. /* read a uint8_t from a buffer in target memory endianness */
  332. static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
  333. {
  334. return *buffer & 0x0ff;
  335. }
  336. /* write a uint64_t to a buffer in target memory endianness */
  337. void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
  338. {
  339. if (target->endianness == TARGET_LITTLE_ENDIAN)
  340. h_u64_to_le(buffer, value);
  341. else
  342. h_u64_to_be(buffer, value);
  343. }
  344. /* write a uint32_t to a buffer in target memory endianness */
  345. void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
  346. {
  347. if (target->endianness == TARGET_LITTLE_ENDIAN)
  348. h_u32_to_le(buffer, value);
  349. else
  350. h_u32_to_be(buffer, value);
  351. }
  352. /* write a uint24_t to a buffer in target memory endianness */
  353. void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
  354. {
  355. if (target->endianness == TARGET_LITTLE_ENDIAN)
  356. h_u24_to_le(buffer, value);
  357. else
  358. h_u24_to_be(buffer, value);
  359. }
  360. /* write a uint16_t to a buffer in target memory endianness */
  361. void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
  362. {
  363. if (target->endianness == TARGET_LITTLE_ENDIAN)
  364. h_u16_to_le(buffer, value);
  365. else
  366. h_u16_to_be(buffer, value);
  367. }
  368. /* write a uint8_t to a buffer in target memory endianness */
  369. static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
  370. {
  371. *buffer = value;
  372. }
  373. /* write a uint64_t array to a buffer in target memory endianness */
  374. void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
  375. {
  376. uint32_t i;
  377. for (i = 0; i < count; i++)
  378. dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
  379. }
  380. /* write a uint32_t array to a buffer in target memory endianness */
  381. void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
  382. {
  383. uint32_t i;
  384. for (i = 0; i < count; i++)
  385. dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
  386. }
  387. /* write a uint16_t array to a buffer in target memory endianness */
  388. void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
  389. {
  390. uint32_t i;
  391. for (i = 0; i < count; i++)
  392. dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
  393. }
  394. /* write a uint64_t array to a buffer in target memory endianness */
  395. void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
  396. {
  397. uint32_t i;
  398. for (i = 0; i < count; i++)
  399. target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
  400. }
  401. /* write a uint32_t array to a buffer in target memory endianness */
  402. void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
  403. {
  404. uint32_t i;
  405. for (i = 0; i < count; i++)
  406. target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
  407. }
  408. /* write a uint16_t array to a buffer in target memory endianness */
  409. void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
  410. {
  411. uint32_t i;
  412. for (i = 0; i < count; i++)
  413. target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
  414. }
  415. /* return a pointer to a configured target; id is name or number */
  416. struct target *get_target(const char *id)
  417. {
  418. struct target *target;
  419. /* try as tcltarget name */
  420. for (target = all_targets; target; target = target->next) {
  421. if (target_name(target) == NULL)
  422. continue;
  423. if (strcmp(id, target_name(target)) == 0)
  424. return target;
  425. }
  426. /* It's OK to remove this fallback sometime after August 2010 or so */
  427. /* no match, try as number */
  428. unsigned num;
  429. if (parse_uint(id, &num) != ERROR_OK)
  430. return NULL;
  431. for (target = all_targets; target; target = target->next) {
  432. if (target->target_number == (int)num) {
  433. LOG_WARNING("use '%s' as target identifier, not '%u'",
  434. target_name(target), num);
  435. return target;
  436. }
  437. }
  438. return NULL;
  439. }
  440. /* returns a pointer to the n-th configured target */
  441. struct target *get_target_by_num(int num)
  442. {
  443. struct target *target = all_targets;
  444. while (target) {
  445. if (target->target_number == num)
  446. return target;
  447. target = target->next;
  448. }
  449. return NULL;
  450. }
  451. struct target *get_current_target(struct command_context *cmd_ctx)
  452. {
  453. struct target *target = get_target_by_num(cmd_ctx->current_target);
  454. if (target == NULL) {
  455. LOG_ERROR("BUG: current_target out of bounds");
  456. exit(-1);
  457. }
  458. return target;
  459. }
/* Poll the target's state via its driver, and additionally watch for a
 * pending halt request (set by target_halt()) timing out.  If the target
 * has not reached TARGET_HALTED within DEFAULT_HALT_TIMEOUT ms of the halt
 * being issued, the request is dropped and GDB is notified via
 * TARGET_EVENT_GDB_HALT.  Returns ERROR_OK, or the driver's error. */
int target_poll(struct target *target)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		/* Fail silently lest we pollute the log */
		return ERROR_FAIL;
	}

	retval = target->type->poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->halt_issued) {
		if (target->state == TARGET_HALTED)
			target->halt_issued = false;
		else {
			/* elapsed ms since target_halt() recorded the request */
			int64_t t = timeval_ms() - target->halt_issued_time;
			if (t > DEFAULT_HALT_TIMEOUT) {
				target->halt_issued = false;
				LOG_INFO("Halt timed out, wake up GDB.");
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
		}
	}

	return ERROR_OK;
}
  485. int target_halt(struct target *target)
  486. {
  487. int retval;
  488. /* We can't poll until after examine */
  489. if (!target_was_examined(target)) {
  490. LOG_ERROR("Target not examined yet");
  491. return ERROR_FAIL;
  492. }
  493. retval = target->type->halt(target);
  494. if (retval != ERROR_OK)
  495. return retval;
  496. target->halt_issued = true;
  497. target->halt_issued_time = timeval_ms();
  498. return ERROR_OK;
  499. }
/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 * of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 * should be skipped. (For example, maybe execution was stopped by
 * such a breakpoint, in which case it would be counterproductive to
 * let it re-trigger.)
 * @param debug_execution False if all working areas allocated by OpenOCD
 * should be released and/or restored to their original contents.
 * (This would for example be true to run some downloaded "helper"
 * algorithm code, which resides in one such working buffer and uses
 * another for data storage.)
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies. For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled. (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm.... and
 * enabling them could cause such helpers to malfunction (for example,
 * by overwriting data with a breakpoint instruction. On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't resume until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
	return retval;
}
  550. static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
  551. {
  552. char buf[100];
  553. int retval;
  554. Jim_Nvp *n;
  555. n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
  556. if (n->name == NULL) {
  557. LOG_ERROR("invalid reset mode");
  558. return ERROR_FAIL;
  559. }
  560. struct target *target;
  561. for (target = all_targets; target; target = target->next)
  562. target_call_reset_callbacks(target, reset_mode);
  563. /* disable polling during reset to make reset event scripts
  564. * more predictable, i.e. dr/irscan & pathmove in events will
  565. * not have JTAG operations injected into the middle of a sequence.
  566. */
  567. bool save_poll = jtag_poll_get_enabled();
  568. jtag_poll_set_enabled(false);
  569. sprintf(buf, "ocd_process_reset %s", n->name);
  570. retval = Jim_Eval(cmd_ctx->interp->interp, buf);
  571. jtag_poll_set_enabled(save_poll);
  572. if (retval != JIM_OK) {
  573. Jim_MakeErrorMessage(cmd_ctx->interp->interp);
  574. command_print(NULL, "%s\n",
  575. Jim_GetString(Jim_GetResult(cmd_ctx->interp->interp), NULL));
  576. return ERROR_FAIL;
  577. }
  578. /* We want any events to be processed before the prompt */
  579. retval = target_call_timer_callbacks_now();
  580. for (target = all_targets; target; target = target->next) {
  581. target->type->check_reset(target);
  582. target->running_alg = false;
  583. }
  584. return retval;
  585. }
/* Default virt2phys handler for targets without an MMU:
 * the physical address equals the virtual address. */
static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
/* Default mmu handler: reports that no MMU is present/enabled. */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
/* Default examine handler: nothing to probe, just mark the target
 * as examined. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
/* Default check_reset handler: no post-reset sanity check performed. */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
/* Examine a single target via its driver, bracketing the operation with
 * EXAMINE_START/EXAMINE_END events.  EXAMINE_END is only fired when the
 * driver's examine succeeds. */
int target_examine_one(struct target *target)
{
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

	int retval = target->type->examine(target);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);

	return ERROR_OK;
}
  616. static int jtag_enable_callback(enum jtag_event event, void *priv)
  617. {
  618. struct target *target = priv;
  619. if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
  620. return ERROR_OK;
  621. jtag_unregister_event_callback(jtag_enable_callback, target);
  622. return target_examine_one(target);
  623. }
  624. /* Targets that correctly implement init + examine, i.e.
  625. * no communication with target during init:
  626. *
  627. * XScale
  628. */
  629. int target_examine(void)
  630. {
  631. int retval = ERROR_OK;
  632. struct target *target;
  633. for (target = all_targets; target; target = target->next) {
  634. /* defer examination, but don't skip it */
  635. if (!target->tap->enabled) {
  636. jtag_register_event_callback(jtag_enable_callback,
  637. target);
  638. continue;
  639. }
  640. if (target->defer_examine)
  641. continue;
  642. retval = target_examine_one(target);
  643. if (retval != ERROR_OK)
  644. return retval;
  645. }
  646. return retval;
  647. }
/* Return the name of the target's driver (its target_type), not the
 * per-instance target name. */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
/* Perform a soft reset-and-halt via the driver, if supported.  Fails with
 * ERROR_FAIL when the target is unexamined or the driver provides no
 * soft_reset_halt handler. */
static int target_soft_reset_halt(struct target *target)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->soft_reset_halt) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt(target);
}
/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		uint32_t entry_point, uint32_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	/* algorithms can only run on an examined target */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	/* running_alg flags the target as busy for the duration of the run */
	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
/**
 * Downloads a target-specific native code algorithm to the target,
 * executes and leaves it running.  Pair with target_wait_algorithm()
 * to collect the result.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t entry_point, uint32_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	/* algorithms can only run on an examined target */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	/* only one algorithm may be in flight at a time */
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	/* stays set until target_wait_algorithm() completes (or reset) */
	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}
/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 *
 * @param target used to run the algorithm
 * @param arch_info target-specific description of the algorithm.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	/* must be paired with a prior target_start_algorithm() */
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	/* on timeout the algorithm is still running, so keep the flag set
	 * and allow the caller to wait again */
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}
  767. /**
  768. * Executes a target-specific native code algorithm in the target.
  769. * It differs from target_run_algorithm in that the algorithm is asynchronous.
  770. * Because of this it requires an compliant algorithm:
  771. * see contrib/loaders/flash/stm32f1x.S for example.
  772. *
  773. * @param target used to run the algorithm
  774. */
  775. int target_run_flash_async_algorithm(struct target *target,
  776. const uint8_t *buffer, uint32_t count, int block_size,
  777. int num_mem_params, struct mem_param *mem_params,
  778. int num_reg_params, struct reg_param *reg_params,
  779. uint32_t buffer_start, uint32_t buffer_size,
  780. uint32_t entry_point, uint32_t exit_point, void *arch_info)
  781. {
  782. int retval;
  783. int timeout = 0;
  784. const uint8_t *buffer_orig = buffer;
  785. /* Set up working area. First word is write pointer, second word is read pointer,
  786. * rest is fifo data area. */
  787. uint32_t wp_addr = buffer_start;
  788. uint32_t rp_addr = buffer_start + 4;
  789. uint32_t fifo_start_addr = buffer_start + 8;
  790. uint32_t fifo_end_addr = buffer_start + buffer_size;
  791. uint32_t wp = fifo_start_addr;
  792. uint32_t rp = fifo_start_addr;
  793. /* validate block_size is 2^n */
  794. assert(!block_size || !(block_size & (block_size - 1)));
  795. retval = target_write_u32(target, wp_addr, wp);
  796. if (retval != ERROR_OK)
  797. return retval;
  798. retval = target_write_u32(target, rp_addr, rp);
  799. if (retval != ERROR_OK)
  800. return retval;
  801. /* Start up algorithm on target and let it idle while writing the first chunk */
  802. retval = target_start_algorithm(target, num_mem_params, mem_params,
  803. num_reg_params, reg_params,
  804. entry_point,
  805. exit_point,
  806. arch_info);
  807. if (retval != ERROR_OK) {
  808. LOG_ERROR("error starting target flash write algorithm");
  809. return retval;
  810. }
  811. while (count > 0) {
  812. retval = target_read_u32(target, rp_addr, &rp);
  813. if (retval != ERROR_OK) {
  814. LOG_ERROR("failed to get read pointer");
  815. break;
  816. }
  817. LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
  818. (size_t) (buffer - buffer_orig), count, wp, rp);
  819. if (rp == 0) {
  820. LOG_ERROR("flash write algorithm aborted by target");
  821. retval = ERROR_FLASH_OPERATION_FAILED;
  822. break;
  823. }
  824. if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
  825. LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
  826. break;
  827. }
  828. /* Count the number of bytes available in the fifo without
  829. * crossing the wrap around. Make sure to not fill it completely,
  830. * because that would make wp == rp and that's the empty condition. */
  831. uint32_t thisrun_bytes;
  832. if (rp > wp)
  833. thisrun_bytes = rp - wp - block_size;
  834. else if (rp > fifo_start_addr)
  835. thisrun_bytes = fifo_end_addr - wp;
  836. else
  837. thisrun_bytes = fifo_end_addr - wp - block_size;
  838. if (thisrun_bytes == 0) {
  839. /* Throttle polling a bit if transfer is (much) faster than flash
  840. * programming. The exact delay shouldn't matter as long as it's
  841. * less than buffer size / flash speed. This is very unlikely to
  842. * run when using high latency connections such as USB. */
  843. alive_sleep(10);
  844. /* to stop an infinite loop on some targets check and increment a timeout
  845. * this issue was observed on a stellaris using the new ICDI interface */
  846. if (timeout++ >= 500) {
  847. LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
  848. return ERROR_FLASH_OPERATION_FAILED;
  849. }
  850. continue;
  851. }
  852. /* reset our timeout */
  853. timeout = 0;
  854. /* Limit to the amount of data we actually want to write */
  855. if (thisrun_bytes > count * block_size)
  856. thisrun_bytes = count * block_size;
  857. /* Write data to fifo */
  858. retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
  859. if (retval != ERROR_OK)
  860. break;
  861. /* Update counters and wrap write pointer */
  862. buffer += thisrun_bytes;
  863. count -= thisrun_bytes / block_size;
  864. wp += thisrun_bytes;
  865. if (wp >= fifo_end_addr)
  866. wp = fifo_start_addr;
  867. /* Store updated write pointer to target */
  868. retval = target_write_u32(target, wp_addr, wp);
  869. if (retval != ERROR_OK)
  870. break;
  871. }
  872. if (retval != ERROR_OK) {
  873. /* abort flash write algorithm on target */
  874. target_write_u32(target, wp_addr, 0);
  875. }
  876. int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
  877. num_reg_params, reg_params,
  878. exit_point,
  879. 10000,
  880. arch_info);
  881. if (retval2 != ERROR_OK) {
  882. LOG_ERROR("error waiting for target flash write algorithm");
  883. retval = retval2;
  884. }
  885. if (retval == ERROR_OK) {
  886. /* check if algorithm set rp = 0 after fifo writer loop finished */
  887. retval = target_read_u32(target, rp_addr, &rp);
  888. if (retval == ERROR_OK && rp == 0) {
  889. LOG_ERROR("flash write algorithm aborted by target");
  890. retval = ERROR_FLASH_OPERATION_FAILED;
  891. }
  892. }
  893. return retval;
  894. }
  895. int target_read_memory(struct target *target,
  896. target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
  897. {
  898. if (!target_was_examined(target)) {
  899. LOG_ERROR("Target not examined yet");
  900. return ERROR_FAIL;
  901. }
  902. if (!target->type->read_memory) {
  903. LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
  904. return ERROR_FAIL;
  905. }
  906. return target->type->read_memory(target, address, size, count, buffer);
  907. }
  908. int target_read_phys_memory(struct target *target,
  909. target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
  910. {
  911. if (!target_was_examined(target)) {
  912. LOG_ERROR("Target not examined yet");
  913. return ERROR_FAIL;
  914. }
  915. if (!target->type->read_phys_memory) {
  916. LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
  917. return ERROR_FAIL;
  918. }
  919. return target->type->read_phys_memory(target, address, size, count, buffer);
  920. }
  921. int target_write_memory(struct target *target,
  922. target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
  923. {
  924. if (!target_was_examined(target)) {
  925. LOG_ERROR("Target not examined yet");
  926. return ERROR_FAIL;
  927. }
  928. if (!target->type->write_memory) {
  929. LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
  930. return ERROR_FAIL;
  931. }
  932. return target->type->write_memory(target, address, size, count, buffer);
  933. }
  934. int target_write_phys_memory(struct target *target,
  935. target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
  936. {
  937. if (!target_was_examined(target)) {
  938. LOG_ERROR("Target not examined yet");
  939. return ERROR_FAIL;
  940. }
  941. if (!target->type->write_phys_memory) {
  942. LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
  943. return ERROR_FAIL;
  944. }
  945. return target->type->write_phys_memory(target, address, size, count, buffer);
  946. }
  947. int target_add_breakpoint(struct target *target,
  948. struct breakpoint *breakpoint)
  949. {
  950. if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
  951. LOG_WARNING("target %s is not halted", target_name(target));
  952. return ERROR_TARGET_NOT_HALTED;
  953. }
  954. return target->type->add_breakpoint(target, breakpoint);
  955. }
  956. int target_add_context_breakpoint(struct target *target,
  957. struct breakpoint *breakpoint)
  958. {
  959. if (target->state != TARGET_HALTED) {
  960. LOG_WARNING("target %s is not halted", target_name(target));
  961. return ERROR_TARGET_NOT_HALTED;
  962. }
  963. return target->type->add_context_breakpoint(target, breakpoint);
  964. }
  965. int target_add_hybrid_breakpoint(struct target *target,
  966. struct breakpoint *breakpoint)
  967. {
  968. if (target->state != TARGET_HALTED) {
  969. LOG_WARNING("target %s is not halted", target_name(target));
  970. return ERROR_TARGET_NOT_HALTED;
  971. }
  972. return target->type->add_hybrid_breakpoint(target, breakpoint);
  973. }
/* Remove a previously installed breakpoint; delegates straight to the
 * back-end with no halt-state check (removal is allowed while running). */
int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}
  979. int target_add_watchpoint(struct target *target,
  980. struct watchpoint *watchpoint)
  981. {
  982. if (target->state != TARGET_HALTED) {
  983. LOG_WARNING("target %s is not halted", target_name(target));
  984. return ERROR_TARGET_NOT_HALTED;
  985. }
  986. return target->type->add_watchpoint(target, watchpoint);
  987. }
/* Remove a previously installed watchpoint; delegates straight to the
 * back-end with no halt-state check. */
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}
  993. int target_hit_watchpoint(struct target *target,
  994. struct watchpoint **hit_watchpoint)
  995. {
  996. if (target->state != TARGET_HALTED) {
  997. LOG_WARNING("target %s is not halted", target->cmd_name);
  998. return ERROR_TARGET_NOT_HALTED;
  999. }
  1000. if (target->type->hit_watchpoint == NULL) {
  1001. /* For backward compatible, if hit_watchpoint is not implemented,
  1002. * return ERROR_FAIL such that gdb_server will not take the nonsense
  1003. * information. */
  1004. return ERROR_FAIL;
  1005. }
  1006. return target->type->hit_watchpoint(target, hit_watchpoint);
  1007. }
/* Obtain the register list to present to gdb for the given register class;
 * delegates to the target back-end. */
int target_get_gdb_reg_list(struct target *target,
		struct reg **reg_list[], int *reg_list_size,
		enum target_register_class reg_class)
{
	return target->type->get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
}
/* Single-step the target.  When @a current is non-zero, step from the
 * current pc; otherwise resume from @a address.  Delegates to the back-end. */
int target_step(struct target *target,
		int current, target_addr_t address, int handle_breakpoints)
{
	return target->type->step(target, current, address, handle_breakpoints);
}
  1019. int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
  1020. {
  1021. if (target->state != TARGET_HALTED) {
  1022. LOG_WARNING("target %s is not halted", target->cmd_name);
  1023. return ERROR_TARGET_NOT_HALTED;
  1024. }
  1025. return target->type->get_gdb_fileio_info(target, fileio_info);
  1026. }
  1027. int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
  1028. {
  1029. if (target->state != TARGET_HALTED) {
  1030. LOG_WARNING("target %s is not halted", target->cmd_name);
  1031. return ERROR_TARGET_NOT_HALTED;
  1032. }
  1033. return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
  1034. }
  1035. int target_profiling(struct target *target, uint32_t *samples,
  1036. uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
  1037. {
  1038. if (target->state != TARGET_HALTED) {
  1039. LOG_WARNING("target %s is not halted", target->cmd_name);
  1040. return ERROR_TARGET_NOT_HALTED;
  1041. }
  1042. return target->type->profiling(target, samples, max_num_samples,
  1043. num_samples, seconds);
  1044. }
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static void target_reset_examined(struct target *target)
{
	target->examined = false;
}
  1053. static int handle_target(void *priv);
  1054. static int target_init_one(struct command_context *cmd_ctx,
  1055. struct target *target)
  1056. {
  1057. target_reset_examined(target);
  1058. struct target_type *type = target->type;
  1059. if (type->examine == NULL)
  1060. type->examine = default_examine;
  1061. if (type->check_reset == NULL)
  1062. type->check_reset = default_check_reset;
  1063. assert(type->init_target != NULL);
  1064. int retval = type->init_target(cmd_ctx, target);
  1065. if (ERROR_OK != retval) {
  1066. LOG_ERROR("target '%s' init failed", target_name(target));
  1067. return retval;
  1068. }
  1069. /* Sanity-check MMU support ... stub in what we must, to help
  1070. * implement it in stages, but warn if we need to do so.
  1071. */
  1072. if (type->mmu) {
  1073. if (type->virt2phys == NULL) {
  1074. LOG_ERROR("type '%s' is missing virt2phys", type->name);
  1075. type->virt2phys = identity_virt2phys;
  1076. }
  1077. } else {
  1078. /* Make sure no-MMU targets all behave the same: make no
  1079. * distinction between physical and virtual addresses, and
  1080. * ensure that virt2phys() is always an identity mapping.
  1081. */
  1082. if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
  1083. LOG_WARNING("type '%s' has bad MMU hooks", type->name);
  1084. type->mmu = no_mmu;
  1085. type->write_phys_memory = type->write_memory;
  1086. type->read_phys_memory = type->read_memory;
  1087. type->virt2phys = identity_virt2phys;
  1088. }
  1089. if (target->type->read_buffer == NULL)
  1090. target->type->read_buffer = target_read_buffer_default;
  1091. if (target->type->write_buffer == NULL)
  1092. target->type->write_buffer = target_write_buffer_default;
  1093. if (target->type->get_gdb_fileio_info == NULL)
  1094. target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
  1095. if (target->type->gdb_fileio_end == NULL)
  1096. target->type->gdb_fileio_end = target_gdb_fileio_end_default;
  1097. if (target->type->profiling == NULL)
  1098. target->type->profiling = target_profiling_default;
  1099. return ERROR_OK;
  1100. }
  1101. static int target_init(struct command_context *cmd_ctx)
  1102. {
  1103. struct target *target;
  1104. int retval;
  1105. for (target = all_targets; target; target = target->next) {
  1106. retval = target_init_one(cmd_ctx, target);
  1107. if (ERROR_OK != retval)
  1108. return retval;
  1109. }
  1110. if (!all_targets)
  1111. return ERROR_OK;
  1112. retval = target_register_user_commands(cmd_ctx);
  1113. if (ERROR_OK != retval)
  1114. return retval;
  1115. retval = target_register_timer_callback(&handle_target,
  1116. polling_interval, 1, cmd_ctx->interp->interp);
  1117. if (ERROR_OK != retval)
  1118. return retval;
  1119. return ERROR_OK;
  1120. }
  1121. COMMAND_HANDLER(handle_target_init_command)
  1122. {
  1123. int retval;
  1124. if (CMD_ARGC != 0)
  1125. return ERROR_COMMAND_SYNTAX_ERROR;
  1126. static bool target_initialized;
  1127. if (target_initialized) {
  1128. LOG_INFO("'target init' has already been called");
  1129. return ERROR_OK;
  1130. }
  1131. target_initialized = true;
  1132. retval = command_run_line(CMD_CTX, "init_targets");
  1133. if (ERROR_OK != retval)
  1134. return retval;
  1135. retval = command_run_line(CMD_CTX, "init_target_events");
  1136. if (ERROR_OK != retval)
  1137. return retval;
  1138. retval = command_run_line(CMD_CTX, "init_board");
  1139. if (ERROR_OK != retval)
  1140. return retval;
  1141. LOG_DEBUG("Initializing targets...");
  1142. return target_init(CMD_CTX);
  1143. }
  1144. int target_register_event_callback(int (*callback)(struct target *target,
  1145. enum target_event event, void *priv), void *priv)
  1146. {
  1147. struct target_event_callback **callbacks_p = &target_event_callbacks;
  1148. if (callback == NULL)
  1149. return ERROR_COMMAND_SYNTAX_ERROR;
  1150. if (*callbacks_p) {
  1151. while ((*callbacks_p)->next)
  1152. callbacks_p = &((*callbacks_p)->next);
  1153. callbacks_p = &((*callbacks_p)->next);
  1154. }
  1155. (*callbacks_p) = malloc(sizeof(struct target_event_callback));
  1156. (*callbacks_p)->callback = callback;
  1157. (*callbacks_p)->priv = priv;
  1158. (*callbacks_p)->next = NULL;
  1159. return ERROR_OK;
  1160. }
  1161. int target_register_reset_callback(int (*callback)(struct target *target,
  1162. enum target_reset_mode reset_mode, void *priv), void *priv)
  1163. {
  1164. struct target_reset_callback *entry;
  1165. if (callback == NULL)
  1166. return ERROR_COMMAND_SYNTAX_ERROR;
  1167. entry = malloc(sizeof(struct target_reset_callback));
  1168. if (entry == NULL) {
  1169. LOG_ERROR("error allocating buffer for reset callback entry");
  1170. return ERROR_COMMAND_SYNTAX_ERROR;
  1171. }
  1172. entry->callback = callback;
  1173. entry->priv = priv;
  1174. list_add(&entry->list, &target_reset_callback_list);
  1175. return ERROR_OK;
  1176. }
  1177. int target_register_trace_callback(int (*callback)(struct target *target,
  1178. size_t len, uint8_t *data, void *priv), void *priv)
  1179. {
  1180. struct target_trace_callback *entry;
  1181. if (callback == NULL)
  1182. return ERROR_COMMAND_SYNTAX_ERROR;
  1183. entry = malloc(sizeof(struct target_trace_callback));
  1184. if (entry == NULL) {
  1185. LOG_ERROR("error allocating buffer for trace callback entry");
  1186. return ERROR_COMMAND_SYNTAX_ERROR;
  1187. }
  1188. entry->callback = callback;
  1189. entry->priv = priv;
  1190. list_add(&entry->list, &target_trace_callback_list);
  1191. return ERROR_OK;
  1192. }
  1193. int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
  1194. {
  1195. struct target_timer_callback **callbacks_p = &target_timer_callbacks;
  1196. struct timeval now;
  1197. if (callback == NULL)
  1198. return ERROR_COMMAND_SYNTAX_ERROR;
  1199. if (*callbacks_p) {
  1200. while ((*callbacks_p)->next)
  1201. callbacks_p = &((*callbacks_p)->next);
  1202. callbacks_p = &((*callbacks_p)->next);
  1203. }
  1204. (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
  1205. (*callbacks_p)->callback = callback;
  1206. (*callbacks_p)->periodic = periodic;
  1207. (*callbacks_p)->time_ms = time_ms;
  1208. (*callbacks_p)->removed = false;
  1209. gettimeofday(&now, NULL);
  1210. (*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000;
  1211. time_ms -= (time_ms % 1000);
  1212. (*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000);
  1213. if ((*callbacks_p)->when.tv_usec > 1000000) {
  1214. (*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000;
  1215. (*callbacks_p)->when.tv_sec += 1;
  1216. }
  1217. (*callbacks_p)->priv = priv;
  1218. (*callbacks_p)->next = NULL;
  1219. return ERROR_OK;
  1220. }
  1221. int target_unregister_event_callback(int (*callback)(struct target *target,
  1222. enum target_event event, void *priv), void *priv)
  1223. {
  1224. struct target_event_callback **p = &target_event_callbacks;
  1225. struct target_event_callback *c = target_event_callbacks;
  1226. if (callback == NULL)
  1227. return ERROR_COMMAND_SYNTAX_ERROR;
  1228. while (c) {
  1229. struct target_event_callback *next = c->next;
  1230. if ((c->callback == callback) && (c->priv == priv)) {
  1231. *p = next;
  1232. free(c);
  1233. return ERROR_OK;
  1234. } else
  1235. p = &(c->next);
  1236. c = next;
  1237. }
  1238. return ERROR_OK;
  1239. }
  1240. int target_unregister_reset_callback(int (*callback)(struct target *target,
  1241. enum target_reset_mode reset_mode, void *priv), void *priv)
  1242. {
  1243. struct target_reset_callback *entry;
  1244. if (callback == NULL)
  1245. return ERROR_COMMAND_SYNTAX_ERROR;
  1246. list_for_each_entry(entry, &target_reset_callback_list, list) {
  1247. if (entry->callback == callback && entry->priv == priv) {
  1248. list_del(&entry->list);
  1249. free(entry);
  1250. break;
  1251. }
  1252. }
  1253. return ERROR_OK;
  1254. }
  1255. int target_unregister_trace_callback(int (*callback)(struct target *target,
  1256. size_t len, uint8_t *data, void *priv), void *priv)
  1257. {
  1258. struct target_trace_callback *entry;
  1259. if (callback == NULL)
  1260. return ERROR_COMMAND_SYNTAX_ERROR;
  1261. list_for_each_entry(entry, &target_trace_callback_list, list) {
  1262. if (entry->callback == callback && entry->priv == priv) {
  1263. list_del(&entry->list);
  1264. free(entry);
  1265. break;
  1266. }
  1267. }
  1268. return ERROR_OK;
  1269. }
  1270. int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
  1271. {
  1272. if (callback == NULL)
  1273. return ERROR_COMMAND_SYNTAX_ERROR;
  1274. for (struct target_timer_callback *c = target_timer_callbacks;
  1275. c; c = c->next) {
  1276. if ((c->callback == callback) && (c->priv == priv)) {
  1277. c->removed = true;
  1278. return ERROR_OK;
  1279. }
  1280. }
  1281. return ERROR_FAIL;
  1282. }
  1283. int target_call_event_callbacks(struct target *target, enum target_event event)
  1284. {
  1285. struct target_event_callback *callback = target_event_callbacks;
  1286. struct target_event_callback *next_callback;
  1287. if (event == TARGET_EVENT_HALTED) {
  1288. /* execute early halted first */
  1289. target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
  1290. }
  1291. LOG_DEBUG("target event %i (%s)", event,
  1292. Jim_Nvp_value2name_simple(nvp_target_event, event)->name);
  1293. target_handle_event(target, event);
  1294. while (callback) {
  1295. next_callback = callback->next;
  1296. callback->callback(target, event, callback->priv);
  1297. callback = next_callback;
  1298. }
  1299. return ERROR_OK;
  1300. }
  1301. int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
  1302. {
  1303. struct target_reset_callback *callback;
  1304. LOG_DEBUG("target reset %i (%s)", reset_mode,
  1305. Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
  1306. list_for_each_entry(callback, &target_reset_callback_list, list)
  1307. callback->callback(target, reset_mode, callback->priv);
  1308. return ERROR_OK;
  1309. }
  1310. int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
  1311. {
  1312. struct target_trace_callback *callback;
  1313. list_for_each_entry(callback, &target_trace_callback_list, list)
  1314. callback->callback(target, len, data, callback->priv);
  1315. return ERROR_OK;
  1316. }
  1317. static int target_timer_callback_periodic_restart(
  1318. struct target_timer_callback *cb, struct timeval *now)
  1319. {
  1320. int time_ms = cb->time_ms;
  1321. cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000;
  1322. time_ms -= (time_ms % 1000);
  1323. cb->when.tv_sec = now->tv_sec + time_ms / 1000;
  1324. if (cb->when.tv_usec > 1000000) {
  1325. cb->when.tv_usec = cb->when.tv_usec - 1000000;
  1326. cb->when.tv_sec += 1;
  1327. }
  1328. return ERROR_OK;
  1329. }
/* Invoke one timer callback, then either re-arm it (periodic) or flag it
 * for removal (one-shot) via target_unregister_timer_callback(). */
static int target_call_timer_callback(struct target_timer_callback *cb,
		struct timeval *now)
{
	cb->callback(cb->priv);

	if (cb->periodic)
		return target_timer_callback_periodic_restart(cb, now);

	return target_unregister_timer_callback(cb->callback, cb->priv);
}
/* Sweep the timer-callback list once: purge entries lazily flagged as
 * removed, and invoke every callback that is due.  When @a checktime is
 * zero, all periodic callbacks fire regardless of their deadline.
 * Re-entrant invocations (e.g. from within a callback) are rejected. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	struct timeval now;
	gettimeofday(&now, NULL);

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (*callback) {
		if ((*callback)->removed) {
			/* unlink and free entries flagged by
			 * target_unregister_timer_callback() */
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		/* due when past its deadline, or unconditionally for periodic
		 * callbacks when checktime is zero */
		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->periodic) ||
			 now.tv_sec > (*callback)->when.tv_sec ||
			 (now.tv_sec == (*callback)->when.tv_sec &&
			  now.tv_usec >= (*callback)->when.tv_usec));

		if (call_it)
			target_call_timer_callback(*callback, &now);

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
/* Invoke all timer callbacks whose deadline has passed. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
/* invoke periodic callbacks immediately */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
  1380. /* Prints the working area layout for debug purposes */
  1381. static void print_wa_layout(struct target *target)
  1382. {
  1383. struct working_area *c = target->working_areas;
  1384. while (c) {
  1385. LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
  1386. c->backup ? 'b' : ' ', c->free ? ' ' : '*',
  1387. c->address, c->address + c->size - 1, c->size);
  1388. c = c->next;
  1389. }
  1390. }
  1391. /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
  1392. static void target_split_working_area(struct working_area *area, uint32_t size)
  1393. {
  1394. assert(area->free); /* Shouldn't split an allocated area */
  1395. assert(size <= area->size); /* Caller should guarantee this */
  1396. /* Split only if not already the right size */
  1397. if (size < area->size) {
  1398. struct working_area *new_wa = malloc(sizeof(*new_wa));
  1399. if (new_wa == NULL)
  1400. return;
  1401. new_wa->next = area->next;
  1402. new_wa->size = area->size - size;
  1403. new_wa->address = area->address + size;
  1404. new_wa->backup = NULL;
  1405. new_wa->user = NULL;
  1406. new_wa->free = true;
  1407. area->next = new_wa;
  1408. area->size = size;
  1409. /* If backup memory was allocated to this area, it has the wrong size
  1410. * now so free it and it will be reallocated if/when needed */
  1411. if (area->backup) {
  1412. free(area->backup);
  1413. area->backup = NULL;
  1414. }
  1415. }
  1416. }
/* Merge all adjacent free areas into one */
static void target_merge_working_areas(struct target *target)
{
	struct working_area *c = target->working_areas;

	while (c && c->next) {
		/* areas are kept contiguous and address-ordered */
		assert(c->next->address == c->address + c->size); /* This is an invariant */

		/* Find two adjacent free areas */
		if (c->free && c->next->free) {
			/* Merge the last into the first */
			c->size += c->next->size;

			/* Remove the last */
			struct working_area *to_be_freed = c->next;
			c->next = c->next->next;
			if (to_be_freed->backup)
				free(to_be_freed->backup);
			free(to_be_freed);

			/* If backup memory was allocated to the remaining area, it's has
			 * the wrong size now */
			if (c->backup) {
				free(c->backup);
				c->backup = NULL;
			}
			/* deliberately no advance: the grown area may now border
			 * another free neighbor */
		} else {
			c = c->next;
		}
	}
}
/* Try to carve a chunk of at least @a size bytes (rounded up to a multiple
 * of 4) out of the target's working area.  On the first call the area list
 * is created, choosing the physical or virtual base address depending on
 * the current MMU state.  On success, *area points at the chunk and the
 * chunk remembers @a area as its "user" pointer.
 *
 * @return ERROR_OK; ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no free chunk
 *         is large enough (or no work area was configured); or an error
 *         from the MMU query / backup read. */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (target->working_areas == NULL) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* NOTE(review): if malloc failed, working_areas stays NULL and
		 * the search below returns RESOURCE_NOT_AVAILABLE */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	if (size % 4)
		size = (size + 3) & (~3UL);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (c == NULL)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
			size, c->address);

	if (target->backup_working_area) {
		if (c->backup == NULL) {
			c->backup = malloc(c->size);
			if (c->backup == NULL)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
  1522. int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
  1523. {
  1524. int retval;
  1525. retval = target_alloc_working_area_try(target, size, area);
  1526. if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
  1527. LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
  1528. return retval;
  1529. }
  1530. static int target_restore_working_area(struct target *target, struct working_area *area)
  1531. {
  1532. int retval = ERROR_OK;
  1533. if (target->backup_working_area && area->backup != NULL) {
  1534. retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
  1535. if (retval != ERROR_OK)
  1536. LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
  1537. area->size, area->address);
  1538. }
  1539. return retval;
  1540. }
/* Restore the area's backup memory, if any, and return the area to the allocation pool */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	int retval = ERROR_OK;

	/* freeing twice is a harmless no-op */
	if (area->free)
		return retval;

	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
			area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	/* coalesce with any adjacent free areas */
	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}
/* Free a working area, restoring its backed-up contents first. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
/* Tear down a single target: let the back-end release its private state,
 * then free the target's own allocations. */
static void target_destroy(struct target *target)
{
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	free(target->type);
	free(target->trace_info);
	free(target->cmd_name);
	free(target);
}
  1579. void target_quit(void)
  1580. {
  1581. struct target_event_callback *pe = target_event_callbacks;
  1582. while (pe) {
  1583. struct target_event_callback *t = pe->next;
  1584. free(pe);
  1585. pe = t;
  1586. }
  1587. target_event_callbacks = NULL;
  1588. struct target_timer_callback *pt = target_timer_callbacks;
  1589. while (pt) {
  1590. struct target_timer_callback *t = pt->next;
  1591. free(pt);
  1592. pt = t;
  1593. }
  1594. target_timer_callbacks = NULL;
  1595. for (struct target *target = all_targets; target;) {
  1596. struct target *tmp;
  1597. tmp = target->next;
  1598. target_destroy(target);
  1599. target = tmp;
  1600. }
  1601. all_targets = NULL;
  1602. }
/* free resources and restore memory, if restoring memory fails,
 * free up resources anyway
 */
static void target_free_all_working_areas_restore(struct target *target, int restore)
{
	struct working_area *c = target->working_areas;

	LOG_DEBUG("freeing all working areas");

	/* Loop through all areas, restoring the allocated ones and marking them as free */
	while (c) {
		if (!c->free) {
			/* restore errors are deliberately ignored: we free regardless */
			if (restore)
				target_restore_working_area(target, c);
			c->free = true;
			*c->user = NULL; /* Same as above */
			c->user = NULL;
		}
		c = c->next;
	}

	/* Run a merge pass to combine all areas into one */
	target_merge_working_areas(target);

	print_wa_layout(target);
}
/* Free every working area of the target, restoring backed-up contents. */
void target_free_all_working_areas(struct target *target)
{
	target_free_all_working_areas_restore(target, 1);
}
  1629. /* Find the largest number of bytes that can be allocated */
  1630. uint32_t target_get_working_area_avail(struct target *target)
  1631. {
  1632. struct working_area *c = target->working_areas;
  1633. uint32_t max_size = 0;
  1634. if (c == NULL)
  1635. return target->working_area_size;
  1636. while (c) {
  1637. if (c->free && max_size < c->size)
  1638. max_size = c->size;
  1639. c = c->next;
  1640. }
  1641. return max_size;
  1642. }
  1643. int target_arch_state(struct target *target)
  1644. {
  1645. int retval;
  1646. if (target == NULL) {
  1647. LOG_WARNING("No target has been configured");
  1648. return ERROR_OK;
  1649. }
  1650. if (target->state != TARGET_HALTED)
  1651. return ERROR_OK;
  1652. retval = target->type->arch_state(target);
  1653. return retval;
  1654. }
/* Default .get_gdb_fileio_info implementation for targets without
 * semihosting support; always fails so gdb_server falls back to a
 * plain stop reply. */
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info)
{
	/* If target does not support semi-hosting function, target
	   has no need to provide .get_gdb_fileio_info callback.
	   It just return ERROR_FAIL and gdb_server will return "Txx"
	   as target halted every time.  */
	return ERROR_FAIL;
}
/* Default .gdb_fileio_end implementation: nothing to clean up. */
static int target_gdb_fileio_end_default(struct target *target,
		int retcode, int fileio_errno, bool ctrl_c)
{
	return ERROR_OK;
}
  1669. static int target_profiling_default(struct target *target, uint32_t *samples,
  1670. uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
  1671. {
  1672. struct timeval timeout, now;
  1673. gettimeofday(&timeout, NULL);
  1674. timeval_add_time(&timeout, seconds, 0);
  1675. LOG_INFO("Starting profiling. Halting and resuming the"
  1676. " target as often as we can...");
  1677. uint32_t sample_count = 0;
  1678. /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
  1679. struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
  1680. int retval = ERROR_OK;
  1681. for (;;) {
  1682. target_poll(target);
  1683. if (target->state == TARGET_HALTED) {
  1684. uint32_t t = buf_get_u32(reg->value, 0, 32);
  1685. samples[sample_count++] = t;
  1686. /* current pc, addr = 0, do not handle breakpoints, not debugging */
  1687. retval = target_resume(target, 1, 0, 0, 0);
  1688. target_poll(target);
  1689. alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
  1690. } else if (target->state == TARGET_RUNNING) {
  1691. /* We want to quickly sample the PC. */
  1692. retval = target_halt(target);
  1693. } else {
  1694. LOG_INFO("Target not halted or running");
  1695. retval = ERROR_OK;
  1696. break;
  1697. }
  1698. if (retval != ERROR_OK)
  1699. break;
  1700. gettimeofday(&now, NULL);
  1701. if ((sample_count >= max_num_samples) ||
  1702. ((now.tv_sec >= timeout.tv_sec) && (now.tv_usec >= timeout.tv_usec))) {
  1703. LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
  1704. break;
  1705. }
  1706. }
  1707. *num_samples = sample_count;
  1708. return retval;
  1709. }
  1710. /* Single aligned words are guaranteed to use 16 or 32 bit access
  1711. * mode respectively, otherwise data is handled as quickly as
  1712. * possible
  1713. */
  1714. int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
  1715. {
  1716. LOG_DEBUG("writing buffer of %" PRIi32 " byte at " TARGET_ADDR_FMT,
  1717. size, address);
  1718. if (!target_was_examined(target)) {
  1719. LOG_ERROR("Target not examined yet");
  1720. return ERROR_FAIL;
  1721. }
  1722. if (size == 0)
  1723. return ERROR_OK;
  1724. if ((address + size - 1) < address) {
  1725. /* GDB can request this when e.g. PC is 0xfffffffc */
  1726. LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
  1727. address,
  1728. size);
  1729. return ERROR_FAIL;
  1730. }
  1731. return target->type->write_buffer(target, address, size, buffer);
  1732. }
/* Fallback .write_buffer implementation: splits an arbitrary byte buffer
 * into the widest naturally-aligned accesses (max 4 bytes) that
 * target_write_memory() can perform. `count` is in bytes. */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;

	/* Align up to maximum 4 bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
		/* (address & size) != 0 means the address is misaligned for the next
		 * power-of-two width, so flush one single access of width `size`. */
		if (address & size) {
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* number of bytes coverable with accesses of width `size` */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
  1763. /* Single aligned words are guaranteed to use 16 or 32 bit access
  1764. * mode respectively, otherwise data is handled as quickly as
  1765. * possible
  1766. */
  1767. int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
  1768. {
  1769. LOG_DEBUG("reading buffer of %" PRIi32 " byte at " TARGET_ADDR_FMT,
  1770. size, address);
  1771. if (!target_was_examined(target)) {
  1772. LOG_ERROR("Target not examined yet");
  1773. return ERROR_FAIL;
  1774. }
  1775. if (size == 0)
  1776. return ERROR_OK;
  1777. if ((address + size - 1) < address) {
  1778. /* GDB can request this when e.g. PC is 0xfffffffc */
  1779. LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
  1780. address,
  1781. size);
  1782. return ERROR_FAIL;
  1783. }
  1784. return target->type->read_buffer(target, address, size, buffer);
  1785. }
/* Fallback .read_buffer implementation: mirror of target_write_buffer_default,
 * splitting the transfer into the widest naturally-aligned accesses (max 4
 * bytes) that target_read_memory() can perform. `count` is in bytes. */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;

	/* Align up to maximum 4 bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
		/* (address & size) != 0 means the address is misaligned for the next
		 * power-of-two width, so perform one single access of width `size`. */
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* number of bytes coverable with accesses of width `size` */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
  1815. int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t* crc)
  1816. {
  1817. uint8_t *buffer;
  1818. int retval;
  1819. uint32_t i;
  1820. uint32_t checksum = 0;
  1821. if (!target_was_examined(target)) {
  1822. LOG_ERROR("Target not examined yet");
  1823. return ERROR_FAIL;
  1824. }
  1825. retval = target->type->checksum_memory(target, address, size, &checksum);
  1826. if (retval != ERROR_OK) {
  1827. buffer = malloc(size);
  1828. if (buffer == NULL) {
  1829. LOG_ERROR("error allocating buffer for section (%" PRId32 " bytes)", size);
  1830. return ERROR_COMMAND_SYNTAX_ERROR;
  1831. }
  1832. retval = target_read_buffer(target, address, size, buffer);
  1833. if (retval != ERROR_OK) {
  1834. free(buffer);
  1835. return retval;
  1836. }
  1837. /* convert to target endianness */
  1838. for (i = 0; i < (size/sizeof(uint32_t)); i++) {
  1839. uint32_t target_data;
  1840. target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
  1841. target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
  1842. }
  1843. retval = image_calculate_checksum(buffer, size, &checksum);
  1844. free(buffer);
  1845. }
  1846. *crc = checksum;
  1847. return retval;
  1848. }
  1849. int target_blank_check_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t* blank,
  1850. uint8_t erased_value)
  1851. {
  1852. int retval;
  1853. if (!target_was_examined(target)) {
  1854. LOG_ERROR("Target not examined yet");
  1855. return ERROR_FAIL;
  1856. }
  1857. if (target->type->blank_check_memory == 0)
  1858. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1859. retval = target->type->blank_check_memory(target, address, size, blank, erased_value);
  1860. return retval;
  1861. }
  1862. int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
  1863. {
  1864. uint8_t value_buf[8];
  1865. if (!target_was_examined(target)) {
  1866. LOG_ERROR("Target not examined yet");
  1867. return ERROR_FAIL;
  1868. }
  1869. int retval = target_read_memory(target, address, 8, 1, value_buf);
  1870. if (retval == ERROR_OK) {
  1871. *value = target_buffer_get_u64(target, value_buf);
  1872. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
  1873. address,
  1874. *value);
  1875. } else {
  1876. *value = 0x0;
  1877. LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
  1878. address);
  1879. }
  1880. return retval;
  1881. }
  1882. int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
  1883. {
  1884. uint8_t value_buf[4];
  1885. if (!target_was_examined(target)) {
  1886. LOG_ERROR("Target not examined yet");
  1887. return ERROR_FAIL;
  1888. }
  1889. int retval = target_read_memory(target, address, 4, 1, value_buf);
  1890. if (retval == ERROR_OK) {
  1891. *value = target_buffer_get_u32(target, value_buf);
  1892. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
  1893. address,
  1894. *value);
  1895. } else {
  1896. *value = 0x0;
  1897. LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
  1898. address);
  1899. }
  1900. return retval;
  1901. }
  1902. int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
  1903. {
  1904. uint8_t value_buf[2];
  1905. if (!target_was_examined(target)) {
  1906. LOG_ERROR("Target not examined yet");
  1907. return ERROR_FAIL;
  1908. }
  1909. int retval = target_read_memory(target, address, 2, 1, value_buf);
  1910. if (retval == ERROR_OK) {
  1911. *value = target_buffer_get_u16(target, value_buf);
  1912. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
  1913. address,
  1914. *value);
  1915. } else {
  1916. *value = 0x0;
  1917. LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
  1918. address);
  1919. }
  1920. return retval;
  1921. }
  1922. int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
  1923. {
  1924. if (!target_was_examined(target)) {
  1925. LOG_ERROR("Target not examined yet");
  1926. return ERROR_FAIL;
  1927. }
  1928. int retval = target_read_memory(target, address, 1, 1, value);
  1929. if (retval == ERROR_OK) {
  1930. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
  1931. address,
  1932. *value);
  1933. } else {
  1934. *value = 0x0;
  1935. LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
  1936. address);
  1937. }
  1938. return retval;
  1939. }
  1940. int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
  1941. {
  1942. int retval;
  1943. uint8_t value_buf[8];
  1944. if (!target_was_examined(target)) {
  1945. LOG_ERROR("Target not examined yet");
  1946. return ERROR_FAIL;
  1947. }
  1948. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
  1949. address,
  1950. value);
  1951. target_buffer_set_u64(target, value_buf, value);
  1952. retval = target_write_memory(target, address, 8, 1, value_buf);
  1953. if (retval != ERROR_OK)
  1954. LOG_DEBUG("failed: %i", retval);
  1955. return retval;
  1956. }
  1957. int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
  1958. {
  1959. int retval;
  1960. uint8_t value_buf[4];
  1961. if (!target_was_examined(target)) {
  1962. LOG_ERROR("Target not examined yet");
  1963. return ERROR_FAIL;
  1964. }
  1965. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
  1966. address,
  1967. value);
  1968. target_buffer_set_u32(target, value_buf, value);
  1969. retval = target_write_memory(target, address, 4, 1, value_buf);
  1970. if (retval != ERROR_OK)
  1971. LOG_DEBUG("failed: %i", retval);
  1972. return retval;
  1973. }
  1974. int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
  1975. {
  1976. int retval;
  1977. uint8_t value_buf[2];
  1978. if (!target_was_examined(target)) {
  1979. LOG_ERROR("Target not examined yet");
  1980. return ERROR_FAIL;
  1981. }
  1982. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
  1983. address,
  1984. value);
  1985. target_buffer_set_u16(target, value_buf, value);
  1986. retval = target_write_memory(target, address, 2, 1, value_buf);
  1987. if (retval != ERROR_OK)
  1988. LOG_DEBUG("failed: %i", retval);
  1989. return retval;
  1990. }
  1991. int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
  1992. {
  1993. int retval;
  1994. if (!target_was_examined(target)) {
  1995. LOG_ERROR("Target not examined yet");
  1996. return ERROR_FAIL;
  1997. }
  1998. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
  1999. address, value);
  2000. retval = target_write_memory(target, address, 1, 1, &value);
  2001. if (retval != ERROR_OK)
  2002. LOG_DEBUG("failed: %i", retval);
  2003. return retval;
  2004. }
  2005. int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
  2006. {
  2007. int retval;
  2008. uint8_t value_buf[8];
  2009. if (!target_was_examined(target)) {
  2010. LOG_ERROR("Target not examined yet");
  2011. return ERROR_FAIL;
  2012. }
  2013. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
  2014. address,
  2015. value);
  2016. target_buffer_set_u64(target, value_buf, value);
  2017. retval = target_write_phys_memory(target, address, 8, 1, value_buf);
  2018. if (retval != ERROR_OK)
  2019. LOG_DEBUG("failed: %i", retval);
  2020. return retval;
  2021. }
  2022. int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
  2023. {
  2024. int retval;
  2025. uint8_t value_buf[4];
  2026. if (!target_was_examined(target)) {
  2027. LOG_ERROR("Target not examined yet");
  2028. return ERROR_FAIL;
  2029. }
  2030. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
  2031. address,
  2032. value);
  2033. target_buffer_set_u32(target, value_buf, value);
  2034. retval = target_write_phys_memory(target, address, 4, 1, value_buf);
  2035. if (retval != ERROR_OK)
  2036. LOG_DEBUG("failed: %i", retval);
  2037. return retval;
  2038. }
  2039. int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
  2040. {
  2041. int retval;
  2042. uint8_t value_buf[2];
  2043. if (!target_was_examined(target)) {
  2044. LOG_ERROR("Target not examined yet");
  2045. return ERROR_FAIL;
  2046. }
  2047. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
  2048. address,
  2049. value);
  2050. target_buffer_set_u16(target, value_buf, value);
  2051. retval = target_write_phys_memory(target, address, 2, 1, value_buf);
  2052. if (retval != ERROR_OK)
  2053. LOG_DEBUG("failed: %i", retval);
  2054. return retval;
  2055. }
  2056. int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
  2057. {
  2058. int retval;
  2059. if (!target_was_examined(target)) {
  2060. LOG_ERROR("Target not examined yet");
  2061. return ERROR_FAIL;
  2062. }
  2063. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
  2064. address, value);
  2065. retval = target_write_phys_memory(target, address, 1, 1, &value);
  2066. if (retval != ERROR_OK)
  2067. LOG_DEBUG("failed: %i", retval);
  2068. return retval;
  2069. }
  2070. static int find_target(struct command_context *cmd_ctx, const char *name)
  2071. {
  2072. struct target *target = get_target(name);
  2073. if (target == NULL) {
  2074. LOG_ERROR("Target: %s is unknown, try one of:\n", name);
  2075. return ERROR_FAIL;
  2076. }
  2077. if (!target->tap->enabled) {
  2078. LOG_USER("Target: TAP %s is disabled, "
  2079. "can't be the current target\n",
  2080. target->tap->dotted_name);
  2081. return ERROR_FAIL;
  2082. }
  2083. cmd_ctx->current_target = target->target_number;
  2084. return ERROR_OK;
  2085. }
/* "targets" command: with one argument, make that target current;
 * with no argument (or if selection failed), print a table of all
 * configured targets. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD_CTX, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	/* fall through to listing every target */
	struct target *target = all_targets;
	command_print(CMD_CTX, " TargetName Type Endian TapName State ");
	command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		/* '*' marks the currently selected target */
		if (CMD_CTX->current_target == target->target_number)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD_CTX,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				Jim_Nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
static int powerDropout;	/* most recently sampled power-dropout state */
static int srstAsserted;	/* most recently sampled SRST line state */

/* Action flags set by sense_handler() and consumed by handle_target(),
 * which runs the matching Tcl proc for each one. */
static int runPowerRestore;
static int runPowerDropout;
static int runSrstAsserted;
static int runSrstDeasserted;
/* Sample power-dropout and SRST state and translate edges into the
 * run* action flags above. Dropout/deassert events are rate-limited
 * to at most one per 2000 ms each. */
static int sense_handler(void)
{
	/* previous samples, for edge detection */
	static int prevSrstAsserted;
	static int prevPowerdropout;

	int retval = jtag_power_dropout(&powerDropout);
	if (retval != ERROR_OK)
		return retval;

	int powerRestored;
	powerRestored = prevPowerdropout && !powerDropout;
	if (powerRestored)
		runPowerRestore = 1;

	int64_t current = timeval_ms();
	static int64_t lastPower;
	/* rate-limit: skip if the last dropout event was under 2000 ms ago */
	bool waitMore = lastPower + 2000 > current;
	if (powerDropout && !waitMore) {
		runPowerDropout = 1;
		lastPower = current;
	}

	retval = jtag_srst_asserted(&srstAsserted);
	if (retval != ERROR_OK)
		return retval;

	int srstDeasserted;
	srstDeasserted = prevSrstAsserted && !srstAsserted;

	static int64_t lastSrst;
	/* same 2000 ms rate limit for SRST deassert events */
	waitMore = lastSrst + 2000 > current;
	if (srstDeasserted && !waitMore) {
		runSrstDeasserted = 1;
		lastSrst = current;
	}

	if (!prevSrstAsserted && srstAsserted)
		runSrstAsserted = 1;

	prevSrstAsserted = srstAsserted;
	prevPowerdropout = powerDropout;

	if (srstDeasserted || powerRestored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
/* process target state changes */
/* Periodic callback: run SRST/power event handlers (non-recursively),
 * then poll every enabled, examined target, backing off exponentially
 * on poll failures (priv is the Jim interpreter). */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (runSrstAsserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (runSrstDeasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (runPowerDropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (runPowerRestore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */
		runSrstAsserted = 0;
		runSrstDeasserted = 0;
		runPowerRestore = 0;
		runPowerDropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!powerDropout && !srstAsserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			if (target->backoff.times > 0) {
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target->examined = true;
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
						 target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
/* "reg" command:
 *   reg                   - list all registers of the current target
 *   reg <#|name> [force]  - display one register ("force" re-reads it)
 *   reg <#|name> <value>  - set one register
 */
COMMAND_HANDLER(handle_reg_command)
{
	struct target *target;
	struct reg *reg = NULL;
	unsigned count = 0;
	char *value;

	LOG_DEBUG("-");

	target = get_current_target(CMD_CTX);

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD_CTX, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* only print cached values if they are valid */
				if (reg->valid) {
					value = buf_to_str(reg->value,
							reg->size, 16);
					command_print(CMD_CTX,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size) ;
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk the caches until the num'th register is found */
		struct reg_cache *cache = target->reg_cache;

		count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD_CTX, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);

		if (!reg) {
			command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
			return ERROR_OK;
		}
	}

	assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		/* "force" drops the cached value so it is re-read from the target */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0)
			reg->type->get(reg);
		value = buf_to_str(reg->value, reg->size, 16);
		command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		/* reg->size is in bits; allocate enough whole bytes */
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (buf == NULL)
			return ERROR_FAIL;
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		reg->type->set(reg, buf);

		value = buf_to_str(reg->value, reg->size, 16);
		command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);

		free(buf);

		return ERROR_OK;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;
}
  2368. COMMAND_HANDLER(handle_poll_command)
  2369. {
  2370. int retval = ERROR_OK;
  2371. struct target *target = get_current_target(CMD_CTX);
  2372. if (CMD_ARGC == 0) {
  2373. command_print(CMD_CTX, "background polling: %s",
  2374. jtag_poll_get_enabled() ? "on" : "off");
  2375. command_print(CMD_CTX, "TAP: %s (%s)",
  2376. target->tap->dotted_name,
  2377. target->tap->enabled ? "enabled" : "disabled");
  2378. if (!target->tap->enabled)
  2379. return ERROR_OK;
  2380. retval = target_poll(target);
  2381. if (retval != ERROR_OK)
  2382. return retval;
  2383. retval = target_arch_state(target);
  2384. if (retval != ERROR_OK)
  2385. return retval;
  2386. } else if (CMD_ARGC == 1) {
  2387. bool enable;
  2388. COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
  2389. jtag_poll_set_enabled(enable);
  2390. } else
  2391. return ERROR_COMMAND_SYNTAX_ERROR;
  2392. return retval;
  2393. }
  2394. COMMAND_HANDLER(handle_wait_halt_command)
  2395. {
  2396. if (CMD_ARGC > 1)
  2397. return ERROR_COMMAND_SYNTAX_ERROR;
  2398. unsigned ms = DEFAULT_HALT_TIMEOUT;
  2399. if (1 == CMD_ARGC) {
  2400. int retval = parse_uint(CMD_ARGV[0], &ms);
  2401. if (ERROR_OK != retval)
  2402. return ERROR_COMMAND_SYNTAX_ERROR;
  2403. }
  2404. struct target *target = get_current_target(CMD_CTX);
  2405. return target_wait_state(target, TARGET_HALTED, ms);
  2406. }
  2407. /* wait for target state to change. The trick here is to have a low
  2408. * latency for short waits and not to suck up all the CPU time
  2409. * on longer waits.
  2410. *
  2411. * After 500ms, keep_alive() is invoked
  2412. */
  2413. int target_wait_state(struct target *target, enum target_state state, int ms)
  2414. {
  2415. int retval;
  2416. int64_t then = 0, cur;
  2417. bool once = true;
  2418. for (;;) {
  2419. retval = target_poll(target);
  2420. if (retval != ERROR_OK)
  2421. return retval;
  2422. if (target->state == state)
  2423. break;
  2424. cur = timeval_ms();
  2425. if (once) {
  2426. once = false;
  2427. then = timeval_ms();
  2428. LOG_DEBUG("waiting for target %s...",
  2429. Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
  2430. }
  2431. if (cur-then > 500)
  2432. keep_alive();
  2433. if ((cur-then) > ms) {
  2434. LOG_ERROR("timed out while waiting for target %s",
  2435. Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
  2436. return ERROR_FAIL;
  2437. }
  2438. }
  2439. return ERROR_OK;
  2440. }
  2441. COMMAND_HANDLER(handle_halt_command)
  2442. {
  2443. LOG_DEBUG("-");
  2444. struct target *target = get_current_target(CMD_CTX);
  2445. int retval = target_halt(target);
  2446. if (ERROR_OK != retval)
  2447. return retval;
  2448. if (CMD_ARGC == 1) {
  2449. unsigned wait_local;
  2450. retval = parse_uint(CMD_ARGV[0], &wait_local);
  2451. if (ERROR_OK != retval)
  2452. return ERROR_COMMAND_SYNTAX_ERROR;
  2453. if (!wait_local)
  2454. return ERROR_OK;
  2455. }
  2456. return CALL_COMMAND_HANDLER(handle_wait_halt_command);
  2457. }
  2458. COMMAND_HANDLER(handle_soft_reset_halt_command)
  2459. {
  2460. struct target *target = get_current_target(CMD_CTX);
  2461. LOG_USER("requesting target halt and executing a soft reset");
  2462. target_soft_reset_halt(target);
  2463. return ERROR_OK;
  2464. }
  2465. COMMAND_HANDLER(handle_reset_command)
  2466. {
  2467. if (CMD_ARGC > 1)
  2468. return ERROR_COMMAND_SYNTAX_ERROR;
  2469. enum target_reset_mode reset_mode = RESET_RUN;
  2470. if (CMD_ARGC == 1) {
  2471. const Jim_Nvp *n;
  2472. n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
  2473. if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
  2474. return ERROR_COMMAND_SYNTAX_ERROR;
  2475. reset_mode = n->value;
  2476. }
  2477. /* reset *all* targets */
  2478. return target_process_reset(CMD_CTX, reset_mode);
  2479. }
  2480. COMMAND_HANDLER(handle_resume_command)
  2481. {
  2482. int current = 1;
  2483. if (CMD_ARGC > 1)
  2484. return ERROR_COMMAND_SYNTAX_ERROR;
  2485. struct target *target = get_current_target(CMD_CTX);
  2486. /* with no CMD_ARGV, resume from current pc, addr = 0,
  2487. * with one arguments, addr = CMD_ARGV[0],
  2488. * handle breakpoints, not debugging */
  2489. target_addr_t addr = 0;
  2490. if (CMD_ARGC == 1) {
  2491. COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
  2492. current = 0;
  2493. }
  2494. return target_resume(target, current, addr, 1, 0);
  2495. }
  2496. COMMAND_HANDLER(handle_step_command)
  2497. {
  2498. if (CMD_ARGC > 1)
  2499. return ERROR_COMMAND_SYNTAX_ERROR;
  2500. LOG_DEBUG("-");
  2501. /* with no CMD_ARGV, step from current pc, addr = 0,
  2502. * with one argument addr = CMD_ARGV[0],
  2503. * handle breakpoints, debugging */
  2504. target_addr_t addr = 0;
  2505. int current_pc = 1;
  2506. if (CMD_ARGC == 1) {
  2507. COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
  2508. current_pc = 0;
  2509. }
  2510. struct target *target = get_current_target(CMD_CTX);
  2511. return target->type->step(target, current_pc, addr, 1);
  2512. }
  2513. static void handle_md_output(struct command_context *cmd_ctx,
  2514. struct target *target, target_addr_t address, unsigned size,
  2515. unsigned count, const uint8_t *buffer)
  2516. {
  2517. const unsigned line_bytecnt = 32;
  2518. unsigned line_modulo = line_bytecnt / size;
  2519. char output[line_bytecnt * 4 + 1];
  2520. unsigned output_len = 0;
  2521. const char *value_fmt;
  2522. switch (size) {
  2523. case 8:
  2524. value_fmt = "%16.16llx ";
  2525. break;
  2526. case 4:
  2527. value_fmt = "%8.8x ";
  2528. break;
  2529. case 2:
  2530. value_fmt = "%4.4x ";
  2531. break;
  2532. case 1:
  2533. value_fmt = "%2.2x ";
  2534. break;
  2535. default:
  2536. /* "can't happen", caller checked */
  2537. LOG_ERROR("invalid memory read size: %u", size);
  2538. return;
  2539. }
  2540. for (unsigned i = 0; i < count; i++) {
  2541. if (i % line_modulo == 0) {
  2542. output_len += snprintf(output + output_len,
  2543. sizeof(output) - output_len,
  2544. TARGET_ADDR_FMT ": ",
  2545. (address + (i * size)));
  2546. }
  2547. uint64_t value = 0;
  2548. const uint8_t *value_ptr = buffer + i * size;
  2549. switch (size) {
  2550. case 8:
  2551. value = target_buffer_get_u64(target, value_ptr);
  2552. break;
  2553. case 4:
  2554. value = target_buffer_get_u32(target, value_ptr);
  2555. break;
  2556. case 2:
  2557. value = target_buffer_get_u16(target, value_ptr);
  2558. break;
  2559. case 1:
  2560. value = *value_ptr;
  2561. }
  2562. output_len += snprintf(output + output_len,
  2563. sizeof(output) - output_len,
  2564. value_fmt, value);
  2565. if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
  2566. command_print(cmd_ctx, "%s", output);
  2567. output_len = 0;
  2568. }
  2569. }
  2570. }
  2571. COMMAND_HANDLER(handle_md_command)
  2572. {
  2573. if (CMD_ARGC < 1)
  2574. return ERROR_COMMAND_SYNTAX_ERROR;
  2575. unsigned size = 0;
  2576. switch (CMD_NAME[2]) {
  2577. case 'd':
  2578. size = 8;
  2579. break;
  2580. case 'w':
  2581. size = 4;
  2582. break;
  2583. case 'h':
  2584. size = 2;
  2585. break;
  2586. case 'b':
  2587. size = 1;
  2588. break;
  2589. default:
  2590. return ERROR_COMMAND_SYNTAX_ERROR;
  2591. }
  2592. bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
  2593. int (*fn)(struct target *target,
  2594. target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
  2595. if (physical) {
  2596. CMD_ARGC--;
  2597. CMD_ARGV++;
  2598. fn = target_read_phys_memory;
  2599. } else
  2600. fn = target_read_memory;
  2601. if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
  2602. return ERROR_COMMAND_SYNTAX_ERROR;
  2603. target_addr_t address;
  2604. COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
  2605. unsigned count = 1;
  2606. if (CMD_ARGC == 2)
  2607. COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
  2608. uint8_t *buffer = calloc(count, size);
  2609. struct target *target = get_current_target(CMD_CTX);
  2610. int retval = fn(target, address, size, count, buffer);
  2611. if (ERROR_OK == retval)
  2612. handle_md_output(CMD_CTX, target, address, size, count, buffer);
  2613. free(buffer);
  2614. return retval;
  2615. }
  2616. typedef int (*target_write_fn)(struct target *target,
  2617. target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
  2618. static int target_fill_mem(struct target *target,
  2619. target_addr_t address,
  2620. target_write_fn fn,
  2621. unsigned data_size,
  2622. /* value */
  2623. uint64_t b,
  2624. /* count */
  2625. unsigned c)
  2626. {
  2627. /* We have to write in reasonably large chunks to be able
  2628. * to fill large memory areas with any sane speed */
  2629. const unsigned chunk_size = 16384;
  2630. uint8_t *target_buf = malloc(chunk_size * data_size);
  2631. if (target_buf == NULL) {
  2632. LOG_ERROR("Out of memory");
  2633. return ERROR_FAIL;
  2634. }
  2635. for (unsigned i = 0; i < chunk_size; i++) {
  2636. switch (data_size) {
  2637. case 8:
  2638. target_buffer_set_u64(target, target_buf + i * data_size, b);
  2639. break;
  2640. case 4:
  2641. target_buffer_set_u32(target, target_buf + i * data_size, b);
  2642. break;
  2643. case 2:
  2644. target_buffer_set_u16(target, target_buf + i * data_size, b);
  2645. break;
  2646. case 1:
  2647. target_buffer_set_u8(target, target_buf + i * data_size, b);
  2648. break;
  2649. default:
  2650. exit(-1);
  2651. }
  2652. }
  2653. int retval = ERROR_OK;
  2654. for (unsigned x = 0; x < c; x += chunk_size) {
  2655. unsigned current;
  2656. current = c - x;
  2657. if (current > chunk_size)
  2658. current = chunk_size;
  2659. retval = fn(target, address + x * data_size, data_size, current, target_buf);
  2660. if (retval != ERROR_OK)
  2661. break;
  2662. /* avoid GDB timeouts */
  2663. keep_alive();
  2664. }
  2665. free(target_buf);
  2666. return retval;
  2667. }
  2668. COMMAND_HANDLER(handle_mw_command)
  2669. {
  2670. if (CMD_ARGC < 2)
  2671. return ERROR_COMMAND_SYNTAX_ERROR;
  2672. bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
  2673. target_write_fn fn;
  2674. if (physical) {
  2675. CMD_ARGC--;
  2676. CMD_ARGV++;
  2677. fn = target_write_phys_memory;
  2678. } else
  2679. fn = target_write_memory;
  2680. if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
  2681. return ERROR_COMMAND_SYNTAX_ERROR;
  2682. target_addr_t address;
  2683. COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
  2684. target_addr_t value;
  2685. COMMAND_PARSE_ADDRESS(CMD_ARGV[1], value);
  2686. unsigned count = 1;
  2687. if (CMD_ARGC == 3)
  2688. COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
  2689. struct target *target = get_current_target(CMD_CTX);
  2690. unsigned wordsize;
  2691. switch (CMD_NAME[2]) {
  2692. case 'd':
  2693. wordsize = 8;
  2694. break;
  2695. case 'w':
  2696. wordsize = 4;
  2697. break;
  2698. case 'h':
  2699. wordsize = 2;
  2700. break;
  2701. case 'b':
  2702. wordsize = 1;
  2703. break;
  2704. default:
  2705. return ERROR_COMMAND_SYNTAX_ERROR;
  2706. }
  2707. return target_fill_mem(target, address, fn, wordsize, value, count);
  2708. }
  2709. static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
  2710. target_addr_t *min_address, target_addr_t *max_address)
  2711. {
  2712. if (CMD_ARGC < 1 || CMD_ARGC > 5)
  2713. return ERROR_COMMAND_SYNTAX_ERROR;
  2714. /* a base address isn't always necessary,
  2715. * default to 0x0 (i.e. don't relocate) */
  2716. if (CMD_ARGC >= 2) {
  2717. target_addr_t addr;
  2718. COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
  2719. image->base_address = addr;
  2720. image->base_address_set = 1;
  2721. } else
  2722. image->base_address_set = 0;
  2723. image->start_address_set = 0;
  2724. if (CMD_ARGC >= 4)
  2725. COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
  2726. if (CMD_ARGC == 5) {
  2727. COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
  2728. /* use size (given) to find max (required) */
  2729. *max_address += *min_address;
  2730. }
  2731. if (*min_address > *max_address)
  2732. return ERROR_COMMAND_SYNTAX_ERROR;
  2733. return ERROR_OK;
  2734. }
/* 'load_image' command: write an image file into target memory, section
 * by section, optionally clipped to the [min_address, max_address)
 * window parsed by parse_load_image_command_CMD_ARGV(). */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* target_addr_t is unsigned: -1 == whole address space */
	int i;
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
			&image, &min_address, &max_address);
	if (ERROR_OK != retval)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (buffer == NULL) {
			command_print(CMD_CTX,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		/* offset/length describe the part of this section inside the
		 * [min_address, max_address) window. */
		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparision here!!! */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD_CTX, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;
}
  2802. COMMAND_HANDLER(handle_dump_image_command)
  2803. {
  2804. struct fileio *fileio;
  2805. uint8_t *buffer;
  2806. int retval, retvaltemp;
  2807. target_addr_t address, size;
  2808. struct duration bench;
  2809. struct target *target = get_current_target(CMD_CTX);
  2810. if (CMD_ARGC != 3)
  2811. return ERROR_COMMAND_SYNTAX_ERROR;
  2812. COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
  2813. COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
  2814. uint32_t buf_size = (size > 4096) ? 4096 : size;
  2815. buffer = malloc(buf_size);
  2816. if (!buffer)
  2817. return ERROR_FAIL;
  2818. retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
  2819. if (retval != ERROR_OK) {
  2820. free(buffer);
  2821. return retval;
  2822. }
  2823. duration_start(&bench);
  2824. while (size > 0) {
  2825. size_t size_written;
  2826. uint32_t this_run_size = (size > buf_size) ? buf_size : size;
  2827. retval = target_read_buffer(target, address, this_run_size, buffer);
  2828. if (retval != ERROR_OK)
  2829. break;
  2830. retval = fileio_write(fileio, this_run_size, buffer, &size_written);
  2831. if (retval != ERROR_OK)
  2832. break;
  2833. size -= this_run_size;
  2834. address += this_run_size;
  2835. }
  2836. free(buffer);
  2837. if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
  2838. size_t filesize;
  2839. retval = fileio_size(fileio, &filesize);
  2840. if (retval != ERROR_OK)
  2841. return retval;
  2842. command_print(CMD_CTX,
  2843. "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
  2844. duration_elapsed(&bench), duration_kbps(&bench, filesize));
  2845. }
  2846. retvaltemp = fileio_close(fileio);
  2847. if (retvaltemp != ERROR_OK)
  2848. return retvaltemp;
  2849. return retval;
  2850. }
/* How thoroughly handle_verify_image_command_internal() compares an
 * image file against target memory. */
enum verify_mode {
	IMAGE_TEST = 0,		/* only print section address/length; no compare */
	IMAGE_VERIFY = 1,	/* CRC compare; on mismatch fall back to a byte-wise diff */
	IMAGE_CHECKSUM_ONLY = 2	/* CRC compare; any mismatch is a hard failure */
};
  2856. static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
  2857. {
  2858. uint8_t *buffer;
  2859. size_t buf_cnt;
  2860. uint32_t image_size;
  2861. int i;
  2862. int retval;
  2863. uint32_t checksum = 0;
  2864. uint32_t mem_checksum = 0;
  2865. struct image image;
  2866. struct target *target = get_current_target(CMD_CTX);
  2867. if (CMD_ARGC < 1)
  2868. return ERROR_COMMAND_SYNTAX_ERROR;
  2869. if (!target) {
  2870. LOG_ERROR("no target selected");
  2871. return ERROR_FAIL;
  2872. }
  2873. struct duration bench;
  2874. duration_start(&bench);
  2875. if (CMD_ARGC >= 2) {
  2876. target_addr_t addr;
  2877. COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
  2878. image.base_address = addr;
  2879. image.base_address_set = 1;
  2880. } else {
  2881. image.base_address_set = 0;
  2882. image.base_address = 0x0;
  2883. }
  2884. image.start_address_set = 0;
  2885. retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
  2886. if (retval != ERROR_OK)
  2887. return retval;
  2888. image_size = 0x0;
  2889. int diffs = 0;
  2890. retval = ERROR_OK;
  2891. for (i = 0; i < image.num_sections; i++) {
  2892. buffer = malloc(image.sections[i].size);
  2893. if (buffer == NULL) {
  2894. command_print(CMD_CTX,
  2895. "error allocating buffer for section (%d bytes)",
  2896. (int)(image.sections[i].size));
  2897. break;
  2898. }
  2899. retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
  2900. if (retval != ERROR_OK) {
  2901. free(buffer);
  2902. break;
  2903. }
  2904. if (verify >= IMAGE_VERIFY) {
  2905. /* calculate checksum of image */
  2906. retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
  2907. if (retval != ERROR_OK) {
  2908. free(buffer);
  2909. break;
  2910. }
  2911. retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
  2912. if (retval != ERROR_OK) {
  2913. free(buffer);
  2914. break;
  2915. }
  2916. if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
  2917. LOG_ERROR("checksum mismatch");
  2918. free(buffer);
  2919. retval = ERROR_FAIL;
  2920. goto done;
  2921. }
  2922. if (checksum != mem_checksum) {
  2923. /* failed crc checksum, fall back to a binary compare */
  2924. uint8_t *data;
  2925. if (diffs == 0)
  2926. LOG_ERROR("checksum mismatch - attempting binary compare");
  2927. data = malloc(buf_cnt);
  2928. /* Can we use 32bit word accesses? */
  2929. int size = 1;
  2930. int count = buf_cnt;
  2931. if ((count % 4) == 0) {
  2932. size *= 4;
  2933. count /= 4;
  2934. }
  2935. retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
  2936. if (retval == ERROR_OK) {
  2937. uint32_t t;
  2938. for (t = 0; t < buf_cnt; t++) {
  2939. if (data[t] != buffer[t]) {
  2940. command_print(CMD_CTX,
  2941. "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
  2942. diffs,
  2943. (unsigned)(t + image.sections[i].base_address),
  2944. data[t],
  2945. buffer[t]);
  2946. if (diffs++ >= 127) {
  2947. command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
  2948. free(data);
  2949. free(buffer);
  2950. goto done;
  2951. }
  2952. }
  2953. keep_alive();
  2954. }
  2955. }
  2956. free(data);
  2957. }
  2958. } else {
  2959. command_print(CMD_CTX, "address " TARGET_ADDR_FMT " length 0x%08zx",
  2960. image.sections[i].base_address,
  2961. buf_cnt);
  2962. }
  2963. free(buffer);
  2964. image_size += buf_cnt;
  2965. }
  2966. if (diffs > 0)
  2967. command_print(CMD_CTX, "No more differences found.");
  2968. done:
  2969. if (diffs > 0)
  2970. retval = ERROR_FAIL;
  2971. if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
  2972. command_print(CMD_CTX, "verified %" PRIu32 " bytes "
  2973. "in %fs (%0.3f KiB/s)", image_size,
  2974. duration_elapsed(&bench), duration_kbps(&bench, image_size));
  2975. }
  2976. image_close(&image);
  2977. return retval;
  2978. }
/* 'verify_image_checksum' command: CRC-only compare of an image against
 * target memory; any mismatch is reported as an error. */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
/* 'verify_image' command: CRC compare of an image against target
 * memory, falling back to a byte-wise diff on mismatch. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
/* 'test_image' command: only print each section's address and length;
 * target memory is not read. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
  2991. static int handle_bp_command_list(struct command_context *cmd_ctx)
  2992. {
  2993. struct target *target = get_current_target(cmd_ctx);
  2994. struct breakpoint *breakpoint = target->breakpoints;
  2995. while (breakpoint) {
  2996. if (breakpoint->type == BKPT_SOFT) {
  2997. char *buf = buf_to_str(breakpoint->orig_instr,
  2998. breakpoint->length, 16);
  2999. command_print(cmd_ctx, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
  3000. breakpoint->address,
  3001. breakpoint->length,
  3002. breakpoint->set, buf);
  3003. free(buf);
  3004. } else {
  3005. if ((breakpoint->address == 0) && (breakpoint->asid != 0))
  3006. command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
  3007. breakpoint->asid,
  3008. breakpoint->length, breakpoint->set);
  3009. else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
  3010. command_print(cmd_ctx, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
  3011. breakpoint->address,
  3012. breakpoint->length, breakpoint->set);
  3013. command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
  3014. breakpoint->asid);
  3015. } else
  3016. command_print(cmd_ctx, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
  3017. breakpoint->address,
  3018. breakpoint->length, breakpoint->set);
  3019. }
  3020. breakpoint = breakpoint->next;
  3021. }
  3022. return ERROR_OK;
  3023. }
  3024. static int handle_bp_command_set(struct command_context *cmd_ctx,
  3025. target_addr_t addr, uint32_t asid, uint32_t length, int hw)
  3026. {
  3027. struct target *target = get_current_target(cmd_ctx);
  3028. int retval;
  3029. if (asid == 0) {
  3030. retval = breakpoint_add(target, addr, length, hw);
  3031. if (ERROR_OK == retval)
  3032. command_print(cmd_ctx, "breakpoint set at " TARGET_ADDR_FMT "", addr);
  3033. else {
  3034. LOG_ERROR("Failure setting breakpoint, the same address(IVA) is already used");
  3035. return retval;
  3036. }
  3037. } else if (addr == 0) {
  3038. if (target->type->add_context_breakpoint == NULL) {
  3039. LOG_WARNING("Context breakpoint not available");
  3040. return ERROR_OK;
  3041. }
  3042. retval = context_breakpoint_add(target, asid, length, hw);
  3043. if (ERROR_OK == retval)
  3044. command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
  3045. else {
  3046. LOG_ERROR("Failure setting breakpoint, the same address(CONTEXTID) is already used");
  3047. return retval;
  3048. }
  3049. } else {
  3050. if (target->type->add_hybrid_breakpoint == NULL) {
  3051. LOG_WARNING("Hybrid breakpoint not available");
  3052. return ERROR_OK;
  3053. }
  3054. retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
  3055. if (ERROR_OK == retval)
  3056. command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
  3057. else {
  3058. LOG_ERROR("Failure setting breakpoint, the same address is already used");
  3059. return retval;
  3060. }
  3061. }
  3062. return ERROR_OK;
  3063. }
/* 'bp' command: list breakpoints, or set a software/hardware, context
 * (ASID), or hybrid (address+ASID) breakpoint.
 * Usage: bp | bp <addr> <len> [hw] | bp <asid> <len> hw_ctx
 *      | bp <addr> <asid> <len> */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
	case 0:
		return handle_bp_command_list(CMD_CTX);

	case 2:
		/* software breakpoint: addr length */
		asid = 0;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);

	case 3:
		if (strcmp(CMD_ARGV[2], "hw") == 0) {
			/* hardware breakpoint: addr length hw */
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			asid = 0;
			return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
		} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
			/* context breakpoint: asid length hw_ctx */
			hw = BKPT_HARD;
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			addr = 0;
			return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
		}
		/* NOTE(review): any other third argument falls through to the
		 * hybrid (4-argument) parsing below, which re-reads CMD_ARGV[1]
		 * as an ASID — confirm this fallthrough is intentional rather
		 * than a missing ERROR_COMMAND_SYNTAX_ERROR. */
	case 4:
		/* hybrid breakpoint: addr asid length */
		hw = BKPT_HARD;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
		return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
  3102. COMMAND_HANDLER(handle_rbp_command)
  3103. {
  3104. if (CMD_ARGC != 1)
  3105. return ERROR_COMMAND_SYNTAX_ERROR;
  3106. target_addr_t addr;
  3107. COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
  3108. struct target *target = get_current_target(CMD_CTX);
  3109. breakpoint_remove(target, addr);
  3110. return ERROR_OK;
  3111. }
/* 'wp' command: with no arguments list the watchpoints; otherwise add one.
 * Usage: wp [address length [r|w|a [value [mask]]]] */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC == 0) {
		/* No arguments: dump the current watchpoint list. */
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD_CTX, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx32
					", mask: 0x%8.8" PRIx32,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	/* Defaults for the optional arguments. */
	enum watchpoint_rw type = WPT_ACCESS;
	uint32_t addr = 0;
	uint32_t length = 0;
	uint32_t data_value = 0x0;
	uint32_t data_mask = 0xffffffff;

	/* Each case consumes one optional argument, then falls through to
	 * parse the remaining (more mandatory) ones. */
	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
		/* fall through */
	case 3:
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (ERROR_OK != retval)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
  3172. COMMAND_HANDLER(handle_rwp_command)
  3173. {
  3174. if (CMD_ARGC != 1)
  3175. return ERROR_COMMAND_SYNTAX_ERROR;
  3176. uint32_t addr;
  3177. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
  3178. struct target *target = get_current_target(CMD_CTX);
  3179. watchpoint_remove(target, addr);
  3180. return ERROR_OK;
  3181. }
  3182. /**
  3183. * Translate a virtual address to a physical address.
  3184. *
  3185. * The low-level target implementation must have logged a detailed error
  3186. * which is forwarded to telnet/GDB session.
  3187. */
  3188. COMMAND_HANDLER(handle_virt2phys_command)
  3189. {
  3190. if (CMD_ARGC != 1)
  3191. return ERROR_COMMAND_SYNTAX_ERROR;
  3192. target_addr_t va;
  3193. COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
  3194. target_addr_t pa;
  3195. struct target *target = get_current_target(CMD_CTX);
  3196. int retval = target->type->virt2phys(target, va, &pa);
  3197. if (retval == ERROR_OK)
  3198. command_print(CMD_CTX, "Physical address " TARGET_ADDR_FMT "", pa);
  3199. return retval;
  3200. }
  3201. static void writeData(FILE *f, const void *data, size_t len)
  3202. {
  3203. size_t written = fwrite(data, 1, len, f);
  3204. if (written != len)
  3205. LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
  3206. }
  3207. static void writeLong(FILE *f, int l, struct target *target)
  3208. {
  3209. uint8_t val[4];
  3210. target_buffer_set_u32(target, val, l);
  3211. writeData(f, val, 4);
  3212. }
  3213. static void writeString(FILE *f, char *s)
  3214. {
  3215. writeData(f, s, strlen(s));
  3216. }
typedef unsigned char UNIT[2];	/* unit of profiling */

/* Dump a gmon.out histogram file.
 * Samples outside [min, max) are dropped; the rest are binned into up
 * to 128K buckets and emitted as saturated 16-bit counts in the format
 * expected by gprof. */
static void write_gmon(uint32_t *samples, uint32_t sampleNum, const char *filename, bool with_range,
		uint32_t start_address, uint32_t end_address, struct target *target)
{
	uint32_t i;

	FILE *f = fopen(filename, "w");
	if (f == NULL)
		return;

	/* gmon header: magic, version 1, three words of padding. */
	writeString(f, "gmon");
	writeLong(f, 0x00000001, target); /* Version */
	writeLong(f, 0, target); /* padding */
	writeLong(f, 0, target); /* padding */
	writeLong(f, 0, target); /* padding */

	uint8_t zero = 0;  /* GMON_TAG_TIME_HIST */
	writeData(f, &zero, 1);

	/* figure out bucket size */
	uint32_t min;
	uint32_t max;
	if (with_range) {
		/* caller supplied the PC window explicitly */
		min = start_address;
		max = end_address;
	} else {
		/* derive the window from the observed samples */
		min = samples[0];
		max = samples[0];
		for (i = 0; i < sampleNum; i++) {
			if (min > samples[i])
				min = samples[i];
			if (max < samples[i])
				max = samples[i];
		}

		/* max should be (largest sample + 1)
		 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
		max++;
	}

	int addressSpace = max - min;
	assert(addressSpace >= 2);

	/* FIXME: What is the reasonable number of buckets?
	 * The profiling result will be more accurate if there are enough buckets. */
	static const uint32_t maxBuckets = 128 * 1024; /* maximum buckets. */
	uint32_t numBuckets = addressSpace / sizeof(UNIT);
	if (numBuckets > maxBuckets)
		numBuckets = maxBuckets;
	int *buckets = malloc(sizeof(int) * numBuckets);
	if (buckets == NULL) {
		fclose(f);
		return;
	}
	memset(buckets, 0, sizeof(int) * numBuckets);
	for (i = 0; i < sampleNum; i++) {
		uint32_t address = samples[i];

		/* drop samples outside the histogram window */
		if ((address < min) || (max <= address))
			continue;

		long long a = address - min;
		long long b = numBuckets;
		long long c = addressSpace;
		int index_t = (a * b) / c; /* danger!!!! int32 overflows */
		buckets[index_t]++;
	}

	/* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
	writeLong(f, min, target);		/* low_pc */
	writeLong(f, max, target);		/* high_pc */
	writeLong(f, numBuckets, target);	/* # of buckets */
	writeLong(f, 100, target);		/* KLUDGE! We lie, ca. 100Hz best case. */
	writeString(f, "seconds");
	/* pad the dimension field to its fixed 15-byte width */
	for (i = 0; i < (15-strlen("seconds")); i++)
		writeData(f, &zero, 1);
	writeString(f, "s");

	/*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */

	char *data = malloc(2 * numBuckets);
	if (data != NULL) {
		for (i = 0; i < numBuckets; i++) {
			int val;
			val = buckets[i];
			/* counts are stored as 16-bit values, saturated at 65535 */
			if (val > 65535)
				val = 65535;
			data[i * 2] = val&0xff;
			data[i * 2 + 1] = (val >> 8) & 0xff;
		}
		free(buckets);
		writeData(f, data, numBuckets * 2);
		free(data);
	} else
		free(buckets);

	fclose(f);
}
  3303. /* profiling samples the CPU PC as quickly as OpenOCD is able,
  3304. * which will be used as a random sampling of PC */
  3305. COMMAND_HANDLER(handle_profile_command)
  3306. {
  3307. struct target *target = get_current_target(CMD_CTX);
  3308. if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
  3309. return ERROR_COMMAND_SYNTAX_ERROR;
  3310. const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
  3311. uint32_t offset;
  3312. uint32_t num_of_samples;
  3313. int retval = ERROR_OK;
  3314. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
  3315. uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
  3316. if (samples == NULL) {
  3317. LOG_ERROR("No memory to store samples.");
  3318. return ERROR_FAIL;
  3319. }
  3320. /**
  3321. * Some cores let us sample the PC without the
  3322. * annoying halt/resume step; for example, ARMv7 PCSR.
  3323. * Provide a way to use that more efficient mechanism.
  3324. */
  3325. retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
  3326. &num_of_samples, offset);
  3327. if (retval != ERROR_OK) {
  3328. free(samples);
  3329. return retval;
  3330. }
  3331. assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
  3332. retval = target_poll(target);
  3333. if (retval != ERROR_OK) {
  3334. free(samples);
  3335. return retval;
  3336. }
  3337. if (target->state == TARGET_RUNNING) {
  3338. retval = target_halt(target);
  3339. if (retval != ERROR_OK) {
  3340. free(samples);
  3341. return retval;
  3342. }
  3343. }
  3344. retval = target_poll(target);
  3345. if (retval != ERROR_OK) {
  3346. free(samples);
  3347. return retval;
  3348. }
  3349. uint32_t start_address = 0;
  3350. uint32_t end_address = 0;
  3351. bool with_range = false;
  3352. if (CMD_ARGC == 4) {
  3353. with_range = true;
  3354. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
  3355. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
  3356. }
  3357. write_gmon(samples, num_of_samples, CMD_ARGV[1],
  3358. with_range, start_address, end_address, target);
  3359. command_print(CMD_CTX, "Wrote %s", CMD_ARGV[1]);
  3360. free(samples);
  3361. return retval;
  3362. }
  3363. static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val)
  3364. {
  3365. char *namebuf;
  3366. Jim_Obj *nameObjPtr, *valObjPtr;
  3367. int result;
  3368. namebuf = alloc_printf("%s(%d)", varname, idx);
  3369. if (!namebuf)
  3370. return JIM_ERR;
  3371. nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
  3372. valObjPtr = Jim_NewIntObj(interp, val);
  3373. if (!nameObjPtr || !valObjPtr) {
  3374. free(namebuf);
  3375. return JIM_ERR;
  3376. }
  3377. Jim_IncrRefCount(nameObjPtr);
  3378. Jim_IncrRefCount(valObjPtr);
  3379. result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
  3380. Jim_DecrRefCount(interp, nameObjPtr);
  3381. Jim_DecrRefCount(interp, valObjPtr);
  3382. free(namebuf);
  3383. /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
  3384. return result;
  3385. }
  3386. static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  3387. {
  3388. struct command_context *context;
  3389. struct target *target;
  3390. context = current_command_context(interp);
  3391. assert(context != NULL);
  3392. target = get_current_target(context);
  3393. if (target == NULL) {
  3394. LOG_ERROR("mem2array: no current target");
  3395. return JIM_ERR;
  3396. }
  3397. return target_mem2array(interp, target, argc - 1, argv + 1);
  3398. }
  3399. static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
  3400. {
  3401. long l;
  3402. uint32_t width;
  3403. int len;
  3404. uint32_t addr;
  3405. uint32_t count;
  3406. uint32_t v;
  3407. const char *varname;
  3408. const char *phys;
  3409. bool is_phys;
  3410. int n, e, retval;
  3411. uint32_t i;
  3412. /* argv[1] = name of array to receive the data
  3413. * argv[2] = desired width
  3414. * argv[3] = memory address
  3415. * argv[4] = count of times to read
  3416. */
  3417. if (argc < 4 || argc > 5) {
  3418. Jim_WrongNumArgs(interp, 1, argv, "varname width addr nelems [phys]");
  3419. return JIM_ERR;
  3420. }
  3421. varname = Jim_GetString(argv[0], &len);
  3422. /* given "foo" get space for worse case "foo(%d)" .. add 20 */
  3423. e = Jim_GetLong(interp, argv[1], &l);
  3424. width = l;
  3425. if (e != JIM_OK)
  3426. return e;
  3427. e = Jim_GetLong(interp, argv[2], &l);
  3428. addr = l;
  3429. if (e != JIM_OK)
  3430. return e;
  3431. e = Jim_GetLong(interp, argv[3], &l);
  3432. len = l;
  3433. if (e != JIM_OK)
  3434. return e;
  3435. is_phys = false;
  3436. if (argc > 4) {
  3437. phys = Jim_GetString(argv[4], &n);
  3438. if (!strncmp(phys, "phys", n))
  3439. is_phys = true;
  3440. else
  3441. return JIM_ERR;
  3442. }
  3443. switch (width) {
  3444. case 8:
  3445. width = 1;
  3446. break;
  3447. case 16:
  3448. width = 2;
  3449. break;
  3450. case 32:
  3451. width = 4;
  3452. break;
  3453. default:
  3454. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3455. Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
  3456. return JIM_ERR;
  3457. }
  3458. if (len == 0) {
  3459. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3460. Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
  3461. return JIM_ERR;
  3462. }
  3463. if ((addr + (len * width)) < addr) {
  3464. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3465. Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
  3466. return JIM_ERR;
  3467. }
  3468. /* absurd transfer size? */
  3469. if (len > 65536) {
  3470. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3471. Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
  3472. return JIM_ERR;
  3473. }
  3474. if ((width == 1) ||
  3475. ((width == 2) && ((addr & 1) == 0)) ||
  3476. ((width == 4) && ((addr & 3) == 0))) {
  3477. /* all is well */
  3478. } else {
  3479. char buf[100];
  3480. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3481. sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRId32 " byte reads",
  3482. addr,
  3483. width);
  3484. Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
  3485. return JIM_ERR;
  3486. }
  3487. /* Transfer loop */
  3488. /* index counter */
  3489. n = 0;
  3490. size_t buffersize = 4096;
  3491. uint8_t *buffer = malloc(buffersize);
  3492. if (buffer == NULL)
  3493. return JIM_ERR;
  3494. /* assume ok */
  3495. e = JIM_OK;
  3496. while (len) {
  3497. /* Slurp... in buffer size chunks */
  3498. count = len; /* in objects.. */
  3499. if (count > (buffersize / width))
  3500. count = (buffersize / width);
  3501. if (is_phys)
  3502. retval = target_read_phys_memory(target, addr, width, count, buffer);
  3503. else
  3504. retval = target_read_memory(target, addr, width, count, buffer);
  3505. if (retval != ERROR_OK) {
  3506. /* BOO !*/
  3507. LOG_ERROR("mem2array: Read @ 0x%08" PRIx32 ", w=%" PRId32 ", cnt=%" PRId32 ", failed",
  3508. addr,
  3509. width,
  3510. count);
  3511. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3512. Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
  3513. e = JIM_ERR;
  3514. break;
  3515. } else {
  3516. v = 0; /* shut up gcc */
  3517. for (i = 0; i < count ; i++, n++) {
  3518. switch (width) {
  3519. case 4:
  3520. v = target_buffer_get_u32(target, &buffer[i*width]);
  3521. break;
  3522. case 2:
  3523. v = target_buffer_get_u16(target, &buffer[i*width]);
  3524. break;
  3525. case 1:
  3526. v = buffer[i] & 0x0ff;
  3527. break;
  3528. }
  3529. new_int_array_element(interp, varname, n, v);
  3530. }
  3531. len -= count;
  3532. addr += count * width;
  3533. }
  3534. }
  3535. free(buffer);
  3536. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3537. return e;
  3538. }
  3539. static int get_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t *val)
  3540. {
  3541. char *namebuf;
  3542. Jim_Obj *nameObjPtr, *valObjPtr;
  3543. int result;
  3544. long l;
  3545. namebuf = alloc_printf("%s(%d)", varname, idx);
  3546. if (!namebuf)
  3547. return JIM_ERR;
  3548. nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
  3549. if (!nameObjPtr) {
  3550. free(namebuf);
  3551. return JIM_ERR;
  3552. }
  3553. Jim_IncrRefCount(nameObjPtr);
  3554. valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG);
  3555. Jim_DecrRefCount(interp, nameObjPtr);
  3556. free(namebuf);
  3557. if (valObjPtr == NULL)
  3558. return JIM_ERR;
  3559. result = Jim_GetLong(interp, valObjPtr, &l);
  3560. /* printf("%s(%d) => 0%08x\n", varname, idx, val); */
  3561. *val = l;
  3562. return result;
  3563. }
  3564. static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  3565. {
  3566. struct command_context *context;
  3567. struct target *target;
  3568. context = current_command_context(interp);
  3569. assert(context != NULL);
  3570. target = get_current_target(context);
  3571. if (target == NULL) {
  3572. LOG_ERROR("array2mem: no current target");
  3573. return JIM_ERR;
  3574. }
  3575. return target_array2mem(interp, target, argc-1, argv + 1);
  3576. }
  3577. static int target_array2mem(Jim_Interp *interp, struct target *target,
  3578. int argc, Jim_Obj *const *argv)
  3579. {
  3580. long l;
  3581. uint32_t width;
  3582. int len;
  3583. uint32_t addr;
  3584. uint32_t count;
  3585. uint32_t v;
  3586. const char *varname;
  3587. const char *phys;
  3588. bool is_phys;
  3589. int n, e, retval;
  3590. uint32_t i;
  3591. /* argv[1] = name of array to get the data
  3592. * argv[2] = desired width
  3593. * argv[3] = memory address
  3594. * argv[4] = count to write
  3595. */
  3596. if (argc < 4 || argc > 5) {
  3597. Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
  3598. return JIM_ERR;
  3599. }
  3600. varname = Jim_GetString(argv[0], &len);
  3601. /* given "foo" get space for worse case "foo(%d)" .. add 20 */
  3602. e = Jim_GetLong(interp, argv[1], &l);
  3603. width = l;
  3604. if (e != JIM_OK)
  3605. return e;
  3606. e = Jim_GetLong(interp, argv[2], &l);
  3607. addr = l;
  3608. if (e != JIM_OK)
  3609. return e;
  3610. e = Jim_GetLong(interp, argv[3], &l);
  3611. len = l;
  3612. if (e != JIM_OK)
  3613. return e;
  3614. is_phys = false;
  3615. if (argc > 4) {
  3616. phys = Jim_GetString(argv[4], &n);
  3617. if (!strncmp(phys, "phys", n))
  3618. is_phys = true;
  3619. else
  3620. return JIM_ERR;
  3621. }
  3622. switch (width) {
  3623. case 8:
  3624. width = 1;
  3625. break;
  3626. case 16:
  3627. width = 2;
  3628. break;
  3629. case 32:
  3630. width = 4;
  3631. break;
  3632. default:
  3633. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3634. Jim_AppendStrings(interp, Jim_GetResult(interp),
  3635. "Invalid width param, must be 8/16/32", NULL);
  3636. return JIM_ERR;
  3637. }
  3638. if (len == 0) {
  3639. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3640. Jim_AppendStrings(interp, Jim_GetResult(interp),
  3641. "array2mem: zero width read?", NULL);
  3642. return JIM_ERR;
  3643. }
  3644. if ((addr + (len * width)) < addr) {
  3645. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3646. Jim_AppendStrings(interp, Jim_GetResult(interp),
  3647. "array2mem: addr + len - wraps to zero?", NULL);
  3648. return JIM_ERR;
  3649. }
  3650. /* absurd transfer size? */
  3651. if (len > 65536) {
  3652. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3653. Jim_AppendStrings(interp, Jim_GetResult(interp),
  3654. "array2mem: absurd > 64K item request", NULL);
  3655. return JIM_ERR;
  3656. }
  3657. if ((width == 1) ||
  3658. ((width == 2) && ((addr & 1) == 0)) ||
  3659. ((width == 4) && ((addr & 3) == 0))) {
  3660. /* all is well */
  3661. } else {
  3662. char buf[100];
  3663. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3664. sprintf(buf, "array2mem address: 0x%08" PRIx32 " is not aligned for %" PRId32 " byte reads",
  3665. addr,
  3666. width);
  3667. Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
  3668. return JIM_ERR;
  3669. }
  3670. /* Transfer loop */
  3671. /* index counter */
  3672. n = 0;
  3673. /* assume ok */
  3674. e = JIM_OK;
  3675. size_t buffersize = 4096;
  3676. uint8_t *buffer = malloc(buffersize);
  3677. if (buffer == NULL)
  3678. return JIM_ERR;
  3679. while (len) {
  3680. /* Slurp... in buffer size chunks */
  3681. count = len; /* in objects.. */
  3682. if (count > (buffersize / width))
  3683. count = (buffersize / width);
  3684. v = 0; /* shut up gcc */
  3685. for (i = 0; i < count; i++, n++) {
  3686. get_int_array_element(interp, varname, n, &v);
  3687. switch (width) {
  3688. case 4:
  3689. target_buffer_set_u32(target, &buffer[i * width], v);
  3690. break;
  3691. case 2:
  3692. target_buffer_set_u16(target, &buffer[i * width], v);
  3693. break;
  3694. case 1:
  3695. buffer[i] = v & 0x0ff;
  3696. break;
  3697. }
  3698. }
  3699. len -= count;
  3700. if (is_phys)
  3701. retval = target_write_phys_memory(target, addr, width, count, buffer);
  3702. else
  3703. retval = target_write_memory(target, addr, width, count, buffer);
  3704. if (retval != ERROR_OK) {
  3705. /* BOO !*/
  3706. LOG_ERROR("array2mem: Write @ 0x%08" PRIx32 ", w=%" PRId32 ", cnt=%" PRId32 ", failed",
  3707. addr,
  3708. width,
  3709. count);
  3710. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3711. Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
  3712. e = JIM_ERR;
  3713. break;
  3714. }
  3715. addr += count * width;
  3716. }
  3717. free(buffer);
  3718. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3719. return e;
  3720. }
  3721. /* FIX? should we propagate errors here rather than printing them
  3722. * and continuing?
  3723. */
  3724. void target_handle_event(struct target *target, enum target_event e)
  3725. {
  3726. struct target_event_action *teap;
  3727. for (teap = target->event_action; teap != NULL; teap = teap->next) {
  3728. if (teap->event == e) {
  3729. LOG_DEBUG("target: (%d) %s (%s) event: %d (%s) action: %s",
  3730. target->target_number,
  3731. target_name(target),
  3732. target_type_name(target),
  3733. e,
  3734. Jim_Nvp_value2name_simple(nvp_target_event, e)->name,
  3735. Jim_GetString(teap->body, NULL));
  3736. if (Jim_EvalObj(teap->interp, teap->body) != JIM_OK) {
  3737. Jim_MakeErrorMessage(teap->interp);
  3738. command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(teap->interp), NULL));
  3739. }
  3740. }
  3741. }
  3742. }
  3743. /**
  3744. * Returns true only if the target has a handler for the specified event.
  3745. */
  3746. bool target_has_event_action(struct target *target, enum target_event event)
  3747. {
  3748. struct target_event_action *teap;
  3749. for (teap = target->event_action; teap != NULL; teap = teap->next) {
  3750. if (teap->event == event)
  3751. return true;
  3752. }
  3753. return false;
  3754. }
/* Option keys for the "target configure"/"cget" parser; each value is
 * bound to its "-option" string in nvp_config_opts[] below and handled
 * in target_configure(). */
enum target_cfg_param {
	TCFG_TYPE,		/* -type: target type name (query only) */
	TCFG_EVENT,		/* -event: Tcl handler body for a target event */
	TCFG_WORK_AREA_VIRT,	/* -work-area-virt: work area virtual address */
	TCFG_WORK_AREA_PHYS,	/* -work-area-phys: work area physical address */
	TCFG_WORK_AREA_SIZE,	/* -work-area-size: work area size */
	TCFG_WORK_AREA_BACKUP,	/* -work-area-backup: 1 = back up the work area */
	TCFG_ENDIAN,		/* -endian: target byte order */
	TCFG_COREID,		/* -coreid: core id on multi-core targets */
	TCFG_CHAIN_POSITION,	/* -chain-position: TAP the target sits behind */
	TCFG_DBGBASE,		/* -dbgbase: debug register base address */
	TCFG_CTIBASE,		/* -ctibase: CTI base address */
	TCFG_RTOS,		/* -rtos: RTOS awareness (see rtos_create) */
	TCFG_DEFER_EXAMINE,	/* -defer-examine: skip automatic examine */
};
/* Name/value table mapping "target configure"/"cget" option strings to
 * enum target_cfg_param keys; terminated by a NULL-name sentinel. */
static Jim_Nvp nvp_config_opts[] = {
	{ .name = "-type", .value = TCFG_TYPE },
	{ .name = "-event", .value = TCFG_EVENT },
	{ .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian" , .value = TCFG_ENDIAN },
	{ .name = "-coreid", .value = TCFG_COREID },
	{ .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase", .value = TCFG_DBGBASE },
	{ .name = "-ctibase", .value = TCFG_CTIBASE },
	{ .name = "-rtos", .value = TCFG_RTOS },
	{ .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
	{ .name = NULL, .value = -1 }
};
  3786. static int target_configure(Jim_GetOptInfo *goi, struct target *target)
  3787. {
  3788. Jim_Nvp *n;
  3789. Jim_Obj *o;
  3790. jim_wide w;
  3791. int e;
  3792. /* parse config or cget options ... */
  3793. while (goi->argc > 0) {
  3794. Jim_SetEmptyResult(goi->interp);
  3795. /* Jim_GetOpt_Debug(goi); */
  3796. if (target->type->target_jim_configure) {
  3797. /* target defines a configure function */
  3798. /* target gets first dibs on parameters */
  3799. e = (*(target->type->target_jim_configure))(target, goi);
  3800. if (e == JIM_OK) {
  3801. /* more? */
  3802. continue;
  3803. }
  3804. if (e == JIM_ERR) {
  3805. /* An error */
  3806. return e;
  3807. }
  3808. /* otherwise we 'continue' below */
  3809. }
  3810. e = Jim_GetOpt_Nvp(goi, nvp_config_opts, &n);
  3811. if (e != JIM_OK) {
  3812. Jim_GetOpt_NvpUnknown(goi, nvp_config_opts, 0);
  3813. return e;
  3814. }
  3815. switch (n->value) {
  3816. case TCFG_TYPE:
  3817. /* not setable */
  3818. if (goi->isconfigure) {
  3819. Jim_SetResultFormatted(goi->interp,
  3820. "not settable: %s", n->name);
  3821. return JIM_ERR;
  3822. } else {
  3823. no_params:
  3824. if (goi->argc != 0) {
  3825. Jim_WrongNumArgs(goi->interp,
  3826. goi->argc, goi->argv,
  3827. "NO PARAMS");
  3828. return JIM_ERR;
  3829. }
  3830. }
  3831. Jim_SetResultString(goi->interp,
  3832. target_type_name(target), -1);
  3833. /* loop for more */
  3834. break;
  3835. case TCFG_EVENT:
  3836. if (goi->argc == 0) {
  3837. Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
  3838. return JIM_ERR;
  3839. }
  3840. e = Jim_GetOpt_Nvp(goi, nvp_target_event, &n);
  3841. if (e != JIM_OK) {
  3842. Jim_GetOpt_NvpUnknown(goi, nvp_target_event, 1);
  3843. return e;
  3844. }
  3845. if (goi->isconfigure) {
  3846. if (goi->argc != 1) {
  3847. Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
  3848. return JIM_ERR;
  3849. }
  3850. } else {
  3851. if (goi->argc != 0) {
  3852. Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
  3853. return JIM_ERR;
  3854. }
  3855. }
  3856. {
  3857. struct target_event_action *teap;
  3858. teap = target->event_action;
  3859. /* replace existing? */
  3860. while (teap) {
  3861. if (teap->event == (enum target_event)n->value)
  3862. break;
  3863. teap = teap->next;
  3864. }
  3865. if (goi->isconfigure) {
  3866. bool replace = true;
  3867. if (teap == NULL) {
  3868. /* create new */
  3869. teap = calloc(1, sizeof(*teap));
  3870. replace = false;
  3871. }
  3872. teap->event = n->value;
  3873. teap->interp = goi->interp;
  3874. Jim_GetOpt_Obj(goi, &o);
  3875. if (teap->body)
  3876. Jim_DecrRefCount(teap->interp, teap->body);
  3877. teap->body = Jim_DuplicateObj(goi->interp, o);
  3878. /*
  3879. * FIXME:
  3880. * Tcl/TK - "tk events" have a nice feature.
  3881. * See the "BIND" command.
  3882. * We should support that here.
  3883. * You can specify %X and %Y in the event code.
  3884. * The idea is: %T - target name.
  3885. * The idea is: %N - target number
  3886. * The idea is: %E - event name.
  3887. */
  3888. Jim_IncrRefCount(teap->body);
  3889. if (!replace) {
  3890. /* add to head of event list */
  3891. teap->next = target->event_action;
  3892. target->event_action = teap;
  3893. }
  3894. Jim_SetEmptyResult(goi->interp);
  3895. } else {
  3896. /* get */
  3897. if (teap == NULL)
  3898. Jim_SetEmptyResult(goi->interp);
  3899. else
  3900. Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
  3901. }
  3902. }
  3903. /* loop for more */
  3904. break;
  3905. case TCFG_WORK_AREA_VIRT:
  3906. if (goi->isconfigure) {
  3907. target_free_all_working_areas(target);
  3908. e = Jim_GetOpt_Wide(goi, &w);
  3909. if (e != JIM_OK)
  3910. return e;
  3911. target->working_area_virt = w;
  3912. target->working_area_virt_spec = true;
  3913. } else {
  3914. if (goi->argc != 0)
  3915. goto no_params;
  3916. }
  3917. Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
  3918. /* loop for more */
  3919. break;
  3920. case TCFG_WORK_AREA_PHYS:
  3921. if (goi->isconfigure) {
  3922. target_free_all_working_areas(target);
  3923. e = Jim_GetOpt_Wide(goi, &w);
  3924. if (e != JIM_OK)
  3925. return e;
  3926. target->working_area_phys = w;
  3927. target->working_area_phys_spec = true;
  3928. } else {
  3929. if (goi->argc != 0)
  3930. goto no_params;
  3931. }
  3932. Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
  3933. /* loop for more */
  3934. break;
  3935. case TCFG_WORK_AREA_SIZE:
  3936. if (goi->isconfigure) {
  3937. target_free_all_working_areas(target);
  3938. e = Jim_GetOpt_Wide(goi, &w);
  3939. if (e != JIM_OK)
  3940. return e;
  3941. target->working_area_size = w;
  3942. } else {
  3943. if (goi->argc != 0)
  3944. goto no_params;
  3945. }
  3946. Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
  3947. /* loop for more */
  3948. break;
  3949. case TCFG_WORK_AREA_BACKUP:
  3950. if (goi->isconfigure) {
  3951. target_free_all_working_areas(target);
  3952. e = Jim_GetOpt_Wide(goi, &w);
  3953. if (e != JIM_OK)
  3954. return e;
  3955. /* make this exactly 1 or 0 */
  3956. target->backup_working_area = (!!w);
  3957. } else {
  3958. if (goi->argc != 0)
  3959. goto no_params;
  3960. }
  3961. Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
  3962. /* loop for more e*/
  3963. break;
  3964. case TCFG_ENDIAN:
  3965. if (goi->isconfigure) {
  3966. e = Jim_GetOpt_Nvp(goi, nvp_target_endian, &n);
  3967. if (e != JIM_OK) {
  3968. Jim_GetOpt_NvpUnknown(goi, nvp_target_endian, 1);
  3969. return e;
  3970. }
  3971. target->endianness = n->value;
  3972. } else {
  3973. if (goi->argc != 0)
  3974. goto no_params;
  3975. }
  3976. n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
  3977. if (n->name == NULL) {
  3978. target->endianness = TARGET_LITTLE_ENDIAN;
  3979. n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
  3980. }
  3981. Jim_SetResultString(goi->interp, n->name, -1);
  3982. /* loop for more */
  3983. break;
  3984. case TCFG_COREID:
  3985. if (goi->isconfigure) {
  3986. e = Jim_GetOpt_Wide(goi, &w);
  3987. if (e != JIM_OK)
  3988. return e;
  3989. target->coreid = (int32_t)w;
  3990. } else {
  3991. if (goi->argc != 0)
  3992. goto no_params;
  3993. }
  3994. Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
  3995. /* loop for more */
  3996. break;
  3997. case TCFG_CHAIN_POSITION:
  3998. if (goi->isconfigure) {
  3999. Jim_Obj *o_t;
  4000. struct jtag_tap *tap;
  4001. target_free_all_working_areas(target);
  4002. e = Jim_GetOpt_Obj(goi, &o_t);
  4003. if (e != JIM_OK)
  4004. return e;
  4005. tap = jtag_tap_by_jim_obj(goi->interp, o_t);
  4006. if (tap == NULL)
  4007. return JIM_ERR;
  4008. /* make this exactly 1 or 0 */
  4009. target->tap = tap;
  4010. } else {
  4011. if (goi->argc != 0)
  4012. goto no_params;
  4013. }
  4014. Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
  4015. /* loop for more e*/
  4016. break;
  4017. case TCFG_DBGBASE:
  4018. if (goi->isconfigure) {
  4019. e = Jim_GetOpt_Wide(goi, &w);
  4020. if (e != JIM_OK)
  4021. return e;
  4022. target->dbgbase = (uint32_t)w;
  4023. target->dbgbase_set = true;
  4024. } else {
  4025. if (goi->argc != 0)
  4026. goto no_params;
  4027. }
  4028. Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
  4029. /* loop for more */
  4030. break;
  4031. case TCFG_CTIBASE:
  4032. if (goi->isconfigure) {
  4033. e = Jim_GetOpt_Wide(goi, &w);
  4034. if (e != JIM_OK)
  4035. return e;
  4036. target->ctibase = (uint32_t)w;
  4037. target->ctibase_set = true;
  4038. } else {
  4039. if (goi->argc != 0)
  4040. goto no_params;
  4041. }
  4042. Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->ctibase));
  4043. /* loop for more */
  4044. break;
  4045. case TCFG_RTOS:
  4046. /* RTOS */
  4047. {
  4048. int result = rtos_create(goi, target);
  4049. if (result != JIM_OK)
  4050. return result;
  4051. }
  4052. /* loop for more */
  4053. break;
  4054. case TCFG_DEFER_EXAMINE:
  4055. /* DEFER_EXAMINE */
  4056. target->defer_examine = true;
  4057. /* loop for more */
  4058. break;
  4059. }
  4060. } /* while (goi->argc) */
  4061. /* done - we return */
  4062. return JIM_OK;
  4063. }
  4064. static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
  4065. {
  4066. Jim_GetOptInfo goi;
  4067. Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
  4068. goi.isconfigure = !strcmp(Jim_GetString(argv[0], NULL), "configure");
  4069. if (goi.argc < 1) {
  4070. Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
  4071. "missing: -option ...");
  4072. return JIM_ERR;
  4073. }
  4074. struct target *target = Jim_CmdPrivData(goi.interp);
  4075. return target_configure(&goi, target);
  4076. }
  4077. static int jim_target_mw(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4078. {
  4079. const char *cmd_name = Jim_GetString(argv[0], NULL);
  4080. Jim_GetOptInfo goi;
  4081. Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
  4082. if (goi.argc < 2 || goi.argc > 4) {
  4083. Jim_SetResultFormatted(goi.interp,
  4084. "usage: %s [phys] <address> <data> [<count>]", cmd_name);
  4085. return JIM_ERR;
  4086. }
  4087. target_write_fn fn;
  4088. fn = target_write_memory;
  4089. int e;
  4090. if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) {
  4091. /* consume it */
  4092. struct Jim_Obj *obj;
  4093. e = Jim_GetOpt_Obj(&goi, &obj);
  4094. if (e != JIM_OK)
  4095. return e;
  4096. fn = target_write_phys_memory;
  4097. }
  4098. jim_wide a;
  4099. e = Jim_GetOpt_Wide(&goi, &a);
  4100. if (e != JIM_OK)
  4101. return e;
  4102. jim_wide b;
  4103. e = Jim_GetOpt_Wide(&goi, &b);
  4104. if (e != JIM_OK)
  4105. return e;
  4106. jim_wide c = 1;
  4107. if (goi.argc == 1) {
  4108. e = Jim_GetOpt_Wide(&goi, &c);
  4109. if (e != JIM_OK)
  4110. return e;
  4111. }
  4112. /* all args must be consumed */
  4113. if (goi.argc != 0)
  4114. return JIM_ERR;
  4115. struct target *target = Jim_CmdPrivData(goi.interp);
  4116. unsigned data_size;
  4117. if (strcasecmp(cmd_name, "mww") == 0)
  4118. data_size = 4;
  4119. else if (strcasecmp(cmd_name, "mwh") == 0)
  4120. data_size = 2;
  4121. else if (strcasecmp(cmd_name, "mwb") == 0)
  4122. data_size = 1;
  4123. else {
  4124. LOG_ERROR("command '%s' unknown: ", cmd_name);
  4125. return JIM_ERR;
  4126. }
  4127. return (target_fill_mem(target, a, fn, data_size, b, c) == ERROR_OK) ? JIM_OK : JIM_ERR;
  4128. }
/**
 * @brief Reads an array of words/halfwords/bytes from target memory starting at specified address.
 *
 * Usage: mdw [phys] <address> [<count>] - for 32 bit reads
 *        mdh [phys] <address> [<count>] - for 16 bit reads
 *        mdb [phys] <address> [<count>] - for 8 bit reads
 *
 *  Count defaults to 1.
 *
 * Calls target_read_memory or target_read_phys_memory depending on
 * the presence of the "phys" argument
 * Reads the target memory in blocks of max. 32 bytes, and returns an array of ints formatted
 * to int representation in base16.
 * Also outputs read data in a human readable form using command_print
 *
 * @param phys if present target_read_phys_memory will be used instead of target_read_memory
 * @param address address where to start the read. May be specified in decimal or hex using the standard "0x" prefix
 * @param count optional count parameter to read an array of values. If not specified, defaults to 1.
 * @returns: JIM_ERR on error or JIM_OK on success and sets the result string to an array of ascii formatted numbers
 * on success, with [<count>] number of elements.
 *
 * In case of little endian target:
 *      Example1: "mdw 0x00000000"  returns "10123456"
 *      Example2: "mdh 0x00000000 1" returns "3456"
 *      Example3: "mdb 0x00000000" returns "56"
 *      Example4: "mdh 0x00000000 2" returns "3456 1012"
 *      Example5: "mdb 0x00000000 3" returns "56 34 12"
 **/
static int jim_target_md(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	const char *cmd_name = Jim_GetString(argv[0], NULL);

	Jim_GetOptInfo goi;
	Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);

	if ((goi.argc < 1) || (goi.argc > 3)) {
		Jim_SetResultFormatted(goi.interp,
				"usage: %s [phys] <address> [<count>]", cmd_name);
		return JIM_ERR;
	}

	/* Default to virtual reads; "phys" switches to the physical space. */
	int (*fn)(struct target *target,
			target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer);
	fn = target_read_memory;

	int e;
	if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) {
		/* consume it */
		struct Jim_Obj *obj;
		e = Jim_GetOpt_Obj(&goi, &obj);
		if (e != JIM_OK)
			return e;
		fn = target_read_phys_memory;
	}

	/* Read address parameter */
	jim_wide addr;
	e = Jim_GetOpt_Wide(&goi, &addr);
	if (e != JIM_OK)
		return JIM_ERR;

	/* If next parameter exists, read it out as the count parameter, if not, set it to 1 (default) */
	jim_wide count;
	if (goi.argc == 1) {
		e = Jim_GetOpt_Wide(&goi, &count);
		if (e != JIM_OK)
			return JIM_ERR;
	} else
		count = 1;

	/* all args must be consumed */
	if (goi.argc != 0)
		return JIM_ERR;

	/* Access width in bytes, derived from the command name. */
	jim_wide dwidth = 1; /* shut up gcc */
	if (strcasecmp(cmd_name, "mdw") == 0)
		dwidth = 4;
	else if (strcasecmp(cmd_name, "mdh") == 0)
		dwidth = 2;
	else if (strcasecmp(cmd_name, "mdb") == 0)
		dwidth = 1;
	else {
		LOG_ERROR("command '%s' unknown: ", cmd_name);
		return JIM_ERR;
	}

	/* convert count to "bytes" */
	int bytes = count * dwidth;

	struct target *target = Jim_CmdPrivData(goi.interp);
	uint8_t  target_buf[32];
	jim_wide x, y, z;
	/* Dump in 16-byte lines: hex columns followed by an ASCII gutter. */
	while (bytes > 0) {
		y = (bytes < 16) ? bytes : 16; /* y = min(bytes, 16); */

		/* Try to read out next block */
		e = fn(target, addr, dwidth, y / dwidth, target_buf);

		if (e != ERROR_OK) {
			Jim_SetResultFormatted(interp, "error reading target @ 0x%08lx", (long)addr);
			return JIM_ERR;
		}

		command_print_sameline(NULL, "0x%08x ", (int)(addr));
		switch (dwidth) {
		case 4:
			for (x = 0; x < 16 && x < y; x += 4) {
				z = target_buffer_get_u32(target, &(target_buf[x]));
				command_print_sameline(NULL, "%08x ", (int)(z));
			}
			/* Pad a short final line so the ASCII gutter stays aligned. */
			for (; (x < 16) ; x += 4)
				command_print_sameline(NULL, " ");
			break;
		case 2:
			for (x = 0; x < 16 && x < y; x += 2) {
				z = target_buffer_get_u16(target, &(target_buf[x]));
				command_print_sameline(NULL, "%04x ", (int)(z));
			}
			for (; (x < 16) ; x += 2)
				command_print_sameline(NULL, " ");
			break;
		case 1:
		default:
			for (x = 0 ; (x < 16) && (x < y) ; x += 1) {
				z = target_buffer_get_u8(target, &(target_buf[x]));
				command_print_sameline(NULL, "%02x ", (int)(z));
			}
			for (; (x < 16) ; x += 1)
				command_print_sameline(NULL, " ");
			break;
		}
		/* ascii-ify the bytes */
		for (x = 0 ; x < y ; x++) {
			if ((target_buf[x] >= 0x20) &&
				(target_buf[x] <= 0x7e)) {
				/* good */
			} else {
				/* smack it */
				target_buf[x] = '.';
			}
		}
		/* space pad  */
		while (x < 16) {
			target_buf[x] = ' ';
			x++;
		}
		/* terminate */
		target_buf[16] = 0;
		/* print - with a newline */
		command_print_sameline(NULL, "%s\n", target_buf);
		/* NEXT... */
		bytes -= 16;
		addr += 16;
	}
	return JIM_OK;
}
  4272. static int jim_target_mem2array(Jim_Interp *interp,
  4273. int argc, Jim_Obj *const *argv)
  4274. {
  4275. struct target *target = Jim_CmdPrivData(interp);
  4276. return target_mem2array(interp, target, argc - 1, argv + 1);
  4277. }
  4278. static int jim_target_array2mem(Jim_Interp *interp,
  4279. int argc, Jim_Obj *const *argv)
  4280. {
  4281. struct target *target = Jim_CmdPrivData(interp);
  4282. return target_array2mem(interp, target, argc - 1, argv + 1);
  4283. }
  4284. static int jim_target_tap_disabled(Jim_Interp *interp)
  4285. {
  4286. Jim_SetResultFormatted(interp, "[TAP is disabled]");
  4287. return JIM_ERR;
  4288. }
  4289. static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4290. {
  4291. bool allow_defer = false;
  4292. Jim_GetOptInfo goi;
  4293. Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
  4294. if (goi.argc > 1) {
  4295. const char *cmd_name = Jim_GetString(argv[0], NULL);
  4296. Jim_SetResultFormatted(goi.interp,
  4297. "usage: %s ['allow-defer']", cmd_name);
  4298. return JIM_ERR;
  4299. }
  4300. if (goi.argc > 0 &&
  4301. strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
  4302. /* consume it */
  4303. struct Jim_Obj *obj;
  4304. int e = Jim_GetOpt_Obj(&goi, &obj);
  4305. if (e != JIM_OK)
  4306. return e;
  4307. allow_defer = true;
  4308. }
  4309. struct target *target = Jim_CmdPrivData(interp);
  4310. if (!target->tap->enabled)
  4311. return jim_target_tap_disabled(interp);
  4312. if (allow_defer && target->defer_examine) {
  4313. LOG_INFO("Deferring arp_examine of %s", target_name(target));
  4314. LOG_INFO("Use arp_examine command to examine it manually!");
  4315. return JIM_OK;
  4316. }
  4317. int e = target->type->examine(target);
  4318. if (e != ERROR_OK)
  4319. return JIM_ERR;
  4320. return JIM_OK;
  4321. }
  4322. static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
  4323. {
  4324. struct target *target = Jim_CmdPrivData(interp);
  4325. Jim_SetResultBool(interp, target_was_examined(target));
  4326. return JIM_OK;
  4327. }
  4328. static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
  4329. {
  4330. struct target *target = Jim_CmdPrivData(interp);
  4331. Jim_SetResultBool(interp, target->defer_examine);
  4332. return JIM_OK;
  4333. }
  4334. static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4335. {
  4336. if (argc != 1) {
  4337. Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
  4338. return JIM_ERR;
  4339. }
  4340. struct target *target = Jim_CmdPrivData(interp);
  4341. if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
  4342. return JIM_ERR;
  4343. return JIM_OK;
  4344. }
  4345. static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4346. {
  4347. if (argc != 1) {
  4348. Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
  4349. return JIM_ERR;
  4350. }
  4351. struct target *target = Jim_CmdPrivData(interp);
  4352. if (!target->tap->enabled)
  4353. return jim_target_tap_disabled(interp);
  4354. int e;
  4355. if (!(target_was_examined(target)))
  4356. e = ERROR_TARGET_NOT_EXAMINED;
  4357. else
  4358. e = target->type->poll(target);
  4359. if (e != ERROR_OK)
  4360. return JIM_ERR;
  4361. return JIM_OK;
  4362. }
  4363. static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4364. {
  4365. Jim_GetOptInfo goi;
  4366. Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
  4367. if (goi.argc != 2) {
  4368. Jim_WrongNumArgs(interp, 0, argv,
  4369. "([tT]|[fF]|assert|deassert) BOOL");
  4370. return JIM_ERR;
  4371. }
  4372. Jim_Nvp *n;
  4373. int e = Jim_GetOpt_Nvp(&goi, nvp_assert, &n);
  4374. if (e != JIM_OK) {
  4375. Jim_GetOpt_NvpUnknown(&goi, nvp_assert, 1);
  4376. return e;
  4377. }
  4378. /* the halt or not param */
  4379. jim_wide a;
  4380. e = Jim_GetOpt_Wide(&goi, &a);
  4381. if (e != JIM_OK)
  4382. return e;
  4383. struct target *target = Jim_CmdPrivData(goi.interp);
  4384. if (!target->tap->enabled)
  4385. return jim_target_tap_disabled(interp);
  4386. if (!target->type->assert_reset || !target->type->deassert_reset) {
  4387. Jim_SetResultFormatted(interp,
  4388. "No target-specific reset for %s",
  4389. target_name(target));
  4390. return JIM_ERR;
  4391. }
  4392. if (target->defer_examine)
  4393. target_reset_examined(target);
  4394. /* determine if we should halt or not. */
  4395. target->reset_halt = !!a;
  4396. /* When this happens - all workareas are invalid. */
  4397. target_free_all_working_areas_restore(target, 0);
  4398. /* do the assert */
  4399. if (n->value == NVP_ASSERT)
  4400. e = target->type->assert_reset(target);
  4401. else
  4402. e = target->type->deassert_reset(target);
  4403. return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
  4404. }
  4405. static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4406. {
  4407. if (argc != 1) {
  4408. Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
  4409. return JIM_ERR;
  4410. }
  4411. struct target *target = Jim_CmdPrivData(interp);
  4412. if (!target->tap->enabled)
  4413. return jim_target_tap_disabled(interp);
  4414. int e = target->type->halt(target);
  4415. return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
  4416. }
  4417. static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4418. {
  4419. Jim_GetOptInfo goi;
  4420. Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
  4421. /* params: <name> statename timeoutmsecs */
  4422. if (goi.argc != 2) {
  4423. const char *cmd_name = Jim_GetString(argv[0], NULL);
  4424. Jim_SetResultFormatted(goi.interp,
  4425. "%s <state_name> <timeout_in_msec>", cmd_name);
  4426. return JIM_ERR;
  4427. }
  4428. Jim_Nvp *n;
  4429. int e = Jim_GetOpt_Nvp(&goi, nvp_target_state, &n);
  4430. if (e != JIM_OK) {
  4431. Jim_GetOpt_NvpUnknown(&goi, nvp_target_state, 1);
  4432. return e;
  4433. }
  4434. jim_wide a;
  4435. e = Jim_GetOpt_Wide(&goi, &a);
  4436. if (e != JIM_OK)
  4437. return e;
  4438. struct target *target = Jim_CmdPrivData(interp);
  4439. if (!target->tap->enabled)
  4440. return jim_target_tap_disabled(interp);
  4441. e = target_wait_state(target, n->value, a);
  4442. if (e != ERROR_OK) {
  4443. Jim_Obj *eObj = Jim_NewIntObj(interp, e);
  4444. Jim_SetResultFormatted(goi.interp,
  4445. "target: %s wait %s fails (%#s) %s",
  4446. target_name(target), n->name,
  4447. eObj, target_strerror_safe(e));
  4448. Jim_FreeNewObj(interp, eObj);
  4449. return JIM_ERR;
  4450. }
  4451. return JIM_OK;
  4452. }
  4453. /* List for human, Events defined for this target.
  4454. * scripts/programs should use 'name cget -event NAME'
  4455. */
  4456. static int jim_target_event_list(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4457. {
  4458. struct command_context *cmd_ctx = current_command_context(interp);
  4459. assert(cmd_ctx != NULL);
  4460. struct target *target = Jim_CmdPrivData(interp);
  4461. struct target_event_action *teap = target->event_action;
  4462. command_print(cmd_ctx, "Event actions for target (%d) %s\n",
  4463. target->target_number,
  4464. target_name(target));
  4465. command_print(cmd_ctx, "%-25s | Body", "Event");
  4466. command_print(cmd_ctx, "------------------------- | "
  4467. "----------------------------------------");
  4468. while (teap) {
  4469. Jim_Nvp *opt = Jim_Nvp_value2name_simple(nvp_target_event, teap->event);
  4470. command_print(cmd_ctx, "%-25s | %s",
  4471. opt->name, Jim_GetString(teap->body, NULL));
  4472. teap = teap->next;
  4473. }
  4474. command_print(cmd_ctx, "***END***");
  4475. return JIM_OK;
  4476. }
  4477. static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4478. {
  4479. if (argc != 1) {
  4480. Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
  4481. return JIM_ERR;
  4482. }
  4483. struct target *target = Jim_CmdPrivData(interp);
  4484. Jim_SetResultString(interp, target_state_name(target), -1);
  4485. return JIM_OK;
  4486. }
  4487. static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4488. {
  4489. Jim_GetOptInfo goi;
  4490. Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
  4491. if (goi.argc != 1) {
  4492. const char *cmd_name = Jim_GetString(argv[0], NULL);
  4493. Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
  4494. return JIM_ERR;
  4495. }
  4496. Jim_Nvp *n;
  4497. int e = Jim_GetOpt_Nvp(&goi, nvp_target_event, &n);
  4498. if (e != JIM_OK) {
  4499. Jim_GetOpt_NvpUnknown(&goi, nvp_target_event, 1);
  4500. return e;
  4501. }
  4502. struct target *target = Jim_CmdPrivData(interp);
  4503. target_handle_event(target, n->value);
  4504. return JIM_OK;
  4505. }
/* Per-instance subcommands registered under each target's name command
 * (e.g. "mytarget mdw 0x1000").  Handler data is the target itself,
 * bound via command_set_handler_data() in target_create(). */
static const struct command_registration target_instance_command_handlers[] = {
	/* configuration access */
	{
		.name = "configure",
		.mode = COMMAND_CONFIG,
		.jim_handler = jim_target_configure,
		.help = "configure a new target for use",
		.usage = "[target_attribute ...]",
	},
	{
		.name = "cget",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_configure,
		.help = "returns the specified target attribute",
		.usage = "target_attribute",
	},
	/* memory write commands — one shared handler, dispatched by name */
	{
		.name = "mww",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_mw,
		.help = "Write 32-bit word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwh",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_mw,
		.help = "Write 16-bit half-word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwb",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_mw,
		.help = "Write byte(s) to target memory",
		.usage = "address data [count]",
	},
	/* memory display commands — one shared handler, dispatched by name */
	{
		.name = "mdw",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_md,
		.help = "Display target memory as 32-bit words",
		.usage = "address [count]",
	},
	{
		.name = "mdh",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_md,
		.help = "Display target memory as 16-bit half-words",
		.usage = "address [count]",
	},
	{
		.name = "mdb",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_md,
		.help = "Display target memory as 8-bit bytes",
		.usage = "address [count]",
	},
	/* Tcl array <-> target memory transfer */
	{
		.name = "array2mem",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_array2mem,
		.help = "Writes Tcl array of 8/16/32 bit numbers "
			"to target memory",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "mem2array",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_mem2array,
		.help = "Loads Tcl array of 8/16/32 bit numbers "
			"from target memory",
		.usage = "arrayname bitwidth address count",
	},
	/* introspection */
	{
		.name = "eventlist",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_event_list,
		.help = "displays a table of events defined for this target",
	},
	{
		.name = "curstate",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_current_state,
		.help = "displays the current state of this target",
	},
	/* "arp_*" commands are internal plumbing used by the reset scripts */
	{
		.name = "arp_examine",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_examine,
		.help = "used internally for reset processing",
		.usage = "arp_examine ['allow-defer']",
	},
	{
		.name = "was_examined",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_was_examined,
		.help = "used internally for reset processing",
		.usage = "was_examined",
	},
	{
		.name = "examine_deferred",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_examine_deferred,
		.help = "used internally for reset processing",
		.usage = "examine_deferred",
	},
	{
		.name = "arp_halt_gdb",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_halt_gdb,
		.help = "used internally for reset processing to halt GDB",
	},
	{
		.name = "arp_poll",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_poll,
		.help = "used internally for reset processing",
	},
	{
		.name = "arp_reset",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_reset,
		.help = "used internally for reset processing",
	},
	{
		.name = "arp_halt",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_halt,
		.help = "used internally for reset processing",
	},
	{
		.name = "arp_waitstate",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_wait_state,
		.help = "used internally for reset processing",
	},
	{
		.name = "invoke-event",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_invoke_event,
		.help = "invoke handler for specified event",
		.usage = "event_name",
	},
	COMMAND_REGISTRATION_DONE
};
/* Create a new target instance from "target create <name> <type> ...".
 *
 * Validates that the command name is free and the type exists (honoring
 * deprecated type names and transport overrides), allocates and
 * default-initializes the target, applies the remaining arguments as
 * "configure" options, links the target onto all_targets, and registers
 * the per-target command group.
 *
 * Returns JIM_OK on success, JIM_ERR (with an interp result) on failure.
 */
static int target_create(Jim_GetOptInfo *goi)
{
	Jim_Obj *new_cmd;
	Jim_Cmd *cmd;
	const char *cp;
	int e;
	int x;
	struct target *target;
	struct command_context *cmd_ctx;

	cmd_ctx = current_command_context(goi->interp);
	assert(cmd_ctx != NULL);

	if (goi->argc < 3) {
		Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
		return JIM_ERR;
	}

	/* COMMAND */
	Jim_GetOpt_Obj(goi, &new_cmd);
	/* does this command exist? */
	cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
	if (cmd) {
		cp = Jim_GetString(new_cmd, NULL);
		Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
		return JIM_ERR;
	}

	/* TYPE */
	e = Jim_GetOpt_String(goi, &cp, NULL);
	if (e != JIM_OK)
		return e;
	/* the active transport may substitute its own target type string */
	struct transport *tr = get_current_transport();
	if (tr->override_target) {
		e = tr->override_target(&cp);
		if (e != ERROR_OK) {
			LOG_ERROR("The selected transport doesn't support this target");
			return JIM_ERR;
		}
		LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
	}

	/* now does target type exist */
	for (x = 0 ; target_types[x] ; x++) {
		if (0 == strcmp(cp, target_types[x]->name)) {
			/* found */
			break;
		}
		/* check for deprecated name */
		if (target_types[x]->deprecated_name) {
			if (0 == strcmp(cp, target_types[x]->deprecated_name)) {
				/* found */
				LOG_WARNING("target name is deprecated use: \'%s\'", target_types[x]->name);
				break;
			}
		}
	}
	if (target_types[x] == NULL) {
		/* unknown type: build a "try one of A, B or C" message */
		Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
		for (x = 0 ; target_types[x] ; x++) {
			if (target_types[x + 1]) {
				Jim_AppendStrings(goi->interp,
								   Jim_GetResult(goi->interp),
								   target_types[x]->name,
								   ", ", NULL);
			} else {
				Jim_AppendStrings(goi->interp,
								   Jim_GetResult(goi->interp),
								   " or ",
								   target_types[x]->name, NULL);
			}
		}
		return JIM_ERR;
	}

	/* Create it */
	/* NOTE(review): calloc/strdup results below are not checked; a
	 * failed allocation here would crash.  TODO: add OOM handling. */
	target = calloc(1, sizeof(struct target));
	/* set target number */
	target->target_number = new_target_number();
	cmd_ctx->current_target = target->target_number;

	/* allocate memory for each unique target type */
	target->type = calloc(1, sizeof(struct target_type));

	memcpy(target->type, target_types[x], sizeof(struct target_type));

	/* will be set by "-endian" */
	target->endianness = TARGET_ENDIAN_UNKNOWN;

	/* default to first core, override with -coreid */
	target->coreid = 0;

	target->working_area = 0x0;
	target->working_area_size = 0x0;
	target->working_areas = NULL;
	target->backup_working_area = 0;

	target->state = TARGET_UNKNOWN;
	target->debug_reason = DBG_REASON_UNDEFINED;
	target->reg_cache = NULL;
	target->breakpoints = NULL;
	target->watchpoints = NULL;
	target->next = NULL;
	target->arch_info = NULL;

	target->display = 1;

	target->halt_issued = false;

	/* initialize trace information */
	target->trace_info = calloc(1, sizeof(struct trace));

	target->dbgmsg = NULL;
	target->dbg_msg_enabled = 0;

	target->endianness = TARGET_ENDIAN_UNKNOWN;

	target->rtos = NULL;
	target->rtos_auto_detect = false;

	/* Do the rest as "configure" options */
	goi->isconfigure = 1;
	e = target_configure(goi, target);

	/* a tap is mandatory; "-chain-position" must have been given */
	if (target->tap == NULL) {
		Jim_SetResultString(goi->interp, "-chain-position required when creating target", -1);
		e = JIM_ERR;
	}

	if (e != JIM_OK) {
		/* NOTE(review): target->trace_info (and any resources that
		 * target_configure attached) are leaked on this path. */
		free(target->type);
		free(target);
		return e;
	}

	if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
		/* default endian to little if not specified */
		target->endianness = TARGET_LITTLE_ENDIAN;
	}

	cp = Jim_GetString(new_cmd, NULL);
	target->cmd_name = strdup(cp);

	/* create the target specific commands */
	if (target->type->commands) {
		e = register_commands(cmd_ctx, NULL, target->type->commands);
		if (ERROR_OK != e)
			LOG_ERROR("unable to register '%s' commands", cp);
	}

	/* NOTE(review): the return value of the type's target_create hook
	 * is discarded; a failing hook goes unnoticed here. */
	if (target->type->target_create)
		(*(target->type->target_create))(target, goi->interp);

	/* append to end of list */
	{
		struct target **tpp;
		tpp = &(all_targets);
		while (*tpp)
			tpp = &((*tpp)->next);
		*tpp = target;
	}

	/* now - create the new target name command */
	const struct command_registration target_subcommands[] = {
		{
			.chain = target_instance_command_handlers,
		},
		{
			.chain = target->type->commands,
		},
		COMMAND_REGISTRATION_DONE
	};
	const struct command_registration target_commands[] = {
		{
			.name = cp,
			.mode = COMMAND_ANY,
			.help = "target command group",
			.usage = "",
			.chain = target_subcommands,
		},
		COMMAND_REGISTRATION_DONE
	};
	e = register_commands(cmd_ctx, NULL, target_commands);
	if (ERROR_OK != e)
		/* NOTE(review): target stays on all_targets but has no
		 * command; nothing is freed on this path. */
		return JIM_ERR;

	/* bind the target instance to its name command */
	struct command *c = command_find_in_context(cmd_ctx, cp);
	assert(c);
	command_set_handler_data(c, target);

	return (ERROR_OK == e) ? JIM_OK : JIM_ERR;
}
  4814. static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4815. {
  4816. if (argc != 1) {
  4817. Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
  4818. return JIM_ERR;
  4819. }
  4820. struct command_context *cmd_ctx = current_command_context(interp);
  4821. assert(cmd_ctx != NULL);
  4822. Jim_SetResultString(interp, target_name(get_current_target(cmd_ctx)), -1);
  4823. return JIM_OK;
  4824. }
  4825. static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4826. {
  4827. if (argc != 1) {
  4828. Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
  4829. return JIM_ERR;
  4830. }
  4831. Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
  4832. for (unsigned x = 0; NULL != target_types[x]; x++) {
  4833. Jim_ListAppendElement(interp, Jim_GetResult(interp),
  4834. Jim_NewStringObj(interp, target_types[x]->name, -1));
  4835. }
  4836. return JIM_OK;
  4837. }
  4838. static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4839. {
  4840. if (argc != 1) {
  4841. Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
  4842. return JIM_ERR;
  4843. }
  4844. Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
  4845. struct target *target = all_targets;
  4846. while (target) {
  4847. Jim_ListAppendElement(interp, Jim_GetResult(interp),
  4848. Jim_NewStringObj(interp, target_name(target), -1));
  4849. target = target->next;
  4850. }
  4851. return JIM_OK;
  4852. }
  4853. static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4854. {
  4855. int i;
  4856. const char *targetname;
  4857. int retval, len;
  4858. struct target *target = (struct target *) NULL;
  4859. struct target_list *head, *curr, *new;
  4860. curr = (struct target_list *) NULL;
  4861. head = (struct target_list *) NULL;
  4862. retval = 0;
  4863. LOG_DEBUG("%d", argc);
  4864. /* argv[1] = target to associate in smp
  4865. * argv[2] = target to assoicate in smp
  4866. * argv[3] ...
  4867. */
  4868. for (i = 1; i < argc; i++) {
  4869. targetname = Jim_GetString(argv[i], &len);
  4870. target = get_target(targetname);
  4871. LOG_DEBUG("%s ", targetname);
  4872. if (target) {
  4873. new = malloc(sizeof(struct target_list));
  4874. new->target = target;
  4875. new->next = (struct target_list *)NULL;
  4876. if (head == (struct target_list *)NULL) {
  4877. head = new;
  4878. curr = head;
  4879. } else {
  4880. curr->next = new;
  4881. curr = new;
  4882. }
  4883. }
  4884. }
  4885. /* now parse the list of cpu and put the target in smp mode*/
  4886. curr = head;
  4887. while (curr != (struct target_list *)NULL) {
  4888. target = curr->target;
  4889. target->smp = 1;
  4890. target->head = head;
  4891. curr = curr->next;
  4892. }
  4893. if (target && target->rtos)
  4894. retval = rtos_smp_init(head->target);
  4895. return retval;
  4896. }
  4897. static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4898. {
  4899. Jim_GetOptInfo goi;
  4900. Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
  4901. if (goi.argc < 3) {
  4902. Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
  4903. "<name> <target_type> [<target_options> ...]");
  4904. return JIM_ERR;
  4905. }
  4906. return target_create(&goi);
  4907. }
/* Subcommands of the top-level "target" command ("target create",
 * "target names", ...). */
static const struct command_registration target_subcommand_handlers[] = {
	{
		.name = "init",
		.mode = COMMAND_CONFIG,
		.handler = handle_target_init_command,
		.help = "initialize targets",
	},
	{
		.name = "create",
		/* REVISIT this should be COMMAND_CONFIG ... */
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_create,
		.usage = "name type '-chain-position' name [options ...]",
		.help = "Creates and selects a new target",
	},
	{
		.name = "current",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_current,
		.help = "Returns the currently selected target",
	},
	{
		.name = "types",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_types,
		.help = "Returns the available target types as "
				"a list of strings",
	},
	{
		.name = "names",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_names,
		.help = "Returns the names of all targets as a list of strings",
	},
	{
		.name = "smp",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_smp,
		.usage = "targetname1 targetname2 ...",
		.help = "gather several target in a smp list"
	},
	COMMAND_REGISTRATION_DONE
};
/* One staged image section held in host memory by 'fast_load_image',
 * written to the target later by 'fast_load'. */
struct FastLoad {
	target_addr_t address;	/* target address to write to */
	uint8_t *data;		/* section payload (heap-allocated) */
	int length;		/* payload size in bytes */
};

/* Staged image state: fastload is NULL when no image is staged. */
static int fastload_num;
static struct FastLoad *fastload;
  4958. static void free_fastload(void)
  4959. {
  4960. if (fastload != NULL) {
  4961. int i;
  4962. for (i = 0; i < fastload_num; i++) {
  4963. if (fastload[i].data)
  4964. free(fastload[i].data);
  4965. }
  4966. free(fastload);
  4967. fastload = NULL;
  4968. }
  4969. }
/* 'fast_load_image': read an image file into host memory, clipped to
 * [min_address, max_address], staging it in the fastload table for a
 * later 'fast_load'.  Nothing is written to the target here.
 *
 * NOTE(review): a previously staged image is not released first —
 * fastload is overwritten below, leaking the old table on repeated use.
 * Consider calling free_fastload() before restaging.
 */
COMMAND_HANDLER(handle_fast_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;
	int i;
	struct image image;

	/* parse filename / base address / type from CMD_ARGV */
	int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
			&image, &min_address, &max_address);
	if (ERROR_OK != retval)
		return retval;

	struct duration bench;
	duration_start(&bench);

	retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
	if (retval != ERROR_OK)
		return retval;

	image_size = 0x0;
	retval = ERROR_OK;
	fastload_num = image.num_sections;
	fastload = malloc(sizeof(struct FastLoad)*image.num_sections);
	if (fastload == NULL) {
		command_print(CMD_CTX, "out of memory");
		image_close(&image);
		return ERROR_FAIL;
	}
	memset(fastload, 0, sizeof(struct FastLoad)*image.num_sections);
	/* stage each section, clipping it to the requested address window */
	for (i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (buffer == NULL) {
			command_print(CMD_CTX, "error allocating buffer for section (%d bytes)",
						  (int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparision here!!! */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {
			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			fastload[i].address = image.sections[i].base_address + offset;
			fastload[i].data = malloc(length);
			if (fastload[i].data == NULL) {
				free(buffer);
				command_print(CMD_CTX, "error allocating buffer for section (%" PRIu32 " bytes)",
							  length);
				retval = ERROR_FAIL;
				break;
			}
			memcpy(fastload[i].data, buffer + offset, length);
			fastload[i].length = length;

			image_size += length;
			command_print(CMD_CTX, "%u bytes written at address 0x%8.8x",
						  (unsigned int)length,
						  ((unsigned int)(image.sections[i].base_address + offset)));
		}

		free(buffer);
	}

	if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD_CTX, "Loaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));

		command_print(CMD_CTX,
				"WARNING: image has not been loaded to target!"
				"You can issue a 'fast_load' to finish loading.");
	}

	image_close(&image);

	/* on any failure, discard the partially staged table */
	if (retval != ERROR_OK)
		free_fastload();

	return retval;
}
  5054. COMMAND_HANDLER(handle_fast_load_command)
  5055. {
  5056. if (CMD_ARGC > 0)
  5057. return ERROR_COMMAND_SYNTAX_ERROR;
  5058. if (fastload == NULL) {
  5059. LOG_ERROR("No image in memory");
  5060. return ERROR_FAIL;
  5061. }
  5062. int i;
  5063. int64_t ms = timeval_ms();
  5064. int size = 0;
  5065. int retval = ERROR_OK;
  5066. for (i = 0; i < fastload_num; i++) {
  5067. struct target *target = get_current_target(CMD_CTX);
  5068. command_print(CMD_CTX, "Write to 0x%08x, length 0x%08x",
  5069. (unsigned int)(fastload[i].address),
  5070. (unsigned int)(fastload[i].length));
  5071. retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
  5072. if (retval != ERROR_OK)
  5073. break;
  5074. size += fastload[i].length;
  5075. }
  5076. if (retval == ERROR_OK) {
  5077. int64_t after = timeval_ms();
  5078. command_print(CMD_CTX, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
  5079. }
  5080. return retval;
  5081. }
/* Top-level commands contributed by this module: "targets" (select /
 * list) and the "target" configuration command group. */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Register this module's top-level commands with the command context.
 * Returns ERROR_OK or a register_commands() error code. */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
/* When true (default), nag the user after each reset with performance
 * hints; toggled by the 'target_reset_nag' command below. */
static bool target_reset_nag = true;

/* Accessor for the reset-nag setting. */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
/* 'target_reset_nag [enable|disable]': parse a boolean argument into
 * target_reset_nag (or report the current value). */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
			&target_reset_nag, "Nag after each reset about options to improve "
			"performance");
}
  5114. COMMAND_HANDLER(handle_ps_command)
  5115. {
  5116. struct target *target = get_current_target(CMD_CTX);
  5117. char *display;
  5118. if (target->state != TARGET_HALTED) {
  5119. LOG_INFO("target not halted !!");
  5120. return ERROR_OK;
  5121. }
  5122. if ((target->rtos) && (target->rtos->type)
  5123. && (target->rtos->type->ps_command)) {
  5124. display = target->rtos->type->ps_command(target);
  5125. command_print(CMD_CTX, "%s", display);
  5126. free(display);
  5127. return ERROR_OK;
  5128. } else {
  5129. LOG_INFO("failed");
  5130. return ERROR_TARGET_FAILURE;
  5131. }
  5132. }
  5133. static void binprint(struct command_context *cmd_ctx, const char *text, const uint8_t *buf, int size)
  5134. {
  5135. if (text != NULL)
  5136. command_print_sameline(cmd_ctx, "%s", text);
  5137. for (int i = 0; i < size; i++)
  5138. command_print_sameline(cmd_ctx, " %02x", buf[i]);
  5139. command_print(cmd_ctx, " ");
  5140. }
/* 'test_mem_access <size>': exercise target memory reads and writes at
 * every combination of access width (1/2/4), target-address offset
 * (0..3) and host-buffer alignment (aligned/unaligned), comparing the
 * result against a host-side replay and benchmarking throughput.
 *
 * NOTE(review): the malloc results for test_pattern / read_ref /
 * read_buf / write_buf are not checked.  Also note that an early
 * failure in the read phase jumps to 'out:' and then FALLS THROUGH
 * into the write phase rather than returning — confirm that is
 * intentional before relying on the final retval.
 */
COMMAND_HANDLER(handle_test_mem_access_command)
{
	struct target *target = get_current_target(CMD_CTX);
	uint32_t test_size;
	int retval = ERROR_OK;

	if (target->state != TARGET_HALTED) {
		LOG_INFO("target not halted !!");
		return ERROR_FAIL;
	}

	if (CMD_ARGC != 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);

	/* Test reads */
	size_t num_bytes = test_size + 4;

	struct working_area *wa = NULL;
	retval = target_alloc_working_area(target, num_bytes, &wa);
	if (retval != ERROR_OK) {
		LOG_ERROR("Not enough working area");
		return ERROR_FAIL;
	}

	/* fill the working area with a random reference pattern */
	uint8_t *test_pattern = malloc(num_bytes);

	for (size_t i = 0; i < num_bytes; i++)
		test_pattern[i] = rand();

	retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
	if (retval != ERROR_OK) {
		LOG_ERROR("Test pattern write failed");
		goto out;
	}

	for (int host_offset = 0; host_offset <= 1; host_offset++) {
		for (int size = 1; size <= 4; size *= 2) {
			for (int offset = 0; offset < 4; offset++) {
				uint32_t count = test_size / size;
				size_t host_bufsiz = (count + 2) * size + host_offset;
				uint8_t *read_ref = malloc(host_bufsiz);
				uint8_t *read_buf = malloc(host_bufsiz);

				/* seed both buffers identically so untouched
				 * guard bytes must still match afterwards */
				for (size_t i = 0; i < host_bufsiz; i++) {
					read_ref[i] = rand();
					read_buf[i] = read_ref[i];
				}
				command_print_sameline(CMD_CTX,
						"Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
						size, offset, host_offset ? "un" : "");

				struct duration bench;
				duration_start(&bench);

				retval = target_read_memory(target, wa->address + offset, size, count,
						read_buf + size + host_offset);

				duration_measure(&bench);

				if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
					command_print(CMD_CTX, "Unsupported alignment");
					goto next;
				} else if (retval != ERROR_OK) {
					command_print(CMD_CTX, "Memory read failed");
					goto next;
				}

				/* replay on host */
				memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);

				/* check result */
				int result = memcmp(read_ref, read_buf, host_bufsiz);
				if (result == 0) {
					command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)",
							duration_elapsed(&bench),
							duration_kbps(&bench, count * size));
				} else {
					command_print(CMD_CTX, "Compare failed");
					binprint(CMD_CTX, "ref:", read_ref, host_bufsiz);
					binprint(CMD_CTX, "buf:", read_buf, host_bufsiz);
				}
next:
				free(read_ref);
				free(read_buf);
			}
		}
	}

out:
	free(test_pattern);

	if (wa != NULL)
		target_free_working_area(target, wa);

	/* Test writes */
	num_bytes = test_size + 4 + 4 + 4;

	retval = target_alloc_working_area(target, num_bytes, &wa);
	if (retval != ERROR_OK) {
		LOG_ERROR("Not enough working area");
		return ERROR_FAIL;
	}

	test_pattern = malloc(num_bytes);

	for (size_t i = 0; i < num_bytes; i++)
		test_pattern[i] = rand();

	for (int host_offset = 0; host_offset <= 1; host_offset++) {
		for (int size = 1; size <= 4; size *= 2) {
			for (int offset = 0; offset < 4; offset++) {
				uint32_t count = test_size / size;
				size_t host_bufsiz = count * size + host_offset;
				uint8_t *read_ref = malloc(num_bytes);
				uint8_t *read_buf = malloc(num_bytes);
				uint8_t *write_buf = malloc(host_bufsiz);

				for (size_t i = 0; i < host_bufsiz; i++)
					write_buf[i] = rand();
				command_print_sameline(CMD_CTX,
						"Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
						size, offset, host_offset ? "un" : "");

				/* reset the working area to the known pattern */
				retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
				if (retval != ERROR_OK) {
					command_print(CMD_CTX, "Test pattern write failed");
					goto nextw;
				}

				/* replay on host */
				memcpy(read_ref, test_pattern, num_bytes);
				memcpy(read_ref + size + offset, write_buf + host_offset, count * size);

				struct duration bench;
				duration_start(&bench);

				retval = target_write_memory(target, wa->address + size + offset, size, count,
						write_buf + host_offset);

				duration_measure(&bench);

				if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
					command_print(CMD_CTX, "Unsupported alignment");
					goto nextw;
				} else if (retval != ERROR_OK) {
					command_print(CMD_CTX, "Memory write failed");
					goto nextw;
				}

				/* read back */
				retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
				if (retval != ERROR_OK) {
					command_print(CMD_CTX, "Test pattern write failed");
					goto nextw;
				}

				/* check result */
				int result = memcmp(read_ref, read_buf, num_bytes);
				if (result == 0) {
					command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)",
							duration_elapsed(&bench),
							duration_kbps(&bench, count * size));
				} else {
					command_print(CMD_CTX, "Compare failed");
					binprint(CMD_CTX, "ref:", read_ref, num_bytes);
					binprint(CMD_CTX, "buf:", read_buf, num_bytes);
				}
nextw:
				free(read_ref);
				free(read_buf);
			}
		}
	}

	free(test_pattern);

	if (wa != NULL)
		target_free_working_area(target, wa);

	return retval;
}
  5289. static const struct command_registration target_exec_command_handlers[] = {
  5290. {
  5291. .name = "fast_load_image",
  5292. .handler = handle_fast_load_image_command,
  5293. .mode = COMMAND_ANY,
  5294. .help = "Load image into server memory for later use by "
  5295. "fast_load; primarily for profiling",
  5296. .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
  5297. "[min_address [max_length]]",
  5298. },
  5299. {
  5300. .name = "fast_load",
  5301. .handler = handle_fast_load_command,
  5302. .mode = COMMAND_EXEC,
  5303. .help = "loads active fast load image to current target "
  5304. "- mainly for profiling purposes",
  5305. .usage = "",
  5306. },
  5307. {
  5308. .name = "profile",
  5309. .handler = handle_profile_command,
  5310. .mode = COMMAND_EXEC,
  5311. .usage = "seconds filename [start end]",
  5312. .help = "profiling samples the CPU PC",
  5313. },
  5314. /** @todo don't register virt2phys() unless target supports it */
  5315. {
  5316. .name = "virt2phys",
  5317. .handler = handle_virt2phys_command,
  5318. .mode = COMMAND_ANY,
  5319. .help = "translate a virtual address into a physical address",
  5320. .usage = "virtual_address",
  5321. },
  5322. {
  5323. .name = "reg",
  5324. .handler = handle_reg_command,
  5325. .mode = COMMAND_EXEC,
  5326. .help = "display (reread from target with \"force\") or set a register; "
  5327. "with no arguments, displays all registers and their values",
  5328. .usage = "[(register_number|register_name) [(value|'force')]]",
  5329. },
  5330. {
  5331. .name = "poll",
  5332. .handler = handle_poll_command,
  5333. .mode = COMMAND_EXEC,
  5334. .help = "poll target state; or reconfigure background polling",
  5335. .usage = "['on'|'off']",
  5336. },
  5337. {
  5338. .name = "wait_halt",
  5339. .handler = handle_wait_halt_command,
  5340. .mode = COMMAND_EXEC,
  5341. .help = "wait up to the specified number of milliseconds "
  5342. "(default 5000) for a previously requested halt",
  5343. .usage = "[milliseconds]",
  5344. },
  5345. {
  5346. .name = "halt",
  5347. .handler = handle_halt_command,
  5348. .mode = COMMAND_EXEC,
  5349. .help = "request target to halt, then wait up to the specified"
  5350. "number of milliseconds (default 5000) for it to complete",
  5351. .usage = "[milliseconds]",
  5352. },
  5353. {
  5354. .name = "resume",
  5355. .handler = handle_resume_command,
  5356. .mode = COMMAND_EXEC,
  5357. .help = "resume target execution from current PC or address",
  5358. .usage = "[address]",
  5359. },
  5360. {
  5361. .name = "reset",
  5362. .handler = handle_reset_command,
  5363. .mode = COMMAND_EXEC,
  5364. .usage = "[run|halt|init]",
  5365. .help = "Reset all targets into the specified mode."
  5366. "Default reset mode is run, if not given.",
  5367. },
  5368. {
  5369. .name = "soft_reset_halt",
  5370. .handler = handle_soft_reset_halt_command,
  5371. .mode = COMMAND_EXEC,
  5372. .usage = "",
  5373. .help = "halt the target and do a soft reset",
  5374. },
  5375. {
  5376. .name = "step",
  5377. .handler = handle_step_command,
  5378. .mode = COMMAND_EXEC,
  5379. .help = "step one instruction from current PC or address",
  5380. .usage = "[address]",
  5381. },
  5382. {
  5383. .name = "mdd",
  5384. .handler = handle_md_command,
  5385. .mode = COMMAND_EXEC,
  5386. .help = "display memory words",
  5387. .usage = "['phys'] address [count]",
  5388. },
  5389. {
  5390. .name = "mdw",
  5391. .handler = handle_md_command,
  5392. .mode = COMMAND_EXEC,
  5393. .help = "display memory words",
  5394. .usage = "['phys'] address [count]",
  5395. },
  5396. {
  5397. .name = "mdh",
  5398. .handler = handle_md_command,
  5399. .mode = COMMAND_EXEC,
  5400. .help = "display memory half-words",
  5401. .usage = "['phys'] address [count]",
  5402. },
  5403. {
  5404. .name = "mdb",
  5405. .handler = handle_md_command,
  5406. .mode = COMMAND_EXEC,
  5407. .help = "display memory bytes",
  5408. .usage = "['phys'] address [count]",
  5409. },
  5410. {
  5411. .name = "mwd",
  5412. .handler = handle_mw_command,
  5413. .mode = COMMAND_EXEC,
  5414. .help = "write memory word",
  5415. .usage = "['phys'] address value [count]",
  5416. },
  5417. {
  5418. .name = "mww",
  5419. .handler = handle_mw_command,
  5420. .mode = COMMAND_EXEC,
  5421. .help = "write memory word",
  5422. .usage = "['phys'] address value [count]",
  5423. },
  5424. {
  5425. .name = "mwh",
  5426. .handler = handle_mw_command,
  5427. .mode = COMMAND_EXEC,
  5428. .help = "write memory half-word",
  5429. .usage = "['phys'] address value [count]",
  5430. },
  5431. {
  5432. .name = "mwb",
  5433. .handler = handle_mw_command,
  5434. .mode = COMMAND_EXEC,
  5435. .help = "write memory byte",
  5436. .usage = "['phys'] address value [count]",
  5437. },
  5438. {
  5439. .name = "bp",
  5440. .handler = handle_bp_command,
  5441. .mode = COMMAND_EXEC,
  5442. .help = "list or set hardware or software breakpoint",
  5443. .usage = "<address> [<asid>]<length> ['hw'|'hw_ctx']",
  5444. },
  5445. {
  5446. .name = "rbp",
  5447. .handler = handle_rbp_command,
  5448. .mode = COMMAND_EXEC,
  5449. .help = "remove breakpoint",
  5450. .usage = "address",
  5451. },
  5452. {
  5453. .name = "wp",
  5454. .handler = handle_wp_command,
  5455. .mode = COMMAND_EXEC,
  5456. .help = "list (no params) or create watchpoints",
  5457. .usage = "[address length [('r'|'w'|'a') value [mask]]]",
  5458. },
  5459. {
  5460. .name = "rwp",
  5461. .handler = handle_rwp_command,
  5462. .mode = COMMAND_EXEC,
  5463. .help = "remove watchpoint",
  5464. .usage = "address",
  5465. },
  5466. {
  5467. .name = "load_image",
  5468. .handler = handle_load_image_command,
  5469. .mode = COMMAND_EXEC,
  5470. .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
  5471. "[min_address] [max_length]",
  5472. },
  5473. {
  5474. .name = "dump_image",
  5475. .handler = handle_dump_image_command,
  5476. .mode = COMMAND_EXEC,
  5477. .usage = "filename address size",
  5478. },
  5479. {
  5480. .name = "verify_image_checksum",
  5481. .handler = handle_verify_image_checksum_command,
  5482. .mode = COMMAND_EXEC,
  5483. .usage = "filename [offset [type]]",
  5484. },
  5485. {
  5486. .name = "verify_image",
  5487. .handler = handle_verify_image_command,
  5488. .mode = COMMAND_EXEC,
  5489. .usage = "filename [offset [type]]",
  5490. },
  5491. {
  5492. .name = "test_image",
  5493. .handler = handle_test_image_command,
  5494. .mode = COMMAND_EXEC,
  5495. .usage = "filename [offset [type]]",
  5496. },
  5497. {
  5498. .name = "mem2array",
  5499. .mode = COMMAND_EXEC,
  5500. .jim_handler = jim_mem2array,
  5501. .help = "read 8/16/32 bit memory and return as a TCL array "
  5502. "for script processing",
  5503. .usage = "arrayname bitwidth address count",
  5504. },
  5505. {
  5506. .name = "array2mem",
  5507. .mode = COMMAND_EXEC,
  5508. .jim_handler = jim_array2mem,
  5509. .help = "convert a TCL array to memory locations "
  5510. "and write the 8/16/32 bit values",
  5511. .usage = "arrayname bitwidth address count",
  5512. },
  5513. {
  5514. .name = "reset_nag",
  5515. .handler = handle_target_reset_nag,
  5516. .mode = COMMAND_ANY,
  5517. .help = "Nag after each reset about options that could have been "
  5518. "enabled to improve performance. ",
  5519. .usage = "['enable'|'disable']",
  5520. },
  5521. {
  5522. .name = "ps",
  5523. .handler = handle_ps_command,
  5524. .mode = COMMAND_EXEC,
  5525. .help = "list all tasks ",
  5526. .usage = " ",
  5527. },
  5528. {
  5529. .name = "test_mem_access",
  5530. .handler = handle_test_mem_access_command,
  5531. .mode = COMMAND_EXEC,
  5532. .help = "Test the target's memory access functions",
  5533. .usage = "size",
  5534. },
  5535. COMMAND_REGISTRATION_DONE
  5536. };
  5537. static int target_register_user_commands(struct command_context *cmd_ctx)
  5538. {
  5539. int retval = ERROR_OK;
  5540. retval = target_request_register_commands(cmd_ctx);
  5541. if (retval != ERROR_OK)
  5542. return retval;
  5543. retval = trace_register_commands(cmd_ctx);
  5544. if (retval != ERROR_OK)
  5545. return retval;
  5546. return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
  5547. }