You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

6737 lines
177 KiB

  1. /***************************************************************************
  2. * Copyright (C) 2005 by Dominic Rath *
  3. * Dominic.Rath@gmx.de *
  4. * *
  5. * Copyright (C) 2007-2010 Øyvind Harboe *
  6. * oyvind.harboe@zylin.com *
  7. * *
  8. * Copyright (C) 2008, Duane Ellis *
  9. * openocd@duaneeellis.com *
  10. * *
  11. * Copyright (C) 2008 by Spencer Oliver *
  12. * spen@spen-soft.co.uk *
  13. * *
  14. * Copyright (C) 2008 by Rick Altherr *
  15. * kc8apf@kc8apf.net> *
  16. * *
  17. * Copyright (C) 2011 by Broadcom Corporation *
  18. * Evan Hunter - ehunter@broadcom.com *
  19. * *
  20. * Copyright (C) ST-Ericsson SA 2011 *
  21. * michel.jaouen@stericsson.com : smp minimum support *
  22. * *
  23. * Copyright (C) 2011 Andreas Fritiofson *
  24. * andreas.fritiofson@gmail.com *
  25. * *
  26. * This program is free software; you can redistribute it and/or modify *
  27. * it under the terms of the GNU General Public License as published by *
  28. * the Free Software Foundation; either version 2 of the License, or *
  29. * (at your option) any later version. *
  30. * *
  31. * This program is distributed in the hope that it will be useful, *
  32. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  33. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  34. * GNU General Public License for more details. *
  35. * *
  36. * You should have received a copy of the GNU General Public License *
  37. * along with this program. If not, see <http://www.gnu.org/licenses/>. *
  38. ***************************************************************************/
  39. #ifdef HAVE_CONFIG_H
  40. #include "config.h"
  41. #endif
  42. #include <helper/time_support.h>
  43. #include <jtag/jtag.h>
  44. #include <flash/nor/core.h>
  45. #include "target.h"
  46. #include "target_type.h"
  47. #include "target_request.h"
  48. #include "breakpoints.h"
  49. #include "register.h"
  50. #include "trace.h"
  51. #include "image.h"
  52. #include "rtos/rtos.h"
  53. #include "transport/transport.h"
  54. #include "arm_cti.h"
  55. /* default halt wait timeout (ms) */
  56. #define DEFAULT_HALT_TIMEOUT 5000
/* Forward declarations for file-local helpers installed as defaults on
 * target types that do not provide their own implementations, plus the
 * Tcl array<->memory bridge and user-command registration. */
static int target_read_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, uint8_t *buffer);
static int target_write_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, const uint8_t *buffer);
static int target_array2mem(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_mem2array(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_register_user_commands(struct command_context *cmd_ctx);
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info);
static int target_gdb_fileio_end_default(struct target *target, int retcode,
		int fileio_errno, bool ctrl_c);
/* targets */
/* Each target driver defines one of these in its own source file;
 * they are collected into the target_types[] registry below. */
extern struct target_type arm7tdmi_target;
extern struct target_type arm720t_target;
extern struct target_type arm9tdmi_target;
extern struct target_type arm920t_target;
extern struct target_type arm966e_target;
extern struct target_type arm946e_target;
extern struct target_type arm926ejs_target;
extern struct target_type fa526_target;
extern struct target_type feroceon_target;
extern struct target_type dragonite_target;
extern struct target_type xscale_target;
extern struct target_type cortexm_target;
extern struct target_type cortexa_target;
extern struct target_type aarch64_target;
extern struct target_type cortexr4_target;
extern struct target_type arm11_target;
extern struct target_type ls1_sap_target;
extern struct target_type mips_m4k_target;
extern struct target_type mips_mips64_target;
extern struct target_type avr_target;
extern struct target_type dsp563xx_target;
extern struct target_type dsp5680xx_target;
extern struct target_type testee_target;
extern struct target_type avr32_ap7k_target;
extern struct target_type hla_target;
extern struct target_type nds32_v2_target;
extern struct target_type nds32_v3_target;
extern struct target_type nds32_v3m_target;
extern struct target_type or1k_target;
extern struct target_type quark_x10xx_target;
extern struct target_type quark_d20xx_target;
extern struct target_type stm8_target;
extern struct target_type riscv_target;
extern struct target_type mem_ap_target;
extern struct target_type esirisc_target;
extern struct target_type arcv2_target;
/* NULL-terminated registry of all supported target drivers; scanned when
 * the "target create" command looks up a type by name. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&mips_mips64_target,
	NULL,
};
/* Head of the singly-linked list of all configured targets. */
struct target *all_targets;
/* Registered callback chains for target events and periodic timers. */
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
/* Callback lists for reset and trace notifications. */
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
/* Background polling period, in milliseconds. */
static const int polling_interval = 100;
/* Tcl spellings accepted for assert/deassert arguments (e.g. reset lines). */
static const struct jim_nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};
/* Printable names for ERROR_TARGET_* codes; used by target_strerror_safe(). */
static const struct jim_nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
  175. static const char *target_strerror_safe(int err)
  176. {
  177. const struct jim_nvp *n;
  178. n = jim_nvp_value2name_simple(nvp_error_target, err);
  179. if (!n->name)
  180. return "unknown";
  181. else
  182. return n->name;
  183. }
/* Tcl-visible names for all target events (used by "$target configure
 * -event <name> ..." and by target_event_name()). */
static const struct jim_nvp nvp_target_event[] = {
	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },
	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
	{ .name = NULL, .value = -1 }
};
/* Printable names for the TARGET_* run states. */
static const struct jim_nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};
/* Printable names for the DBG_REASON_* halt causes. */
static const struct jim_nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
/* Accepted spellings for target endianness in configuration commands. */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};
/* Tcl names for the RESET_* modes used by the "reset" command. */
static const struct jim_nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};
  250. const char *debug_reason_name(struct target *t)
  251. {
  252. const char *cp;
  253. cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
  254. t->debug_reason)->name;
  255. if (!cp) {
  256. LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
  257. cp = "(*BUG*unknown*BUG*)";
  258. }
  259. return cp;
  260. }
  261. const char *target_state_name(struct target *t)
  262. {
  263. const char *cp;
  264. cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
  265. if (!cp) {
  266. LOG_ERROR("Invalid target state: %d", (int)(t->state));
  267. cp = "(*BUG*unknown*BUG*)";
  268. }
  269. if (!target_was_examined(t) && t->defer_examine)
  270. cp = "examine deferred";
  271. return cp;
  272. }
  273. const char *target_event_name(enum target_event event)
  274. {
  275. const char *cp;
  276. cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
  277. if (!cp) {
  278. LOG_ERROR("Invalid target event: %d", (int)(event));
  279. cp = "(*BUG*unknown*BUG*)";
  280. }
  281. return cp;
  282. }
  283. const char *target_reset_mode_name(enum target_reset_mode reset_mode)
  284. {
  285. const char *cp;
  286. cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
  287. if (!cp) {
  288. LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
  289. cp = "(*BUG*unknown*BUG*)";
  290. }
  291. return cp;
  292. }
  293. /* determine the number of the new target */
  294. static int new_target_number(void)
  295. {
  296. struct target *t;
  297. int x;
  298. /* number is 0 based */
  299. x = -1;
  300. t = all_targets;
  301. while (t) {
  302. if (x < t->target_number)
  303. x = t->target_number;
  304. t = t->next;
  305. }
  306. return x + 1;
  307. }
  308. static void append_to_list_all_targets(struct target *target)
  309. {
  310. struct target **t = &all_targets;
  311. while (*t)
  312. t = &((*t)->next);
  313. *t = target;
  314. }
  315. /* read a uint64_t from a buffer in target memory endianness */
  316. uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
  317. {
  318. if (target->endianness == TARGET_LITTLE_ENDIAN)
  319. return le_to_h_u64(buffer);
  320. else
  321. return be_to_h_u64(buffer);
  322. }
  323. /* read a uint32_t from a buffer in target memory endianness */
  324. uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
  325. {
  326. if (target->endianness == TARGET_LITTLE_ENDIAN)
  327. return le_to_h_u32(buffer);
  328. else
  329. return be_to_h_u32(buffer);
  330. }
  331. /* read a uint24_t from a buffer in target memory endianness */
  332. uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
  333. {
  334. if (target->endianness == TARGET_LITTLE_ENDIAN)
  335. return le_to_h_u24(buffer);
  336. else
  337. return be_to_h_u24(buffer);
  338. }
  339. /* read a uint16_t from a buffer in target memory endianness */
  340. uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
  341. {
  342. if (target->endianness == TARGET_LITTLE_ENDIAN)
  343. return le_to_h_u16(buffer);
  344. else
  345. return be_to_h_u16(buffer);
  346. }
/* write a uint64_t to a buffer in target memory endianness */
void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
{
	/* anything other than little-endian falls through to big-endian */
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u64_to_le(buffer, value);
	else
		h_u64_to_be(buffer, value);
}
/* write a uint32_t to a buffer in target memory endianness */
void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
{
	/* anything other than little-endian falls through to big-endian */
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u32_to_le(buffer, value);
	else
		h_u32_to_be(buffer, value);
}
/* write a 24-bit value to a buffer in target memory endianness */
void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
{
	/* anything other than little-endian falls through to big-endian */
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u24_to_le(buffer, value);
	else
		h_u24_to_be(buffer, value);
}
/* write a uint16_t to a buffer in target memory endianness */
void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
{
	/* anything other than little-endian falls through to big-endian */
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u16_to_le(buffer, value);
	else
		h_u16_to_be(buffer, value);
}
/* write a uint8_t to a buffer in target memory endianness
 * (a single byte is endianness-independent; target is unused) */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	*buffer = value;
}
  384. /* write a uint64_t array to a buffer in target memory endianness */
  385. void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
  386. {
  387. uint32_t i;
  388. for (i = 0; i < count; i++)
  389. dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
  390. }
  391. /* write a uint32_t array to a buffer in target memory endianness */
  392. void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
  393. {
  394. uint32_t i;
  395. for (i = 0; i < count; i++)
  396. dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
  397. }
  398. /* write a uint16_t array to a buffer in target memory endianness */
  399. void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
  400. {
  401. uint32_t i;
  402. for (i = 0; i < count; i++)
  403. dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
  404. }
  405. /* write a uint64_t array to a buffer in target memory endianness */
  406. void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
  407. {
  408. uint32_t i;
  409. for (i = 0; i < count; i++)
  410. target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
  411. }
  412. /* write a uint32_t array to a buffer in target memory endianness */
  413. void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
  414. {
  415. uint32_t i;
  416. for (i = 0; i < count; i++)
  417. target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
  418. }
  419. /* write a uint16_t array to a buffer in target memory endianness */
  420. void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
  421. {
  422. uint32_t i;
  423. for (i = 0; i < count; i++)
  424. target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
  425. }
  426. /* return a pointer to a configured target; id is name or number */
  427. struct target *get_target(const char *id)
  428. {
  429. struct target *target;
  430. /* try as tcltarget name */
  431. for (target = all_targets; target; target = target->next) {
  432. if (target_name(target) == NULL)
  433. continue;
  434. if (strcmp(id, target_name(target)) == 0)
  435. return target;
  436. }
  437. /* It's OK to remove this fallback sometime after August 2010 or so */
  438. /* no match, try as number */
  439. unsigned num;
  440. if (parse_uint(id, &num) != ERROR_OK)
  441. return NULL;
  442. for (target = all_targets; target; target = target->next) {
  443. if (target->target_number == (int)num) {
  444. LOG_WARNING("use '%s' as target identifier, not '%u'",
  445. target_name(target), num);
  446. return target;
  447. }
  448. }
  449. return NULL;
  450. }
  451. /* returns a pointer to the n-th configured target */
  452. struct target *get_target_by_num(int num)
  453. {
  454. struct target *target = all_targets;
  455. while (target) {
  456. if (target->target_number == num)
  457. return target;
  458. target = target->next;
  459. }
  460. return NULL;
  461. }
  462. struct target *get_current_target(struct command_context *cmd_ctx)
  463. {
  464. struct target *target = get_current_target_or_null(cmd_ctx);
  465. if (!target) {
  466. LOG_ERROR("BUG: current_target out of bounds");
  467. exit(-1);
  468. }
  469. return target;
  470. }
  471. struct target *get_current_target_or_null(struct command_context *cmd_ctx)
  472. {
  473. return cmd_ctx->current_target_override
  474. ? cmd_ctx->current_target_override
  475. : cmd_ctx->current_target;
  476. }
  477. int target_poll(struct target *target)
  478. {
  479. int retval;
  480. /* We can't poll until after examine */
  481. if (!target_was_examined(target)) {
  482. /* Fail silently lest we pollute the log */
  483. return ERROR_FAIL;
  484. }
  485. retval = target->type->poll(target);
  486. if (retval != ERROR_OK)
  487. return retval;
  488. if (target->halt_issued) {
  489. if (target->state == TARGET_HALTED)
  490. target->halt_issued = false;
  491. else {
  492. int64_t t = timeval_ms() - target->halt_issued_time;
  493. if (t > DEFAULT_HALT_TIMEOUT) {
  494. target->halt_issued = false;
  495. LOG_INFO("Halt timed out, wake up GDB.");
  496. target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
  497. }
  498. }
  499. }
  500. return ERROR_OK;
  501. }
  502. int target_halt(struct target *target)
  503. {
  504. int retval;
  505. /* We can't poll until after examine */
  506. if (!target_was_examined(target)) {
  507. LOG_ERROR("Target not examined yet");
  508. return ERROR_FAIL;
  509. }
  510. retval = target->type->halt(target);
  511. if (retval != ERROR_OK)
  512. return retval;
  513. target->halt_issued = true;
  514. target->halt_issued_time = timeval_ms();
  515. return ERROR_OK;
  516. }
/**
 * Make the target (re)start executing using its saved execution
 * context (possibly with some modifications).
 *
 * @param target Which target should start executing.
 * @param current True to use the target's saved program counter instead
 * of the address parameter
 * @param address Optionally used as the program counter.
 * @param handle_breakpoints True iff breakpoints at the resumption PC
 * should be skipped. (For example, maybe execution was stopped by
 * such a breakpoint, in which case it would be counterproductive to
 * let it re-trigger.
 * @param debug_execution False if all working areas allocated by OpenOCD
 * should be released and/or restored to their original contents.
 * (This would for example be true to run some downloaded "helper"
 * algorithm code, which resides in one such working buffer and uses
 * another for data storage.)
 *
 * @todo Resolve the ambiguity about what the "debug_execution" flag
 * signifies. For example, Target implementations don't agree on how
 * it relates to invalidation of the register cache, or to whether
 * breakpoints and watchpoints should be enabled. (It would seem wrong
 * to enable breakpoints when running downloaded "helper" algorithms
 * (debug_execution true), since the breakpoints would be set to match
 * target firmware being debugged, not the helper algorithm.... and
 * enabling them could cause such helpers to malfunction (for example,
 * by overwriting data with a breakpoint instruction. On the other
 * hand the infrastructure for running such helpers might use this
 * procedure but rely on hardware breakpoint to detect termination.)
 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */

	/*
	 * resume() triggers the event 'resumed'. The execution of TCL commands
	 * in the event handler causes the polling of targets. If the target has
	 * already halted for a breakpoint, polling will run the 'halted' event
	 * handler before the pending 'resumed' handler.
	 * Disable polling during resume() to guarantee the execution of handlers
	 * in the correct order.
	 */
	bool save_poll = jtag_poll_get_enabled();
	jtag_poll_set_enabled(false);
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	jtag_poll_set_enabled(save_poll);
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
  578. static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
  579. {
  580. char buf[100];
  581. int retval;
  582. struct jim_nvp *n;
  583. n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
  584. if (!n->name) {
  585. LOG_ERROR("invalid reset mode");
  586. return ERROR_FAIL;
  587. }
  588. struct target *target;
  589. for (target = all_targets; target; target = target->next)
  590. target_call_reset_callbacks(target, reset_mode);
  591. /* disable polling during reset to make reset event scripts
  592. * more predictable, i.e. dr/irscan & pathmove in events will
  593. * not have JTAG operations injected into the middle of a sequence.
  594. */
  595. bool save_poll = jtag_poll_get_enabled();
  596. jtag_poll_set_enabled(false);
  597. sprintf(buf, "ocd_process_reset %s", n->name);
  598. retval = Jim_Eval(cmd->ctx->interp, buf);
  599. jtag_poll_set_enabled(save_poll);
  600. if (retval != JIM_OK) {
  601. Jim_MakeErrorMessage(cmd->ctx->interp);
  602. command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
  603. return ERROR_FAIL;
  604. }
  605. /* We want any events to be processed before the prompt */
  606. retval = target_call_timer_callbacks_now();
  607. for (target = all_targets; target; target = target->next) {
  608. target->type->check_reset(target);
  609. target->running_alg = false;
  610. }
  611. return retval;
  612. }
/* Default virt2phys for targets without an MMU: the identity mapping. */
static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
/* Default mmu query for targets without one: always reports disabled. */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
/* Default examine handler: simply mark the target as examined. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
/* no check by default */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
  634. /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
  635. * Keep in sync */
  636. int target_examine_one(struct target *target)
  637. {
  638. target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
  639. int retval = target->type->examine(target);
  640. if (retval != ERROR_OK) {
  641. target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
  642. return retval;
  643. }
  644. target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
  645. return ERROR_OK;
  646. }
  647. static int jtag_enable_callback(enum jtag_event event, void *priv)
  648. {
  649. struct target *target = priv;
  650. if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
  651. return ERROR_OK;
  652. jtag_unregister_event_callback(jtag_enable_callback, target);
  653. return target_examine_one(target);
  654. }
  655. /* Targets that correctly implement init + examine, i.e.
  656. * no communication with target during init:
  657. *
  658. * XScale
  659. */
  660. int target_examine(void)
  661. {
  662. int retval = ERROR_OK;
  663. struct target *target;
  664. for (target = all_targets; target; target = target->next) {
  665. /* defer examination, but don't skip it */
  666. if (!target->tap->enabled) {
  667. jtag_register_event_callback(jtag_enable_callback,
  668. target);
  669. continue;
  670. }
  671. if (target->defer_examine)
  672. continue;
  673. int retval2 = target_examine_one(target);
  674. if (retval2 != ERROR_OK) {
  675. LOG_WARNING("target %s examination failed", target_name(target));
  676. retval = retval2;
  677. }
  678. }
  679. return retval;
  680. }
/* Return the name of the target's driver type (e.g. "cortex_m"). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
  685. static int target_soft_reset_halt(struct target *target)
  686. {
  687. if (!target_was_examined(target)) {
  688. LOG_ERROR("Target not examined yet");
  689. return ERROR_FAIL;
  690. }
  691. if (!target->type->soft_reset_halt) {
  692. LOG_ERROR("Target %s does not support soft_reset_halt",
  693. target_name(target));
  694. return ERROR_FAIL;
  695. }
  696. return target->type->soft_reset_halt(target);
  697. }
  698. /**
  699. * Downloads a target-specific native code algorithm to the target,
  700. * and executes it. * Note that some targets may need to set up, enable,
  701. * and tear down a breakpoint (hard or * soft) to detect algorithm
  702. * termination, while others may support lower overhead schemes where
  703. * soft breakpoints embedded in the algorithm automatically terminate the
  704. * algorithm.
  705. *
  706. * @param target used to run the algorithm
  707. * @param num_mem_params
  708. * @param mem_params
  709. * @param num_reg_params
  710. * @param reg_param
  711. * @param entry_point
  712. * @param exit_point
  713. * @param timeout_ms
  714. * @param arch_info target-specific description of the algorithm.
  715. */
  716. int target_run_algorithm(struct target *target,
  717. int num_mem_params, struct mem_param *mem_params,
  718. int num_reg_params, struct reg_param *reg_param,
  719. uint32_t entry_point, uint32_t exit_point,
  720. int timeout_ms, void *arch_info)
  721. {
  722. int retval = ERROR_FAIL;
  723. if (!target_was_examined(target)) {
  724. LOG_ERROR("Target not examined yet");
  725. goto done;
  726. }
  727. if (!target->type->run_algorithm) {
  728. LOG_ERROR("Target type '%s' does not support %s",
  729. target_type_name(target), __func__);
  730. goto done;
  731. }
  732. target->running_alg = true;
  733. retval = target->type->run_algorithm(target,
  734. num_mem_params, mem_params,
  735. num_reg_params, reg_param,
  736. entry_point, exit_point, timeout_ms, arch_info);
  737. target->running_alg = false;
  738. done:
  739. return retval;
  740. }
  741. /**
  742. * Executes a target-specific native code algorithm and leaves it running.
  743. *
  744. * @param target used to run the algorithm
  745. * @param num_mem_params
  746. * @param mem_params
  747. * @param num_reg_params
  748. * @param reg_params
  749. * @param entry_point
  750. * @param exit_point
  751. * @param arch_info target-specific description of the algorithm.
  752. */
  753. int target_start_algorithm(struct target *target,
  754. int num_mem_params, struct mem_param *mem_params,
  755. int num_reg_params, struct reg_param *reg_params,
  756. uint32_t entry_point, uint32_t exit_point,
  757. void *arch_info)
  758. {
  759. int retval = ERROR_FAIL;
  760. if (!target_was_examined(target)) {
  761. LOG_ERROR("Target not examined yet");
  762. goto done;
  763. }
  764. if (!target->type->start_algorithm) {
  765. LOG_ERROR("Target type '%s' does not support %s",
  766. target_type_name(target), __func__);
  767. goto done;
  768. }
  769. if (target->running_alg) {
  770. LOG_ERROR("Target is already running an algorithm");
  771. goto done;
  772. }
  773. target->running_alg = true;
  774. retval = target->type->start_algorithm(target,
  775. num_mem_params, mem_params,
  776. num_reg_params, reg_params,
  777. entry_point, exit_point, arch_info);
  778. done:
  779. return retval;
  780. }
  781. /**
  782. * Waits for an algorithm started with target_start_algorithm() to complete.
  783. *
  784. * @param target used to run the algorithm
  785. * @param num_mem_params
  786. * @param mem_params
  787. * @param num_reg_params
  788. * @param reg_params
  789. * @param exit_point
  790. * @param timeout_ms
  791. * @param arch_info target-specific description of the algorithm.
  792. */
  793. int target_wait_algorithm(struct target *target,
  794. int num_mem_params, struct mem_param *mem_params,
  795. int num_reg_params, struct reg_param *reg_params,
  796. uint32_t exit_point, int timeout_ms,
  797. void *arch_info)
  798. {
  799. int retval = ERROR_FAIL;
  800. if (!target->type->wait_algorithm) {
  801. LOG_ERROR("Target type '%s' does not support %s",
  802. target_type_name(target), __func__);
  803. goto done;
  804. }
  805. if (!target->running_alg) {
  806. LOG_ERROR("Target is not running an algorithm");
  807. goto done;
  808. }
  809. retval = target->type->wait_algorithm(target,
  810. num_mem_params, mem_params,
  811. num_reg_params, reg_params,
  812. exit_point, timeout_ms, arch_info);
  813. if (retval != ERROR_TARGET_TIMEOUT)
  814. target->running_alg = false;
  815. done:
  816. return retval;
  817. }
  818. /**
  819. * Streams data to a circular buffer on target intended for consumption by code
  820. * running asynchronously on target.
  821. *
  822. * This is intended for applications where target-specific native code runs
  823. * on the target, receives data from the circular buffer, does something with
  824. * it (most likely writing it to a flash memory), and advances the circular
  825. * buffer pointer.
  826. *
  827. * This assumes that the helper algorithm has already been loaded to the target,
  828. * but has not been started yet. Given memory and register parameters are passed
  829. * to the algorithm.
  830. *
  831. * The buffer is defined by (buffer_start, buffer_size) arguments and has the
  832. * following format:
  833. *
  834. * [buffer_start + 0, buffer_start + 4):
  835. * Write Pointer address (aka head). Written and updated by this
  836. * routine when new data is written to the circular buffer.
  837. * [buffer_start + 4, buffer_start + 8):
  838. * Read Pointer address (aka tail). Updated by code running on the
  839. * target after it consumes data.
  840. * [buffer_start + 8, buffer_start + buffer_size):
  841. * Circular buffer contents.
  842. *
  843. * See contrib/loaders/flash/stm32f1x.S for an example.
  844. *
  845. * @param target used to run the algorithm
  846. * @param buffer address on the host where data to be sent is located
  847. * @param count number of blocks to send
  848. * @param block_size size in bytes of each block
  849. * @param num_mem_params count of memory-based params to pass to algorithm
  850. * @param mem_params memory-based params to pass to algorithm
  851. * @param num_reg_params count of register-based params to pass to algorithm
  852. * @param reg_params memory-based params to pass to algorithm
  853. * @param buffer_start address on the target of the circular buffer structure
  854. * @param buffer_size size of the circular buffer structure
  855. * @param entry_point address on the target to execute to start the algorithm
  856. * @param exit_point address at which to set a breakpoint to catch the
  857. * end of the algorithm; can be 0 if target triggers a breakpoint itself
  858. * @param arch_info
  859. */
  860. int target_run_flash_async_algorithm(struct target *target,
  861. const uint8_t *buffer, uint32_t count, int block_size,
  862. int num_mem_params, struct mem_param *mem_params,
  863. int num_reg_params, struct reg_param *reg_params,
  864. uint32_t buffer_start, uint32_t buffer_size,
  865. uint32_t entry_point, uint32_t exit_point, void *arch_info)
  866. {
  867. int retval;
  868. int timeout = 0;
  869. const uint8_t *buffer_orig = buffer;
  870. /* Set up working area. First word is write pointer, second word is read pointer,
  871. * rest is fifo data area. */
  872. uint32_t wp_addr = buffer_start;
  873. uint32_t rp_addr = buffer_start + 4;
  874. uint32_t fifo_start_addr = buffer_start + 8;
  875. uint32_t fifo_end_addr = buffer_start + buffer_size;
  876. uint32_t wp = fifo_start_addr;
  877. uint32_t rp = fifo_start_addr;
  878. /* validate block_size is 2^n */
  879. assert(!block_size || !(block_size & (block_size - 1)));
  880. retval = target_write_u32(target, wp_addr, wp);
  881. if (retval != ERROR_OK)
  882. return retval;
  883. retval = target_write_u32(target, rp_addr, rp);
  884. if (retval != ERROR_OK)
  885. return retval;
  886. /* Start up algorithm on target and let it idle while writing the first chunk */
  887. retval = target_start_algorithm(target, num_mem_params, mem_params,
  888. num_reg_params, reg_params,
  889. entry_point,
  890. exit_point,
  891. arch_info);
  892. if (retval != ERROR_OK) {
  893. LOG_ERROR("error starting target flash write algorithm");
  894. return retval;
  895. }
  896. while (count > 0) {
  897. retval = target_read_u32(target, rp_addr, &rp);
  898. if (retval != ERROR_OK) {
  899. LOG_ERROR("failed to get read pointer");
  900. break;
  901. }
  902. LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
(size_t)(buffer - buffer_orig), count, wp, rp);
  904. if (rp == 0) {
  905. LOG_ERROR("flash write algorithm aborted by target");
  906. retval = ERROR_FLASH_OPERATION_FAILED;
  907. break;
  908. }
  909. if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
  910. LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
  911. break;
  912. }
  913. /* Count the number of bytes available in the fifo without
  914. * crossing the wrap around. Make sure to not fill it completely,
  915. * because that would make wp == rp and that's the empty condition. */
  916. uint32_t thisrun_bytes;
  917. if (rp > wp)
  918. thisrun_bytes = rp - wp - block_size;
  919. else if (rp > fifo_start_addr)
  920. thisrun_bytes = fifo_end_addr - wp;
  921. else
  922. thisrun_bytes = fifo_end_addr - wp - block_size;
  923. if (thisrun_bytes == 0) {
  924. /* Throttle polling a bit if transfer is (much) faster than flash
  925. * programming. The exact delay shouldn't matter as long as it's
  926. * less than buffer size / flash speed. This is very unlikely to
  927. * run when using high latency connections such as USB. */
  928. alive_sleep(2);
  929. /* to stop an infinite loop on some targets check and increment a timeout
  930. * this issue was observed on a stellaris using the new ICDI interface */
  931. if (timeout++ >= 2500) {
  932. LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
  933. return ERROR_FLASH_OPERATION_FAILED;
  934. }
  935. continue;
  936. }
  937. /* reset our timeout */
  938. timeout = 0;
  939. /* Limit to the amount of data we actually want to write */
  940. if (thisrun_bytes > count * block_size)
  941. thisrun_bytes = count * block_size;
  942. /* Force end of large blocks to be word aligned */
  943. if (thisrun_bytes >= 16)
  944. thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
  945. /* Write data to fifo */
  946. retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
  947. if (retval != ERROR_OK)
  948. break;
  949. /* Update counters and wrap write pointer */
  950. buffer += thisrun_bytes;
  951. count -= thisrun_bytes / block_size;
  952. wp += thisrun_bytes;
  953. if (wp >= fifo_end_addr)
  954. wp = fifo_start_addr;
  955. /* Store updated write pointer to target */
  956. retval = target_write_u32(target, wp_addr, wp);
  957. if (retval != ERROR_OK)
  958. break;
  959. /* Avoid GDB timeouts */
  960. keep_alive();
  961. }
  962. if (retval != ERROR_OK) {
  963. /* abort flash write algorithm on target */
  964. target_write_u32(target, wp_addr, 0);
  965. }
  966. int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
  967. num_reg_params, reg_params,
  968. exit_point,
  969. 10000,
  970. arch_info);
  971. if (retval2 != ERROR_OK) {
  972. LOG_ERROR("error waiting for target flash write algorithm");
  973. retval = retval2;
  974. }
  975. if (retval == ERROR_OK) {
  976. /* check if algorithm set rp = 0 after fifo writer loop finished */
  977. retval = target_read_u32(target, rp_addr, &rp);
  978. if (retval == ERROR_OK && rp == 0) {
  979. LOG_ERROR("flash write algorithm aborted by target");
  980. retval = ERROR_FLASH_OPERATION_FAILED;
  981. }
  982. }
  983. return retval;
  984. }
  985. int target_run_read_async_algorithm(struct target *target,
  986. uint8_t *buffer, uint32_t count, int block_size,
  987. int num_mem_params, struct mem_param *mem_params,
  988. int num_reg_params, struct reg_param *reg_params,
  989. uint32_t buffer_start, uint32_t buffer_size,
  990. uint32_t entry_point, uint32_t exit_point, void *arch_info)
  991. {
  992. int retval;
  993. int timeout = 0;
  994. const uint8_t *buffer_orig = buffer;
  995. /* Set up working area. First word is write pointer, second word is read pointer,
  996. * rest is fifo data area. */
  997. uint32_t wp_addr = buffer_start;
  998. uint32_t rp_addr = buffer_start + 4;
  999. uint32_t fifo_start_addr = buffer_start + 8;
  1000. uint32_t fifo_end_addr = buffer_start + buffer_size;
  1001. uint32_t wp = fifo_start_addr;
  1002. uint32_t rp = fifo_start_addr;
  1003. /* validate block_size is 2^n */
  1004. assert(!block_size || !(block_size & (block_size - 1)));
  1005. retval = target_write_u32(target, wp_addr, wp);
  1006. if (retval != ERROR_OK)
  1007. return retval;
  1008. retval = target_write_u32(target, rp_addr, rp);
  1009. if (retval != ERROR_OK)
  1010. return retval;
  1011. /* Start up algorithm on target */
  1012. retval = target_start_algorithm(target, num_mem_params, mem_params,
  1013. num_reg_params, reg_params,
  1014. entry_point,
  1015. exit_point,
  1016. arch_info);
  1017. if (retval != ERROR_OK) {
  1018. LOG_ERROR("error starting target flash read algorithm");
  1019. return retval;
  1020. }
  1021. while (count > 0) {
  1022. retval = target_read_u32(target, wp_addr, &wp);
  1023. if (retval != ERROR_OK) {
  1024. LOG_ERROR("failed to get write pointer");
  1025. break;
  1026. }
  1027. LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
  1028. (size_t)(buffer - buffer_orig), count, wp, rp);
  1029. if (wp == 0) {
  1030. LOG_ERROR("flash read algorithm aborted by target");
  1031. retval = ERROR_FLASH_OPERATION_FAILED;
  1032. break;
  1033. }
  1034. if (((wp - fifo_start_addr) & (block_size - 1)) || wp < fifo_start_addr || wp >= fifo_end_addr) {
  1035. LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
  1036. break;
  1037. }
  1038. /* Count the number of bytes available in the fifo without
  1039. * crossing the wrap around. */
  1040. uint32_t thisrun_bytes;
  1041. if (wp >= rp)
  1042. thisrun_bytes = wp - rp;
  1043. else
  1044. thisrun_bytes = fifo_end_addr - rp;
  1045. if (thisrun_bytes == 0) {
  1046. /* Throttle polling a bit if transfer is (much) faster than flash
  1047. * reading. The exact delay shouldn't matter as long as it's
  1048. * less than buffer size / flash speed. This is very unlikely to
  1049. * run when using high latency connections such as USB. */
  1050. alive_sleep(2);
  1051. /* to stop an infinite loop on some targets check and increment a timeout
  1052. * this issue was observed on a stellaris using the new ICDI interface */
  1053. if (timeout++ >= 2500) {
  1054. LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
  1055. return ERROR_FLASH_OPERATION_FAILED;
  1056. }
  1057. continue;
  1058. }
  1059. /* Reset our timeout */
  1060. timeout = 0;
  1061. /* Limit to the amount of data we actually want to read */
  1062. if (thisrun_bytes > count * block_size)
  1063. thisrun_bytes = count * block_size;
  1064. /* Force end of large blocks to be word aligned */
  1065. if (thisrun_bytes >= 16)
  1066. thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
  1067. /* Read data from fifo */
  1068. retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
  1069. if (retval != ERROR_OK)
  1070. break;
  1071. /* Update counters and wrap write pointer */
  1072. buffer += thisrun_bytes;
  1073. count -= thisrun_bytes / block_size;
  1074. rp += thisrun_bytes;
  1075. if (rp >= fifo_end_addr)
  1076. rp = fifo_start_addr;
  1077. /* Store updated write pointer to target */
  1078. retval = target_write_u32(target, rp_addr, rp);
  1079. if (retval != ERROR_OK)
  1080. break;
  1081. /* Avoid GDB timeouts */
  1082. keep_alive();
  1083. }
  1084. if (retval != ERROR_OK) {
  1085. /* abort flash write algorithm on target */
  1086. target_write_u32(target, rp_addr, 0);
  1087. }
  1088. int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
  1089. num_reg_params, reg_params,
  1090. exit_point,
  1091. 10000,
  1092. arch_info);
  1093. if (retval2 != ERROR_OK) {
  1094. LOG_ERROR("error waiting for target flash write algorithm");
  1095. retval = retval2;
  1096. }
  1097. if (retval == ERROR_OK) {
  1098. /* check if algorithm set wp = 0 after fifo writer loop finished */
  1099. retval = target_read_u32(target, wp_addr, &wp);
  1100. if (retval == ERROR_OK && wp == 0) {
  1101. LOG_ERROR("flash read algorithm aborted by target");
  1102. retval = ERROR_FLASH_OPERATION_FAILED;
  1103. }
  1104. }
  1105. return retval;
  1106. }
  1107. int target_read_memory(struct target *target,
  1108. target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
  1109. {
  1110. if (!target_was_examined(target)) {
  1111. LOG_ERROR("Target not examined yet");
  1112. return ERROR_FAIL;
  1113. }
  1114. if (!target->type->read_memory) {
  1115. LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
  1116. return ERROR_FAIL;
  1117. }
  1118. return target->type->read_memory(target, address, size, count, buffer);
  1119. }
  1120. int target_read_phys_memory(struct target *target,
  1121. target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
  1122. {
  1123. if (!target_was_examined(target)) {
  1124. LOG_ERROR("Target not examined yet");
  1125. return ERROR_FAIL;
  1126. }
  1127. if (!target->type->read_phys_memory) {
  1128. LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
  1129. return ERROR_FAIL;
  1130. }
  1131. return target->type->read_phys_memory(target, address, size, count, buffer);
  1132. }
  1133. int target_write_memory(struct target *target,
  1134. target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
  1135. {
  1136. if (!target_was_examined(target)) {
  1137. LOG_ERROR("Target not examined yet");
  1138. return ERROR_FAIL;
  1139. }
  1140. if (!target->type->write_memory) {
  1141. LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
  1142. return ERROR_FAIL;
  1143. }
  1144. return target->type->write_memory(target, address, size, count, buffer);
  1145. }
  1146. int target_write_phys_memory(struct target *target,
  1147. target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
  1148. {
  1149. if (!target_was_examined(target)) {
  1150. LOG_ERROR("Target not examined yet");
  1151. return ERROR_FAIL;
  1152. }
  1153. if (!target->type->write_phys_memory) {
  1154. LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
  1155. return ERROR_FAIL;
  1156. }
  1157. return target->type->write_phys_memory(target, address, size, count, buffer);
  1158. }
  1159. int target_add_breakpoint(struct target *target,
  1160. struct breakpoint *breakpoint)
  1161. {
  1162. if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
  1163. LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
  1164. return ERROR_TARGET_NOT_HALTED;
  1165. }
  1166. return target->type->add_breakpoint(target, breakpoint);
  1167. }
  1168. int target_add_context_breakpoint(struct target *target,
  1169. struct breakpoint *breakpoint)
  1170. {
  1171. if (target->state != TARGET_HALTED) {
  1172. LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
  1173. return ERROR_TARGET_NOT_HALTED;
  1174. }
  1175. return target->type->add_context_breakpoint(target, breakpoint);
  1176. }
  1177. int target_add_hybrid_breakpoint(struct target *target,
  1178. struct breakpoint *breakpoint)
  1179. {
  1180. if (target->state != TARGET_HALTED) {
  1181. LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
  1182. return ERROR_TARGET_NOT_HALTED;
  1183. }
  1184. return target->type->add_hybrid_breakpoint(target, breakpoint);
  1185. }
/* Remove a previously installed breakpoint; delegates straight to the
 * target backend (no halted-state check, unlike target_add_breakpoint()). */
int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}
  1191. int target_add_watchpoint(struct target *target,
  1192. struct watchpoint *watchpoint)
  1193. {
  1194. if (target->state != TARGET_HALTED) {
  1195. LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
  1196. return ERROR_TARGET_NOT_HALTED;
  1197. }
  1198. return target->type->add_watchpoint(target, watchpoint);
  1199. }
/* Remove a previously installed watchpoint; delegates straight to the
 * target backend (no halted-state check, unlike target_add_watchpoint()). */
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}
  1205. int target_hit_watchpoint(struct target *target,
  1206. struct watchpoint **hit_watchpoint)
  1207. {
  1208. if (target->state != TARGET_HALTED) {
  1209. LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
  1210. return ERROR_TARGET_NOT_HALTED;
  1211. }
  1212. if (!target->type->hit_watchpoint) {
  1213. /* For backward compatible, if hit_watchpoint is not implemented,
  1214. * return ERROR_FAIL such that gdb_server will not take the nonsense
  1215. * information. */
  1216. return ERROR_FAIL;
  1217. }
  1218. return target->type->hit_watchpoint(target, hit_watchpoint);
  1219. }
  1220. const char *target_get_gdb_arch(struct target *target)
  1221. {
  1222. if (!target->type->get_gdb_arch)
  1223. return NULL;
  1224. return target->type->get_gdb_arch(target);
  1225. }
  1226. int target_get_gdb_reg_list(struct target *target,
  1227. struct reg **reg_list[], int *reg_list_size,
  1228. enum target_register_class reg_class)
  1229. {
  1230. int result = ERROR_FAIL;
  1231. if (!target_was_examined(target)) {
  1232. LOG_ERROR("Target not examined yet");
  1233. goto done;
  1234. }
  1235. result = target->type->get_gdb_reg_list(target, reg_list,
  1236. reg_list_size, reg_class);
  1237. done:
  1238. if (result != ERROR_OK) {
  1239. *reg_list = NULL;
  1240. *reg_list_size = 0;
  1241. }
  1242. return result;
  1243. }
  1244. int target_get_gdb_reg_list_noread(struct target *target,
  1245. struct reg **reg_list[], int *reg_list_size,
  1246. enum target_register_class reg_class)
  1247. {
  1248. if (target->type->get_gdb_reg_list_noread &&
  1249. target->type->get_gdb_reg_list_noread(target, reg_list,
  1250. reg_list_size, reg_class) == ERROR_OK)
  1251. return ERROR_OK;
  1252. return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
  1253. }
  1254. bool target_supports_gdb_connection(struct target *target)
  1255. {
  1256. /*
  1257. * exclude all the targets that don't provide get_gdb_reg_list
  1258. * or that have explicit gdb_max_connection == 0
  1259. */
  1260. return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
  1261. }
  1262. int target_step(struct target *target,
  1263. int current, target_addr_t address, int handle_breakpoints)
  1264. {
  1265. int retval;
  1266. target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
  1267. retval = target->type->step(target, current, address, handle_breakpoints);
  1268. if (retval != ERROR_OK)
  1269. return retval;
  1270. target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
  1271. return retval;
  1272. }
  1273. int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
  1274. {
  1275. if (target->state != TARGET_HALTED) {
  1276. LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
  1277. return ERROR_TARGET_NOT_HALTED;
  1278. }
  1279. return target->type->get_gdb_fileio_info(target, fileio_info);
  1280. }
  1281. int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
  1282. {
  1283. if (target->state != TARGET_HALTED) {
  1284. LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
  1285. return ERROR_TARGET_NOT_HALTED;
  1286. }
  1287. return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
  1288. }
  1289. target_addr_t target_address_max(struct target *target)
  1290. {
  1291. unsigned bits = target_address_bits(target);
  1292. if (sizeof(target_addr_t) * 8 == bits)
  1293. return (target_addr_t) -1;
  1294. else
  1295. return (((target_addr_t) 1) << bits) - 1;
  1296. }
  1297. unsigned target_address_bits(struct target *target)
  1298. {
  1299. if (target->type->address_bits)
  1300. return target->type->address_bits(target);
  1301. return 32;
  1302. }
  1303. unsigned int target_data_bits(struct target *target)
  1304. {
  1305. if (target->type->data_bits)
  1306. return target->type->data_bits(target);
  1307. return 32;
  1308. }
/* Collect PC samples for profiling; delegates to the backend hook
 * (a default implementation is installed by target_init_one()). */
static int target_profiling(struct target *target, uint32_t *samples,
	uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	return target->type->profiling(target, samples, max_num_samples,
			num_samples, seconds);
}
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static void target_reset_examined(struct target *target)
{
	target->examined = false;
}
  1323. static int handle_target(void *priv);
  1324. static int target_init_one(struct command_context *cmd_ctx,
  1325. struct target *target)
  1326. {
  1327. target_reset_examined(target);
  1328. struct target_type *type = target->type;
  1329. if (!type->examine)
  1330. type->examine = default_examine;
  1331. if (!type->check_reset)
  1332. type->check_reset = default_check_reset;
  1333. assert(type->init_target);
  1334. int retval = type->init_target(cmd_ctx, target);
  1335. if (retval != ERROR_OK) {
  1336. LOG_ERROR("target '%s' init failed", target_name(target));
  1337. return retval;
  1338. }
  1339. /* Sanity-check MMU support ... stub in what we must, to help
  1340. * implement it in stages, but warn if we need to do so.
  1341. */
  1342. if (type->mmu) {
  1343. if (!type->virt2phys) {
  1344. LOG_ERROR("type '%s' is missing virt2phys", type->name);
  1345. type->virt2phys = identity_virt2phys;
  1346. }
  1347. } else {
  1348. /* Make sure no-MMU targets all behave the same: make no
  1349. * distinction between physical and virtual addresses, and
  1350. * ensure that virt2phys() is always an identity mapping.
  1351. */
  1352. if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
  1353. LOG_WARNING("type '%s' has bad MMU hooks", type->name);
  1354. type->mmu = no_mmu;
  1355. type->write_phys_memory = type->write_memory;
  1356. type->read_phys_memory = type->read_memory;
  1357. type->virt2phys = identity_virt2phys;
  1358. }
  1359. if (!target->type->read_buffer)
  1360. target->type->read_buffer = target_read_buffer_default;
  1361. if (!target->type->write_buffer)
  1362. target->type->write_buffer = target_write_buffer_default;
  1363. if (!target->type->get_gdb_fileio_info)
  1364. target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
  1365. if (!target->type->gdb_fileio_end)
  1366. target->type->gdb_fileio_end = target_gdb_fileio_end_default;
  1367. if (!target->type->profiling)
  1368. target->type->profiling = target_profiling_default;
  1369. return ERROR_OK;
  1370. }
  1371. static int target_init(struct command_context *cmd_ctx)
  1372. {
  1373. struct target *target;
  1374. int retval;
  1375. for (target = all_targets; target; target = target->next) {
  1376. retval = target_init_one(cmd_ctx, target);
  1377. if (retval != ERROR_OK)
  1378. return retval;
  1379. }
  1380. if (!all_targets)
  1381. return ERROR_OK;
  1382. retval = target_register_user_commands(cmd_ctx);
  1383. if (retval != ERROR_OK)
  1384. return retval;
  1385. retval = target_register_timer_callback(&handle_target,
  1386. polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
  1387. if (retval != ERROR_OK)
  1388. return retval;
  1389. return ERROR_OK;
  1390. }
  1391. COMMAND_HANDLER(handle_target_init_command)
  1392. {
  1393. int retval;
  1394. if (CMD_ARGC != 0)
  1395. return ERROR_COMMAND_SYNTAX_ERROR;
  1396. static bool target_initialized;
  1397. if (target_initialized) {
  1398. LOG_INFO("'target init' has already been called");
  1399. return ERROR_OK;
  1400. }
  1401. target_initialized = true;
  1402. retval = command_run_line(CMD_CTX, "init_targets");
  1403. if (retval != ERROR_OK)
  1404. return retval;
  1405. retval = command_run_line(CMD_CTX, "init_target_events");
  1406. if (retval != ERROR_OK)
  1407. return retval;
  1408. retval = command_run_line(CMD_CTX, "init_board");
  1409. if (retval != ERROR_OK)
  1410. return retval;
  1411. LOG_DEBUG("Initializing targets...");
  1412. return target_init(CMD_CTX);
  1413. }
  1414. int target_register_event_callback(int (*callback)(struct target *target,
  1415. enum target_event event, void *priv), void *priv)
  1416. {
  1417. struct target_event_callback **callbacks_p = &target_event_callbacks;
  1418. if (!callback)
  1419. return ERROR_COMMAND_SYNTAX_ERROR;
  1420. if (*callbacks_p) {
  1421. while ((*callbacks_p)->next)
  1422. callbacks_p = &((*callbacks_p)->next);
  1423. callbacks_p = &((*callbacks_p)->next);
  1424. }
  1425. (*callbacks_p) = malloc(sizeof(struct target_event_callback));
  1426. (*callbacks_p)->callback = callback;
  1427. (*callbacks_p)->priv = priv;
  1428. (*callbacks_p)->next = NULL;
  1429. return ERROR_OK;
  1430. }
  1431. int target_register_reset_callback(int (*callback)(struct target *target,
  1432. enum target_reset_mode reset_mode, void *priv), void *priv)
  1433. {
  1434. struct target_reset_callback *entry;
  1435. if (!callback)
  1436. return ERROR_COMMAND_SYNTAX_ERROR;
  1437. entry = malloc(sizeof(struct target_reset_callback));
  1438. if (!entry) {
  1439. LOG_ERROR("error allocating buffer for reset callback entry");
  1440. return ERROR_COMMAND_SYNTAX_ERROR;
  1441. }
  1442. entry->callback = callback;
  1443. entry->priv = priv;
  1444. list_add(&entry->list, &target_reset_callback_list);
  1445. return ERROR_OK;
  1446. }
  1447. int target_register_trace_callback(int (*callback)(struct target *target,
  1448. size_t len, uint8_t *data, void *priv), void *priv)
  1449. {
  1450. struct target_trace_callback *entry;
  1451. if (!callback)
  1452. return ERROR_COMMAND_SYNTAX_ERROR;
  1453. entry = malloc(sizeof(struct target_trace_callback));
  1454. if (!entry) {
  1455. LOG_ERROR("error allocating buffer for trace callback entry");
  1456. return ERROR_COMMAND_SYNTAX_ERROR;
  1457. }
  1458. entry->callback = callback;
  1459. entry->priv = priv;
  1460. list_add(&entry->list, &target_trace_callback_list);
  1461. return ERROR_OK;
  1462. }
  1463. int target_register_timer_callback(int (*callback)(void *priv),
  1464. unsigned int time_ms, enum target_timer_type type, void *priv)
  1465. {
  1466. struct target_timer_callback **callbacks_p = &target_timer_callbacks;
  1467. if (!callback)
  1468. return ERROR_COMMAND_SYNTAX_ERROR;
  1469. if (*callbacks_p) {
  1470. while ((*callbacks_p)->next)
  1471. callbacks_p = &((*callbacks_p)->next);
  1472. callbacks_p = &((*callbacks_p)->next);
  1473. }
  1474. (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
  1475. (*callbacks_p)->callback = callback;
  1476. (*callbacks_p)->type = type;
  1477. (*callbacks_p)->time_ms = time_ms;
  1478. (*callbacks_p)->removed = false;
  1479. gettimeofday(&(*callbacks_p)->when, NULL);
  1480. timeval_add_time(&(*callbacks_p)->when, 0, time_ms * 1000);
  1481. (*callbacks_p)->priv = priv;
  1482. (*callbacks_p)->next = NULL;
  1483. return ERROR_OK;
  1484. }
  1485. int target_unregister_event_callback(int (*callback)(struct target *target,
  1486. enum target_event event, void *priv), void *priv)
  1487. {
  1488. struct target_event_callback **p = &target_event_callbacks;
  1489. struct target_event_callback *c = target_event_callbacks;
  1490. if (!callback)
  1491. return ERROR_COMMAND_SYNTAX_ERROR;
  1492. while (c) {
  1493. struct target_event_callback *next = c->next;
  1494. if ((c->callback == callback) && (c->priv == priv)) {
  1495. *p = next;
  1496. free(c);
  1497. return ERROR_OK;
  1498. } else
  1499. p = &(c->next);
  1500. c = next;
  1501. }
  1502. return ERROR_OK;
  1503. }
  1504. int target_unregister_reset_callback(int (*callback)(struct target *target,
  1505. enum target_reset_mode reset_mode, void *priv), void *priv)
  1506. {
  1507. struct target_reset_callback *entry;
  1508. if (!callback)
  1509. return ERROR_COMMAND_SYNTAX_ERROR;
  1510. list_for_each_entry(entry, &target_reset_callback_list, list) {
  1511. if (entry->callback == callback && entry->priv == priv) {
  1512. list_del(&entry->list);
  1513. free(entry);
  1514. break;
  1515. }
  1516. }
  1517. return ERROR_OK;
  1518. }
  1519. int target_unregister_trace_callback(int (*callback)(struct target *target,
  1520. size_t len, uint8_t *data, void *priv), void *priv)
  1521. {
  1522. struct target_trace_callback *entry;
  1523. if (!callback)
  1524. return ERROR_COMMAND_SYNTAX_ERROR;
  1525. list_for_each_entry(entry, &target_trace_callback_list, list) {
  1526. if (entry->callback == callback && entry->priv == priv) {
  1527. list_del(&entry->list);
  1528. free(entry);
  1529. break;
  1530. }
  1531. }
  1532. return ERROR_OK;
  1533. }
  1534. int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
  1535. {
  1536. if (!callback)
  1537. return ERROR_COMMAND_SYNTAX_ERROR;
  1538. for (struct target_timer_callback *c = target_timer_callbacks;
  1539. c; c = c->next) {
  1540. if ((c->callback == callback) && (c->priv == priv)) {
  1541. c->removed = true;
  1542. return ERROR_OK;
  1543. }
  1544. }
  1545. return ERROR_FAIL;
  1546. }
  1547. int target_call_event_callbacks(struct target *target, enum target_event event)
  1548. {
  1549. struct target_event_callback *callback = target_event_callbacks;
  1550. struct target_event_callback *next_callback;
  1551. if (event == TARGET_EVENT_HALTED) {
  1552. /* execute early halted first */
  1553. target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
  1554. }
  1555. LOG_DEBUG("target event %i (%s) for core %s", event,
  1556. jim_nvp_value2name_simple(nvp_target_event, event)->name,
  1557. target_name(target));
  1558. target_handle_event(target, event);
  1559. while (callback) {
  1560. next_callback = callback->next;
  1561. callback->callback(target, event, callback->priv);
  1562. callback = next_callback;
  1563. }
  1564. return ERROR_OK;
  1565. }
  1566. int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
  1567. {
  1568. struct target_reset_callback *callback;
  1569. LOG_DEBUG("target reset %i (%s)", reset_mode,
  1570. jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
  1571. list_for_each_entry(callback, &target_reset_callback_list, list)
  1572. callback->callback(target, reset_mode, callback->priv);
  1573. return ERROR_OK;
  1574. }
/** Deliver @a len bytes of captured trace @a data to every registered
 * trace callback. */
int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
{
	struct target_trace_callback *callback;

	list_for_each_entry(callback, &target_trace_callback_list, list)
		callback->callback(target, len, data, callback->priv);

	return ERROR_OK;
}
  1582. static int target_timer_callback_periodic_restart(
  1583. struct target_timer_callback *cb, struct timeval *now)
  1584. {
  1585. cb->when = *now;
  1586. timeval_add_time(&cb->when, 0, cb->time_ms * 1000L);
  1587. return ERROR_OK;
  1588. }
  1589. static int target_call_timer_callback(struct target_timer_callback *cb,
  1590. struct timeval *now)
  1591. {
  1592. cb->callback(cb->priv);
  1593. if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
  1594. return target_timer_callback_periodic_restart(cb, now);
  1595. return target_unregister_timer_callback(cb->callback, cb->priv);
  1596. }
/**
 * Walk the timer callback list, firing every callback that is due.
 *
 * @param checktime When non-zero, a callback fires only once its
 * deadline has passed; when zero, periodic callbacks fire
 * unconditionally (used by target_call_timer_callbacks_now()).
 *
 * Entries flagged 'removed' are unlinked and freed here, which is why
 * the walk keeps a pointer to the link field instead of the node.  A
 * static flag prevents re-entry, since a callback could otherwise end
 * up back in this function.
 */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	struct timeval now;
	gettimeofday(&now, NULL);

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		if ((*callback)->removed) {
			/* Unlink and free a lazily-removed entry; do not
			 * advance — *callback is already the next node. */
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 timeval_compare(&now, &(*callback)->when) >= 0);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
/* Fire all timer callbacks whose deadline has passed. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
/* invoke periodic callbacks immediately, regardless of their deadline */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
  1637. /* Prints the working area layout for debug purposes */
  1638. static void print_wa_layout(struct target *target)
  1639. {
  1640. struct working_area *c = target->working_areas;
  1641. while (c) {
  1642. LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
  1643. c->backup ? 'b' : ' ', c->free ? ' ' : '*',
  1644. c->address, c->address + c->size - 1, c->size);
  1645. c = c->next;
  1646. }
  1647. }
  1648. /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
  1649. static void target_split_working_area(struct working_area *area, uint32_t size)
  1650. {
  1651. assert(area->free); /* Shouldn't split an allocated area */
  1652. assert(size <= area->size); /* Caller should guarantee this */
  1653. /* Split only if not already the right size */
  1654. if (size < area->size) {
  1655. struct working_area *new_wa = malloc(sizeof(*new_wa));
  1656. if (!new_wa)
  1657. return;
  1658. new_wa->next = area->next;
  1659. new_wa->size = area->size - size;
  1660. new_wa->address = area->address + size;
  1661. new_wa->backup = NULL;
  1662. new_wa->user = NULL;
  1663. new_wa->free = true;
  1664. area->next = new_wa;
  1665. area->size = size;
  1666. /* If backup memory was allocated to this area, it has the wrong size
  1667. * now so free it and it will be reallocated if/when needed */
  1668. free(area->backup);
  1669. area->backup = NULL;
  1670. }
  1671. }
/* Merge all adjacent free areas into one */
static void target_merge_working_areas(struct target *target)
{
	struct working_area *c = target->working_areas;

	while (c && c->next) {
		assert(c->next->address == c->address + c->size); /* This is an invariant */

		/* Find two adjacent free areas */
		if (c->free && c->next->free) {
			/* Merge the last into the first */
			c->size += c->next->size;

			/* Remove the last */
			struct working_area *to_be_freed = c->next;
			c->next = c->next->next;
			free(to_be_freed->backup);
			free(to_be_freed);

			/* If backup memory was allocated to the remaining area, it has
			 * the wrong size now */
			free(c->backup);
			c->backup = NULL;

			/* Stay on 'c': its new neighbour may be free as well. */
		} else {
			c = c->next;
		}
	}
}
/**
 * Allocate @a size bytes (rounded up to a multiple of 4) from the
 * target's working area pool.
 *
 * On the first call the pool is created: depending on MMU state the
 * physical (-work-area-phys) or virtual (-work-area-virt) base address
 * is chosen.  When backup_working_area is enabled, the original target
 * memory is read out so it can be restored when the area is freed.
 *
 * @param area Receives the allocated chunk; the location @a area itself
 * is remembered as the "user pointer" and is cleared on free.
 * @returns ERROR_OK, or ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no
 * free chunk is large enough (or no working area is configured).
 */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* NOTE(review): if malloc failed, working_areas stays NULL and
		 * the search below reports "not available" — confirm intended. */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	if (size % 4)
		size = (size + 3) & (~3UL);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
			size, c->address);

	if (target->backup_working_area) {
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
  1774. int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
  1775. {
  1776. int retval;
  1777. retval = target_alloc_working_area_try(target, size, area);
  1778. if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
  1779. LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
  1780. return retval;
  1781. }
  1782. static int target_restore_working_area(struct target *target, struct working_area *area)
  1783. {
  1784. int retval = ERROR_OK;
  1785. if (target->backup_working_area && area->backup) {
  1786. retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
  1787. if (retval != ERROR_OK)
  1788. LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
  1789. area->size, area->address);
  1790. }
  1791. return retval;
  1792. }
/* Restore the area's backup memory, if any, and return the area to the allocation pool */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	int retval = ERROR_OK;

	/* Freeing an already-free area is a no-op. */
	if (area->free)
		return retval;

	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
			area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}
/* Restore the area's backed-up memory (if any) and return it to the pool. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
/* free resources and restore memory, if restoring memory fails,
 * free up resources anyway
 */
static void target_free_all_working_areas_restore(struct target *target, int restore)
{
	struct working_area *c = target->working_areas;

	LOG_DEBUG("freeing all working areas");

	/* Loop through all areas, restoring the allocated ones and marking them as free */
	while (c) {
		if (!c->free) {
			if (restore)
				target_restore_working_area(target, c);
			c->free = true;
			*c->user = NULL; /* Same as above */
			c->user = NULL;
		}
		c = c->next;
	}

	/* Run a merge pass to combine all areas into one */
	target_merge_working_areas(target);

	print_wa_layout(target);
}
  1844. void target_free_all_working_areas(struct target *target)
  1845. {
  1846. target_free_all_working_areas_restore(target, 1);
  1847. /* Now we have none or only one working area marked as free */
  1848. if (target->working_areas) {
  1849. /* Free the last one to allow on-the-fly moving and resizing */
  1850. free(target->working_areas->backup);
  1851. free(target->working_areas);
  1852. target->working_areas = NULL;
  1853. }
  1854. }
  1855. /* Find the largest number of bytes that can be allocated */
  1856. uint32_t target_get_working_area_avail(struct target *target)
  1857. {
  1858. struct working_area *c = target->working_areas;
  1859. uint32_t max_size = 0;
  1860. if (!c)
  1861. return target->working_area_size;
  1862. while (c) {
  1863. if (c->free && max_size < c->size)
  1864. max_size = c->size;
  1865. c = c->next;
  1866. }
  1867. return max_size;
  1868. }
/* Tear down one target: run the type's deinit hook, then release every
 * resource the target owns (event actions, working areas, SMP list,
 * RTOS data, name strings, and the target struct itself). */
static void target_destroy(struct target *target)
{
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* Release each configured TCL event action. */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head = target->head;
		while (head) {
			struct target_list *pos = head->next;
			head->target->smp = 0;
			free(head);
			head = pos;
		}
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
/* Global shutdown: free all event and timer callback registrations,
 * then destroy every configured target. */
void target_quit(void)
{
	struct target_event_callback *pe = target_event_callbacks;
	while (pe) {
		struct target_event_callback *t = pe->next;
		free(pe);
		pe = t;
	}
	target_event_callbacks = NULL;

	struct target_timer_callback *pt = target_timer_callbacks;
	while (pt) {
		struct target_timer_callback *t = pt->next;
		free(pt);
		pt = t;
	}
	target_timer_callbacks = NULL;

	for (struct target *target = all_targets; target;) {
		/* Save the link first; target_destroy() frees the node. */
		struct target *tmp;

		tmp = target->next;
		target_destroy(target);
		target = tmp;
	}

	all_targets = NULL;
}
  1926. int target_arch_state(struct target *target)
  1927. {
  1928. int retval;
  1929. if (!target) {
  1930. LOG_WARNING("No target has been configured");
  1931. return ERROR_OK;
  1932. }
  1933. if (target->state != TARGET_HALTED)
  1934. return ERROR_OK;
  1935. retval = target->type->arch_state(target);
  1936. return retval;
  1937. }
  1938. static int target_get_gdb_fileio_info_default(struct target *target,
  1939. struct gdb_fileio_info *fileio_info)
  1940. {
  1941. /* If target does not support semi-hosting function, target
  1942. has no need to provide .get_gdb_fileio_info callback.
  1943. It just return ERROR_FAIL and gdb_server will return "Txx"
  1944. as target halted every time. */
  1945. return ERROR_FAIL;
  1946. }
  1947. static int target_gdb_fileio_end_default(struct target *target,
  1948. int retcode, int fileio_errno, bool ctrl_c)
  1949. {
  1950. return ERROR_OK;
  1951. }
/**
 * Fallback PC-sampling profiler: repeatedly halt the target, record the
 * cached "pc" register, and resume — until @a seconds have elapsed or
 * @a max_num_samples samples have been taken.
 *
 * @param samples Output buffer with room for max_num_samples entries.
 * @param num_samples Receives the number of samples actually taken.
 */
int target_profiling_default(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	struct timeval timeout, now;

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, seconds, 0);

	LOG_INFO("Starting profiling. Halting and resuming the"
			" target as often as we can...");

	uint32_t sample_count = 0;
	/* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
	struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);

	int retval = ERROR_OK;
	for (;;) {
		target_poll(target);
		if (target->state == TARGET_HALTED) {
			uint32_t t = buf_get_u32(reg->value, 0, 32);
			samples[sample_count++] = t;
			/* current pc, addr = 0, do not handle breakpoints, not debugging */
			retval = target_resume(target, 1, 0, 0, 0);
			target_poll(target);
			alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
		} else if (target->state == TARGET_RUNNING) {
			/* We want to quickly sample the PC. */
			retval = target_halt(target);
		} else {
			LOG_INFO("Target not halted or running");
			retval = ERROR_OK;
			break;
		}

		if (retval != ERROR_OK)
			break;

		gettimeofday(&now, NULL);
		if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
			LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
			break;
		}
	}

	*num_samples = sample_count;
	return retval;
}
/* Single aligned words are guaranteed to use 16 or 32 bit access
 * mode respectively, otherwise data is handled as quickly as
 * possible
 */
int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
{
	LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
			size, address);

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	/* Writing zero bytes trivially succeeds. */
	if (size == 0)
		return ERROR_OK;

	/* Reject ranges that wrap past the end of the address space. */
	if ((address + size - 1) < address) {
		/* GDB can request this when e.g. PC is 0xfffffffc */
		LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
				address,
				size);
		return ERROR_FAIL;
	}

	return target->type->write_buffer(target, address, size, buffer);
}
/* Default .write_buffer implementation: write leading unaligned bytes
 * with progressively wider single accesses until 'address' is aligned
 * to the target's data width, then write the bulk with the widest
 * possible access size, stepping the width down for the tail. */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		if (address & size) {
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
/* Single aligned words are guaranteed to use 16 or 32 bit access
 * mode respectively, otherwise data is handled as quickly as
 * possible
 */
int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
{
	LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
			size, address);

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	/* Reading zero bytes trivially succeeds. */
	if (size == 0)
		return ERROR_OK;

	/* Reject ranges that wrap past the end of the address space. */
	if ((address + size - 1) < address) {
		/* GDB can request this when e.g. PC is 0xfffffffc */
		LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
				address,
				size);
		return ERROR_FAIL;
	}

	return target->type->read_buffer(target, address, size, buffer);
}
/* Default .read_buffer implementation: mirror of
 * target_write_buffer_default() — read leading unaligned bytes with
 * single accesses until aligned, then the bulk at the widest access
 * size, stepping the width down for the tail. */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
/**
 * Compute a CRC over @a size bytes of target memory at @a address.
 *
 * Prefers the target type's own checksum_memory handler; if that
 * fails, falls back to reading the memory and checksumming on the
 * host via image_calculate_checksum().
 */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): get_u32 immediately followed by set_u32 with the
		 * same endianness reads like a round-trip; verify this swap is
		 * still required by image_calculate_checksum(). */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
  2137. int target_blank_check_memory(struct target *target,
  2138. struct target_memory_check_block *blocks, int num_blocks,
  2139. uint8_t erased_value)
  2140. {
  2141. if (!target_was_examined(target)) {
  2142. LOG_ERROR("Target not examined yet");
  2143. return ERROR_FAIL;
  2144. }
  2145. if (!target->type->blank_check_memory)
  2146. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  2147. return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
  2148. }
  2149. int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
  2150. {
  2151. uint8_t value_buf[8];
  2152. if (!target_was_examined(target)) {
  2153. LOG_ERROR("Target not examined yet");
  2154. return ERROR_FAIL;
  2155. }
  2156. int retval = target_read_memory(target, address, 8, 1, value_buf);
  2157. if (retval == ERROR_OK) {
  2158. *value = target_buffer_get_u64(target, value_buf);
  2159. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
  2160. address,
  2161. *value);
  2162. } else {
  2163. *value = 0x0;
  2164. LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
  2165. address);
  2166. }
  2167. return retval;
  2168. }
  2169. int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
  2170. {
  2171. uint8_t value_buf[4];
  2172. if (!target_was_examined(target)) {
  2173. LOG_ERROR("Target not examined yet");
  2174. return ERROR_FAIL;
  2175. }
  2176. int retval = target_read_memory(target, address, 4, 1, value_buf);
  2177. if (retval == ERROR_OK) {
  2178. *value = target_buffer_get_u32(target, value_buf);
  2179. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
  2180. address,
  2181. *value);
  2182. } else {
  2183. *value = 0x0;
  2184. LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
  2185. address);
  2186. }
  2187. return retval;
  2188. }
  2189. int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
  2190. {
  2191. uint8_t value_buf[2];
  2192. if (!target_was_examined(target)) {
  2193. LOG_ERROR("Target not examined yet");
  2194. return ERROR_FAIL;
  2195. }
  2196. int retval = target_read_memory(target, address, 2, 1, value_buf);
  2197. if (retval == ERROR_OK) {
  2198. *value = target_buffer_get_u16(target, value_buf);
  2199. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
  2200. address,
  2201. *value);
  2202. } else {
  2203. *value = 0x0;
  2204. LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
  2205. address);
  2206. }
  2207. return retval;
  2208. }
  2209. int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
  2210. {
  2211. if (!target_was_examined(target)) {
  2212. LOG_ERROR("Target not examined yet");
  2213. return ERROR_FAIL;
  2214. }
  2215. int retval = target_read_memory(target, address, 1, 1, value);
  2216. if (retval == ERROR_OK) {
  2217. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
  2218. address,
  2219. *value);
  2220. } else {
  2221. *value = 0x0;
  2222. LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
  2223. address);
  2224. }
  2225. return retval;
  2226. }
  2227. int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
  2228. {
  2229. int retval;
  2230. uint8_t value_buf[8];
  2231. if (!target_was_examined(target)) {
  2232. LOG_ERROR("Target not examined yet");
  2233. return ERROR_FAIL;
  2234. }
  2235. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
  2236. address,
  2237. value);
  2238. target_buffer_set_u64(target, value_buf, value);
  2239. retval = target_write_memory(target, address, 8, 1, value_buf);
  2240. if (retval != ERROR_OK)
  2241. LOG_DEBUG("failed: %i", retval);
  2242. return retval;
  2243. }
  2244. int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
  2245. {
  2246. int retval;
  2247. uint8_t value_buf[4];
  2248. if (!target_was_examined(target)) {
  2249. LOG_ERROR("Target not examined yet");
  2250. return ERROR_FAIL;
  2251. }
  2252. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
  2253. address,
  2254. value);
  2255. target_buffer_set_u32(target, value_buf, value);
  2256. retval = target_write_memory(target, address, 4, 1, value_buf);
  2257. if (retval != ERROR_OK)
  2258. LOG_DEBUG("failed: %i", retval);
  2259. return retval;
  2260. }
  2261. int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
  2262. {
  2263. int retval;
  2264. uint8_t value_buf[2];
  2265. if (!target_was_examined(target)) {
  2266. LOG_ERROR("Target not examined yet");
  2267. return ERROR_FAIL;
  2268. }
  2269. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
  2270. address,
  2271. value);
  2272. target_buffer_set_u16(target, value_buf, value);
  2273. retval = target_write_memory(target, address, 2, 1, value_buf);
  2274. if (retval != ERROR_OK)
  2275. LOG_DEBUG("failed: %i", retval);
  2276. return retval;
  2277. }
  2278. int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
  2279. {
  2280. int retval;
  2281. if (!target_was_examined(target)) {
  2282. LOG_ERROR("Target not examined yet");
  2283. return ERROR_FAIL;
  2284. }
  2285. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
  2286. address, value);
  2287. retval = target_write_memory(target, address, 1, 1, &value);
  2288. if (retval != ERROR_OK)
  2289. LOG_DEBUG("failed: %i", retval);
  2290. return retval;
  2291. }
  2292. int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
  2293. {
  2294. int retval;
  2295. uint8_t value_buf[8];
  2296. if (!target_was_examined(target)) {
  2297. LOG_ERROR("Target not examined yet");
  2298. return ERROR_FAIL;
  2299. }
  2300. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
  2301. address,
  2302. value);
  2303. target_buffer_set_u64(target, value_buf, value);
  2304. retval = target_write_phys_memory(target, address, 8, 1, value_buf);
  2305. if (retval != ERROR_OK)
  2306. LOG_DEBUG("failed: %i", retval);
  2307. return retval;
  2308. }
  2309. int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
  2310. {
  2311. int retval;
  2312. uint8_t value_buf[4];
  2313. if (!target_was_examined(target)) {
  2314. LOG_ERROR("Target not examined yet");
  2315. return ERROR_FAIL;
  2316. }
  2317. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
  2318. address,
  2319. value);
  2320. target_buffer_set_u32(target, value_buf, value);
  2321. retval = target_write_phys_memory(target, address, 4, 1, value_buf);
  2322. if (retval != ERROR_OK)
  2323. LOG_DEBUG("failed: %i", retval);
  2324. return retval;
  2325. }
  2326. int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
  2327. {
  2328. int retval;
  2329. uint8_t value_buf[2];
  2330. if (!target_was_examined(target)) {
  2331. LOG_ERROR("Target not examined yet");
  2332. return ERROR_FAIL;
  2333. }
  2334. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
  2335. address,
  2336. value);
  2337. target_buffer_set_u16(target, value_buf, value);
  2338. retval = target_write_phys_memory(target, address, 2, 1, value_buf);
  2339. if (retval != ERROR_OK)
  2340. LOG_DEBUG("failed: %i", retval);
  2341. return retval;
  2342. }
  2343. int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
  2344. {
  2345. int retval;
  2346. if (!target_was_examined(target)) {
  2347. LOG_ERROR("Target not examined yet");
  2348. return ERROR_FAIL;
  2349. }
  2350. LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
  2351. address, value);
  2352. retval = target_write_phys_memory(target, address, 1, 1, &value);
  2353. if (retval != ERROR_OK)
  2354. LOG_DEBUG("failed: %i", retval);
  2355. return retval;
  2356. }
  2357. static int find_target(struct command_invocation *cmd, const char *name)
  2358. {
  2359. struct target *target = get_target(name);
  2360. if (!target) {
  2361. command_print(cmd, "Target: %s is unknown, try one of:\n", name);
  2362. return ERROR_FAIL;
  2363. }
  2364. if (!target->tap->enabled) {
  2365. command_print(cmd, "Target: TAP %s is disabled, "
  2366. "can't be the current target\n",
  2367. target->tap->dotted_name);
  2368. return ERROR_FAIL;
  2369. }
  2370. cmd->ctx->current_target = target;
  2371. if (cmd->ctx->current_target_override)
  2372. cmd->ctx->current_target_override = target;
  2373. return ERROR_OK;
  2374. }
/* "targets" command: with one argument, select that target as current;
 * with no arguments (or when selection fails) print the target table,
 * marking the current target with '*'. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	struct target *target = all_targets;
	command_print(CMD, " TargetName Type Endian TapName State ");
	command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);

		target = target->next;
	}

	return retval;
}
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* Latest raw readings from the adapter's sense lines. */
static int power_dropout;
static int srst_asserted;

/* Edge flags: set by sense_handler(), consumed (and then cleared) by
 * handle_target(), which runs the corresponding TCL procs. */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
/* Sample the adapter's power-dropout and SRST sense lines and set the
 * run_* flags on the relevant edges.  Power-dropout and SRST-deassert
 * events are rate-limited to at most one per 2 seconds. */
static int sense_handler(void)
{
	/* Previous samples, kept across calls for edge detection. */
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	static int64_t last_power;
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	static int64_t last_srst;
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
/* process target state changes
 *
 * Periodic callback: run the SRST/power event procs detected by
 * sense_handler() (guarded against recursion, since those Tcl procs can
 * themselves trigger new SRST/power events), then poll every enabled,
 * examined target, backing off exponentially on poll failures. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */
		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		/* exponential backoff: skip this round if we are still inside
		 * the back-off window from an earlier failure */
		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			/* backoff.times > 0 means the poll failed now or during the
			 * current back-off window: re-examine before polling again */
			if (target->backoff.times > 0) {
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target->examined = true;
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
							target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
/* Implements the "reg" command:
 *   reg                     - list all registers of the current target
 *   reg <num|name>          - print one register (reading it if not cached)
 *   reg <num|name> force    - discard the cached value and re-read it
 *   reg <num|name> <value>  - write a new value to the register
 */
COMMAND_HANDLER(handle_reg_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	struct reg *reg = NULL;

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		unsigned int count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* skip registers the target reports as absent or hidden */
				if (reg->exist == false || reg->hidden)
					continue;
				/* only print cached values if they are valid */
				if (reg->valid) {
					char *value = buf_to_hex_str(reg->value,
							reg->size);
					command_print(CMD,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk every cache, counting registers until we hit ordinal num */
		struct reg_cache *cache = target->reg_cache;

		unsigned int count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);

		if (!reg)
			goto not_found;
	}

	assert(reg); /* give clang a hint that we *know* reg is != NULL here */

	if (!reg->exist)
		goto not_found;

	/* display a register: no second arg, or a non-numeric one ("force") */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		/* "force" invalidates the cached value so it is re-read below */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0) {
			int retval = reg->type->get(reg);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not read register '%s'", reg->name);
				return retval;
			}
		}
		char *value = buf_to_hex_str(reg->value, reg->size);
		command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (!buf)
			return ERROR_FAIL;
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		int retval = reg->type->set(reg, buf);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not write to register '%s'", reg->name);
		} else {
			char *value = buf_to_hex_str(reg->value, reg->size);
			command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
			free(value);
		}

		free(buf);

		return retval;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;

not_found:
	command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
	return ERROR_OK;
}
  2668. COMMAND_HANDLER(handle_poll_command)
  2669. {
  2670. int retval = ERROR_OK;
  2671. struct target *target = get_current_target(CMD_CTX);
  2672. if (CMD_ARGC == 0) {
  2673. command_print(CMD, "background polling: %s",
  2674. jtag_poll_get_enabled() ? "on" : "off");
  2675. command_print(CMD, "TAP: %s (%s)",
  2676. target->tap->dotted_name,
  2677. target->tap->enabled ? "enabled" : "disabled");
  2678. if (!target->tap->enabled)
  2679. return ERROR_OK;
  2680. retval = target_poll(target);
  2681. if (retval != ERROR_OK)
  2682. return retval;
  2683. retval = target_arch_state(target);
  2684. if (retval != ERROR_OK)
  2685. return retval;
  2686. } else if (CMD_ARGC == 1) {
  2687. bool enable;
  2688. COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
  2689. jtag_poll_set_enabled(enable);
  2690. } else
  2691. return ERROR_COMMAND_SYNTAX_ERROR;
  2692. return retval;
  2693. }
  2694. COMMAND_HANDLER(handle_wait_halt_command)
  2695. {
  2696. if (CMD_ARGC > 1)
  2697. return ERROR_COMMAND_SYNTAX_ERROR;
  2698. unsigned ms = DEFAULT_HALT_TIMEOUT;
  2699. if (1 == CMD_ARGC) {
  2700. int retval = parse_uint(CMD_ARGV[0], &ms);
  2701. if (retval != ERROR_OK)
  2702. return ERROR_COMMAND_SYNTAX_ERROR;
  2703. }
  2704. struct target *target = get_current_target(CMD_CTX);
  2705. return target_wait_state(target, TARGET_HALTED, ms);
  2706. }
  2707. /* wait for target state to change. The trick here is to have a low
  2708. * latency for short waits and not to suck up all the CPU time
  2709. * on longer waits.
  2710. *
  2711. * After 500ms, keep_alive() is invoked
  2712. */
  2713. int target_wait_state(struct target *target, enum target_state state, int ms)
  2714. {
  2715. int retval;
  2716. int64_t then = 0, cur;
  2717. bool once = true;
  2718. for (;;) {
  2719. retval = target_poll(target);
  2720. if (retval != ERROR_OK)
  2721. return retval;
  2722. if (target->state == state)
  2723. break;
  2724. cur = timeval_ms();
  2725. if (once) {
  2726. once = false;
  2727. then = timeval_ms();
  2728. LOG_DEBUG("waiting for target %s...",
  2729. jim_nvp_value2name_simple(nvp_target_state, state)->name);
  2730. }
  2731. if (cur-then > 500)
  2732. keep_alive();
  2733. if ((cur-then) > ms) {
  2734. LOG_ERROR("timed out while waiting for target %s",
  2735. jim_nvp_value2name_simple(nvp_target_state, state)->name);
  2736. return ERROR_FAIL;
  2737. }
  2738. }
  2739. return ERROR_OK;
  2740. }
  2741. COMMAND_HANDLER(handle_halt_command)
  2742. {
  2743. LOG_DEBUG("-");
  2744. struct target *target = get_current_target(CMD_CTX);
  2745. target->verbose_halt_msg = true;
  2746. int retval = target_halt(target);
  2747. if (retval != ERROR_OK)
  2748. return retval;
  2749. if (CMD_ARGC == 1) {
  2750. unsigned wait_local;
  2751. retval = parse_uint(CMD_ARGV[0], &wait_local);
  2752. if (retval != ERROR_OK)
  2753. return ERROR_COMMAND_SYNTAX_ERROR;
  2754. if (!wait_local)
  2755. return ERROR_OK;
  2756. }
  2757. return CALL_COMMAND_HANDLER(handle_wait_halt_command);
  2758. }
  2759. COMMAND_HANDLER(handle_soft_reset_halt_command)
  2760. {
  2761. struct target *target = get_current_target(CMD_CTX);
  2762. LOG_USER("requesting target halt and executing a soft reset");
  2763. target_soft_reset_halt(target);
  2764. return ERROR_OK;
  2765. }
  2766. COMMAND_HANDLER(handle_reset_command)
  2767. {
  2768. if (CMD_ARGC > 1)
  2769. return ERROR_COMMAND_SYNTAX_ERROR;
  2770. enum target_reset_mode reset_mode = RESET_RUN;
  2771. if (CMD_ARGC == 1) {
  2772. const struct jim_nvp *n;
  2773. n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
  2774. if ((!n->name) || (n->value == RESET_UNKNOWN))
  2775. return ERROR_COMMAND_SYNTAX_ERROR;
  2776. reset_mode = n->value;
  2777. }
  2778. /* reset *all* targets */
  2779. return target_process_reset(CMD, reset_mode);
  2780. }
  2781. COMMAND_HANDLER(handle_resume_command)
  2782. {
  2783. int current = 1;
  2784. if (CMD_ARGC > 1)
  2785. return ERROR_COMMAND_SYNTAX_ERROR;
  2786. struct target *target = get_current_target(CMD_CTX);
  2787. /* with no CMD_ARGV, resume from current pc, addr = 0,
  2788. * with one arguments, addr = CMD_ARGV[0],
  2789. * handle breakpoints, not debugging */
  2790. target_addr_t addr = 0;
  2791. if (CMD_ARGC == 1) {
  2792. COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
  2793. current = 0;
  2794. }
  2795. return target_resume(target, current, addr, 1, 0);
  2796. }
  2797. COMMAND_HANDLER(handle_step_command)
  2798. {
  2799. if (CMD_ARGC > 1)
  2800. return ERROR_COMMAND_SYNTAX_ERROR;
  2801. LOG_DEBUG("-");
  2802. /* with no CMD_ARGV, step from current pc, addr = 0,
  2803. * with one argument addr = CMD_ARGV[0],
  2804. * handle breakpoints, debugging */
  2805. target_addr_t addr = 0;
  2806. int current_pc = 1;
  2807. if (CMD_ARGC == 1) {
  2808. COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
  2809. current_pc = 0;
  2810. }
  2811. struct target *target = get_current_target(CMD_CTX);
  2812. return target_step(target, current_pc, addr, 1);
  2813. }
  2814. void target_handle_md_output(struct command_invocation *cmd,
  2815. struct target *target, target_addr_t address, unsigned size,
  2816. unsigned count, const uint8_t *buffer)
  2817. {
  2818. const unsigned line_bytecnt = 32;
  2819. unsigned line_modulo = line_bytecnt / size;
  2820. char output[line_bytecnt * 4 + 1];
  2821. unsigned output_len = 0;
  2822. const char *value_fmt;
  2823. switch (size) {
  2824. case 8:
  2825. value_fmt = "%16.16"PRIx64" ";
  2826. break;
  2827. case 4:
  2828. value_fmt = "%8.8"PRIx64" ";
  2829. break;
  2830. case 2:
  2831. value_fmt = "%4.4"PRIx64" ";
  2832. break;
  2833. case 1:
  2834. value_fmt = "%2.2"PRIx64" ";
  2835. break;
  2836. default:
  2837. /* "can't happen", caller checked */
  2838. LOG_ERROR("invalid memory read size: %u", size);
  2839. return;
  2840. }
  2841. for (unsigned i = 0; i < count; i++) {
  2842. if (i % line_modulo == 0) {
  2843. output_len += snprintf(output + output_len,
  2844. sizeof(output) - output_len,
  2845. TARGET_ADDR_FMT ": ",
  2846. (address + (i * size)));
  2847. }
  2848. uint64_t value = 0;
  2849. const uint8_t *value_ptr = buffer + i * size;
  2850. switch (size) {
  2851. case 8:
  2852. value = target_buffer_get_u64(target, value_ptr);
  2853. break;
  2854. case 4:
  2855. value = target_buffer_get_u32(target, value_ptr);
  2856. break;
  2857. case 2:
  2858. value = target_buffer_get_u16(target, value_ptr);
  2859. break;
  2860. case 1:
  2861. value = *value_ptr;
  2862. }
  2863. output_len += snprintf(output + output_len,
  2864. sizeof(output) - output_len,
  2865. value_fmt, value);
  2866. if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
  2867. command_print(cmd, "%s", output);
  2868. output_len = 0;
  2869. }
  2870. }
  2871. }
  2872. COMMAND_HANDLER(handle_md_command)
  2873. {
  2874. if (CMD_ARGC < 1)
  2875. return ERROR_COMMAND_SYNTAX_ERROR;
  2876. unsigned size = 0;
  2877. switch (CMD_NAME[2]) {
  2878. case 'd':
  2879. size = 8;
  2880. break;
  2881. case 'w':
  2882. size = 4;
  2883. break;
  2884. case 'h':
  2885. size = 2;
  2886. break;
  2887. case 'b':
  2888. size = 1;
  2889. break;
  2890. default:
  2891. return ERROR_COMMAND_SYNTAX_ERROR;
  2892. }
  2893. bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
  2894. int (*fn)(struct target *target,
  2895. target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
  2896. if (physical) {
  2897. CMD_ARGC--;
  2898. CMD_ARGV++;
  2899. fn = target_read_phys_memory;
  2900. } else
  2901. fn = target_read_memory;
  2902. if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
  2903. return ERROR_COMMAND_SYNTAX_ERROR;
  2904. target_addr_t address;
  2905. COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
  2906. unsigned count = 1;
  2907. if (CMD_ARGC == 2)
  2908. COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
  2909. uint8_t *buffer = calloc(count, size);
  2910. if (!buffer) {
  2911. LOG_ERROR("Failed to allocate md read buffer");
  2912. return ERROR_FAIL;
  2913. }
  2914. struct target *target = get_current_target(CMD_CTX);
  2915. int retval = fn(target, address, size, count, buffer);
  2916. if (retval == ERROR_OK)
  2917. target_handle_md_output(CMD, target, address, size, count, buffer);
  2918. free(buffer);
  2919. return retval;
  2920. }
/* Signature shared by target_write_memory() and target_write_phys_memory(),
 * letting the mw/fill helpers be parameterized over the address space. */
typedef int (*target_write_fn)(struct target *target,
		target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
  2923. static int target_fill_mem(struct target *target,
  2924. target_addr_t address,
  2925. target_write_fn fn,
  2926. unsigned data_size,
  2927. /* value */
  2928. uint64_t b,
  2929. /* count */
  2930. unsigned c)
  2931. {
  2932. /* We have to write in reasonably large chunks to be able
  2933. * to fill large memory areas with any sane speed */
  2934. const unsigned chunk_size = 16384;
  2935. uint8_t *target_buf = malloc(chunk_size * data_size);
  2936. if (!target_buf) {
  2937. LOG_ERROR("Out of memory");
  2938. return ERROR_FAIL;
  2939. }
  2940. for (unsigned i = 0; i < chunk_size; i++) {
  2941. switch (data_size) {
  2942. case 8:
  2943. target_buffer_set_u64(target, target_buf + i * data_size, b);
  2944. break;
  2945. case 4:
  2946. target_buffer_set_u32(target, target_buf + i * data_size, b);
  2947. break;
  2948. case 2:
  2949. target_buffer_set_u16(target, target_buf + i * data_size, b);
  2950. break;
  2951. case 1:
  2952. target_buffer_set_u8(target, target_buf + i * data_size, b);
  2953. break;
  2954. default:
  2955. exit(-1);
  2956. }
  2957. }
  2958. int retval = ERROR_OK;
  2959. for (unsigned x = 0; x < c; x += chunk_size) {
  2960. unsigned current;
  2961. current = c - x;
  2962. if (current > chunk_size)
  2963. current = chunk_size;
  2964. retval = fn(target, address + x * data_size, data_size, current, target_buf);
  2965. if (retval != ERROR_OK)
  2966. break;
  2967. /* avoid GDB timeouts */
  2968. keep_alive();
  2969. }
  2970. free(target_buf);
  2971. return retval;
  2972. }
  2973. COMMAND_HANDLER(handle_mw_command)
  2974. {
  2975. if (CMD_ARGC < 2)
  2976. return ERROR_COMMAND_SYNTAX_ERROR;
  2977. bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
  2978. target_write_fn fn;
  2979. if (physical) {
  2980. CMD_ARGC--;
  2981. CMD_ARGV++;
  2982. fn = target_write_phys_memory;
  2983. } else
  2984. fn = target_write_memory;
  2985. if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
  2986. return ERROR_COMMAND_SYNTAX_ERROR;
  2987. target_addr_t address;
  2988. COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
  2989. uint64_t value;
  2990. COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
  2991. unsigned count = 1;
  2992. if (CMD_ARGC == 3)
  2993. COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
  2994. struct target *target = get_current_target(CMD_CTX);
  2995. unsigned wordsize;
  2996. switch (CMD_NAME[2]) {
  2997. case 'd':
  2998. wordsize = 8;
  2999. break;
  3000. case 'w':
  3001. wordsize = 4;
  3002. break;
  3003. case 'h':
  3004. wordsize = 2;
  3005. break;
  3006. case 'b':
  3007. wordsize = 1;
  3008. break;
  3009. default:
  3010. return ERROR_COMMAND_SYNTAX_ERROR;
  3011. }
  3012. return target_fill_mem(target, address, fn, wordsize, value, count);
  3013. }
  3014. static COMMAND_HELPER(parse_load_image_command, struct image *image,
  3015. target_addr_t *min_address, target_addr_t *max_address)
  3016. {
  3017. if (CMD_ARGC < 1 || CMD_ARGC > 5)
  3018. return ERROR_COMMAND_SYNTAX_ERROR;
  3019. /* a base address isn't always necessary,
  3020. * default to 0x0 (i.e. don't relocate) */
  3021. if (CMD_ARGC >= 2) {
  3022. target_addr_t addr;
  3023. COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
  3024. image->base_address = addr;
  3025. image->base_address_set = true;
  3026. } else
  3027. image->base_address_set = false;
  3028. image->start_address_set = false;
  3029. if (CMD_ARGC >= 4)
  3030. COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
  3031. if (CMD_ARGC == 5) {
  3032. COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
  3033. /* use size (given) to find max (required) */
  3034. *max_address += *min_address;
  3035. }
  3036. if (*min_address > *max_address)
  3037. return ERROR_COMMAND_SYNTAX_ERROR;
  3038. return ERROR_OK;
  3039. }
/* Implements "load_image <file> [address [type [min_address [size]]]]":
 * load an image file into target memory, section by section, clipped to
 * the [min_address, max_address) window parsed by parse_load_image_command. */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* i.e. no upper clipping by default */
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* only write the part of the section overlapping [min, max) */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;
}
  3106. COMMAND_HANDLER(handle_dump_image_command)
  3107. {
  3108. struct fileio *fileio;
  3109. uint8_t *buffer;
  3110. int retval, retvaltemp;
  3111. target_addr_t address, size;
  3112. struct duration bench;
  3113. struct target *target = get_current_target(CMD_CTX);
  3114. if (CMD_ARGC != 3)
  3115. return ERROR_COMMAND_SYNTAX_ERROR;
  3116. COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
  3117. COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
  3118. uint32_t buf_size = (size > 4096) ? 4096 : size;
  3119. buffer = malloc(buf_size);
  3120. if (!buffer)
  3121. return ERROR_FAIL;
  3122. retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
  3123. if (retval != ERROR_OK) {
  3124. free(buffer);
  3125. return retval;
  3126. }
  3127. duration_start(&bench);
  3128. while (size > 0) {
  3129. size_t size_written;
  3130. uint32_t this_run_size = (size > buf_size) ? buf_size : size;
  3131. retval = target_read_buffer(target, address, this_run_size, buffer);
  3132. if (retval != ERROR_OK)
  3133. break;
  3134. retval = fileio_write(fileio, this_run_size, buffer, &size_written);
  3135. if (retval != ERROR_OK)
  3136. break;
  3137. size -= this_run_size;
  3138. address += this_run_size;
  3139. }
  3140. free(buffer);
  3141. if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
  3142. size_t filesize;
  3143. retval = fileio_size(fileio, &filesize);
  3144. if (retval != ERROR_OK)
  3145. return retval;
  3146. command_print(CMD,
  3147. "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
  3148. duration_elapsed(&bench), duration_kbps(&bench, filesize));
  3149. }
  3150. retvaltemp = fileio_close(fileio);
  3151. if (retvaltemp != ERROR_OK)
  3152. return retvaltemp;
  3153. return retval;
  3154. }
/* How thoroughly the verify_image-style commands compare image vs. memory;
 * passed to handle_verify_image_command_internal(). */
enum verify_mode {
	IMAGE_TEST = 0,		/* print section addresses/lengths only, no comparison */
	IMAGE_VERIFY = 1,	/* checksum compare, falling back to a binary diff */
	IMAGE_CHECKSUM_ONLY = 2	/* checksum compare; a mismatch is an immediate error */
};
  3160. static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
  3161. {
  3162. uint8_t *buffer;
  3163. size_t buf_cnt;
  3164. uint32_t image_size;
  3165. int retval;
  3166. uint32_t checksum = 0;
  3167. uint32_t mem_checksum = 0;
  3168. struct image image;
  3169. struct target *target = get_current_target(CMD_CTX);
  3170. if (CMD_ARGC < 1)
  3171. return ERROR_COMMAND_SYNTAX_ERROR;
  3172. if (!target) {
  3173. LOG_ERROR("no target selected");
  3174. return ERROR_FAIL;
  3175. }
  3176. struct duration bench;
  3177. duration_start(&bench);
  3178. if (CMD_ARGC >= 2) {
  3179. target_addr_t addr;
  3180. COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
  3181. image.base_address = addr;
  3182. image.base_address_set = true;
  3183. } else {
  3184. image.base_address_set = false;
  3185. image.base_address = 0x0;
  3186. }
  3187. image.start_address_set = false;
  3188. retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
  3189. if (retval != ERROR_OK)
  3190. return retval;
  3191. image_size = 0x0;
  3192. int diffs = 0;
  3193. retval = ERROR_OK;
  3194. for (unsigned int i = 0; i < image.num_sections; i++) {
  3195. buffer = malloc(image.sections[i].size);
  3196. if (!buffer) {
  3197. command_print(CMD,
  3198. "error allocating buffer for section (%" PRIu32 " bytes)",
  3199. image.sections[i].size);
  3200. break;
  3201. }
  3202. retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
  3203. if (retval != ERROR_OK) {
  3204. free(buffer);
  3205. break;
  3206. }
  3207. if (verify >= IMAGE_VERIFY) {
  3208. /* calculate checksum of image */
  3209. retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
  3210. if (retval != ERROR_OK) {
  3211. free(buffer);
  3212. break;
  3213. }
  3214. retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
  3215. if (retval != ERROR_OK) {
  3216. free(buffer);
  3217. break;
  3218. }
  3219. if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
  3220. LOG_ERROR("checksum mismatch");
  3221. free(buffer);
  3222. retval = ERROR_FAIL;
  3223. goto done;
  3224. }
  3225. if (checksum != mem_checksum) {
  3226. /* failed crc checksum, fall back to a binary compare */
  3227. uint8_t *data;
  3228. if (diffs == 0)
  3229. LOG_ERROR("checksum mismatch - attempting binary compare");
  3230. data = malloc(buf_cnt);
  3231. retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
  3232. if (retval == ERROR_OK) {
  3233. uint32_t t;
  3234. for (t = 0; t < buf_cnt; t++) {
  3235. if (data[t] != buffer[t]) {
  3236. command_print(CMD,
  3237. "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
  3238. diffs,
  3239. (unsigned)(t + image.sections[i].base_address),
  3240. data[t],
  3241. buffer[t]);
  3242. if (diffs++ >= 127) {
  3243. command_print(CMD, "More than 128 errors, the rest are not printed.");
  3244. free(data);
  3245. free(buffer);
  3246. goto done;
  3247. }
  3248. }
  3249. keep_alive();
  3250. }
  3251. }
  3252. free(data);
  3253. }
  3254. } else {
  3255. command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
  3256. image.sections[i].base_address,
  3257. buf_cnt);
  3258. }
  3259. free(buffer);
  3260. image_size += buf_cnt;
  3261. }
  3262. if (diffs > 0)
  3263. command_print(CMD, "No more differences found.");
  3264. done:
  3265. if (diffs > 0)
  3266. retval = ERROR_FAIL;
  3267. if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
  3268. command_print(CMD, "verified %" PRIu32 " bytes "
  3269. "in %fs (%0.3f KiB/s)", image_size,
  3270. duration_elapsed(&bench), duration_kbps(&bench, image_size));
  3271. }
  3272. image_close(&image);
  3273. return retval;
  3274. }
/* "verify_image_checksum": compare image vs. memory by CRC only;
 * any checksum mismatch fails immediately (no binary diff). */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
/* "verify_image": compare image vs. memory by CRC, falling back to a
 * byte-by-byte diff when a section checksum mismatches. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
/* "test_image": open the image and print each section's address and
 * length without comparing anything against target memory. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
  3287. static int handle_bp_command_list(struct command_invocation *cmd)
  3288. {
  3289. struct target *target = get_current_target(cmd->ctx);
  3290. struct breakpoint *breakpoint = target->breakpoints;
  3291. while (breakpoint) {
  3292. if (breakpoint->type == BKPT_SOFT) {
  3293. char *buf = buf_to_hex_str(breakpoint->orig_instr,
  3294. breakpoint->length);
  3295. command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
  3296. breakpoint->address,
  3297. breakpoint->length,
  3298. breakpoint->set, buf);
  3299. free(buf);
  3300. } else {
  3301. if ((breakpoint->address == 0) && (breakpoint->asid != 0))
  3302. command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
  3303. breakpoint->asid,
  3304. breakpoint->length, breakpoint->set);
  3305. else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
  3306. command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
  3307. breakpoint->address,
  3308. breakpoint->length, breakpoint->set);
  3309. command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
  3310. breakpoint->asid);
  3311. } else
  3312. command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
  3313. breakpoint->address,
  3314. breakpoint->length, breakpoint->set);
  3315. }
  3316. breakpoint = breakpoint->next;
  3317. }
  3318. return ERROR_OK;
  3319. }
  3320. static int handle_bp_command_set(struct command_invocation *cmd,
  3321. target_addr_t addr, uint32_t asid, uint32_t length, int hw)
  3322. {
  3323. struct target *target = get_current_target(cmd->ctx);
  3324. int retval;
  3325. if (asid == 0) {
  3326. retval = breakpoint_add(target, addr, length, hw);
  3327. /* error is always logged in breakpoint_add(), do not print it again */
  3328. if (retval == ERROR_OK)
  3329. command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
  3330. } else if (addr == 0) {
  3331. if (!target->type->add_context_breakpoint) {
  3332. LOG_ERROR("Context breakpoint not available");
  3333. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  3334. }
  3335. retval = context_breakpoint_add(target, asid, length, hw);
  3336. /* error is always logged in context_breakpoint_add(), do not print it again */
  3337. if (retval == ERROR_OK)
  3338. command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
  3339. } else {
  3340. if (!target->type->add_hybrid_breakpoint) {
  3341. LOG_ERROR("Hybrid breakpoint not available");
  3342. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  3343. }
  3344. retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
  3345. /* error is always logged in hybrid_breakpoint_add(), do not print it again */
  3346. if (retval == ERROR_OK)
  3347. command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
  3348. }
  3349. return retval;
  3350. }
/* Implements the "bp" command:
 *   bp                         - list all breakpoints
 *   bp <addr> <len> [hw]       - software (default) or hardware breakpoint
 *   bp <asid> <len> hw_ctx     - context (ASID-matching) hardware breakpoint
 *   bp <addr> <asid> <len>     - hybrid hardware breakpoint
 */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
	case 0:
		return handle_bp_command_list(CMD);

	case 2:
		asid = 0;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	case 3:
		if (strcmp(CMD_ARGV[2], "hw") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			asid = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			addr = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		}
		/* fallthrough */
	case 4:
		/* four args (or an unrecognized third arg): hybrid breakpoint,
		 * parsed as <addr> <asid> <len> */
		hw = BKPT_HARD;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
  3390. COMMAND_HANDLER(handle_rbp_command)
  3391. {
  3392. if (CMD_ARGC != 1)
  3393. return ERROR_COMMAND_SYNTAX_ERROR;
  3394. struct target *target = get_current_target(CMD_CTX);
  3395. if (!strcmp(CMD_ARGV[0], "all")) {
  3396. breakpoint_remove_all(target);
  3397. } else {
  3398. target_addr_t addr;
  3399. COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
  3400. breakpoint_remove(target, addr);
  3401. }
  3402. return ERROR_OK;
  3403. }
/* "wp" command: with no arguments list watchpoints; otherwise add one.
 * Forms: wp <addr> <len> [r|w|a [value [mask]]]
 */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	/* no arguments: dump the current watchpoint list and return */
	if (CMD_ARGC == 0) {
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx32
					", mask: 0x%8.8" PRIx32,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	/* defaults for the optional trailing arguments */
	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint32_t data_value = 0x0;
	uint32_t data_mask = 0xffffffff;

	/* the cases deliberately cascade: each optional trailing argument
	 * is parsed, then control falls through to the shorter form */
	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
		/* fall through */
	case 3:
		/* access mode is selected by the first character only */
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
  3464. COMMAND_HANDLER(handle_rwp_command)
  3465. {
  3466. if (CMD_ARGC != 1)
  3467. return ERROR_COMMAND_SYNTAX_ERROR;
  3468. target_addr_t addr;
  3469. COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
  3470. struct target *target = get_current_target(CMD_CTX);
  3471. watchpoint_remove(target, addr);
  3472. return ERROR_OK;
  3473. }
  3474. /**
  3475. * Translate a virtual address to a physical address.
  3476. *
  3477. * The low-level target implementation must have logged a detailed error
  3478. * which is forwarded to telnet/GDB session.
  3479. */
  3480. COMMAND_HANDLER(handle_virt2phys_command)
  3481. {
  3482. if (CMD_ARGC != 1)
  3483. return ERROR_COMMAND_SYNTAX_ERROR;
  3484. target_addr_t va;
  3485. COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
  3486. target_addr_t pa;
  3487. struct target *target = get_current_target(CMD_CTX);
  3488. int retval = target->type->virt2phys(target, va, &pa);
  3489. if (retval == ERROR_OK)
  3490. command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
  3491. return retval;
  3492. }
  3493. static void write_data(FILE *f, const void *data, size_t len)
  3494. {
  3495. size_t written = fwrite(data, 1, len, f);
  3496. if (written != len)
  3497. LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
  3498. }
  3499. static void write_long(FILE *f, int l, struct target *target)
  3500. {
  3501. uint8_t val[4];
  3502. target_buffer_set_u32(target, val, l);
  3503. write_data(f, val, 4);
  3504. }
  3505. static void write_string(FILE *f, char *s)
  3506. {
  3507. write_data(f, s, strlen(s));
  3508. }
typedef unsigned char UNIT[2]; /* unit of profiling */

/* Dump a gmon.out histogram file.
 *
 * Buckets the PC samples into a histogram covering [min, max) and emits
 * it in the gmon format understood by gprof: magic + version header,
 * a GMON_TAG_TIME_HIST record, the histogram header, then one 16-bit
 * little... target-endian counter per bucket.  Errors while opening the
 * file or allocating memory cause a silent early return.
 */
static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
		uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
{
	uint32_t i;
	FILE *f = fopen(filename, "w");
	if (!f)
		return;
	/* file header: magic cookie and format version */
	write_string(f, "gmon");
	write_long(f, 0x00000001, target); /* Version */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */

	uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
	write_data(f, &zero, 1);

	/* figure out bucket size: either the caller-supplied range or the
	 * min/max of the samples themselves */
	uint32_t min;
	uint32_t max;
	if (with_range) {
		min = start_address;
		max = end_address;
	} else {
		min = samples[0];
		max = samples[0];
		for (i = 0; i < sample_num; i++) {
			if (min > samples[i])
				min = samples[i];
			if (max < samples[i])
				max = samples[i];
		}

		/* max should be (largest sample + 1)
		 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
		max++;
	}

	int address_space = max - min;
	assert(address_space >= 2);

	/* FIXME: What is the reasonable number of buckets?
	 * The profiling result will be more accurate if there are enough buckets. */
	static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
	uint32_t num_buckets = address_space / sizeof(UNIT);
	if (num_buckets > max_buckets)
		num_buckets = max_buckets;
	int *buckets = malloc(sizeof(int) * num_buckets);
	if (!buckets) {
		fclose(f);
		return;
	}
	memset(buckets, 0, sizeof(int) * num_buckets);
	/* count samples into buckets; out-of-range samples are skipped */
	for (i = 0; i < sample_num; i++) {
		uint32_t address = samples[i];

		if ((address < min) || (max <= address))
			continue;

		/* widen to 64 bits before multiplying to avoid overflow */
		long long a = address - min;
		long long b = num_buckets;
		long long c = address_space;
		int index_t = (a * b) / c; /* danger!!!! int32 overflows */
		buckets[index_t]++;
	}

	/* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
	write_long(f, min, target); /* low_pc */
	write_long(f, max, target); /* high_pc */
	write_long(f, num_buckets, target); /* # of buckets */
	float sample_rate = sample_num / (duration_ms / 1000.0);
	write_long(f, sample_rate, target);
	write_string(f, "seconds");
	/* the dimension field is fixed at 15 bytes; pad with NULs */
	for (i = 0; i < (15-strlen("seconds")); i++)
		write_data(f, &zero, 1);
	write_string(f, "s");

	/* append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
	char *data = malloc(2 * num_buckets);
	if (data) {
		/* each bucket is stored as a 16-bit count, saturated at 65535 */
		for (i = 0; i < num_buckets; i++) {
			int val;
			val = buckets[i];
			if (val > 65535)
				val = 65535;
			data[i * 2] = val&0xff;
			data[i * 2 + 1] = (val >> 8) & 0xff;
		}
		free(buckets);
		write_data(f, data, num_buckets * 2);
		free(data);
	} else
		free(buckets);

	fclose(f);
}
/* profiling samples the CPU PC as quickly as OpenOCD is able,
 * which will be used as a random sampling of PC */
COMMAND_HANDLER(handle_profile_command)
{
	struct target *target = get_current_target(CMD_CTX);

	/* usage: profile <seconds/offset> <gmon.out file> [<start> <end>] */
	if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
		return ERROR_COMMAND_SYNTAX_ERROR;

	const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
	uint32_t offset;
	uint32_t num_of_samples;
	int retval = ERROR_OK;
	/* remember the pre-profiling run state so it can be restored below */
	bool halted_before_profiling = target->state == TARGET_HALTED;

	COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);

	uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
	if (!samples) {
		LOG_ERROR("No memory to store samples.");
		return ERROR_FAIL;
	}

	uint64_t timestart_ms = timeval_ms();
	/**
	 * Some cores let us sample the PC without the
	 * annoying halt/resume step; for example, ARMv7 PCSR.
	 * Provide a way to use that more efficient mechanism.
	 */
	retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
				&num_of_samples, offset);
	if (retval != ERROR_OK) {
		free(samples);
		return retval;
	}
	uint32_t duration_ms = timeval_ms() - timestart_ms;

	assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);

	/* refresh target->state before comparing it with the saved state */
	retval = target_poll(target);
	if (retval != ERROR_OK) {
		free(samples);
		return retval;
	}

	if (target->state == TARGET_RUNNING && halted_before_profiling) {
		/* The target was halted before we started and is running now. Halt it,
		 * for consistency. */
		retval = target_halt(target);
		if (retval != ERROR_OK) {
			free(samples);
			return retval;
		}
	} else if (target->state == TARGET_HALTED && !halted_before_profiling) {
		/* The target was running before we started and is halted now. Resume
		 * it, for consistency. */
		retval = target_resume(target, 1, 0, 0, 0);
		if (retval != ERROR_OK) {
			free(samples);
			return retval;
		}
	}

	/* poll once more so the reported state reflects the halt/resume */
	retval = target_poll(target);
	if (retval != ERROR_OK) {
		free(samples);
		return retval;
	}

	/* optional 4-argument form restricts the histogram address range */
	uint32_t start_address = 0;
	uint32_t end_address = 0;
	bool with_range = false;
	if (CMD_ARGC == 4) {
		with_range = true;
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
	}

	write_gmon(samples, num_of_samples, CMD_ARGV[1],
			with_range, start_address, end_address, target, duration_ms);
	command_print(CMD, "Wrote %s", CMD_ARGV[1]);

	free(samples);
	return retval;
}
/* Set Tcl variable "varname(idx)" to the 64-bit value val.
 * Returns JIM_OK on success, JIM_ERR on allocation or set failure.
 * The temporary name/value objects are ref-counted around
 * Jim_SetVariable() so the interpreter can safely take ownership. */
static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
{
	char *namebuf;
	Jim_Obj *obj_name, *obj_val;
	int result;

	/* build the "name(index)" form used by Tcl array elements */
	namebuf = alloc_printf("%s(%d)", varname, idx);
	if (!namebuf)
		return JIM_ERR;

	obj_name = Jim_NewStringObj(interp, namebuf, -1);
	jim_wide wide_val = val;
	obj_val = Jim_NewWideObj(interp, wide_val);
	if (!obj_name || !obj_val) {
		free(namebuf);
		return JIM_ERR;
	}

	Jim_IncrRefCount(obj_name);
	Jim_IncrRefCount(obj_val);
	result = Jim_SetVariable(interp, obj_name, obj_val);
	Jim_DecrRefCount(interp, obj_name);
	Jim_DecrRefCount(interp, obj_val);
	free(namebuf);
	/* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
	return result;
}
  3693. static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  3694. {
  3695. struct command_context *context;
  3696. struct target *target;
  3697. context = current_command_context(interp);
  3698. assert(context);
  3699. target = get_current_target(context);
  3700. if (!target) {
  3701. LOG_ERROR("mem2array: no current target");
  3702. return JIM_ERR;
  3703. }
  3704. return target_mem2array(interp, target, argc - 1, argv + 1);
  3705. }
/* Read target memory into a Tcl array variable, one element per item.
 * Reads are performed in buffer-sized chunks; each value is stored as
 * "varname(idx)" via new_u64_array_element().  Returns JIM_OK/JIM_ERR
 * and always leaves an (possibly empty) string result in the interp. */
static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
{
	int e;

	/* argv[0] = name of array to receive the data
	 * argv[1] = desired element width in bits
	 * argv[2] = memory address
	 * argv[3] = count of times to read
	 * argv[4] = optional "phys"
	 */
	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
		return JIM_ERR;
	}

	/* Arg 0: Name of the array variable */
	const char *varname = Jim_GetString(argv[0], NULL);

	/* Arg 1: Bit width of one element */
	long l;
	e = Jim_GetLong(interp, argv[1], &l);
	if (e != JIM_OK)
		return e;
	const unsigned int width_bits = l;

	if (width_bits != 8 &&
			width_bits != 16 &&
			width_bits != 32 &&
			width_bits != 64) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
		return JIM_ERR;
	}
	const unsigned int width = width_bits / 8;	/* element width in bytes */

	/* Arg 2: Memory address */
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[2], &wide_addr);
	if (e != JIM_OK)
		return e;
	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 3: Number of elements to read */
	e = Jim_GetLong(interp, argv[3], &l);
	if (e != JIM_OK)
		return e;
	size_t len = l;

	/* Arg 4: phys (use the physical address space, bypassing the MMU) */
	bool is_phys = false;
	if (argc > 4) {
		int str_len = 0;
		const char *phys = Jim_GetString(argv[4], &str_len);
		if (!strncmp(phys, "phys", str_len))
			is_phys = true;
		else
			return JIM_ERR;
	}

	/* Argument checks */
	if (len == 0) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
		return JIM_ERR;
	}
	/* reject address-range wrap-around */
	if ((addr + (len * width)) < addr) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
		return JIM_ERR;
	}
	if (len > 65536) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"mem2array: too large read request, exceeds 64K items", NULL);
		return JIM_ERR;
	}

	/* the address must be naturally aligned for the element width */
	if ((width == 1) ||
			((width == 2) && ((addr & 1) == 0)) ||
			((width == 4) && ((addr & 3) == 0)) ||
			((width == 8) && ((addr & 7) == 0))) {
		/* alignment correct */
	} else {
		char buf[100];
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
				addr,
				width);
		Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
		return JIM_ERR;
	}

	/* Transfer loop */

	/* index counter */
	size_t idx = 0;

	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);
	if (!buffer)
		return JIM_ERR;

	/* assume ok */
	e = JIM_OK;
	while (len) {
		/* Slurp... in buffer size chunks */
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */

		int retval;
		if (is_phys)
			retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_read_memory(target, addr, width, chunk_len, buffer);
		if (retval != ERROR_OK) {
			/* BOO !*/
			LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
					addr,
					width,
					chunk_len);
			Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
			Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
			e = JIM_ERR;
			break;
		} else {
			/* decode each element using the target's byte order and
			 * publish it as a Tcl array entry */
			for (size_t i = 0; i < chunk_len ; i++, idx++) {
				uint64_t v = 0;
				switch (width) {
					case 8:
						v = target_buffer_get_u64(target, &buffer[i*width]);
						break;
					case 4:
						v = target_buffer_get_u32(target, &buffer[i*width]);
						break;
					case 2:
						v = target_buffer_get_u16(target, &buffer[i*width]);
						break;
					case 1:
						v = buffer[i] & 0x0ff;
						break;
				}
				new_u64_array_element(interp, varname, idx, v);
			}
			len -= chunk_len;
			addr += chunk_len * width;
		}
	}

	free(buffer);

	Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));

	return e;
}
/* Fetch Tcl variable "varname(idx)" as a 64-bit value into *val.
 * Returns JIM_OK on success; JIM_ERR on allocation failure, missing
 * variable (an error message is left by JIM_ERRMSG), or a value that
 * does not parse as a wide integer. */
static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
{
	/* build the "name(index)" form used by Tcl array elements */
	char *namebuf = alloc_printf("%s(%zu)", varname, idx);
	if (!namebuf)
		return JIM_ERR;

	Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
	if (!obj_name) {
		free(namebuf);
		return JIM_ERR;
	}

	/* hold a reference across the variable lookup */
	Jim_IncrRefCount(obj_name);
	Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
	Jim_DecrRefCount(interp, obj_name);
	free(namebuf);
	if (!obj_val)
		return JIM_ERR;

	jim_wide wide_val;
	int result = Jim_GetWide(interp, obj_val, &wide_val);
	*val = wide_val;
	return result;
}
  3865. static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  3866. {
  3867. struct command_context *context;
  3868. struct target *target;
  3869. context = current_command_context(interp);
  3870. assert(context);
  3871. target = get_current_target(context);
  3872. if (!target) {
  3873. LOG_ERROR("array2mem: no current target");
  3874. return JIM_ERR;
  3875. }
  3876. return target_array2mem(interp, target, argc-1, argv + 1);
  3877. }
  3878. static int target_array2mem(Jim_Interp *interp, struct target *target,
  3879. int argc, Jim_Obj *const *argv)
  3880. {
  3881. int e;
  3882. /* argv[0] = name of array from which to read the data
  3883. * argv[1] = desired element width in bits
  3884. * argv[2] = memory address
  3885. * argv[3] = number of elements to write
  3886. * argv[4] = optional "phys"
  3887. */
  3888. if (argc < 4 || argc > 5) {
  3889. Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
  3890. return JIM_ERR;
  3891. }
  3892. /* Arg 0: Name of the array variable */
  3893. const char *varname = Jim_GetString(argv[0], NULL);
  3894. /* Arg 1: Bit width of one element */
  3895. long l;
  3896. e = Jim_GetLong(interp, argv[1], &l);
  3897. if (e != JIM_OK)
  3898. return e;
  3899. const unsigned int width_bits = l;
  3900. if (width_bits != 8 &&
  3901. width_bits != 16 &&
  3902. width_bits != 32 &&
  3903. width_bits != 64) {
  3904. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3905. Jim_AppendStrings(interp, Jim_GetResult(interp),
  3906. "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
  3907. return JIM_ERR;
  3908. }
  3909. const unsigned int width = width_bits / 8;
  3910. /* Arg 2: Memory address */
  3911. jim_wide wide_addr;
  3912. e = Jim_GetWide(interp, argv[2], &wide_addr);
  3913. if (e != JIM_OK)
  3914. return e;
  3915. target_addr_t addr = (target_addr_t)wide_addr;
  3916. /* Arg 3: Number of elements to write */
  3917. e = Jim_GetLong(interp, argv[3], &l);
  3918. if (e != JIM_OK)
  3919. return e;
  3920. size_t len = l;
  3921. /* Arg 4: Phys */
  3922. bool is_phys = false;
  3923. if (argc > 4) {
  3924. int str_len = 0;
  3925. const char *phys = Jim_GetString(argv[4], &str_len);
  3926. if (!strncmp(phys, "phys", str_len))
  3927. is_phys = true;
  3928. else
  3929. return JIM_ERR;
  3930. }
  3931. /* Argument checks */
  3932. if (len == 0) {
  3933. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3934. Jim_AppendStrings(interp, Jim_GetResult(interp),
  3935. "array2mem: zero width read?", NULL);
  3936. return JIM_ERR;
  3937. }
  3938. if ((addr + (len * width)) < addr) {
  3939. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3940. Jim_AppendStrings(interp, Jim_GetResult(interp),
  3941. "array2mem: addr + len - wraps to zero?", NULL);
  3942. return JIM_ERR;
  3943. }
  3944. if (len > 65536) {
  3945. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3946. Jim_AppendStrings(interp, Jim_GetResult(interp),
  3947. "array2mem: too large memory write request, exceeds 64K items", NULL);
  3948. return JIM_ERR;
  3949. }
  3950. if ((width == 1) ||
  3951. ((width == 2) && ((addr & 1) == 0)) ||
  3952. ((width == 4) && ((addr & 3) == 0)) ||
  3953. ((width == 8) && ((addr & 7) == 0))) {
  3954. /* alignment correct */
  3955. } else {
  3956. char buf[100];
  3957. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  3958. sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
  3959. addr,
  3960. width);
  3961. Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
  3962. return JIM_ERR;
  3963. }
  3964. /* Transfer loop */
  3965. /* assume ok */
  3966. e = JIM_OK;
  3967. const size_t buffersize = 4096;
  3968. uint8_t *buffer = malloc(buffersize);
  3969. if (!buffer)
  3970. return JIM_ERR;
  3971. /* index counter */
  3972. size_t idx = 0;
  3973. while (len) {
  3974. /* Slurp... in buffer size chunks */
  3975. const unsigned int max_chunk_len = buffersize / width;
  3976. const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
  3977. /* Fill the buffer */
  3978. for (size_t i = 0; i < chunk_len; i++, idx++) {
  3979. uint64_t v = 0;
  3980. if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
  3981. free(buffer);
  3982. return JIM_ERR;
  3983. }
  3984. switch (width) {
  3985. case 8:
  3986. target_buffer_set_u64(target, &buffer[i * width], v);
  3987. break;
  3988. case 4:
  3989. target_buffer_set_u32(target, &buffer[i * width], v);
  3990. break;
  3991. case 2:
  3992. target_buffer_set_u16(target, &buffer[i * width], v);
  3993. break;
  3994. case 1:
  3995. buffer[i] = v & 0x0ff;
  3996. break;
  3997. }
  3998. }
  3999. len -= chunk_len;
  4000. /* Write the buffer to memory */
  4001. int retval;
  4002. if (is_phys)
  4003. retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
  4004. else
  4005. retval = target_write_memory(target, addr, width, chunk_len, buffer);
  4006. if (retval != ERROR_OK) {
  4007. /* BOO !*/
  4008. LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
  4009. addr,
  4010. width,
  4011. chunk_len);
  4012. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  4013. Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
  4014. e = JIM_ERR;
  4015. break;
  4016. }
  4017. addr += chunk_len * width;
  4018. }
  4019. free(buffer);
  4020. Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
  4021. return e;
  4022. }
/* FIX? should we propagate errors here rather than printing them
 * and continuing?
 */
/* Run every registered Tcl handler for event e on the given target.
 * Handler errors are reported to the user but not propagated, except
 * that a closed connection aborts processing immediately. */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
					target->target_number,
					target_name(target),
					target_type_name(target),
					e,
					jim_nvp_value2name_simple(nvp_target_event, e)->name,
					Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			cmd_ctx->current_target_override = saved_target_override;

			/* connection gone: stop processing further handlers */
			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* a script-level "return" is not an error */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
						jim_nvp_value2name_simple(nvp_target_event, e)->name,
						target_name(target),
						Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
  4064. /**
  4065. * Returns true only if the target has a handler for the specified event.
  4066. */
  4067. bool target_has_event_action(struct target *target, enum target_event event)
  4068. {
  4069. struct target_event_action *teap;
  4070. for (teap = target->event_action; teap; teap = teap->next) {
  4071. if (teap->event == event)
  4072. return true;
  4073. }
  4074. return false;
  4075. }
/* Option keys understood by target "configure"/"cget" parsing; each
 * value pairs with the matching "-..." name in nvp_config_opts[]. */
enum target_cfg_param {
	TCFG_TYPE,
	TCFG_EVENT,
	TCFG_WORK_AREA_VIRT,
	TCFG_WORK_AREA_PHYS,
	TCFG_WORK_AREA_SIZE,
	TCFG_WORK_AREA_BACKUP,
	TCFG_ENDIAN,
	TCFG_COREID,
	TCFG_CHAIN_POSITION,
	TCFG_DBGBASE,
	TCFG_RTOS,
	TCFG_DEFER_EXAMINE,
	TCFG_GDB_PORT,
	TCFG_GDB_MAX_CONNECTIONS,
};
/* Name/value table mapping "-option" strings to enum target_cfg_param;
 * the { NULL, -1 } entry terminates the list for the nvp lookups. */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type", .value = TCFG_TYPE },
	{ .name = "-event", .value = TCFG_EVENT },
	{ .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian", .value = TCFG_ENDIAN },
	{ .name = "-coreid", .value = TCFG_COREID },
	{ .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase", .value = TCFG_DBGBASE },
	{ .name = "-rtos", .value = TCFG_RTOS },
	{ .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port", .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
  4109. static int target_configure(struct jim_getopt_info *goi, struct target *target)
  4110. {
  4111. struct jim_nvp *n;
  4112. Jim_Obj *o;
  4113. jim_wide w;
  4114. int e;
  4115. /* parse config or cget options ... */
  4116. while (goi->argc > 0) {
  4117. Jim_SetEmptyResult(goi->interp);
  4118. /* jim_getopt_debug(goi); */
  4119. if (target->type->target_jim_configure) {
  4120. /* target defines a configure function */
  4121. /* target gets first dibs on parameters */
  4122. e = (*(target->type->target_jim_configure))(target, goi);
  4123. if (e == JIM_OK) {
  4124. /* more? */
  4125. continue;
  4126. }
  4127. if (e == JIM_ERR) {
  4128. /* An error */
  4129. return e;
  4130. }
  4131. /* otherwise we 'continue' below */
  4132. }
  4133. e = jim_getopt_nvp(goi, nvp_config_opts, &n);
  4134. if (e != JIM_OK) {
  4135. jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
  4136. return e;
  4137. }
  4138. switch (n->value) {
  4139. case TCFG_TYPE:
  4140. /* not settable */
  4141. if (goi->isconfigure) {
  4142. Jim_SetResultFormatted(goi->interp,
  4143. "not settable: %s", n->name);
  4144. return JIM_ERR;
  4145. } else {
  4146. no_params:
  4147. if (goi->argc != 0) {
  4148. Jim_WrongNumArgs(goi->interp,
  4149. goi->argc, goi->argv,
  4150. "NO PARAMS");
  4151. return JIM_ERR;
  4152. }
  4153. }
  4154. Jim_SetResultString(goi->interp,
  4155. target_type_name(target), -1);
  4156. /* loop for more */
  4157. break;
  4158. case TCFG_EVENT:
  4159. if (goi->argc == 0) {
  4160. Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
  4161. return JIM_ERR;
  4162. }
  4163. e = jim_getopt_nvp(goi, nvp_target_event, &n);
  4164. if (e != JIM_OK) {
  4165. jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
  4166. return e;
  4167. }
  4168. if (goi->isconfigure) {
  4169. if (goi->argc != 1) {
  4170. Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
  4171. return JIM_ERR;
  4172. }
  4173. } else {
  4174. if (goi->argc != 0) {
  4175. Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
  4176. return JIM_ERR;
  4177. }
  4178. }
  4179. {
  4180. struct target_event_action *teap;
  4181. teap = target->event_action;
  4182. /* replace existing? */
  4183. while (teap) {
  4184. if (teap->event == (enum target_event)n->value)
  4185. break;
  4186. teap = teap->next;
  4187. }
  4188. if (goi->isconfigure) {
  4189. /* START_DEPRECATED_TPIU */
  4190. if (n->value == TARGET_EVENT_TRACE_CONFIG)
  4191. LOG_INFO("DEPRECATED target event %s", n->name);
  4192. /* END_DEPRECATED_TPIU */
  4193. bool replace = true;
  4194. if (!teap) {
  4195. /* create new */
  4196. teap = calloc(1, sizeof(*teap));
  4197. replace = false;
  4198. }
  4199. teap->event = n->value;
  4200. teap->interp = goi->interp;
  4201. jim_getopt_obj(goi, &o);
  4202. if (teap->body)
  4203. Jim_DecrRefCount(teap->interp, teap->body);
  4204. teap->body = Jim_DuplicateObj(goi->interp, o);
  4205. /*
  4206. * FIXME:
  4207. * Tcl/TK - "tk events" have a nice feature.
  4208. * See the "BIND" command.
  4209. * We should support that here.
  4210. * You can specify %X and %Y in the event code.
  4211. * The idea is: %T - target name.
  4212. * The idea is: %N - target number
  4213. * The idea is: %E - event name.
  4214. */
  4215. Jim_IncrRefCount(teap->body);
  4216. if (!replace) {
  4217. /* add to head of event list */
  4218. teap->next = target->event_action;
  4219. target->event_action = teap;
  4220. }
  4221. Jim_SetEmptyResult(goi->interp);
  4222. } else {
  4223. /* get */
  4224. if (!teap)
  4225. Jim_SetEmptyResult(goi->interp);
  4226. else
  4227. Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
  4228. }
  4229. }
  4230. /* loop for more */
  4231. break;
  4232. case TCFG_WORK_AREA_VIRT:
  4233. if (goi->isconfigure) {
  4234. target_free_all_working_areas(target);
  4235. e = jim_getopt_wide(goi, &w);
  4236. if (e != JIM_OK)
  4237. return e;
  4238. target->working_area_virt = w;
  4239. target->working_area_virt_spec = true;
  4240. } else {
  4241. if (goi->argc != 0)
  4242. goto no_params;
  4243. }
  4244. Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
  4245. /* loop for more */
  4246. break;
  4247. case TCFG_WORK_AREA_PHYS:
  4248. if (goi->isconfigure) {
  4249. target_free_all_working_areas(target);
  4250. e = jim_getopt_wide(goi, &w);
  4251. if (e != JIM_OK)
  4252. return e;
  4253. target->working_area_phys = w;
  4254. target->working_area_phys_spec = true;
  4255. } else {
  4256. if (goi->argc != 0)
  4257. goto no_params;
  4258. }
  4259. Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
  4260. /* loop for more */
  4261. break;
  4262. case TCFG_WORK_AREA_SIZE:
  4263. if (goi->isconfigure) {
  4264. target_free_all_working_areas(target);
  4265. e = jim_getopt_wide(goi, &w);
  4266. if (e != JIM_OK)
  4267. return e;
  4268. target->working_area_size = w;
  4269. } else {
  4270. if (goi->argc != 0)
  4271. goto no_params;
  4272. }
  4273. Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
  4274. /* loop for more */
  4275. break;
  4276. case TCFG_WORK_AREA_BACKUP:
  4277. if (goi->isconfigure) {
  4278. target_free_all_working_areas(target);
  4279. e = jim_getopt_wide(goi, &w);
  4280. if (e != JIM_OK)
  4281. return e;
  4282. /* make this exactly 1 or 0 */
  4283. target->backup_working_area = (!!w);
  4284. } else {
  4285. if (goi->argc != 0)
  4286. goto no_params;
  4287. }
  4288. Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
  4289. /* loop for more e*/
  4290. break;
  4291. case TCFG_ENDIAN:
  4292. if (goi->isconfigure) {
  4293. e = jim_getopt_nvp(goi, nvp_target_endian, &n);
  4294. if (e != JIM_OK) {
  4295. jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
  4296. return e;
  4297. }
  4298. target->endianness = n->value;
  4299. } else {
  4300. if (goi->argc != 0)
  4301. goto no_params;
  4302. }
  4303. n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
  4304. if (!n->name) {
  4305. target->endianness = TARGET_LITTLE_ENDIAN;
  4306. n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
  4307. }
  4308. Jim_SetResultString(goi->interp, n->name, -1);
  4309. /* loop for more */
  4310. break;
  4311. case TCFG_COREID:
  4312. if (goi->isconfigure) {
  4313. e = jim_getopt_wide(goi, &w);
  4314. if (e != JIM_OK)
  4315. return e;
  4316. target->coreid = (int32_t)w;
  4317. } else {
  4318. if (goi->argc != 0)
  4319. goto no_params;
  4320. }
  4321. Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
  4322. /* loop for more */
  4323. break;
  4324. case TCFG_CHAIN_POSITION:
  4325. if (goi->isconfigure) {
  4326. Jim_Obj *o_t;
  4327. struct jtag_tap *tap;
  4328. if (target->has_dap) {
  4329. Jim_SetResultString(goi->interp,
  4330. "target requires -dap parameter instead of -chain-position!", -1);
  4331. return JIM_ERR;
  4332. }
  4333. target_free_all_working_areas(target);
  4334. e = jim_getopt_obj(goi, &o_t);
  4335. if (e != JIM_OK)
  4336. return e;
  4337. tap = jtag_tap_by_jim_obj(goi->interp, o_t);
  4338. if (!tap)
  4339. return JIM_ERR;
  4340. target->tap = tap;
  4341. target->tap_configured = true;
  4342. } else {
  4343. if (goi->argc != 0)
  4344. goto no_params;
  4345. }
  4346. Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
  4347. /* loop for more e*/
  4348. break;
  4349. case TCFG_DBGBASE:
  4350. if (goi->isconfigure) {
  4351. e = jim_getopt_wide(goi, &w);
  4352. if (e != JIM_OK)
  4353. return e;
  4354. target->dbgbase = (uint32_t)w;
  4355. target->dbgbase_set = true;
  4356. } else {
  4357. if (goi->argc != 0)
  4358. goto no_params;
  4359. }
  4360. Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
  4361. /* loop for more */
  4362. break;
  4363. case TCFG_RTOS:
  4364. /* RTOS */
  4365. {
  4366. int result = rtos_create(goi, target);
  4367. if (result != JIM_OK)
  4368. return result;
  4369. }
  4370. /* loop for more */
  4371. break;
  4372. case TCFG_DEFER_EXAMINE:
  4373. /* DEFER_EXAMINE */
  4374. target->defer_examine = true;
  4375. /* loop for more */
  4376. break;
  4377. case TCFG_GDB_PORT:
  4378. if (goi->isconfigure) {
  4379. struct command_context *cmd_ctx = current_command_context(goi->interp);
  4380. if (cmd_ctx->mode != COMMAND_CONFIG) {
  4381. Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
  4382. return JIM_ERR;
  4383. }
  4384. const char *s;
  4385. e = jim_getopt_string(goi, &s, NULL);
  4386. if (e != JIM_OK)
  4387. return e;
  4388. free(target->gdb_port_override);
  4389. target->gdb_port_override = strdup(s);
  4390. } else {
  4391. if (goi->argc != 0)
  4392. goto no_params;
  4393. }
  4394. Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
  4395. /* loop for more */
  4396. break;
  4397. case TCFG_GDB_MAX_CONNECTIONS:
  4398. if (goi->isconfigure) {
  4399. struct command_context *cmd_ctx = current_command_context(goi->interp);
  4400. if (cmd_ctx->mode != COMMAND_CONFIG) {
  4401. Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
  4402. return JIM_ERR;
  4403. }
  4404. e = jim_getopt_wide(goi, &w);
  4405. if (e != JIM_OK)
  4406. return e;
  4407. target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
  4408. } else {
  4409. if (goi->argc != 0)
  4410. goto no_params;
  4411. }
  4412. Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
  4413. break;
  4414. }
  4415. } /* while (goi->argc) */
  4416. /* done - we return */
  4417. return JIM_OK;
  4418. }
  4419. static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
  4420. {
  4421. struct command *c = jim_to_command(interp);
  4422. struct jim_getopt_info goi;
  4423. jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
  4424. goi.isconfigure = !strcmp(c->name, "configure");
  4425. if (goi.argc < 1) {
  4426. Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
  4427. "missing: -option ...");
  4428. return JIM_ERR;
  4429. }
  4430. struct command_context *cmd_ctx = current_command_context(interp);
  4431. assert(cmd_ctx);
  4432. struct target *target = get_current_target(cmd_ctx);
  4433. return target_configure(&goi, target);
  4434. }
  4435. static int jim_target_mem2array(Jim_Interp *interp,
  4436. int argc, Jim_Obj *const *argv)
  4437. {
  4438. struct command_context *cmd_ctx = current_command_context(interp);
  4439. assert(cmd_ctx);
  4440. struct target *target = get_current_target(cmd_ctx);
  4441. return target_mem2array(interp, target, argc - 1, argv + 1);
  4442. }
  4443. static int jim_target_array2mem(Jim_Interp *interp,
  4444. int argc, Jim_Obj *const *argv)
  4445. {
  4446. struct command_context *cmd_ctx = current_command_context(interp);
  4447. assert(cmd_ctx);
  4448. struct target *target = get_current_target(cmd_ctx);
  4449. return target_array2mem(interp, target, argc - 1, argv + 1);
  4450. }
  4451. static int jim_target_tap_disabled(Jim_Interp *interp)
  4452. {
  4453. Jim_SetResultFormatted(interp, "[TAP is disabled]");
  4454. return JIM_ERR;
  4455. }
/* "arp_examine" handler: run the target's examine() hook.
 * Optional argument "allow-defer" skips examination when the target was
 * configured with -defer-examine, so it can be examined manually later. */
static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	bool allow_defer = false;
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
	if (goi.argc > 1) {
		const char *cmd_name = Jim_GetString(argv[0], NULL);
		Jim_SetResultFormatted(goi.interp,
			"usage: %s ['allow-defer']", cmd_name);
		return JIM_ERR;
	}
	/* exactly one optional arg may remain; accept only "allow-defer" */
	if (goi.argc > 0 &&
	    strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
		/* consume it */
		Jim_Obj *obj;
		int e = jim_getopt_obj(&goi, &obj);
		if (e != JIM_OK)
			return e;
		allow_defer = true;
	}
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);
	/* honour -defer-examine only when the caller explicitly allows it */
	if (allow_defer && target->defer_examine) {
		LOG_INFO("Deferring arp_examine of %s", target_name(target));
		LOG_INFO("Use arp_examine command to examine it manually!");
		return JIM_OK;
	}
	int e = target->type->examine(target);
	if (e != ERROR_OK)
		return JIM_ERR;
	return JIM_OK;
}
  4491. static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
  4492. {
  4493. struct command_context *cmd_ctx = current_command_context(interp);
  4494. assert(cmd_ctx);
  4495. struct target *target = get_current_target(cmd_ctx);
  4496. Jim_SetResultBool(interp, target_was_examined(target));
  4497. return JIM_OK;
  4498. }
  4499. static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
  4500. {
  4501. struct command_context *cmd_ctx = current_command_context(interp);
  4502. assert(cmd_ctx);
  4503. struct target *target = get_current_target(cmd_ctx);
  4504. Jim_SetResultBool(interp, target->defer_examine);
  4505. return JIM_OK;
  4506. }
  4507. static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4508. {
  4509. if (argc != 1) {
  4510. Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
  4511. return JIM_ERR;
  4512. }
  4513. struct command_context *cmd_ctx = current_command_context(interp);
  4514. assert(cmd_ctx);
  4515. struct target *target = get_current_target(cmd_ctx);
  4516. if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
  4517. return JIM_ERR;
  4518. return JIM_OK;
  4519. }
  4520. static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4521. {
  4522. if (argc != 1) {
  4523. Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
  4524. return JIM_ERR;
  4525. }
  4526. struct command_context *cmd_ctx = current_command_context(interp);
  4527. assert(cmd_ctx);
  4528. struct target *target = get_current_target(cmd_ctx);
  4529. if (!target->tap->enabled)
  4530. return jim_target_tap_disabled(interp);
  4531. int e;
  4532. if (!(target_was_examined(target)))
  4533. e = ERROR_TARGET_NOT_EXAMINED;
  4534. else
  4535. e = target->type->poll(target);
  4536. if (e != ERROR_OK)
  4537. return JIM_ERR;
  4538. return JIM_OK;
  4539. }
/* "arp_reset" handler: assert or deassert reset on the current target.
 * Syntax: arp_reset (assert|deassert) HALT_BOOL
 * HALT_BOOL selects whether the target should halt out of reset. */
static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
	if (goi.argc != 2) {
		Jim_WrongNumArgs(interp, 0, argv,
			"([tT]|[fF]|assert|deassert) BOOL");
		return JIM_ERR;
	}
	/* assert/deassert keyword */
	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_assert, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
		return e;
	}
	/* the halt or not param */
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);
	if (!target->type->assert_reset || !target->type->deassert_reset) {
		Jim_SetResultFormatted(interp,
			"No target-specific reset for %s",
			target_name(target));
		return JIM_ERR;
	}
	/* a deferred-examine target must be re-examined after reset */
	if (target->defer_examine)
		target_reset_examined(target);
	/* determine if we should halt or not. */
	target->reset_halt = (a != 0);
	/* When this happens - all workareas are invalid. */
	target_free_all_working_areas_restore(target, 0);
	/* do the assert */
	if (n->value == NVP_ASSERT)
		e = target->type->assert_reset(target);
	else
		e = target->type->deassert_reset(target);
	return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
}
  4584. static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4585. {
  4586. if (argc != 1) {
  4587. Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
  4588. return JIM_ERR;
  4589. }
  4590. struct command_context *cmd_ctx = current_command_context(interp);
  4591. assert(cmd_ctx);
  4592. struct target *target = get_current_target(cmd_ctx);
  4593. if (!target->tap->enabled)
  4594. return jim_target_tap_disabled(interp);
  4595. int e = target->type->halt(target);
  4596. return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
  4597. }
/* "arp_waitstate" handler: block until the current target reaches the
 * named state or the timeout (milliseconds) expires. */
static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
	/* params: <name> statename timeoutmsecs */
	if (goi.argc != 2) {
		const char *cmd_name = Jim_GetString(argv[0], NULL);
		Jim_SetResultFormatted(goi.interp,
			"%s <state_name> <timeout_in_msec>", cmd_name);
		return JIM_ERR;
	}
	/* translate the state name into its enum value */
	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
		return e;
	}
	/* timeout, in milliseconds */
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);
	e = target_wait_state(target, n->value, a);
	if (e != ERROR_OK) {
		/* NOTE: "%#s" is Jim's conversion for a Jim_Obj * argument */
		Jim_Obj *obj = Jim_NewIntObj(interp, e);
		Jim_SetResultFormatted(goi.interp,
			"target: %s wait %s fails (%#s) %s",
			target_name(target), n->name,
			obj, target_strerror_safe(e));
		return JIM_ERR;
	}
	return JIM_OK;
}
  4635. /* List for human, Events defined for this target.
  4636. * scripts/programs should use 'name cget -event NAME'
  4637. */
  4638. COMMAND_HANDLER(handle_target_event_list)
  4639. {
  4640. struct target *target = get_current_target(CMD_CTX);
  4641. struct target_event_action *teap = target->event_action;
  4642. command_print(CMD, "Event actions for target (%d) %s\n",
  4643. target->target_number,
  4644. target_name(target));
  4645. command_print(CMD, "%-25s | Body", "Event");
  4646. command_print(CMD, "------------------------- | "
  4647. "----------------------------------------");
  4648. while (teap) {
  4649. struct jim_nvp *opt = jim_nvp_value2name_simple(nvp_target_event, teap->event);
  4650. command_print(CMD, "%-25s | %s",
  4651. opt->name, Jim_GetString(teap->body, NULL));
  4652. teap = teap->next;
  4653. }
  4654. command_print(CMD, "***END***");
  4655. return ERROR_OK;
  4656. }
  4657. static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4658. {
  4659. if (argc != 1) {
  4660. Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
  4661. return JIM_ERR;
  4662. }
  4663. struct command_context *cmd_ctx = current_command_context(interp);
  4664. assert(cmd_ctx);
  4665. struct target *target = get_current_target(cmd_ctx);
  4666. Jim_SetResultString(interp, target_state_name(target), -1);
  4667. return JIM_OK;
  4668. }
  4669. static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  4670. {
  4671. struct jim_getopt_info goi;
  4672. jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
  4673. if (goi.argc != 1) {
  4674. const char *cmd_name = Jim_GetString(argv[0], NULL);
  4675. Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
  4676. return JIM_ERR;
  4677. }
  4678. struct jim_nvp *n;
  4679. int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
  4680. if (e != JIM_OK) {
  4681. jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
  4682. return e;
  4683. }
  4684. struct command_context *cmd_ctx = current_command_context(interp);
  4685. assert(cmd_ctx);
  4686. struct target *target = get_current_target(cmd_ctx);
  4687. target_handle_event(target, n->value);
  4688. return JIM_OK;
  4689. }
  4690. static const struct command_registration target_instance_command_handlers[] = {
  4691. {
  4692. .name = "configure",
  4693. .mode = COMMAND_ANY,
  4694. .jim_handler = jim_target_configure,
  4695. .help = "configure a new target for use",
  4696. .usage = "[target_attribute ...]",
  4697. },
  4698. {
  4699. .name = "cget",
  4700. .mode = COMMAND_ANY,
  4701. .jim_handler = jim_target_configure,
  4702. .help = "returns the specified target attribute",
  4703. .usage = "target_attribute",
  4704. },
  4705. {
  4706. .name = "mwd",
  4707. .handler = handle_mw_command,
  4708. .mode = COMMAND_EXEC,
  4709. .help = "Write 64-bit word(s) to target memory",
  4710. .usage = "address data [count]",
  4711. },
  4712. {
  4713. .name = "mww",
  4714. .handler = handle_mw_command,
  4715. .mode = COMMAND_EXEC,
  4716. .help = "Write 32-bit word(s) to target memory",
  4717. .usage = "address data [count]",
  4718. },
  4719. {
  4720. .name = "mwh",
  4721. .handler = handle_mw_command,
  4722. .mode = COMMAND_EXEC,
  4723. .help = "Write 16-bit half-word(s) to target memory",
  4724. .usage = "address data [count]",
  4725. },
  4726. {
  4727. .name = "mwb",
  4728. .handler = handle_mw_command,
  4729. .mode = COMMAND_EXEC,
  4730. .help = "Write byte(s) to target memory",
  4731. .usage = "address data [count]",
  4732. },
  4733. {
  4734. .name = "mdd",
  4735. .handler = handle_md_command,
  4736. .mode = COMMAND_EXEC,
  4737. .help = "Display target memory as 64-bit words",
  4738. .usage = "address [count]",
  4739. },
  4740. {
  4741. .name = "mdw",
  4742. .handler = handle_md_command,
  4743. .mode = COMMAND_EXEC,
  4744. .help = "Display target memory as 32-bit words",
  4745. .usage = "address [count]",
  4746. },
  4747. {
  4748. .name = "mdh",
  4749. .handler = handle_md_command,
  4750. .mode = COMMAND_EXEC,
  4751. .help = "Display target memory as 16-bit half-words",
  4752. .usage = "address [count]",
  4753. },
  4754. {
  4755. .name = "mdb",
  4756. .handler = handle_md_command,
  4757. .mode = COMMAND_EXEC,
  4758. .help = "Display target memory as 8-bit bytes",
  4759. .usage = "address [count]",
  4760. },
  4761. {
  4762. .name = "array2mem",
  4763. .mode = COMMAND_EXEC,
  4764. .jim_handler = jim_target_array2mem,
  4765. .help = "Writes Tcl array of 8/16/32 bit numbers "
  4766. "to target memory",
  4767. .usage = "arrayname bitwidth address count",
  4768. },
  4769. {
  4770. .name = "mem2array",
  4771. .mode = COMMAND_EXEC,
  4772. .jim_handler = jim_target_mem2array,
  4773. .help = "Loads Tcl array of 8/16/32 bit numbers "
  4774. "from target memory",
  4775. .usage = "arrayname bitwidth address count",
  4776. },
  4777. {
  4778. .name = "eventlist",
  4779. .handler = handle_target_event_list,
  4780. .mode = COMMAND_EXEC,
  4781. .help = "displays a table of events defined for this target",
  4782. .usage = "",
  4783. },
  4784. {
  4785. .name = "curstate",
  4786. .mode = COMMAND_EXEC,
  4787. .jim_handler = jim_target_current_state,
  4788. .help = "displays the current state of this target",
  4789. },
  4790. {
  4791. .name = "arp_examine",
  4792. .mode = COMMAND_EXEC,
  4793. .jim_handler = jim_target_examine,
  4794. .help = "used internally for reset processing",
  4795. .usage = "['allow-defer']",
  4796. },
  4797. {
  4798. .name = "was_examined",
  4799. .mode = COMMAND_EXEC,
  4800. .jim_handler = jim_target_was_examined,
  4801. .help = "used internally for reset processing",
  4802. },
  4803. {
  4804. .name = "examine_deferred",
  4805. .mode = COMMAND_EXEC,
  4806. .jim_handler = jim_target_examine_deferred,
  4807. .help = "used internally for reset processing",
  4808. },
  4809. {
  4810. .name = "arp_halt_gdb",
  4811. .mode = COMMAND_EXEC,
  4812. .jim_handler = jim_target_halt_gdb,
  4813. .help = "used internally for reset processing to halt GDB",
  4814. },
  4815. {
  4816. .name = "arp_poll",
  4817. .mode = COMMAND_EXEC,
  4818. .jim_handler = jim_target_poll,
  4819. .help = "used internally for reset processing",
  4820. },
  4821. {
  4822. .name = "arp_reset",
  4823. .mode = COMMAND_EXEC,
  4824. .jim_handler = jim_target_reset,
  4825. .help = "used internally for reset processing",
  4826. },
  4827. {
  4828. .name = "arp_halt",
  4829. .mode = COMMAND_EXEC,
  4830. .jim_handler = jim_target_halt,
  4831. .help = "used internally for reset processing",
  4832. },
  4833. {
  4834. .name = "arp_waitstate",
  4835. .mode = COMMAND_EXEC,
  4836. .jim_handler = jim_target_wait_state,
  4837. .help = "used internally for reset processing",
  4838. },
  4839. {
  4840. .name = "invoke-event",
  4841. .mode = COMMAND_EXEC,
  4842. .jim_handler = jim_target_invoke_event,
  4843. .help = "invoke handler for specified event",
  4844. .usage = "event_name",
  4845. },
  4846. COMMAND_REGISTRATION_DONE
  4847. };
/* Implements "target create <name> <type> [options...]".
 * Allocates and initializes a new struct target, applies the remaining
 * arguments as "configure" options, invokes the type's target_create()
 * hook, and registers the per-target command group named <name>.
 * On success the new target becomes the current target.
 * Every failure path frees everything allocated so far, in reverse order. */
static int target_create(struct jim_getopt_info *goi)
{
	Jim_Obj *new_cmd;
	Jim_Cmd *cmd;
	const char *cp;
	int e;
	int x;
	struct target *target;
	struct command_context *cmd_ctx;
	cmd_ctx = current_command_context(goi->interp);
	assert(cmd_ctx);
	if (goi->argc < 3) {
		Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
		return JIM_ERR;
	}
	/* COMMAND */
	jim_getopt_obj(goi, &new_cmd);
	/* does this command exist? */
	cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
	if (cmd) {
		cp = Jim_GetString(new_cmd, NULL);
		Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
		return JIM_ERR;
	}
	/* TYPE */
	e = jim_getopt_string(goi, &cp, NULL);
	if (e != JIM_OK)
		return e;
	/* the transport may substitute its own target type (e.g. HLA) */
	struct transport *tr = get_current_transport();
	if (tr->override_target) {
		e = tr->override_target(&cp);
		if (e != ERROR_OK) {
			LOG_ERROR("The selected transport doesn't support this target");
			return JIM_ERR;
		}
		LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
	}
	/* now does target type exist */
	for (x = 0 ; target_types[x] ; x++) {
		if (0 == strcmp(cp, target_types[x]->name)) {
			/* found */
			break;
		}
	}
	if (!target_types[x]) {
		/* unknown type: build a helpful "try one of a, b or c" message */
		Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
		for (x = 0 ; target_types[x] ; x++) {
			if (target_types[x + 1]) {
				Jim_AppendStrings(goi->interp,
					Jim_GetResult(goi->interp),
					target_types[x]->name,
					", ", NULL);
			} else {
				Jim_AppendStrings(goi->interp,
					Jim_GetResult(goi->interp),
					" or ",
					target_types[x]->name, NULL);
			}
		}
		return JIM_ERR;
	}
	/* Create it */
	target = calloc(1, sizeof(struct target));
	if (!target) {
		LOG_ERROR("Out of memory");
		return JIM_ERR;
	}
	/* set target number */
	target->target_number = new_target_number();
	/* allocate memory for each unique target type */
	target->type = malloc(sizeof(struct target_type));
	if (!target->type) {
		LOG_ERROR("Out of memory");
		free(target);
		return JIM_ERR;
	}
	/* private copy: the type table entry stays pristine for other targets */
	memcpy(target->type, target_types[x], sizeof(struct target_type));
	/* default to first core, override with -coreid */
	target->coreid = 0;
	target->working_area = 0x0;
	target->working_area_size = 0x0;
	target->working_areas = NULL;
	target->backup_working_area = 0;
	target->state = TARGET_UNKNOWN;
	target->debug_reason = DBG_REASON_UNDEFINED;
	target->reg_cache = NULL;
	target->breakpoints = NULL;
	target->watchpoints = NULL;
	target->next = NULL;
	target->arch_info = NULL;
	target->verbose_halt_msg = true;
	target->halt_issued = false;
	/* initialize trace information */
	target->trace_info = calloc(1, sizeof(struct trace));
	if (!target->trace_info) {
		LOG_ERROR("Out of memory");
		free(target->type);
		free(target);
		return JIM_ERR;
	}
	target->dbgmsg = NULL;
	target->dbg_msg_enabled = 0;
	target->endianness = TARGET_ENDIAN_UNKNOWN;
	target->rtos = NULL;
	target->rtos_auto_detect = false;
	target->gdb_port_override = NULL;
	target->gdb_max_connections = 1;
	/* Do the rest as "configure" options */
	goi->isconfigure = 1;
	e = target_configure(goi, target);
	if (e == JIM_OK) {
		/* exactly one of -dap / -chain-position is mandatory */
		if (target->has_dap) {
			if (!target->dap_configured) {
				Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		} else {
			if (!target->tap_configured) {
				Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		}
		/* tap must be set after target was configured */
		if (!target->tap)
			e = JIM_ERR;
	}
	if (e != JIM_OK) {
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return e;
	}
	if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
		/* default endian to little if not specified */
		target->endianness = TARGET_LITTLE_ENDIAN;
	}
	cp = Jim_GetString(new_cmd, NULL);
	target->cmd_name = strdup(cp);
	if (!target->cmd_name) {
		LOG_ERROR("Out of memory");
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}
	/* give the target type a chance to set up its private state */
	if (target->type->target_create) {
		e = (*(target->type->target_create))(target, goi->interp);
		if (e != ERROR_OK) {
			LOG_DEBUG("target_create failed");
			free(target->cmd_name);
			rtos_destroy(target);
			free(target->gdb_port_override);
			free(target->trace_info);
			free(target->type);
			free(target);
			return JIM_ERR;
		}
	}
	/* create the target specific commands */
	if (target->type->commands) {
		e = register_commands(cmd_ctx, NULL, target->type->commands);
		if (e != ERROR_OK)
			LOG_ERROR("unable to register '%s' commands", cp);
	}
	/* now - create the new target name command */
	const struct command_registration target_subcommands[] = {
		{
			.chain = target_instance_command_handlers,
		},
		{
			.chain = target->type->commands,
		},
		COMMAND_REGISTRATION_DONE
	};
	const struct command_registration target_commands[] = {
		{
			.name = cp,
			.mode = COMMAND_ANY,
			.help = "target command group",
			.usage = "",
			.chain = target_subcommands,
		},
		COMMAND_REGISTRATION_DONE
	};
	e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
	if (e != ERROR_OK) {
		if (target->type->deinit_target)
			target->type->deinit_target(target);
		free(target->cmd_name);
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}
	/* append to end of list */
	append_to_list_all_targets(target);
	cmd_ctx->current_target = target;
	return JIM_OK;
}
  5053. static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  5054. {
  5055. if (argc != 1) {
  5056. Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
  5057. return JIM_ERR;
  5058. }
  5059. struct command_context *cmd_ctx = current_command_context(interp);
  5060. assert(cmd_ctx);
  5061. struct target *target = get_current_target_or_null(cmd_ctx);
  5062. if (target)
  5063. Jim_SetResultString(interp, target_name(target), -1);
  5064. return JIM_OK;
  5065. }
  5066. static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  5067. {
  5068. if (argc != 1) {
  5069. Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
  5070. return JIM_ERR;
  5071. }
  5072. Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
  5073. for (unsigned x = 0; NULL != target_types[x]; x++) {
  5074. Jim_ListAppendElement(interp, Jim_GetResult(interp),
  5075. Jim_NewStringObj(interp, target_types[x]->name, -1));
  5076. }
  5077. return JIM_OK;
  5078. }
  5079. static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  5080. {
  5081. if (argc != 1) {
  5082. Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
  5083. return JIM_ERR;
  5084. }
  5085. Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
  5086. struct target *target = all_targets;
  5087. while (target) {
  5088. Jim_ListAppendElement(interp, Jim_GetResult(interp),
  5089. Jim_NewStringObj(interp, target_name(target), -1));
  5090. target = target->next;
  5091. }
  5092. return JIM_OK;
  5093. }
  5094. static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  5095. {
  5096. int i;
  5097. const char *targetname;
  5098. int retval, len;
  5099. struct target *target = (struct target *) NULL;
  5100. struct target_list *head, *curr, *new;
  5101. curr = (struct target_list *) NULL;
  5102. head = (struct target_list *) NULL;
  5103. retval = 0;
  5104. LOG_DEBUG("%d", argc);
  5105. /* argv[1] = target to associate in smp
  5106. * argv[2] = target to associate in smp
  5107. * argv[3] ...
  5108. */
  5109. for (i = 1; i < argc; i++) {
  5110. targetname = Jim_GetString(argv[i], &len);
  5111. target = get_target(targetname);
  5112. LOG_DEBUG("%s ", targetname);
  5113. if (target) {
  5114. new = malloc(sizeof(struct target_list));
  5115. new->target = target;
  5116. new->next = (struct target_list *)NULL;
  5117. if (head == (struct target_list *)NULL) {
  5118. head = new;
  5119. curr = head;
  5120. } else {
  5121. curr->next = new;
  5122. curr = new;
  5123. }
  5124. }
  5125. }
  5126. /* now parse the list of cpu and put the target in smp mode*/
  5127. curr = head;
  5128. while (curr != (struct target_list *)NULL) {
  5129. target = curr->target;
  5130. target->smp = 1;
  5131. target->head = head;
  5132. curr = curr->next;
  5133. }
  5134. if (target && target->rtos)
  5135. retval = rtos_smp_init(head->target);
  5136. return retval;
  5137. }
  5138. static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
  5139. {
  5140. struct jim_getopt_info goi;
  5141. jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
  5142. if (goi.argc < 3) {
  5143. Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
  5144. "<name> <target_type> [<target_options> ...]");
  5145. return JIM_ERR;
  5146. }
  5147. return target_create(&goi);
  5148. }
/* Subcommands of the configuration-stage "target" command
 * (registered below via target_command_handlers[].chain). */
static const struct command_registration target_subcommand_handlers[] = {
	{
		.name = "init",
		.mode = COMMAND_CONFIG,
		.handler = handle_target_init_command,
		.help = "initialize targets",
		.usage = "",
	},
	{
		.name = "create",
		.mode = COMMAND_CONFIG,
		.jim_handler = jim_target_create,
		.usage = "name type '-chain-position' name [options ...]",
		.help = "Creates and selects a new target",
	},
	{
		.name = "current",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_current,
		.help = "Returns the currently selected target",
	},
	{
		.name = "types",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_types,
		.help = "Returns the available target types as "
			"a list of strings",
	},
	{
		.name = "names",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_names,
		.help = "Returns the names of all targets as a list of strings",
	},
	{
		.name = "smp",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_smp,
		.usage = "targetname1 targetname2 ...",
		.help = "gather several target in a smp list"
	},
	COMMAND_REGISTRATION_DONE
};
/* One image section cached in server memory by "fast_load_image". */
struct fast_load {
	target_addr_t address;	/* target address the data will be written to */
	uint8_t *data;		/* heap-allocated copy of the section payload */
	int length;		/* number of bytes in 'data' */
};

/* Cached-image state shared by fast_load_image / fast_load. */
static int fastload_num;	/* number of entries in 'fastload' */
static struct fast_load *fastload;	/* NULL when no image is cached */
  5199. static void free_fastload(void)
  5200. {
  5201. if (fastload) {
  5202. for (int i = 0; i < fastload_num; i++)
  5203. free(fastload[i].data);
  5204. free(fastload);
  5205. fastload = NULL;
  5206. }
  5207. }
  5208. COMMAND_HANDLER(handle_fast_load_image_command)
  5209. {
  5210. uint8_t *buffer;
  5211. size_t buf_cnt;
  5212. uint32_t image_size;
  5213. target_addr_t min_address = 0;
  5214. target_addr_t max_address = -1;
  5215. struct image image;
  5216. int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
  5217. &image, &min_address, &max_address);
  5218. if (retval != ERROR_OK)
  5219. return retval;
  5220. struct duration bench;
  5221. duration_start(&bench);
  5222. retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
  5223. if (retval != ERROR_OK)
  5224. return retval;
  5225. image_size = 0x0;
  5226. retval = ERROR_OK;
  5227. fastload_num = image.num_sections;
  5228. fastload = malloc(sizeof(struct fast_load)*image.num_sections);
  5229. if (!fastload) {
  5230. command_print(CMD, "out of memory");
  5231. image_close(&image);
  5232. return ERROR_FAIL;
  5233. }
  5234. memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
  5235. for (unsigned int i = 0; i < image.num_sections; i++) {
  5236. buffer = malloc(image.sections[i].size);
  5237. if (!buffer) {
  5238. command_print(CMD, "error allocating buffer for section (%d bytes)",
  5239. (int)(image.sections[i].size));
  5240. retval = ERROR_FAIL;
  5241. break;
  5242. }
  5243. retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
  5244. if (retval != ERROR_OK) {
  5245. free(buffer);
  5246. break;
  5247. }
  5248. uint32_t offset = 0;
  5249. uint32_t length = buf_cnt;
  5250. /* DANGER!!! beware of unsigned comparison here!!! */
  5251. if ((image.sections[i].base_address + buf_cnt >= min_address) &&
  5252. (image.sections[i].base_address < max_address)) {
  5253. if (image.sections[i].base_address < min_address) {
  5254. /* clip addresses below */
  5255. offset += min_address-image.sections[i].base_address;
  5256. length -= offset;
  5257. }
  5258. if (image.sections[i].base_address + buf_cnt > max_address)
  5259. length -= (image.sections[i].base_address + buf_cnt)-max_address;
  5260. fastload[i].address = image.sections[i].base_address + offset;
  5261. fastload[i].data = malloc(length);
  5262. if (!fastload[i].data) {
  5263. free(buffer);
  5264. command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
  5265. length);
  5266. retval = ERROR_FAIL;
  5267. break;
  5268. }
  5269. memcpy(fastload[i].data, buffer + offset, length);
  5270. fastload[i].length = length;
  5271. image_size += length;
  5272. command_print(CMD, "%u bytes written at address 0x%8.8x",
  5273. (unsigned int)length,
  5274. ((unsigned int)(image.sections[i].base_address + offset)));
  5275. }
  5276. free(buffer);
  5277. }
  5278. if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
  5279. command_print(CMD, "Loaded %" PRIu32 " bytes "
  5280. "in %fs (%0.3f KiB/s)", image_size,
  5281. duration_elapsed(&bench), duration_kbps(&bench, image_size));
  5282. command_print(CMD,
  5283. "WARNING: image has not been loaded to target!"
  5284. "You can issue a 'fast_load' to finish loading.");
  5285. }
  5286. image_close(&image);
  5287. if (retval != ERROR_OK)
  5288. free_fastload();
  5289. return retval;
  5290. }
  5291. COMMAND_HANDLER(handle_fast_load_command)
  5292. {
  5293. if (CMD_ARGC > 0)
  5294. return ERROR_COMMAND_SYNTAX_ERROR;
  5295. if (!fastload) {
  5296. LOG_ERROR("No image in memory");
  5297. return ERROR_FAIL;
  5298. }
  5299. int i;
  5300. int64_t ms = timeval_ms();
  5301. int size = 0;
  5302. int retval = ERROR_OK;
  5303. for (i = 0; i < fastload_num; i++) {
  5304. struct target *target = get_current_target(CMD_CTX);
  5305. command_print(CMD, "Write to 0x%08x, length 0x%08x",
  5306. (unsigned int)(fastload[i].address),
  5307. (unsigned int)(fastload[i].length));
  5308. retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
  5309. if (retval != ERROR_OK)
  5310. break;
  5311. size += fastload[i].length;
  5312. }
  5313. if (retval == ERROR_OK) {
  5314. int64_t after = timeval_ms();
  5315. command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
  5316. }
  5317. return retval;
  5318. }
/* Top-level commands registered at config time: "targets" (select/list)
 * and "target" (whose subcommands live in target_subcommand_handlers). */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
  5337. int target_register_commands(struct command_context *cmd_ctx)
  5338. {
  5339. return register_commands(cmd_ctx, NULL, target_command_handlers);
  5340. }
/* When true (the default), print advice after each reset about options
 * that could improve performance; toggled by the "reset_nag" command. */
static bool target_reset_nag = true;

/* Accessor used by reset code to decide whether to print the nag. */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
/* "reset_nag ['enable'|'disable']": set or query the reset-nag flag
 * by delegating to the generic boolean-option parser. */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
			&target_reset_nag, "Nag after each reset about options to improve "
			"performance");
}
  5352. COMMAND_HANDLER(handle_ps_command)
  5353. {
  5354. struct target *target = get_current_target(CMD_CTX);
  5355. char *display;
  5356. if (target->state != TARGET_HALTED) {
  5357. LOG_INFO("target not halted !!");
  5358. return ERROR_OK;
  5359. }
  5360. if ((target->rtos) && (target->rtos->type)
  5361. && (target->rtos->type->ps_command)) {
  5362. display = target->rtos->type->ps_command(target);
  5363. command_print(CMD, "%s", display);
  5364. free(display);
  5365. return ERROR_OK;
  5366. } else {
  5367. LOG_INFO("failed");
  5368. return ERROR_TARGET_FAILURE;
  5369. }
  5370. }
  5371. static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
  5372. {
  5373. if (text)
  5374. command_print_sameline(cmd, "%s", text);
  5375. for (int i = 0; i < size; i++)
  5376. command_print_sameline(cmd, " %02x", buf[i]);
  5377. command_print(cmd, " ");
  5378. }
  5379. COMMAND_HANDLER(handle_test_mem_access_command)
  5380. {
  5381. struct target *target = get_current_target(CMD_CTX);
  5382. uint32_t test_size;
  5383. int retval = ERROR_OK;
  5384. if (target->state != TARGET_HALTED) {
  5385. LOG_INFO("target not halted !!");
  5386. return ERROR_FAIL;
  5387. }
  5388. if (CMD_ARGC != 1)
  5389. return ERROR_COMMAND_SYNTAX_ERROR;
  5390. COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
  5391. /* Test reads */
  5392. size_t num_bytes = test_size + 4;
  5393. struct working_area *wa = NULL;
  5394. retval = target_alloc_working_area(target, num_bytes, &wa);
  5395. if (retval != ERROR_OK) {
  5396. LOG_ERROR("Not enough working area");
  5397. return ERROR_FAIL;
  5398. }
  5399. uint8_t *test_pattern = malloc(num_bytes);
  5400. for (size_t i = 0; i < num_bytes; i++)
  5401. test_pattern[i] = rand();
  5402. retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
  5403. if (retval != ERROR_OK) {
  5404. LOG_ERROR("Test pattern write failed");
  5405. goto out;
  5406. }
  5407. for (int host_offset = 0; host_offset <= 1; host_offset++) {
  5408. for (int size = 1; size <= 4; size *= 2) {
  5409. for (int offset = 0; offset < 4; offset++) {
  5410. uint32_t count = test_size / size;
  5411. size_t host_bufsiz = (count + 2) * size + host_offset;
  5412. uint8_t *read_ref = malloc(host_bufsiz);
  5413. uint8_t *read_buf = malloc(host_bufsiz);
  5414. for (size_t i = 0; i < host_bufsiz; i++) {
  5415. read_ref[i] = rand();
  5416. read_buf[i] = read_ref[i];
  5417. }
  5418. command_print_sameline(CMD,
  5419. "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
  5420. size, offset, host_offset ? "un" : "");
  5421. struct duration bench;
  5422. duration_start(&bench);
  5423. retval = target_read_memory(target, wa->address + offset, size, count,
  5424. read_buf + size + host_offset);
  5425. duration_measure(&bench);
  5426. if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
  5427. command_print(CMD, "Unsupported alignment");
  5428. goto next;
  5429. } else if (retval != ERROR_OK) {
  5430. command_print(CMD, "Memory read failed");
  5431. goto next;
  5432. }
  5433. /* replay on host */
  5434. memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
  5435. /* check result */
  5436. int result = memcmp(read_ref, read_buf, host_bufsiz);
  5437. if (result == 0) {
  5438. command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
  5439. duration_elapsed(&bench),
  5440. duration_kbps(&bench, count * size));
  5441. } else {
  5442. command_print(CMD, "Compare failed");
  5443. binprint(CMD, "ref:", read_ref, host_bufsiz);
  5444. binprint(CMD, "buf:", read_buf, host_bufsiz);
  5445. }
  5446. next:
  5447. free(read_ref);
  5448. free(read_buf);
  5449. }
  5450. }
  5451. }
  5452. out:
  5453. free(test_pattern);
  5454. if (wa)
  5455. target_free_working_area(target, wa);
  5456. /* Test writes */
  5457. num_bytes = test_size + 4 + 4 + 4;
  5458. retval = target_alloc_working_area(target, num_bytes, &wa);
  5459. if (retval != ERROR_OK) {
  5460. LOG_ERROR("Not enough working area");
  5461. return ERROR_FAIL;
  5462. }
  5463. test_pattern = malloc(num_bytes);
  5464. for (size_t i = 0; i < num_bytes; i++)
  5465. test_pattern[i] = rand();
  5466. for (int host_offset = 0; host_offset <= 1; host_offset++) {
  5467. for (int size = 1; size <= 4; size *= 2) {
  5468. for (int offset = 0; offset < 4; offset++) {
  5469. uint32_t count = test_size / size;
  5470. size_t host_bufsiz = count * size + host_offset;
  5471. uint8_t *read_ref = malloc(num_bytes);
  5472. uint8_t *read_buf = malloc(num_bytes);
  5473. uint8_t *write_buf = malloc(host_bufsiz);
  5474. for (size_t i = 0; i < host_bufsiz; i++)
  5475. write_buf[i] = rand();
  5476. command_print_sameline(CMD,
  5477. "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
  5478. size, offset, host_offset ? "un" : "");
  5479. retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
  5480. if (retval != ERROR_OK) {
  5481. command_print(CMD, "Test pattern write failed");
  5482. goto nextw;
  5483. }
  5484. /* replay on host */
  5485. memcpy(read_ref, test_pattern, num_bytes);
  5486. memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
  5487. struct duration bench;
  5488. duration_start(&bench);
  5489. retval = target_write_memory(target, wa->address + size + offset, size, count,
  5490. write_buf + host_offset);
  5491. duration_measure(&bench);
  5492. if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
  5493. command_print(CMD, "Unsupported alignment");
  5494. goto nextw;
  5495. } else if (retval != ERROR_OK) {
  5496. command_print(CMD, "Memory write failed");
  5497. goto nextw;
  5498. }
  5499. /* read back */
  5500. retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
  5501. if (retval != ERROR_OK) {
  5502. command_print(CMD, "Test pattern write failed");
  5503. goto nextw;
  5504. }
  5505. /* check result */
  5506. int result = memcmp(read_ref, read_buf, num_bytes);
  5507. if (result == 0) {
  5508. command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
  5509. duration_elapsed(&bench),
  5510. duration_kbps(&bench, count * size));
  5511. } else {
  5512. command_print(CMD, "Compare failed");
  5513. binprint(CMD, "ref:", read_ref, num_bytes);
  5514. binprint(CMD, "buf:", read_buf, num_bytes);
  5515. }
  5516. nextw:
  5517. free(read_ref);
  5518. free(read_buf);
  5519. }
  5520. }
  5521. }
  5522. free(test_pattern);
  5523. if (wa)
  5524. target_free_working_area(target, wa);
  5525. return retval;
  5526. }
/* Run-time ("exec" mode unless noted) target commands, registered per
 * command context by target_register_user_commands() below. */
static const struct command_registration target_exec_command_handlers[] = {
	/* image caching / profiling helpers */
	{
		.name = "fast_load_image",
		.handler = handle_fast_load_image_command,
		.mode = COMMAND_ANY,
		.help = "Load image into server memory for later use by "
			"fast_load; primarily for profiling",
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address [max_length]]",
	},
	{
		.name = "fast_load",
		.handler = handle_fast_load_command,
		.mode = COMMAND_EXEC,
		.help = "loads active fast load image to current target "
			"- mainly for profiling purposes",
		.usage = "",
	},
	{
		.name = "profile",
		.handler = handle_profile_command,
		.mode = COMMAND_EXEC,
		.usage = "seconds filename [start end]",
		.help = "profiling samples the CPU PC",
	},
	/** @todo don't register virt2phys() unless target supports it */
	{
		.name = "virt2phys",
		.handler = handle_virt2phys_command,
		.mode = COMMAND_ANY,
		.help = "translate a virtual address into a physical address",
		.usage = "virtual_address",
	},
	/* execution control */
	{
		.name = "reg",
		.handler = handle_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display (reread from target with \"force\") or set a register; "
			"with no arguments, displays all registers and their values",
		.usage = "[(register_number|register_name) [(value|'force')]]",
	},
	{
		.name = "poll",
		.handler = handle_poll_command,
		.mode = COMMAND_EXEC,
		.help = "poll target state; or reconfigure background polling",
		.usage = "['on'|'off']",
	},
	{
		.name = "wait_halt",
		.handler = handle_wait_halt_command,
		.mode = COMMAND_EXEC,
		.help = "wait up to the specified number of milliseconds "
			"(default 5000) for a previously requested halt",
		.usage = "[milliseconds]",
	},
	{
		.name = "halt",
		.handler = handle_halt_command,
		.mode = COMMAND_EXEC,
		.help = "request target to halt, then wait up to the specified "
			"number of milliseconds (default 5000) for it to complete",
		.usage = "[milliseconds]",
	},
	{
		.name = "resume",
		.handler = handle_resume_command,
		.mode = COMMAND_EXEC,
		.help = "resume target execution from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "reset",
		.handler = handle_reset_command,
		.mode = COMMAND_EXEC,
		.usage = "[run|halt|init]",
		.help = "Reset all targets into the specified mode. "
			"Default reset mode is run, if not given.",
	},
	{
		.name = "soft_reset_halt",
		.handler = handle_soft_reset_halt_command,
		.mode = COMMAND_EXEC,
		.usage = "",
		.help = "halt the target and do a soft reset",
	},
	{
		.name = "step",
		.handler = handle_step_command,
		.mode = COMMAND_EXEC,
		.help = "step one instruction from current PC or address",
		.usage = "[address]",
	},
	/* memory display (md*) -- all share handle_md_command */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory double-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory half-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory bytes",
		.usage = "['phys'] address [count]",
	},
	/* memory write (mw*) -- all share handle_mw_command */
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory double-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory half-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory byte",
		.usage = "['phys'] address value [count]",
	},
	/* breakpoints and watchpoints */
	{
		.name = "bp",
		.handler = handle_bp_command,
		.mode = COMMAND_EXEC,
		.help = "list or set hardware or software breakpoint",
		.usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
	},
	{
		.name = "rbp",
		.handler = handle_rbp_command,
		.mode = COMMAND_EXEC,
		.help = "remove breakpoint",
		.usage = "'all' | address",
	},
	{
		.name = "wp",
		.handler = handle_wp_command,
		.mode = COMMAND_EXEC,
		.help = "list (no params) or create watchpoints",
		.usage = "[address length [('r'|'w'|'a') value [mask]]]",
	},
	{
		.name = "rwp",
		.handler = handle_rwp_command,
		.mode = COMMAND_EXEC,
		.help = "remove watchpoint",
		.usage = "address",
	},
	/* image load / dump / verify */
	{
		.name = "load_image",
		.handler = handle_load_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address] [max_length]",
	},
	{
		.name = "dump_image",
		.handler = handle_dump_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address size",
	},
	{
		.name = "verify_image_checksum",
		.handler = handle_verify_image_checksum_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "verify_image",
		.handler = handle_verify_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "test_image",
		.handler = handle_test_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	/* TCL array <-> memory bridges */
	{
		.name = "mem2array",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mem2array,
		.help = "read 8/16/32 bit memory and return as a TCL array "
			"for script processing",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "array2mem",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_array2mem,
		.help = "convert a TCL array to memory locations "
			"and write the 8/16/32 bit values",
		.usage = "arrayname bitwidth address count",
	},
	/* miscellaneous */
	{
		.name = "reset_nag",
		.handler = handle_target_reset_nag,
		.mode = COMMAND_ANY,
		.help = "Nag after each reset about options that could have been "
			"enabled to improve performance.",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "ps",
		.handler = handle_ps_command,
		.mode = COMMAND_EXEC,
		.help = "list all tasks",
		.usage = "",
	},
	{
		.name = "test_mem_access",
		.handler = handle_test_mem_access_command,
		.mode = COMMAND_EXEC,
		.help = "Test the target's memory access functions",
		.usage = "size",
	},
	COMMAND_REGISTRATION_DONE
};
  5775. static int target_register_user_commands(struct command_context *cmd_ctx)
  5776. {
  5777. int retval = ERROR_OK;
  5778. retval = target_request_register_commands(cmd_ctx);
  5779. if (retval != ERROR_OK)
  5780. return retval;
  5781. retval = trace_register_commands(cmd_ctx);
  5782. if (retval != ERROR_OK)
  5783. return retval;
  5784. return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
  5785. }