You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

2266 lines
65 KiB

  1. /***************************************************************************
  2. * Copyright (C) 2005 by Dominic Rath *
  3. * Dominic.Rath@gmx.de *
  4. * *
  5. * Copyright (C) 2006 by Magnus Lundin *
  6. * lundin@mlu.mine.nu *
  7. * *
  8. * Copyright (C) 2008 by Spencer Oliver *
  9. * spen@spen-soft.co.uk *
  10. * *
  11. * This program is free software; you can redistribute it and/or modify *
  12. * it under the terms of the GNU General Public License as published by *
  13. * the Free Software Foundation; either version 2 of the License, or *
  14. * (at your option) any later version. *
  15. * *
  16. * This program is distributed in the hope that it will be useful, *
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  19. * GNU General Public License for more details. *
  20. * *
  21. * You should have received a copy of the GNU General Public License *
  22. * along with this program; if not, write to the *
  23. * Free Software Foundation, Inc., *
  24. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
  25. * *
  26. * *
  27. * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
  28. * *
  29. ***************************************************************************/
  30. #ifdef HAVE_CONFIG_H
  31. #include "config.h"
  32. #endif
  33. #include "jtag/interface.h"
  34. #include "breakpoints.h"
  35. #include "cortex_m.h"
  36. #include "target_request.h"
  37. #include "target_type.h"
  38. #include "arm_disassembler.h"
  39. #include "register.h"
  40. #include "arm_opcodes.h"
  41. #include "arm_semihosting.h"
  42. #include <helper/time_support.h>
  43. /* NOTE: most of this should work fine for the Cortex-M1 and
  44. * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
  45. * Some differences: M0/M1 doesn't have FBP remapping or the
  46. * DWT tracing/profiling support. (So the cycle counter will
  47. * not be usable; the other stuff isn't currently used here.)
  48. *
  49. * Although there are some workarounds for errata seen only in r0p0
  50. * silicon, such old parts are hard to find and thus not much tested
  51. * any longer.
  52. */
  53. /**
  54. * Returns the type of a break point required by address location
  55. */
  56. #define BKPT_TYPE_BY_ADDR(addr) ((addr) < 0x20000000 ? BKPT_HARD : BKPT_SOFT)
  57. /* forward declarations */
  58. static int cortex_m3_store_core_reg_u32(struct target *target,
  59. enum armv7m_regtype type, uint32_t num, uint32_t value);
  60. static int cortexm3_dap_read_coreregister_u32(struct adiv5_dap *swjdp,
  61. uint32_t *value, int regnum)
  62. {
  63. int retval;
  64. uint32_t dcrdr;
  65. /* because the DCB_DCRDR is used for the emulated dcc channel
  66. * we have to save/restore the DCB_DCRDR when used */
  67. retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
  68. if (retval != ERROR_OK)
  69. return retval;
  70. /* mem_ap_write_u32(swjdp, DCB_DCRSR, regnum); */
  71. retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
  72. if (retval != ERROR_OK)
  73. return retval;
  74. retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum);
  75. if (retval != ERROR_OK)
  76. return retval;
  77. /* mem_ap_read_u32(swjdp, DCB_DCRDR, value); */
  78. retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
  79. if (retval != ERROR_OK)
  80. return retval;
  81. retval = dap_queue_ap_read(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
  82. if (retval != ERROR_OK)
  83. return retval;
  84. retval = dap_run(swjdp);
  85. if (retval != ERROR_OK)
  86. return retval;
  87. /* restore DCB_DCRDR - this needs to be in a seperate
  88. * transaction otherwise the emulated DCC channel breaks */
  89. if (retval == ERROR_OK)
  90. retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
  91. return retval;
  92. }
  93. static int cortexm3_dap_write_coreregister_u32(struct adiv5_dap *swjdp,
  94. uint32_t value, int regnum)
  95. {
  96. int retval;
  97. uint32_t dcrdr;
  98. /* because the DCB_DCRDR is used for the emulated dcc channel
  99. * we have to save/restore the DCB_DCRDR when used */
  100. retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
  101. if (retval != ERROR_OK)
  102. return retval;
  103. /* mem_ap_write_u32(swjdp, DCB_DCRDR, core_regs[i]); */
  104. retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
  105. if (retval != ERROR_OK)
  106. return retval;
  107. retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
  108. if (retval != ERROR_OK)
  109. return retval;
  110. /* mem_ap_write_u32(swjdp, DCB_DCRSR, i | DCRSR_WnR); */
  111. retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
  112. if (retval != ERROR_OK)
  113. return retval;
  114. retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum | DCRSR_WnR);
  115. if (retval != ERROR_OK)
  116. return retval;
  117. retval = dap_run(swjdp);
  118. if (retval != ERROR_OK)
  119. return retval;
  120. /* restore DCB_DCRDR - this needs to be in a seperate
  121. * transaction otherwise the emulated DCC channel breaks */
  122. if (retval == ERROR_OK)
  123. retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
  124. return retval;
  125. }
  126. static int cortex_m3_write_debug_halt_mask(struct target *target,
  127. uint32_t mask_on, uint32_t mask_off)
  128. {
  129. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  130. struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
  131. /* mask off status bits */
  132. cortex_m3->dcb_dhcsr &= ~((0xFFFF << 16) | mask_off);
  133. /* create new register mask */
  134. cortex_m3->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
  135. return mem_ap_write_atomic_u32(swjdp, DCB_DHCSR, cortex_m3->dcb_dhcsr);
  136. }
  137. static int cortex_m3_clear_halt(struct target *target)
  138. {
  139. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  140. struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
  141. int retval;
  142. /* clear step if any */
  143. cortex_m3_write_debug_halt_mask(target, C_HALT, C_STEP);
  144. /* Read Debug Fault Status Register */
  145. retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR, &cortex_m3->nvic_dfsr);
  146. if (retval != ERROR_OK)
  147. return retval;
  148. /* Clear Debug Fault Status */
  149. retval = mem_ap_write_atomic_u32(swjdp, NVIC_DFSR, cortex_m3->nvic_dfsr);
  150. if (retval != ERROR_OK)
  151. return retval;
  152. LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m3->nvic_dfsr);
  153. return ERROR_OK;
  154. }
  155. static int cortex_m3_single_step_core(struct target *target)
  156. {
  157. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  158. struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
  159. uint32_t dhcsr_save;
  160. int retval;
  161. /* backup dhcsr reg */
  162. dhcsr_save = cortex_m3->dcb_dhcsr;
  163. /* Mask interrupts before clearing halt, if done already. This avoids
  164. * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
  165. * HALT can put the core into an unknown state.
  166. */
  167. if (!(cortex_m3->dcb_dhcsr & C_MASKINTS)) {
  168. retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
  169. DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
  170. if (retval != ERROR_OK)
  171. return retval;
  172. }
  173. retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
  174. DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
  175. if (retval != ERROR_OK)
  176. return retval;
  177. LOG_DEBUG(" ");
  178. /* restore dhcsr reg */
  179. cortex_m3->dcb_dhcsr = dhcsr_save;
  180. cortex_m3_clear_halt(target);
  181. return ERROR_OK;
  182. }
  183. static int cortex_m3_endreset_event(struct target *target)
  184. {
  185. int i;
  186. int retval;
  187. uint32_t dcb_demcr;
  188. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  189. struct armv7m_common *armv7m = &cortex_m3->armv7m;
  190. struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
  191. struct cortex_m3_fp_comparator *fp_list = cortex_m3->fp_comparator_list;
  192. struct cortex_m3_dwt_comparator *dwt_list = cortex_m3->dwt_comparator_list;
  193. /* REVISIT The four debug monitor bits are currently ignored... */
  194. retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &dcb_demcr);
  195. if (retval != ERROR_OK)
  196. return retval;
  197. LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
  198. /* this register is used for emulated dcc channel */
  199. retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
  200. if (retval != ERROR_OK)
  201. return retval;
  202. /* Enable debug requests */
  203. retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
  204. if (retval != ERROR_OK)
  205. return retval;
  206. if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
  207. retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
  208. if (retval != ERROR_OK)
  209. return retval;
  210. }
  211. /* clear any interrupt masking */
  212. cortex_m3_write_debug_halt_mask(target, 0, C_MASKINTS);
  213. /* Enable features controlled by ITM and DWT blocks, and catch only
  214. * the vectors we were told to pay attention to.
  215. *
  216. * Target firmware is responsible for all fault handling policy
  217. * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
  218. * or manual updates to the NVIC SHCSR and CCR registers.
  219. */
  220. retval = mem_ap_write_u32(swjdp, DCB_DEMCR, TRCENA | armv7m->demcr);
  221. if (retval != ERROR_OK)
  222. return retval;
  223. /* Paranoia: evidently some (early?) chips don't preserve all the
  224. * debug state (including FBP, DWT, etc) across reset...
  225. */
  226. /* Enable FPB */
  227. retval = target_write_u32(target, FP_CTRL, 3);
  228. if (retval != ERROR_OK)
  229. return retval;
  230. cortex_m3->fpb_enabled = 1;
  231. /* Restore FPB registers */
  232. for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
  233. retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
  234. if (retval != ERROR_OK)
  235. return retval;
  236. }
  237. /* Restore DWT registers */
  238. for (i = 0; i < cortex_m3->dwt_num_comp; i++) {
  239. retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
  240. dwt_list[i].comp);
  241. if (retval != ERROR_OK)
  242. return retval;
  243. retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
  244. dwt_list[i].mask);
  245. if (retval != ERROR_OK)
  246. return retval;
  247. retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
  248. dwt_list[i].function);
  249. if (retval != ERROR_OK)
  250. return retval;
  251. }
  252. retval = dap_run(swjdp);
  253. if (retval != ERROR_OK)
  254. return retval;
  255. register_cache_invalidate(cortex_m3->armv7m.core_cache);
  256. /* make sure we have latest dhcsr flags */
  257. retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
  258. return retval;
  259. }
  260. static int cortex_m3_examine_debug_reason(struct target *target)
  261. {
  262. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  263. /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
  264. * only check the debug reason if we don't know it already */
  265. if ((target->debug_reason != DBG_REASON_DBGRQ)
  266. && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
  267. if (cortex_m3->nvic_dfsr & DFSR_BKPT) {
  268. target->debug_reason = DBG_REASON_BREAKPOINT;
  269. if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
  270. target->debug_reason = DBG_REASON_WPTANDBKPT;
  271. } else if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
  272. target->debug_reason = DBG_REASON_WATCHPOINT;
  273. else if (cortex_m3->nvic_dfsr & DFSR_VCATCH)
  274. target->debug_reason = DBG_REASON_BREAKPOINT;
  275. else /* EXTERNAL, HALTED */
  276. target->debug_reason = DBG_REASON_UNDEFINED;
  277. }
  278. return ERROR_OK;
  279. }
  280. static int cortex_m3_examine_exception_reason(struct target *target)
  281. {
  282. uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
  283. struct armv7m_common *armv7m = target_to_armv7m(target);
  284. struct adiv5_dap *swjdp = armv7m->arm.dap;
  285. int retval;
  286. retval = mem_ap_read_u32(swjdp, NVIC_SHCSR, &shcsr);
  287. if (retval != ERROR_OK)
  288. return retval;
  289. switch (armv7m->exception_number) {
  290. case 2: /* NMI */
  291. break;
  292. case 3: /* Hard Fault */
  293. retval = mem_ap_read_atomic_u32(swjdp, NVIC_HFSR, &except_sr);
  294. if (retval != ERROR_OK)
  295. return retval;
  296. if (except_sr & 0x40000000) {
  297. retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &cfsr);
  298. if (retval != ERROR_OK)
  299. return retval;
  300. }
  301. break;
  302. case 4: /* Memory Management */
  303. retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
  304. if (retval != ERROR_OK)
  305. return retval;
  306. retval = mem_ap_read_u32(swjdp, NVIC_MMFAR, &except_ar);
  307. if (retval != ERROR_OK)
  308. return retval;
  309. break;
  310. case 5: /* Bus Fault */
  311. retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
  312. if (retval != ERROR_OK)
  313. return retval;
  314. retval = mem_ap_read_u32(swjdp, NVIC_BFAR, &except_ar);
  315. if (retval != ERROR_OK)
  316. return retval;
  317. break;
  318. case 6: /* Usage Fault */
  319. retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
  320. if (retval != ERROR_OK)
  321. return retval;
  322. break;
  323. case 11: /* SVCall */
  324. break;
  325. case 12: /* Debug Monitor */
  326. retval = mem_ap_read_u32(swjdp, NVIC_DFSR, &except_sr);
  327. if (retval != ERROR_OK)
  328. return retval;
  329. break;
  330. case 14: /* PendSV */
  331. break;
  332. case 15: /* SysTick */
  333. break;
  334. default:
  335. except_sr = 0;
  336. break;
  337. }
  338. retval = dap_run(swjdp);
  339. if (retval == ERROR_OK)
  340. LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
  341. ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
  342. armv7m_exception_string(armv7m->exception_number),
  343. shcsr, except_sr, cfsr, except_ar);
  344. return retval;
  345. }
  346. static int cortex_m3_debug_entry(struct target *target)
  347. {
  348. int i;
  349. uint32_t xPSR;
  350. int retval;
  351. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  352. struct armv7m_common *armv7m = &cortex_m3->armv7m;
  353. struct arm *arm = &armv7m->arm;
  354. struct adiv5_dap *swjdp = armv7m->arm.dap;
  355. struct reg *r;
  356. LOG_DEBUG(" ");
  357. cortex_m3_clear_halt(target);
  358. retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
  359. if (retval != ERROR_OK)
  360. return retval;
  361. retval = armv7m->examine_debug_reason(target);
  362. if (retval != ERROR_OK)
  363. return retval;
  364. /* Examine target state and mode
  365. * First load register acessible through core debug port*/
  366. int num_regs = armv7m->core_cache->num_regs;
  367. for (i = 0; i < num_regs; i++) {
  368. if (!armv7m->core_cache->reg_list[i].valid)
  369. armv7m->read_core_reg(target, i);
  370. }
  371. r = armv7m->core_cache->reg_list + ARMV7M_xPSR;
  372. xPSR = buf_get_u32(r->value, 0, 32);
  373. #ifdef ARMV7_GDB_HACKS
  374. /* FIXME this breaks on scan chains with more than one Cortex-M3.
  375. * Instead, each CM3 should have its own dummy value...
  376. */
  377. /* copy real xpsr reg for gdb, setting thumb bit */
  378. buf_set_u32(armv7m_gdb_dummy_cpsr_value, 0, 32, xPSR);
  379. buf_set_u32(armv7m_gdb_dummy_cpsr_value, 5, 1, 1);
  380. armv7m_gdb_dummy_cpsr_reg.valid = r->valid;
  381. armv7m_gdb_dummy_cpsr_reg.dirty = r->dirty;
  382. #endif
  383. /* For IT instructions xPSR must be reloaded on resume and clear on debug exec */
  384. if (xPSR & 0xf00) {
  385. r->dirty = r->valid;
  386. cortex_m3_store_core_reg_u32(target, ARMV7M_REGISTER_CORE_GP, 16, xPSR & ~0xff);
  387. }
  388. /* Are we in an exception handler */
  389. if (xPSR & 0x1FF) {
  390. armv7m->core_mode = ARMV7M_MODE_HANDLER;
  391. armv7m->exception_number = (xPSR & 0x1FF);
  392. arm->core_mode = ARM_MODE_HANDLER;
  393. arm->map = armv7m_msp_reg_map;
  394. } else {
  395. unsigned control = buf_get_u32(armv7m->core_cache
  396. ->reg_list[ARMV7M_CONTROL].value, 0, 2);
  397. /* is this thread privileged? */
  398. armv7m->core_mode = control & 1;
  399. arm->core_mode = armv7m->core_mode
  400. ? ARM_MODE_USER_THREAD
  401. : ARM_MODE_THREAD;
  402. /* which stack is it using? */
  403. if (control & 2)
  404. arm->map = armv7m_psp_reg_map;
  405. else
  406. arm->map = armv7m_msp_reg_map;
  407. armv7m->exception_number = 0;
  408. }
  409. if (armv7m->exception_number)
  410. cortex_m3_examine_exception_reason(target);
  411. LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", target->state: %s",
  412. armv7m_mode_strings[armv7m->core_mode],
  413. *(uint32_t *)(arm->pc->value),
  414. target_state_name(target));
  415. if (armv7m->post_debug_entry) {
  416. retval = armv7m->post_debug_entry(target);
  417. if (retval != ERROR_OK)
  418. return retval;
  419. }
  420. return ERROR_OK;
  421. }
  422. static int cortex_m3_poll(struct target *target)
  423. {
  424. int detected_failure = ERROR_OK;
  425. int retval = ERROR_OK;
  426. enum target_state prev_target_state = target->state;
  427. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  428. struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
  429. /* Read from Debug Halting Control and Status Register */
  430. retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
  431. if (retval != ERROR_OK) {
  432. target->state = TARGET_UNKNOWN;
  433. return retval;
  434. }
  435. /* Recover from lockup. See ARMv7-M architecture spec,
  436. * section B1.5.15 "Unrecoverable exception cases".
  437. */
  438. if (cortex_m3->dcb_dhcsr & S_LOCKUP) {
  439. LOG_ERROR("%s -- clearing lockup after double fault",
  440. target_name(target));
  441. cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
  442. target->debug_reason = DBG_REASON_DBGRQ;
  443. /* We have to execute the rest (the "finally" equivalent, but
  444. * still throw this exception again).
  445. */
  446. detected_failure = ERROR_FAIL;
  447. /* refresh status bits */
  448. retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
  449. if (retval != ERROR_OK)
  450. return retval;
  451. }
  452. if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
  453. /* check if still in reset */
  454. retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
  455. if (retval != ERROR_OK)
  456. return retval;
  457. if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
  458. target->state = TARGET_RESET;
  459. return ERROR_OK;
  460. }
  461. }
  462. if (target->state == TARGET_RESET) {
  463. /* Cannot switch context while running so endreset is
  464. * called with target->state == TARGET_RESET
  465. */
  466. LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
  467. cortex_m3->dcb_dhcsr);
  468. cortex_m3_endreset_event(target);
  469. target->state = TARGET_RUNNING;
  470. prev_target_state = TARGET_RUNNING;
  471. }
  472. if (cortex_m3->dcb_dhcsr & S_HALT) {
  473. target->state = TARGET_HALTED;
  474. if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
  475. retval = cortex_m3_debug_entry(target);
  476. if (retval != ERROR_OK)
  477. return retval;
  478. if (arm_semihosting(target, &retval) != 0)
  479. return retval;
  480. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  481. }
  482. if (prev_target_state == TARGET_DEBUG_RUNNING) {
  483. LOG_DEBUG(" ");
  484. retval = cortex_m3_debug_entry(target);
  485. if (retval != ERROR_OK)
  486. return retval;
  487. target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
  488. }
  489. }
  490. /* REVISIT when S_SLEEP is set, it's in a Sleep or DeepSleep state.
  491. * How best to model low power modes?
  492. */
  493. if (target->state == TARGET_UNKNOWN) {
  494. /* check if processor is retiring instructions */
  495. if (cortex_m3->dcb_dhcsr & S_RETIRE_ST) {
  496. target->state = TARGET_RUNNING;
  497. retval = ERROR_OK;
  498. }
  499. }
  500. /* Did we detect a failure condition that we cleared? */
  501. if (detected_failure != ERROR_OK)
  502. retval = detected_failure;
  503. return retval;
  504. }
  505. static int cortex_m3_halt(struct target *target)
  506. {
  507. LOG_DEBUG("target->state: %s",
  508. target_state_name(target));
  509. if (target->state == TARGET_HALTED) {
  510. LOG_DEBUG("target was already halted");
  511. return ERROR_OK;
  512. }
  513. if (target->state == TARGET_UNKNOWN)
  514. LOG_WARNING("target was in unknown state when halt was requested");
  515. if (target->state == TARGET_RESET) {
  516. if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
  517. LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
  518. return ERROR_TARGET_FAILURE;
  519. } else {
  520. /* we came here in a reset_halt or reset_init sequence
  521. * debug entry was already prepared in cortex_m3_assert_reset()
  522. */
  523. target->debug_reason = DBG_REASON_DBGRQ;
  524. return ERROR_OK;
  525. }
  526. }
  527. /* Write to Debug Halting Control and Status Register */
  528. cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
  529. target->debug_reason = DBG_REASON_DBGRQ;
  530. return ERROR_OK;
  531. }
  532. static int cortex_m3_soft_reset_halt(struct target *target)
  533. {
  534. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  535. struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
  536. uint32_t dcb_dhcsr = 0;
  537. int retval, timeout = 0;
  538. /* Enter debug state on reset; restore DEMCR in endreset_event() */
  539. retval = mem_ap_write_u32(swjdp, DCB_DEMCR,
  540. TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
  541. if (retval != ERROR_OK)
  542. return retval;
  543. /* Request a core-only reset */
  544. retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
  545. AIRCR_VECTKEY | AIRCR_VECTRESET);
  546. if (retval != ERROR_OK)
  547. return retval;
  548. target->state = TARGET_RESET;
  549. /* registers are now invalid */
  550. register_cache_invalidate(cortex_m3->armv7m.core_cache);
  551. while (timeout < 100) {
  552. retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &dcb_dhcsr);
  553. if (retval == ERROR_OK) {
  554. retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR,
  555. &cortex_m3->nvic_dfsr);
  556. if (retval != ERROR_OK)
  557. return retval;
  558. if ((dcb_dhcsr & S_HALT)
  559. && (cortex_m3->nvic_dfsr & DFSR_VCATCH)) {
  560. LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
  561. "DFSR 0x%08x",
  562. (unsigned) dcb_dhcsr,
  563. (unsigned) cortex_m3->nvic_dfsr);
  564. cortex_m3_poll(target);
  565. /* FIXME restore user's vector catch config */
  566. return ERROR_OK;
  567. } else
  568. LOG_DEBUG("waiting for system reset-halt, "
  569. "DHCSR 0x%08x, %d ms",
  570. (unsigned) dcb_dhcsr, timeout);
  571. }
  572. timeout++;
  573. alive_sleep(1);
  574. }
  575. return ERROR_OK;
  576. }
  577. static void cortex_m3_enable_breakpoints(struct target *target)
  578. {
  579. struct breakpoint *breakpoint = target->breakpoints;
  580. /* set any pending breakpoints */
  581. while (breakpoint) {
  582. if (!breakpoint->set)
  583. cortex_m3_set_breakpoint(target, breakpoint);
  584. breakpoint = breakpoint->next;
  585. }
  586. }
  587. static int cortex_m3_resume(struct target *target, int current,
  588. uint32_t address, int handle_breakpoints, int debug_execution)
  589. {
  590. struct armv7m_common *armv7m = target_to_armv7m(target);
  591. struct breakpoint *breakpoint = NULL;
  592. uint32_t resume_pc;
  593. struct reg *r;
  594. if (target->state != TARGET_HALTED) {
  595. LOG_WARNING("target not halted");
  596. return ERROR_TARGET_NOT_HALTED;
  597. }
  598. if (!debug_execution) {
  599. target_free_all_working_areas(target);
  600. cortex_m3_enable_breakpoints(target);
  601. cortex_m3_enable_watchpoints(target);
  602. }
  603. if (debug_execution) {
  604. r = armv7m->core_cache->reg_list + ARMV7M_PRIMASK;
  605. /* Disable interrupts */
  606. /* We disable interrupts in the PRIMASK register instead of
  607. * masking with C_MASKINTS. This is probably the same issue
  608. * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
  609. * in parallel with disabled interrupts can cause local faults
  610. * to not be taken.
  611. *
  612. * REVISIT this clearly breaks non-debug execution, since the
  613. * PRIMASK register state isn't saved/restored... workaround
  614. * by never resuming app code after debug execution.
  615. */
  616. buf_set_u32(r->value, 0, 1, 1);
  617. r->dirty = true;
  618. r->valid = true;
  619. /* Make sure we are in Thumb mode */
  620. r = armv7m->core_cache->reg_list + ARMV7M_xPSR;
  621. buf_set_u32(r->value, 24, 1, 1);
  622. r->dirty = true;
  623. r->valid = true;
  624. }
  625. /* current = 1: continue on current pc, otherwise continue at <address> */
  626. r = armv7m->arm.pc;
  627. if (!current) {
  628. buf_set_u32(r->value, 0, 32, address);
  629. r->dirty = true;
  630. r->valid = true;
  631. }
  632. /* if we halted last time due to a bkpt instruction
  633. * then we have to manually step over it, otherwise
  634. * the core will break again */
  635. if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
  636. && !debug_execution)
  637. armv7m_maybe_skip_bkpt_inst(target, NULL);
  638. resume_pc = buf_get_u32(r->value, 0, 32);
  639. armv7m_restore_context(target);
  640. /* the front-end may request us not to handle breakpoints */
  641. if (handle_breakpoints) {
  642. /* Single step past breakpoint at current address */
  643. breakpoint = breakpoint_find(target, resume_pc);
  644. if (breakpoint) {
  645. LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 " (ID: %d)",
  646. breakpoint->address,
  647. breakpoint->unique_id);
  648. cortex_m3_unset_breakpoint(target, breakpoint);
  649. cortex_m3_single_step_core(target);
  650. cortex_m3_set_breakpoint(target, breakpoint);
  651. }
  652. }
  653. /* Restart core */
  654. cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
  655. target->debug_reason = DBG_REASON_NOTHALTED;
  656. /* registers are now invalid */
  657. register_cache_invalidate(armv7m->core_cache);
  658. if (!debug_execution) {
  659. target->state = TARGET_RUNNING;
  660. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  661. LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
  662. } else {
  663. target->state = TARGET_DEBUG_RUNNING;
  664. target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
  665. LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
  666. }
  667. return ERROR_OK;
  668. }
  669. /* int irqstepcount = 0; */
  670. static int cortex_m3_step(struct target *target, int current,
  671. uint32_t address, int handle_breakpoints)
  672. {
  673. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  674. struct armv7m_common *armv7m = &cortex_m3->armv7m;
  675. struct adiv5_dap *swjdp = armv7m->arm.dap;
  676. struct breakpoint *breakpoint = NULL;
  677. struct reg *pc = armv7m->arm.pc;
  678. bool bkpt_inst_found = false;
  679. int retval;
  680. bool isr_timed_out = false;
  681. if (target->state != TARGET_HALTED) {
  682. LOG_WARNING("target not halted");
  683. return ERROR_TARGET_NOT_HALTED;
  684. }
  685. /* current = 1: continue on current pc, otherwise continue at <address> */
  686. if (!current)
  687. buf_set_u32(pc->value, 0, 32, address);
  688. uint32_t pc_value = buf_get_u32(pc->value, 0, 32);
  689. /* the front-end may request us not to handle breakpoints */
  690. if (handle_breakpoints) {
  691. breakpoint = breakpoint_find(target, pc_value);
  692. if (breakpoint)
  693. cortex_m3_unset_breakpoint(target, breakpoint);
  694. }
  695. armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);
  696. target->debug_reason = DBG_REASON_SINGLESTEP;
  697. armv7m_restore_context(target);
  698. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  699. /* if no bkpt instruction is found at pc then we can perform
  700. * a normal step, otherwise we have to manually step over the bkpt
  701. * instruction - as such simulate a step */
  702. if (bkpt_inst_found == false) {
  703. /* Automatic ISR masking mode off: Just step over the next instruction */
  704. if ((cortex_m3->isrmasking_mode != CORTEX_M3_ISRMASK_AUTO))
  705. cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
  706. else {
  707. /* Process interrupts during stepping in a way they don't interfere
  708. * debugging.
  709. *
  710. * Principle:
  711. *
  712. * Set a temporary break point at the current pc and let the core run
  713. * with interrupts enabled. Pending interrupts get served and we run
  714. * into the breakpoint again afterwards. Then we step over the next
  715. * instruction with interrupts disabled.
  716. *
  717. * If the pending interrupts don't complete within time, we leave the
  718. * core running. This may happen if the interrupts trigger faster
  719. * than the core can process them or the handler doesn't return.
  720. *
  721. * If no more breakpoints are available we simply do a step with
  722. * interrupts enabled.
  723. *
  724. */
  725. /* Set a temporary break point */
  726. retval = breakpoint_add(target, pc_value, 2, BKPT_TYPE_BY_ADDR(pc_value));
  727. bool tmp_bp_set = (retval == ERROR_OK);
  728. /* No more breakpoints left, just do a step */
  729. if (!tmp_bp_set)
  730. cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
  731. else {
  732. /* Start the core */
  733. LOG_DEBUG("Starting core to serve pending interrupts");
  734. int64_t t_start = timeval_ms();
  735. cortex_m3_write_debug_halt_mask(target, 0, C_HALT | C_STEP);
  736. /* Wait for pending handlers to complete or timeout */
  737. do {
  738. retval = mem_ap_read_atomic_u32(swjdp,
  739. DCB_DHCSR,
  740. &cortex_m3->dcb_dhcsr);
  741. if (retval != ERROR_OK) {
  742. target->state = TARGET_UNKNOWN;
  743. return retval;
  744. }
  745. isr_timed_out = ((timeval_ms() - t_start) > 500);
  746. } while (!((cortex_m3->dcb_dhcsr & S_HALT) || isr_timed_out));
  747. /* Remove the temporary breakpoint */
  748. breakpoint_remove(target, pc_value);
  749. if (isr_timed_out) {
  750. LOG_DEBUG("Interrupt handlers didn't complete within time, "
  751. "leaving target running");
  752. } else {
  753. /* Step over next instruction with interrupts disabled */
  754. cortex_m3_write_debug_halt_mask(target,
  755. C_HALT | C_MASKINTS,
  756. 0);
  757. cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
  758. /* Re-enable interrupts */
  759. cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
  760. }
  761. }
  762. }
  763. }
  764. retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
  765. if (retval != ERROR_OK)
  766. return retval;
  767. /* registers are now invalid */
  768. register_cache_invalidate(cortex_m3->armv7m.core_cache);
  769. if (breakpoint)
  770. cortex_m3_set_breakpoint(target, breakpoint);
  771. if (isr_timed_out) {
  772. /* Leave the core running. The user has to stop execution manually. */
  773. target->debug_reason = DBG_REASON_NOTHALTED;
  774. target->state = TARGET_RUNNING;
  775. return ERROR_OK;
  776. }
  777. LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
  778. " nvic_icsr = 0x%" PRIx32,
  779. cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
  780. retval = cortex_m3_debug_entry(target);
  781. if (retval != ERROR_OK)
  782. return retval;
  783. target_call_event_callbacks(target, TARGET_EVENT_HALTED);
  784. LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
  785. " nvic_icsr = 0x%" PRIx32,
  786. cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
  787. return ERROR_OK;
  788. }
/**
 * Assert reset on a Cortex-M3 target.
 *
 * Order matters throughout: debug must be enabled over the DAP before
 * reset so the core can be caught at the reset vector when reset_halt
 * is requested.  If the adapter has no SRST line, a software reset is
 * issued through NVIC AIRCR (SYSRESETREQ or VECTRESET, per the
 * configured soft_reset_config).
 */
static int cortex_m3_assert_reset(struct target *target)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
	enum cortex_m3_soft_reset_config reset_config = cortex_m3->soft_reset_config;

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event entirely;
		 * the script is then responsible for the actual reset */
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m3->armv7m.core_cache);
		target->state = TARGET_RESET;
		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode if it has been configured */
	bool srst_asserted = false;

	if (jtag_reset_config & RESET_SRST_NO_GATING) {
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* Enable debug requests */
	int retval;
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;
	if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
		/* DBGKEY must accompany every DHCSR write or it is ignored */
		retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
		if (retval != ERROR_OK)
			return retval;
	}

	/* clear any stale data in the debug core register data register */
	retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		if (cortex_m3->dcb_dhcsr & C_MASKINTS) {
			retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
					DBGKEY | C_DEBUGEN | C_HALT);
			if (retval != ERROR_OK)
				return retval;
		}

		/* clear any debug flags before resuming */
		cortex_m3_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries.  Should this include MMERR or
		 * other flags too?
		 */
		retval = mem_ap_write_atomic_u32(swjdp, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK)
			return retval;
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECRESET as it is supported on all current cores.
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M3_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("Using Cortex-M3 %s", (reset_config == CORTEX_M3_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M3_RESET_VECTRESET) {
			LOG_WARNING("Only resetting the Cortex-M3 core, use a reset-init event "
				"handler to reset any peripherals");
		}

		{
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			retval = mem_ap_read_atomic_u32(swjdp, NVIC_AIRCR, &tmp);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	target->state = TARGET_RESET;
	/* let the chip settle after the reset edge */
	jtag_add_sleep(50000);

	/* register values are stale across any reset */
	register_cache_invalidate(cortex_m3->armv7m.core_cache);

	if (target->reset_halt) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
  890. static int cortex_m3_deassert_reset(struct target *target)
  891. {
  892. LOG_DEBUG("target->state: %s",
  893. target_state_name(target));
  894. /* deassert reset lines */
  895. adapter_deassert_reset();
  896. return ERROR_OK;
  897. }
  898. int cortex_m3_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
  899. {
  900. int retval;
  901. int fp_num = 0;
  902. uint32_t hilo;
  903. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  904. struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
  905. if (breakpoint->set) {
  906. LOG_WARNING("breakpoint (BPID: %d) already set", breakpoint->unique_id);
  907. return ERROR_OK;
  908. }
  909. if (cortex_m3->auto_bp_type)
  910. breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
  911. if (breakpoint->type == BKPT_HARD) {
  912. while (comparator_list[fp_num].used && (fp_num < cortex_m3->fp_num_code))
  913. fp_num++;
  914. if (fp_num >= cortex_m3->fp_num_code) {
  915. LOG_ERROR("Can not find free FPB Comparator!");
  916. return ERROR_FAIL;
  917. }
  918. breakpoint->set = fp_num + 1;
  919. hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
  920. comparator_list[fp_num].used = 1;
  921. comparator_list[fp_num].fpcr_value = (breakpoint->address & 0x1FFFFFFC) | hilo | 1;
  922. target_write_u32(target, comparator_list[fp_num].fpcr_address,
  923. comparator_list[fp_num].fpcr_value);
  924. LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
  925. fp_num,
  926. comparator_list[fp_num].fpcr_value);
  927. if (!cortex_m3->fpb_enabled) {
  928. LOG_DEBUG("FPB wasn't enabled, do it now");
  929. target_write_u32(target, FP_CTRL, 3);
  930. }
  931. } else if (breakpoint->type == BKPT_SOFT) {
  932. uint8_t code[4];
  933. /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
  934. * semihosting; don't use that. Otherwise the BKPT
  935. * parameter is arbitrary.
  936. */
  937. buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
  938. retval = target_read_memory(target,
  939. breakpoint->address & 0xFFFFFFFE,
  940. breakpoint->length, 1,
  941. breakpoint->orig_instr);
  942. if (retval != ERROR_OK)
  943. return retval;
  944. retval = target_write_memory(target,
  945. breakpoint->address & 0xFFFFFFFE,
  946. breakpoint->length, 1,
  947. code);
  948. if (retval != ERROR_OK)
  949. return retval;
  950. breakpoint->set = true;
  951. }
  952. LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
  953. breakpoint->unique_id,
  954. (int)(breakpoint->type),
  955. breakpoint->address,
  956. breakpoint->length,
  957. breakpoint->set);
  958. return ERROR_OK;
  959. }
  960. int cortex_m3_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
  961. {
  962. int retval;
  963. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  964. struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
  965. if (!breakpoint->set) {
  966. LOG_WARNING("breakpoint not set");
  967. return ERROR_OK;
  968. }
  969. LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
  970. breakpoint->unique_id,
  971. (int)(breakpoint->type),
  972. breakpoint->address,
  973. breakpoint->length,
  974. breakpoint->set);
  975. if (breakpoint->type == BKPT_HARD) {
  976. int fp_num = breakpoint->set - 1;
  977. if ((fp_num < 0) || (fp_num >= cortex_m3->fp_num_code)) {
  978. LOG_DEBUG("Invalid FP Comparator number in breakpoint");
  979. return ERROR_OK;
  980. }
  981. comparator_list[fp_num].used = 0;
  982. comparator_list[fp_num].fpcr_value = 0;
  983. target_write_u32(target, comparator_list[fp_num].fpcr_address,
  984. comparator_list[fp_num].fpcr_value);
  985. } else {
  986. /* restore original instruction (kept in target endianness) */
  987. if (breakpoint->length == 4) {
  988. retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 4, 1,
  989. breakpoint->orig_instr);
  990. if (retval != ERROR_OK)
  991. return retval;
  992. } else {
  993. retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 2, 1,
  994. breakpoint->orig_instr);
  995. if (retval != ERROR_OK)
  996. return retval;
  997. }
  998. }
  999. breakpoint->set = false;
  1000. return ERROR_OK;
  1001. }
  1002. int cortex_m3_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
  1003. {
  1004. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  1005. if (cortex_m3->auto_bp_type) {
  1006. breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
  1007. #ifdef ARMV7_GDB_HACKS
  1008. if (breakpoint->length != 2) {
  1009. /* XXX Hack: Replace all breakpoints with length != 2 with
  1010. * a hardware breakpoint. */
  1011. breakpoint->type = BKPT_HARD;
  1012. breakpoint->length = 2;
  1013. }
  1014. #endif
  1015. }
  1016. if (breakpoint->type != BKPT_TYPE_BY_ADDR(breakpoint->address)) {
  1017. if (breakpoint->type == BKPT_HARD) {
  1018. LOG_INFO("flash patch comparator requested outside code memory region");
  1019. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1020. }
  1021. if (breakpoint->type == BKPT_SOFT) {
  1022. LOG_INFO("soft breakpoint requested in code (flash) memory region");
  1023. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1024. }
  1025. }
  1026. if ((breakpoint->type == BKPT_HARD) && (cortex_m3->fp_code_available < 1)) {
  1027. LOG_INFO("no flash patch comparator unit available for hardware breakpoint");
  1028. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1029. }
  1030. if ((breakpoint->length != 2)) {
  1031. LOG_INFO("only breakpoints of two bytes length supported");
  1032. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1033. }
  1034. if (breakpoint->type == BKPT_HARD)
  1035. cortex_m3->fp_code_available--;
  1036. return cortex_m3_set_breakpoint(target, breakpoint);
  1037. }
  1038. int cortex_m3_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
  1039. {
  1040. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  1041. /* REVISIT why check? FBP can be updated with core running ... */
  1042. if (target->state != TARGET_HALTED) {
  1043. LOG_WARNING("target not halted");
  1044. return ERROR_TARGET_NOT_HALTED;
  1045. }
  1046. if (cortex_m3->auto_bp_type)
  1047. breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
  1048. if (breakpoint->set)
  1049. cortex_m3_unset_breakpoint(target, breakpoint);
  1050. if (breakpoint->type == BKPT_HARD)
  1051. cortex_m3->fp_code_available++;
  1052. return ERROR_OK;
  1053. }
  1054. int cortex_m3_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
  1055. {
  1056. int dwt_num = 0;
  1057. uint32_t mask, temp;
  1058. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  1059. /* watchpoint params were validated earlier */
  1060. mask = 0;
  1061. temp = watchpoint->length;
  1062. while (temp) {
  1063. temp >>= 1;
  1064. mask++;
  1065. }
  1066. mask--;
  1067. /* REVISIT Don't fully trust these "not used" records ... users
  1068. * may set up breakpoints by hand, e.g. dual-address data value
  1069. * watchpoint using comparator #1; comparator #0 matching cycle
  1070. * count; send data trace info through ITM and TPIU; etc
  1071. */
  1072. struct cortex_m3_dwt_comparator *comparator;
  1073. for (comparator = cortex_m3->dwt_comparator_list;
  1074. comparator->used && dwt_num < cortex_m3->dwt_num_comp;
  1075. comparator++, dwt_num++)
  1076. continue;
  1077. if (dwt_num >= cortex_m3->dwt_num_comp) {
  1078. LOG_ERROR("Can not find free DWT Comparator");
  1079. return ERROR_FAIL;
  1080. }
  1081. comparator->used = 1;
  1082. watchpoint->set = dwt_num + 1;
  1083. comparator->comp = watchpoint->address;
  1084. target_write_u32(target, comparator->dwt_comparator_address + 0,
  1085. comparator->comp);
  1086. comparator->mask = mask;
  1087. target_write_u32(target, comparator->dwt_comparator_address + 4,
  1088. comparator->mask);
  1089. switch (watchpoint->rw) {
  1090. case WPT_READ:
  1091. comparator->function = 5;
  1092. break;
  1093. case WPT_WRITE:
  1094. comparator->function = 6;
  1095. break;
  1096. case WPT_ACCESS:
  1097. comparator->function = 7;
  1098. break;
  1099. }
  1100. target_write_u32(target, comparator->dwt_comparator_address + 8,
  1101. comparator->function);
  1102. LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
  1103. watchpoint->unique_id, dwt_num,
  1104. (unsigned) comparator->comp,
  1105. (unsigned) comparator->mask,
  1106. (unsigned) comparator->function);
  1107. return ERROR_OK;
  1108. }
  1109. int cortex_m3_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
  1110. {
  1111. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  1112. struct cortex_m3_dwt_comparator *comparator;
  1113. int dwt_num;
  1114. if (!watchpoint->set) {
  1115. LOG_WARNING("watchpoint (wpid: %d) not set",
  1116. watchpoint->unique_id);
  1117. return ERROR_OK;
  1118. }
  1119. dwt_num = watchpoint->set - 1;
  1120. LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
  1121. watchpoint->unique_id, dwt_num,
  1122. (unsigned) watchpoint->address);
  1123. if ((dwt_num < 0) || (dwt_num >= cortex_m3->dwt_num_comp)) {
  1124. LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
  1125. return ERROR_OK;
  1126. }
  1127. comparator = cortex_m3->dwt_comparator_list + dwt_num;
  1128. comparator->used = 0;
  1129. comparator->function = 0;
  1130. target_write_u32(target, comparator->dwt_comparator_address + 8,
  1131. comparator->function);
  1132. watchpoint->set = false;
  1133. return ERROR_OK;
  1134. }
  1135. int cortex_m3_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
  1136. {
  1137. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  1138. if (cortex_m3->dwt_comp_available < 1) {
  1139. LOG_DEBUG("no comparators?");
  1140. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1141. }
  1142. /* hardware doesn't support data value masking */
  1143. if (watchpoint->mask != ~(uint32_t)0) {
  1144. LOG_DEBUG("watchpoint value masks not supported");
  1145. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1146. }
  1147. /* hardware allows address masks of up to 32K */
  1148. unsigned mask;
  1149. for (mask = 0; mask < 16; mask++) {
  1150. if ((1u << mask) == watchpoint->length)
  1151. break;
  1152. }
  1153. if (mask == 16) {
  1154. LOG_DEBUG("unsupported watchpoint length");
  1155. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1156. }
  1157. if (watchpoint->address & ((1 << mask) - 1)) {
  1158. LOG_DEBUG("watchpoint address is unaligned");
  1159. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1160. }
  1161. /* Caller doesn't seem to be able to describe watching for data
  1162. * values of zero; that flags "no value".
  1163. *
  1164. * REVISIT This DWT may well be able to watch for specific data
  1165. * values. Requires comparator #1 to set DATAVMATCH and match
  1166. * the data, and another comparator (DATAVADDR0) matching addr.
  1167. */
  1168. if (watchpoint->value) {
  1169. LOG_DEBUG("data value watchpoint not YET supported");
  1170. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1171. }
  1172. cortex_m3->dwt_comp_available--;
  1173. LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
  1174. return ERROR_OK;
  1175. }
  1176. int cortex_m3_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
  1177. {
  1178. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  1179. /* REVISIT why check? DWT can be updated with core running ... */
  1180. if (target->state != TARGET_HALTED) {
  1181. LOG_WARNING("target not halted");
  1182. return ERROR_TARGET_NOT_HALTED;
  1183. }
  1184. if (watchpoint->set)
  1185. cortex_m3_unset_watchpoint(target, watchpoint);
  1186. cortex_m3->dwt_comp_available++;
  1187. LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
  1188. return ERROR_OK;
  1189. }
  1190. void cortex_m3_enable_watchpoints(struct target *target)
  1191. {
  1192. struct watchpoint *watchpoint = target->watchpoints;
  1193. /* set any pending watchpoints */
  1194. while (watchpoint) {
  1195. if (!watchpoint->set)
  1196. cortex_m3_set_watchpoint(target, watchpoint);
  1197. watchpoint = watchpoint->next;
  1198. }
  1199. }
  1200. static int cortex_m3_load_core_reg_u32(struct target *target,
  1201. enum armv7m_regtype type, uint32_t num, uint32_t *value)
  1202. {
  1203. int retval;
  1204. struct armv7m_common *armv7m = target_to_armv7m(target);
  1205. struct adiv5_dap *swjdp = armv7m->arm.dap;
  1206. /* NOTE: we "know" here that the register identifiers used
  1207. * in the v7m header match the Cortex-M3 Debug Core Register
  1208. * Selector values for R0..R15, xPSR, MSP, and PSP.
  1209. */
  1210. switch (num) {
  1211. case 0 ... 18:
  1212. /* read a normal core register */
  1213. retval = cortexm3_dap_read_coreregister_u32(swjdp, value, num);
  1214. if (retval != ERROR_OK) {
  1215. LOG_ERROR("JTAG failure %i", retval);
  1216. return ERROR_JTAG_DEVICE_ERROR;
  1217. }
  1218. LOG_DEBUG("load from core reg %i value 0x%" PRIx32 "", (int)num, *value);
  1219. break;
  1220. case ARMV7M_PRIMASK:
  1221. case ARMV7M_BASEPRI:
  1222. case ARMV7M_FAULTMASK:
  1223. case ARMV7M_CONTROL:
  1224. /* Cortex-M3 packages these four registers as bitfields
  1225. * in one Debug Core register. So say r0 and r2 docs;
  1226. * it was removed from r1 docs, but still works.
  1227. */
  1228. cortexm3_dap_read_coreregister_u32(swjdp, value, 20);
  1229. switch (num) {
  1230. case ARMV7M_PRIMASK:
  1231. *value = buf_get_u32((uint8_t *)value, 0, 1);
  1232. break;
  1233. case ARMV7M_BASEPRI:
  1234. *value = buf_get_u32((uint8_t *)value, 8, 8);
  1235. break;
  1236. case ARMV7M_FAULTMASK:
  1237. *value = buf_get_u32((uint8_t *)value, 16, 1);
  1238. break;
  1239. case ARMV7M_CONTROL:
  1240. *value = buf_get_u32((uint8_t *)value, 24, 2);
  1241. break;
  1242. }
  1243. LOG_DEBUG("load from special reg %i value 0x%" PRIx32 "", (int)num, *value);
  1244. break;
  1245. default:
  1246. return ERROR_COMMAND_SYNTAX_ERROR;
  1247. }
  1248. return ERROR_OK;
  1249. }
  1250. static int cortex_m3_store_core_reg_u32(struct target *target,
  1251. enum armv7m_regtype type, uint32_t num, uint32_t value)
  1252. {
  1253. int retval;
  1254. uint32_t reg;
  1255. struct armv7m_common *armv7m = target_to_armv7m(target);
  1256. struct adiv5_dap *swjdp = armv7m->arm.dap;
  1257. #ifdef ARMV7_GDB_HACKS
  1258. /* If the LR register is being modified, make sure it will put us
  1259. * in "thumb" mode, or an INVSTATE exception will occur. This is a
  1260. * hack to deal with the fact that gdb will sometimes "forge"
  1261. * return addresses, and doesn't set the LSB correctly (i.e., when
  1262. * printing expressions containing function calls, it sets LR = 0.)
  1263. * Valid exception return codes have bit 0 set too.
  1264. */
  1265. if (num == ARMV7M_R14)
  1266. value |= 0x01;
  1267. #endif
  1268. /* NOTE: we "know" here that the register identifiers used
  1269. * in the v7m header match the Cortex-M3 Debug Core Register
  1270. * Selector values for R0..R15, xPSR, MSP, and PSP.
  1271. */
  1272. switch (num) {
  1273. case 0 ... 18:
  1274. retval = cortexm3_dap_write_coreregister_u32(swjdp, value, num);
  1275. if (retval != ERROR_OK) {
  1276. struct reg *r;
  1277. LOG_ERROR("JTAG failure");
  1278. r = armv7m->core_cache->reg_list + num;
  1279. r->dirty = r->valid;
  1280. return ERROR_JTAG_DEVICE_ERROR;
  1281. }
  1282. LOG_DEBUG("write core reg %i value 0x%" PRIx32 "", (int)num, value);
  1283. break;
  1284. case ARMV7M_PRIMASK:
  1285. case ARMV7M_BASEPRI:
  1286. case ARMV7M_FAULTMASK:
  1287. case ARMV7M_CONTROL:
  1288. /* Cortex-M3 packages these four registers as bitfields
  1289. * in one Debug Core register. So say r0 and r2 docs;
  1290. * it was removed from r1 docs, but still works.
  1291. */
  1292. cortexm3_dap_read_coreregister_u32(swjdp, &reg, 20);
  1293. switch (num) {
  1294. case ARMV7M_PRIMASK:
  1295. buf_set_u32((uint8_t *)&reg, 0, 1, value);
  1296. break;
  1297. case ARMV7M_BASEPRI:
  1298. buf_set_u32((uint8_t *)&reg, 8, 8, value);
  1299. break;
  1300. case ARMV7M_FAULTMASK:
  1301. buf_set_u32((uint8_t *)&reg, 16, 1, value);
  1302. break;
  1303. case ARMV7M_CONTROL:
  1304. buf_set_u32((uint8_t *)&reg, 24, 2, value);
  1305. break;
  1306. }
  1307. cortexm3_dap_write_coreregister_u32(swjdp, reg, 20);
  1308. LOG_DEBUG("write special reg %i value 0x%" PRIx32 " ", (int)num, value);
  1309. break;
  1310. default:
  1311. return ERROR_COMMAND_SYNTAX_ERROR;
  1312. }
  1313. return ERROR_OK;
  1314. }
  1315. static int cortex_m3_read_memory(struct target *target, uint32_t address,
  1316. uint32_t size, uint32_t count, uint8_t *buffer)
  1317. {
  1318. struct armv7m_common *armv7m = target_to_armv7m(target);
  1319. struct adiv5_dap *swjdp = armv7m->arm.dap;
  1320. int retval = ERROR_COMMAND_SYNTAX_ERROR;
  1321. if (armv7m->arm.is_armv6m) {
  1322. /* armv6m does not handle unaligned memory access */
  1323. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1324. return ERROR_TARGET_UNALIGNED_ACCESS;
  1325. }
  1326. /* cortex_m3 handles unaligned memory access */
  1327. if (count && buffer) {
  1328. switch (size) {
  1329. case 4:
  1330. retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
  1331. break;
  1332. case 2:
  1333. retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
  1334. break;
  1335. case 1:
  1336. retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
  1337. break;
  1338. }
  1339. }
  1340. return retval;
  1341. }
  1342. static int cortex_m3_write_memory(struct target *target, uint32_t address,
  1343. uint32_t size, uint32_t count, const uint8_t *buffer)
  1344. {
  1345. struct armv7m_common *armv7m = target_to_armv7m(target);
  1346. struct adiv5_dap *swjdp = armv7m->arm.dap;
  1347. int retval = ERROR_COMMAND_SYNTAX_ERROR;
  1348. if (armv7m->arm.is_armv6m) {
  1349. /* armv6m does not handle unaligned memory access */
  1350. if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
  1351. return ERROR_TARGET_UNALIGNED_ACCESS;
  1352. }
  1353. if (count && buffer) {
  1354. switch (size) {
  1355. case 4:
  1356. retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
  1357. break;
  1358. case 2:
  1359. retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
  1360. break;
  1361. case 1:
  1362. retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
  1363. break;
  1364. }
  1365. }
  1366. return retval;
  1367. }
  1368. static int cortex_m3_bulk_write_memory(struct target *target, uint32_t address,
  1369. uint32_t count, const uint8_t *buffer)
  1370. {
  1371. return cortex_m3_write_memory(target, address, 4, count, buffer);
  1372. }
  1373. static int cortex_m3_init_target(struct command_context *cmd_ctx,
  1374. struct target *target)
  1375. {
  1376. armv7m_build_reg_cache(target);
  1377. return ERROR_OK;
  1378. }
  1379. /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
  1380. * on r/w if the core is not running, and clear on resume or reset ... or
  1381. * at least, in a post_restore_context() method.
  1382. */
/* Per-register backing store for one DWT register exposed through the
 * generic register cache; allocated by cortex_m3_dwt_addreg(). */
struct dwt_reg_state {
	struct target *target;	/* target this register belongs to */
	uint32_t addr;		/* memory-mapped address of the DWT register */
	uint32_t value;		/* scratch/cache */
};
  1388. static int cortex_m3_dwt_get_reg(struct reg *reg)
  1389. {
  1390. struct dwt_reg_state *state = reg->arch_info;
  1391. return target_read_u32(state->target, state->addr, &state->value);
  1392. }
  1393. static int cortex_m3_dwt_set_reg(struct reg *reg, uint8_t *buf)
  1394. {
  1395. struct dwt_reg_state *state = reg->arch_info;
  1396. return target_write_u32(state->target, state->addr,
  1397. buf_get_u32(buf, 0, reg->size));
  1398. }
/* Static description of one DWT register: its address, display name,
 * and bit width as exposed in the register cache. */
struct dwt_reg {
	uint32_t addr;
	char *name;
	unsigned size;
};
/* DWT registers present regardless of the number of comparators */
static struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
/* Per-comparator register triple (COMPx/MASKx/FUNCTIONx); each
 * comparator occupies a 0x10-byte stride in the DWT register map. */
static struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
#undef DWT_COMPARATOR
};
/* Accessors shared by every DWT register exposed in the cache */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m3_dwt_get_reg,
	.set = cortex_m3_dwt_set_reg,
};
  1427. static void cortex_m3_dwt_addreg(struct target *t, struct reg *r, struct dwt_reg *d)
  1428. {
  1429. struct dwt_reg_state *state;
  1430. state = calloc(1, sizeof *state);
  1431. if (!state)
  1432. return;
  1433. state->addr = d->addr;
  1434. state->target = t;
  1435. r->name = d->name;
  1436. r->size = d->size;
  1437. r->value = &state->value;
  1438. r->arch_info = state;
  1439. r->type = &dwt_reg_type;
  1440. }
/**
 * Probe the optional DWT unit and expose it: record how many
 * comparators it has, allocate the comparator bookkeeping list, and
 * append a "cortex-m3 dwt registers" cache to the target's register
 * caches.  On any allocation failure everything is unwound and
 * dwt_num_comp is forced to 0 so the DWT is treated as absent.
 */
void cortex_m3_dwt_setup(struct cortex_m3_common *cm3, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m3_dwt_comparator *comparator;
	int reg, i;

	/* a DWT_CTRL that reads as zero means no DWT is fitted */
	target_read_u32(target, DWT_CTRL, &dwtcr);
	if (!dwtcr) {
		LOG_DEBUG("no DWT");
		return;
	}

	/* DWT_CTRL bits [31:28] hold the number of comparators */
	cm3->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm3->dwt_comp_available = cm3->dwt_num_comp;
	cm3->dwt_comparator_list = calloc(cm3->dwt_num_comp,
			sizeof(struct cortex_m3_dwt_comparator));
	if (!cm3->dwt_comparator_list) {
fail0:
		cm3->dwt_num_comp = 0;
		LOG_ERROR("out of mem");
		return;
	}

	cache = calloc(1, sizeof *cache);
	if (!cache) {
fail1:
		free(cm3->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "cortex-m3 dwt registers";
	cache->num_regs = 2 + cm3->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof *cache->reg_list);
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	/* first the two base registers (DWT_CTRL, DWT_CYCCNT) ... */
	for (reg = 0; reg < 2; reg++)
		cortex_m3_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	/* ... then comp/mask/function for each comparator */
	comparator = cm3->dwt_comparator_list;
	for (i = 0; i < cm3->dwt_num_comp; i++, comparator++) {
		int j;

		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m3_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);
	}

	/* append the new cache to the end of the target's cache chain */
	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm3->dwt_cache = cache;

	LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm3->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT:  if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
/* Media and VFP Feature registers, read to detect the optional
 * FPv4-SP floating-point extension on Cortex-M4 parts. */
#define MVFR0 0xe000ef40
#define MVFR1 0xe000ef44

/* MVFR0/MVFR1 ID values reported by a Cortex-M4 with the FPU fitted */
#define MVFR0_DEFAULT_M4 0x10110021
#define MVFR1_DEFAULT_M4 0x11000011
/**
 * Examine the connected core: initialize the debug port (unless driven
 * by stlink), identify the CPU from CPUID, detect the optional FPU on
 * Cortex-M4, and enumerate the optional FPB (breakpoints) and DWT
 * (watchpoints) units.  The probing body runs only once per target.
 */
int cortex_m3_examine(struct target *target)
{
	int retval;
	uint32_t cpuid, fpcr, mvfr0, mvfr1;
	int i;
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* stlink shares the examine handler but does not support
	 * all its calls */
	if (!armv7m->stlink) {
		retval = ahbap_debugport_init(swjdp);
		if (retval != ERROR_OK)
			return retval;
	}

	if (!target_was_examined(target)) {
		target_set_examined(target);

		/* Read from Device Identification Registers */
		retval = target_read_u32(target, CPUID, &cpuid);
		if (retval != ERROR_OK)
			return retval;

		/* Get CPU Type: CPUID bits [7:4] carry the Cortex-M family digit */
		i = (cpuid >> 4) & 0xf;

		LOG_DEBUG("Cortex-M%d r%" PRId8 "p%" PRId8 " processor detected",
			i, (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
		LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);

		/* test for floating point feature on cortex-m4 */
		if (i == 4) {
			/* NOTE(review): read status of MVFR0/MVFR1 is not checked */
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
				LOG_DEBUG("Cortex-M%d floating point feature FPv4_SP found", i);
				armv7m->fp_feature = FPv4_SP;
			}
		} else if (i == 0) {
			/* Cortex-M0 does not support unaligned memory access */
			armv7m->arm.is_armv6m = true;
		}

		if (i == 4 || i == 3) {
			/* Cortex-M3/M4 has 4096 bytes autoincrement range */
			armv7m->dap.tar_autoincr_block = (1 << 12);
		}

		/* NOTE: FPB and DWT are both optional. */

		/* Setup FPB */
		target_read_u32(target, FP_CTRL, &fpcr);
		cortex_m3->auto_bp_type = 1;
		/* number of code comparators comes from FP_CTRL bits
		 * [14:12] and [7:4]; literal comparators from bits [11:8] */
		cortex_m3->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
		cortex_m3->fp_num_lit = (fpcr >> 8) & 0xF;
		cortex_m3->fp_code_available = cortex_m3->fp_num_code;
		/* NOTE(review): this calloc result is not checked before use below */
		cortex_m3->fp_comparator_list = calloc(
				cortex_m3->fp_num_code + cortex_m3->fp_num_lit,
				sizeof(struct cortex_m3_fp_comparator));
		cortex_m3->fpb_enabled = fpcr & 1;
		for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
			cortex_m3->fp_comparator_list[i].type =
				(i < cortex_m3->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
			cortex_m3->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
		}
		LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
			fpcr,
			cortex_m3->fp_num_code,
			cortex_m3->fp_num_lit);

		/* Setup DWT */
		cortex_m3_dwt_setup(cortex_m3, target);

		/* These hardware breakpoints only work for code in flash! */
		LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
			target_name(target),
			cortex_m3->fp_num_code,
			cortex_m3->dwt_num_comp);
	}

	return ERROR_OK;
}
  1575. static int cortex_m3_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
  1576. {
  1577. uint16_t dcrdr;
  1578. int retval;
  1579. mem_ap_read_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
  1580. *ctrl = (uint8_t)dcrdr;
  1581. *value = (uint8_t)(dcrdr >> 8);
  1582. LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
  1583. /* write ack back to software dcc register
  1584. * signify we have read data */
  1585. if (dcrdr & (1 << 0)) {
  1586. dcrdr = 0;
  1587. retval = mem_ap_write_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
  1588. if (retval != ERROR_OK)
  1589. return retval;
  1590. }
  1591. return ERROR_OK;
  1592. }
  1593. static int cortex_m3_target_request_data(struct target *target,
  1594. uint32_t size, uint8_t *buffer)
  1595. {
  1596. struct armv7m_common *armv7m = target_to_armv7m(target);
  1597. struct adiv5_dap *swjdp = armv7m->arm.dap;
  1598. uint8_t data;
  1599. uint8_t ctrl;
  1600. uint32_t i;
  1601. for (i = 0; i < (size * 4); i++) {
  1602. cortex_m3_dcc_read(swjdp, &data, &ctrl);
  1603. buffer[i] = data;
  1604. }
  1605. return ERROR_OK;
  1606. }
  1607. static int cortex_m3_handle_target_request(void *priv)
  1608. {
  1609. struct target *target = priv;
  1610. if (!target_was_examined(target))
  1611. return ERROR_OK;
  1612. struct armv7m_common *armv7m = target_to_armv7m(target);
  1613. struct adiv5_dap *swjdp = armv7m->arm.dap;
  1614. if (!target->dbg_msg_enabled)
  1615. return ERROR_OK;
  1616. if (target->state == TARGET_RUNNING) {
  1617. uint8_t data;
  1618. uint8_t ctrl;
  1619. cortex_m3_dcc_read(swjdp, &data, &ctrl);
  1620. /* check if we have data */
  1621. if (ctrl & (1 << 0)) {
  1622. uint32_t request;
  1623. /* we assume target is quick enough */
  1624. request = data;
  1625. cortex_m3_dcc_read(swjdp, &data, &ctrl);
  1626. request |= (data << 8);
  1627. cortex_m3_dcc_read(swjdp, &data, &ctrl);
  1628. request |= (data << 16);
  1629. cortex_m3_dcc_read(swjdp, &data, &ctrl);
  1630. request |= (data << 24);
  1631. target_request(target, request);
  1632. }
  1633. }
  1634. return ERROR_OK;
  1635. }
  1636. static int cortex_m3_init_arch_info(struct target *target,
  1637. struct cortex_m3_common *cortex_m3, struct jtag_tap *tap)
  1638. {
  1639. int retval;
  1640. struct armv7m_common *armv7m = &cortex_m3->armv7m;
  1641. armv7m_init_arch_info(target, armv7m);
  1642. /* prepare JTAG information for the new target */
  1643. cortex_m3->jtag_info.tap = tap;
  1644. cortex_m3->jtag_info.scann_size = 4;
  1645. /* default reset mode is to use srst if fitted
  1646. * if not it will use CORTEX_M3_RESET_VECTRESET */
  1647. cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
  1648. armv7m->arm.dap = &armv7m->dap;
  1649. /* Leave (only) generic DAP stuff for debugport_init(); */
  1650. armv7m->dap.jtag_info = &cortex_m3->jtag_info;
  1651. armv7m->dap.memaccess_tck = 8;
  1652. /* Cortex-M3/M4 has 4096 bytes autoincrement range
  1653. * but set a safe default to 1024 to support Cortex-M0
  1654. * this will be changed in cortex_m3_examine if a M3/M4 is detected */
  1655. armv7m->dap.tar_autoincr_block = (1 << 10);
  1656. /* register arch-specific functions */
  1657. armv7m->examine_debug_reason = cortex_m3_examine_debug_reason;
  1658. armv7m->post_debug_entry = NULL;
  1659. armv7m->pre_restore_context = NULL;
  1660. armv7m->load_core_reg_u32 = cortex_m3_load_core_reg_u32;
  1661. armv7m->store_core_reg_u32 = cortex_m3_store_core_reg_u32;
  1662. target_register_timer_callback(cortex_m3_handle_target_request, 1, 1, target);
  1663. retval = arm_jtag_setup_connection(&cortex_m3->jtag_info);
  1664. if (retval != ERROR_OK)
  1665. return retval;
  1666. return ERROR_OK;
  1667. }
  1668. static int cortex_m3_target_create(struct target *target, Jim_Interp *interp)
  1669. {
  1670. struct cortex_m3_common *cortex_m3 = calloc(1, sizeof(struct cortex_m3_common));
  1671. cortex_m3->common_magic = CORTEX_M3_COMMON_MAGIC;
  1672. cortex_m3_init_arch_info(target, cortex_m3, target->tap);
  1673. return ERROR_OK;
  1674. }
  1675. /*--------------------------------------------------------------------------*/
  1676. static int cortex_m3_verify_pointer(struct command_context *cmd_ctx,
  1677. struct cortex_m3_common *cm3)
  1678. {
  1679. if (cm3->common_magic != CORTEX_M3_COMMON_MAGIC) {
  1680. command_print(cmd_ctx, "target is not a Cortex-M3");
  1681. return ERROR_TARGET_INVALID;
  1682. }
  1683. return ERROR_OK;
  1684. }
  1685. /*
  1686. * Only stuff below this line should need to verify that its target
  1687. * is a Cortex-M3. Everything else should have indirected through the
  1688. * cortexm3_target structure, which is only used with CM3 targets.
  1689. */
/* Map between the user-facing vector catch names accepted by the
 * "cortex_m3 vector_catch" command and the corresponding VC_* enable
 * bits in DEMCR (ARMv7-M Debug Exception and Monitor Control Register). */
static const struct {
	char name[10];
	unsigned mask;
} vec_ids[] = {
	{ "hard_err", VC_HARDERR, },
	{ "int_err", VC_INTERR, },
	{ "bus_err", VC_BUSERR, },
	{ "state_err", VC_STATERR, },
	{ "chk_err", VC_CHKERR, },
	{ "nocp_err", VC_NOCPERR, },
	{ "mm_err", VC_MMERR, },
	{ "reset", VC_CORERESET, },
};
/* "cortex_m3 vector_catch" command handler: with no arguments, show the
 * current per-vector catch/ignore state read back from DEMCR; with
 * arguments ("all", "none", or a list of vec_ids names) update the
 * vector catch enables and then display the resulting state. */
COMMAND_HANDLER(handle_cortex_m3_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct armv7m_common *armv7m = &cortex_m3->armv7m;
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	uint32_t demcr = 0;
	int retval;

	retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
	if (retval != ERROR_OK)
		return retval;

	/* read current DEMCR so unset bits are preserved and the final
	 * display reflects hardware state */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		/* "all"/"none" shortcuts are only valid as a lone argument */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* otherwise OR together the mask of each named vector;
		 * NOTE: this consumes CMD_ARGC, so it must run after the
		 * single-argument checks above */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			/* loop fell through without a match: unknown name */
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		/* replace only the low half (vector catch bits) of DEMCR */
		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(swjdp, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		/* read back so the display below shows what actually stuck */
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* display catch/ignore status of every known vector */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD_CTX, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
  1764. COMMAND_HANDLER(handle_cortex_m3_mask_interrupts_command)
  1765. {
  1766. struct target *target = get_current_target(CMD_CTX);
  1767. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  1768. int retval;
  1769. static const Jim_Nvp nvp_maskisr_modes[] = {
  1770. { .name = "auto", .value = CORTEX_M3_ISRMASK_AUTO },
  1771. { .name = "off", .value = CORTEX_M3_ISRMASK_OFF },
  1772. { .name = "on", .value = CORTEX_M3_ISRMASK_ON },
  1773. { .name = NULL, .value = -1 },
  1774. };
  1775. const Jim_Nvp *n;
  1776. retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
  1777. if (retval != ERROR_OK)
  1778. return retval;
  1779. if (target->state != TARGET_HALTED) {
  1780. command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
  1781. return ERROR_OK;
  1782. }
  1783. if (CMD_ARGC > 0) {
  1784. n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
  1785. if (n->name == NULL)
  1786. return ERROR_COMMAND_SYNTAX_ERROR;
  1787. cortex_m3->isrmasking_mode = n->value;
  1788. if (cortex_m3->isrmasking_mode == CORTEX_M3_ISRMASK_ON)
  1789. cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
  1790. else
  1791. cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
  1792. }
  1793. n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_m3->isrmasking_mode);
  1794. command_print(CMD_CTX, "cortex_m3 interrupt mask %s", n->name);
  1795. return ERROR_OK;
  1796. }
  1797. COMMAND_HANDLER(handle_cortex_m3_reset_config_command)
  1798. {
  1799. struct target *target = get_current_target(CMD_CTX);
  1800. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  1801. int retval;
  1802. char *reset_config;
  1803. retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
  1804. if (retval != ERROR_OK)
  1805. return retval;
  1806. if (CMD_ARGC > 0) {
  1807. if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
  1808. cortex_m3->soft_reset_config = CORTEX_M3_RESET_SYSRESETREQ;
  1809. else if (strcmp(*CMD_ARGV, "vectreset") == 0)
  1810. cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
  1811. }
  1812. switch (cortex_m3->soft_reset_config) {
  1813. case CORTEX_M3_RESET_SYSRESETREQ:
  1814. reset_config = "sysresetreq";
  1815. break;
  1816. case CORTEX_M3_RESET_VECTRESET:
  1817. reset_config = "vectreset";
  1818. break;
  1819. default:
  1820. reset_config = "unknown";
  1821. break;
  1822. }
  1823. command_print(CMD_CTX, "cortex_m3 reset_config %s", reset_config);
  1824. return ERROR_OK;
  1825. }
/* Cortex-M3 specific commands, registered under the "cortex_m3"
 * command group below. */
static const struct command_registration cortex_m3_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m3_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m3 interrupts",
		.usage = "['auto'|'on'|'off']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m3_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		.name = "reset_config",
		.handler = handle_cortex_m3_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		/* NOTE(review): 'srst' appears in usage but the handler only
		 * recognizes 'sysresetreq' and 'vectreset' — confirm intent */
		.usage = "['srst'|'sysresetreq'|'vectreset']",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: chain in the generic ARMv7-M commands
 * plus the Cortex-M3 specific "cortex_m3" command group above. */
static const struct command_registration cortex_m3_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.name = "cortex_m3",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M3 command group",
		.usage = "",
		.chain = cortex_m3_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Target driver vtable for Cortex-M3 (and compatible ARMv6-M/ARMv7-M)
 * cores; generic ARMv7-M operations are delegated to armv7m_* helpers,
 * core-specific ones to the cortex_m3_* functions in this file. */
struct target_type cortexm3_target = {
	.name = "cortex_m3",

	.poll = cortex_m3_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m3_target_request_data,

	.halt = cortex_m3_halt,
	.resume = cortex_m3_resume,
	.step = cortex_m3_step,

	.assert_reset = cortex_m3_assert_reset,
	.deassert_reset = cortex_m3_deassert_reset,
	.soft_reset_halt = cortex_m3_soft_reset_halt,

	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	.read_memory = cortex_m3_read_memory,
	.write_memory = cortex_m3_write_memory,
	.bulk_write_memory = cortex_m3_bulk_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	.add_breakpoint = cortex_m3_add_breakpoint,
	.remove_breakpoint = cortex_m3_remove_breakpoint,
	.add_watchpoint = cortex_m3_add_watchpoint,
	.remove_watchpoint = cortex_m3_remove_watchpoint,

	.commands = cortex_m3_command_handlers,
	.target_create = cortex_m3_target_create,
	.init_target = cortex_m3_init_target,
	.examine = cortex_m3_examine,
};