/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath
 *   Dominic.Rath@gmx.de
 *
 *   Copyright (C) 2006 by Magnus Lundin
 *   lundin@mlu.mine.nu
 *
 *   Copyright (C) 2008 by Spencer Oliver
 *   spen@spen-soft.co.uk
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *
 *   Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0)
 *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "cortex_m.h"
#include "target_request.h"
#include "target_type.h"
#include "arm_disassembler.h"
#include "register.h"
#include "arm_opcodes.h"
#include "arm_semihosting.h"
#include <helper/time_support.h>

/* NOTE: most of this should work fine for the Cortex-M1 and
 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
 * Some differences: M0/M1 doesn't have FPB remapping or the
 * DWT tracing/profiling support.  (So the cycle counter will
 * not be usable; the other stuff isn't currently used here.)
 *
 * Although there are some workarounds for errata seen only in r0p0
 * silicon, such old parts are hard to find and thus not much tested
 * any longer.
 */

/**
 * Returns the type of breakpoint required by the address location.
 */
#define BKPT_TYPE_BY_ADDR(addr) ((addr) < 0x20000000 ? BKPT_HARD : BKPT_SOFT)
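/* Addresses below 0x20000000 fall in the ARMv7-M Code region, which the
 * FPB comparators can match; anything above (SRAM and beyond) has to use
 * a software BKPT instead.
 */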

/* forward declarations */
static int cortex_m3_set_breakpoint(struct target *target, struct breakpoint *breakpoint);
static int cortex_m3_unset_breakpoint(struct target *target, struct breakpoint *breakpoint);
static void cortex_m3_enable_watchpoints(struct target *target);
static int cortex_m3_store_core_reg_u32(struct target *target,
		enum armv7m_regtype type, uint32_t num, uint32_t value);

static int cortexm3_dap_read_coreregister_u32(struct adiv5_dap *swjdp,
		uint32_t *value, int regnum)
{
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
	if (retval != ERROR_OK)
		return retval;
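
	/* The DCRSR/DCRDR accesses below go through the MEM-AP banked data
	 * registers (BD0..BD3): TAR is set to the 16-byte-aligned base of the
	 * block and the low address bits select the word, so the register-select
	 * write and the data read can be queued and completed in one dap_run().
	 */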
	/* mem_ap_write_u32(swjdp, DCB_DCRSR, regnum); */
	retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
	if (retval != ERROR_OK)
		return retval;
	retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum);
	if (retval != ERROR_OK)
		return retval;

	/* mem_ap_read_u32(swjdp, DCB_DCRDR, value); */
	retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
	if (retval != ERROR_OK)
		return retval;
	retval = dap_queue_ap_read(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
	if (retval != ERROR_OK)
		return retval;

	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* restore DCB_DCRDR - this needs to be in a separate
	 * transaction otherwise the emulated DCC channel breaks */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);

	return retval;
}

static int cortexm3_dap_write_coreregister_u32(struct adiv5_dap *swjdp,
		uint32_t value, int regnum)
{
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
	if (retval != ERROR_OK)
		return retval;

	/* mem_ap_write_u32(swjdp, DCB_DCRDR, core_regs[i]); */
	retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
	if (retval != ERROR_OK)
		return retval;
	retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
	if (retval != ERROR_OK)
		return retval;

	/* mem_ap_write_u32(swjdp, DCB_DCRSR, i | DCRSR_WnR); */
	retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
	if (retval != ERROR_OK)
		return retval;
	retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum | DCRSR_WnR);
	if (retval != ERROR_OK)
		return retval;

	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* restore DCB_DCRDR - this needs to be in a separate
	 * transaction otherwise the emulated DCC channel breaks */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);

	return retval;
}

static int cortex_m3_write_debug_halt_mask(struct target *target,
		uint32_t mask_on, uint32_t mask_off)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;
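
	/* DHCSR reads return status flags in bits [31:16]; writes must carry
	 * the DBGKEY value there instead.  Strip the cached status bits and
	 * re-apply the key on every update.
	 */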
	/* mask off status bits */
	cortex_m3->dcb_dhcsr &= ~((0xFFFF << 16) | mask_off);
	/* create new register mask */
	cortex_m3->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;

	return mem_ap_write_atomic_u32(swjdp, DCB_DHCSR, cortex_m3->dcb_dhcsr);
}

static int cortex_m3_clear_halt(struct target *target)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;
	int retval;

	/* clear step if any */
	cortex_m3_write_debug_halt_mask(target, C_HALT, C_STEP);

	/* Read Debug Fault Status Register */
	retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR, &cortex_m3->nvic_dfsr);
	if (retval != ERROR_OK)
		return retval;

	/* Clear Debug Fault Status */
	retval = mem_ap_write_atomic_u32(swjdp, NVIC_DFSR, cortex_m3->nvic_dfsr);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m3->nvic_dfsr);

	return ERROR_OK;
}

static int cortex_m3_single_step_core(struct target *target)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;
	uint32_t dhcsr_save;
	int retval;

	/* backup dhcsr reg */
	dhcsr_save = cortex_m3->dcb_dhcsr;

	/* Mask interrupts before clearing halt, if not done already.  This avoids
	 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
	 * HALT can put the core into an unknown state.
	 */
	if (!(cortex_m3->dcb_dhcsr & C_MASKINTS))
	{
		retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
				DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
			DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG(" ");

	/* restore dhcsr reg */
	cortex_m3->dcb_dhcsr = dhcsr_save;
	cortex_m3_clear_halt(target);

	return ERROR_OK;
}

static int cortex_m3_endreset_event(struct target *target)
{
	int i;
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct armv7m_common *armv7m = &cortex_m3->armv7m;
	struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;
	struct cortex_m3_fp_comparator *fp_list = cortex_m3->fp_comparator_list;
	struct cortex_m3_dwt_comparator *dwt_list = cortex_m3->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Enable debug requests */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;
	if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN))
	{
		retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
		if (retval != ERROR_OK)
			return retval;
	}

	/* clear any interrupt masking */
	cortex_m3_write_debug_halt_mask(target, 0, C_MASKINTS);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
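	/* TRCENA gates access to the DWT and ITM; it must stay set for the
	 * watchpoint and trace support below to work at all.
	 */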
	retval = mem_ap_write_u32(swjdp, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FPB, DWT, etc) across reset...
	 */
	/* Enable FPB */
	retval = target_write_u32(target, FP_CTRL, 3);
	if (retval != ERROR_OK)
		return retval;

	cortex_m3->fpb_enabled = 1;

	/* Restore FPB registers */
	for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++)
	{
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers */
	for (i = 0; i < cortex_m3->dwt_num_comp; i++)
	{
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	register_cache_invalidate(cortex_m3->armv7m.core_cache);

	/* make sure we have latest dhcsr flags */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);

	return retval;
}

static int cortex_m3_examine_debug_reason(struct target *target)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);

	/* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason */
	/* only check the debug reason if we don't know it already */

	if ((target->debug_reason != DBG_REASON_DBGRQ)
			&& (target->debug_reason != DBG_REASON_SINGLESTEP))
	{
		if (cortex_m3->nvic_dfsr & DFSR_BKPT)
		{
			target->debug_reason = DBG_REASON_BREAKPOINT;
			if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
				target->debug_reason = DBG_REASON_WPTANDBKPT;
		}
		else if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
			target->debug_reason = DBG_REASON_WATCHPOINT;
		else if (cortex_m3->nvic_dfsr & DFSR_VCATCH)
			target->debug_reason = DBG_REASON_BREAKPOINT;
		else /* EXTERNAL, HALTED */
			target->debug_reason = DBG_REASON_UNDEFINED;
	}

	return ERROR_OK;
}

static int cortex_m3_examine_exception_reason(struct target *target)
{
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = &armv7m->dap;
	int retval;

	retval = mem_ap_read_u32(swjdp, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	switch (armv7m->exception_number)
	{
		case 2:	/* NMI */
			break;
		case 3:	/* Hard Fault */
			retval = mem_ap_read_atomic_u32(swjdp, NVIC_HFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			if (except_sr & 0x40000000)
			{
				retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &cfsr);
				if (retval != ERROR_OK)
					return retval;
			}
			break;
		case 4:	/* Memory Management */
			retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(swjdp, NVIC_MMFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 5:	/* Bus Fault */
			retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(swjdp, NVIC_BFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 6:	/* Usage Fault */
			retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 11:	/* SVCall */
			break;
		case 12:	/* Debug Monitor */
			retval = mem_ap_read_u32(swjdp, NVIC_DFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 14:	/* PendSV */
			break;
		case 15:	/* SysTick */
			break;
		default:
			except_sr = 0;
			break;
	}
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}

/* PSP is used in some thread modes */
static const int armv7m_psp_reg_map[17] = {
	ARMV7M_R0, ARMV7M_R1, ARMV7M_R2, ARMV7M_R3,
	ARMV7M_R4, ARMV7M_R5, ARMV7M_R6, ARMV7M_R7,
	ARMV7M_R8, ARMV7M_R9, ARMV7M_R10, ARMV7M_R11,
	ARMV7M_R12, ARMV7M_PSP, ARMV7M_R14, ARMV7M_PC,
	ARMV7M_xPSR,
};

/* MSP is used in handler and some thread modes */
static const int armv7m_msp_reg_map[17] = {
	ARMV7M_R0, ARMV7M_R1, ARMV7M_R2, ARMV7M_R3,
	ARMV7M_R4, ARMV7M_R5, ARMV7M_R6, ARMV7M_R7,
	ARMV7M_R8, ARMV7M_R9, ARMV7M_R10, ARMV7M_R11,
	ARMV7M_R12, ARMV7M_MSP, ARMV7M_R14, ARMV7M_PC,
	ARMV7M_xPSR,
};

static int cortex_m3_debug_entry(struct target *target)
{
	int i;
	uint32_t xPSR;
	int retval;
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct armv7m_common *armv7m = &cortex_m3->armv7m;
	struct arm *arm = &armv7m->arm;
	struct adiv5_dap *swjdp = &armv7m->dap;
	struct reg *r;

	LOG_DEBUG(" ");

	cortex_m3_clear_halt(target);
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;

	if ((retval = armv7m->examine_debug_reason(target)) != ERROR_OK)
		return retval;

	/* Examine target state and mode */
	/* First load registers accessible through the core debug port */
	int num_regs = armv7m->core_cache->num_regs;

	for (i = 0; i < num_regs; i++)
	{
		if (!armv7m->core_cache->reg_list[i].valid)
			armv7m->read_core_reg(target, i);
	}

	r = armv7m->core_cache->reg_list + ARMV7M_xPSR;
	xPSR = buf_get_u32(r->value, 0, 32);

#ifdef ARMV7_GDB_HACKS
	/* FIXME this breaks on scan chains with more than one Cortex-M3.
	 * Instead, each CM3 should have its own dummy value...
	 */
	/* copy real xpsr reg for gdb, setting thumb bit */
	buf_set_u32(armv7m_gdb_dummy_cpsr_value, 0, 32, xPSR);
	buf_set_u32(armv7m_gdb_dummy_cpsr_value, 5, 1, 1);
	armv7m_gdb_dummy_cpsr_reg.valid = r->valid;
	armv7m_gdb_dummy_cpsr_reg.dirty = r->dirty;
#endif

	/* For IT instructions xPSR must be reloaded on resume and cleared on debug exec */
	if (xPSR & 0xf00)
	{
		r->dirty = r->valid;
		cortex_m3_store_core_reg_u32(target, ARMV7M_REGISTER_CORE_GP, 16, xPSR & ~0xff);
	}

	/* Are we in an exception handler? */
	if (xPSR & 0x1FF)
	{
		armv7m->core_mode = ARMV7M_MODE_HANDLER;
		armv7m->exception_number = (xPSR & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	}
	else
	{
		unsigned control = buf_get_u32(armv7m->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 2);

		/* is this thread privileged? */
		armv7m->core_mode = control & 1;
		arm->core_mode = armv7m->core_mode
				? ARM_MODE_USER_THREAD
				: ARM_MODE_THREAD;

		/* which stack is it using? */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
	{
		cortex_m3_examine_exception_reason(target);
	}

	LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", target->state: %s",
		armv7m_mode_strings[armv7m->core_mode],
		*(uint32_t *)(arm->pc->value),
		target_state_name(target));

	if (armv7m->post_debug_entry)
	{
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}

static int cortex_m3_poll(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;

	/* Read from Debug Halting Control and Status Register */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK)
	{
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup.  See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m3->dcb_dhcsr & S_LOCKUP) {
		LOG_ERROR("%s -- clearing lockup after double fault",
			target_name(target));
		cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest of poll() (the "finally"
		 * equivalent), but still report this failure afterwards.
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
	}
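
	/* S_RESET_ST is sticky and clears on read, so the second read below
	 * tells whether the core is still held in reset or merely saw a reset
	 * since the last poll.
	 */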
	if (cortex_m3->dcb_dhcsr & S_RESET_ST)
	{
		/* check if still in reset */
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;

		if (cortex_m3->dcb_dhcsr & S_RESET_ST)
		{
			target->state = TARGET_RESET;
			return ERROR_OK;
		}
	}

	if (target->state == TARGET_RESET)
	{
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m3->dcb_dhcsr);
		cortex_m3_endreset_event(target);
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m3->dcb_dhcsr & S_HALT)
	{
		target->state = TARGET_HALTED;

		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET))
		{
			if ((retval = cortex_m3_debug_entry(target)) != ERROR_OK)
				return retval;

			if (arm_semihosting(target, &retval) != 0)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING)
		{
			LOG_DEBUG(" ");
			if ((retval = cortex_m3_debug_entry(target)) != ERROR_OK)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
	}

	/* REVISIT when S_SLEEP is set, it's in a Sleep or DeepSleep state.
	 * How best to model low power modes?
	 */

	if (target->state == TARGET_UNKNOWN)
	{
		/* check if processor is retiring instructions */
		if (cortex_m3->dcb_dhcsr & S_RETIRE_ST)
		{
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}

static int cortex_m3_halt(struct target *target)
{
	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	if (target->state == TARGET_HALTED)
	{
		LOG_DEBUG("target was already halted");
		return ERROR_OK;
	}

	if (target->state == TARGET_UNKNOWN)
	{
		LOG_WARNING("target was in unknown state when halt was requested");
	}

	if (target->state == TARGET_RESET)
	{
		if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst())
		{
			LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
			return ERROR_TARGET_FAILURE;
		}
		else
		{
			/* we came here in a reset_halt or reset_init sequence
			 * debug entry was already prepared in cortex_m3_prepare_reset_halt()
			 */
			target->debug_reason = DBG_REASON_DBGRQ;

			return ERROR_OK;
		}
	}

	/* Write to Debug Halting Control and Status Register */
	cortex_m3_write_debug_halt_mask(target, C_HALT, 0);

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}

static int cortex_m3_soft_reset_halt(struct target *target)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;
	uint32_t dcb_dhcsr = 0;
	int retval, timeout = 0;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(swjdp, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;
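
	/* VECTRESET resets only the processor core; the debug logic and
	 * peripherals are left untouched, so the reset vector catch set up
	 * above survives and halts the core as it comes out of reset.
	 */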
	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m3->armv7m.core_cache);

	while (timeout < 100)
	{
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &dcb_dhcsr);
		if (retval == ERROR_OK)
		{
			retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR,
					&cortex_m3->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			if ((dcb_dhcsr & S_HALT)
					&& (cortex_m3->nvic_dfsr & DFSR_VCATCH))
			{
				LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
					"DFSR 0x%08x",
					(unsigned) dcb_dhcsr,
					(unsigned) cortex_m3->nvic_dfsr);
				cortex_m3_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			}
			else
				LOG_DEBUG("waiting for system reset-halt, "
					"DHCSR 0x%08x, %d ms",
					(unsigned) dcb_dhcsr, timeout);
		}
		timeout++;
		alive_sleep(1);
	}

	return ERROR_OK;
}

static void cortex_m3_enable_breakpoints(struct target *target)
{
	struct breakpoint *breakpoint = target->breakpoints;

	/* set any pending breakpoints */
	while (breakpoint)
	{
		if (!breakpoint->set)
			cortex_m3_set_breakpoint(target, breakpoint);
		breakpoint = breakpoint->next;
	}
}

static int cortex_m3_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution)
	{
		target_free_all_working_areas(target);
		cortex_m3_enable_breakpoints(target);
		cortex_m3_enable_watchpoints(target);
	}

	if (debug_execution)
	{
		r = armv7m->core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS.  This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * REVISIT this clearly breaks non-debug execution, since the
		 * PRIMASK register state isn't saved/restored...  workaround
		 * by never resuming app code after debug execution.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode */
		r = armv7m->core_cache->reg_list + ARMV7M_xPSR;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current)
	{
		buf_set_u32(r->value, 0, 32, address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
			&& !debug_execution)
	{
		armv7m_maybe_skip_bkpt_inst(target, NULL);
	}

	resume_pc = buf_get_u32(r->value, 0, 32);

	armv7m_restore_context(target);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		/* Single step past breakpoint at current address */
		if ((breakpoint = breakpoint_find(target, resume_pc)))
		{
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 " (ID: %d)",
				breakpoint->address,
				breakpoint->unique_id);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

	/* Restart core */
	cortex_m3_write_debug_halt_mask(target, 0, C_HALT);

	target->debug_reason = DBG_REASON_NOTHALTED;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->core_cache);

	if (!debug_execution)
	{
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
	}

	return ERROR_OK;
}

/* int irqstepcount = 0; */
static int cortex_m3_step(struct target *target, int current,
		uint32_t address, int handle_breakpoints)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct armv7m_common *armv7m = &cortex_m3->armv7m;
	struct adiv5_dap *swjdp = &armv7m->dap;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(pc->value, 0, 32, address);

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m3_unset_breakpoint(target, breakpoint);
	}

	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false)
	{
		/* Automatic ISR masking mode off: Just step over the next instruction */
		if ((cortex_m3->isrmasking_mode != CORTEX_M3_ISRMASK_AUTO))
		{
			cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
		}
		else
		{
			/* Process interrupts during stepping in a way that they don't
			 * interfere with debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* Set a temporary break point */
			retval = breakpoint_add(target, pc_value, 2, BKPT_TYPE_BY_ADDR(pc_value));
			bool tmp_bp_set = (retval == ERROR_OK);

			/* No more breakpoints left, just do a step */
			if (!tmp_bp_set)
			{
				cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
			}
			else
			{
				/* Start the core */
				LOG_DEBUG("Starting core to serve pending interrupts");
				int64_t t_start = timeval_ms();
				cortex_m3_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

				/* Wait for pending handlers to complete or timeout */
				do {
					retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
					if (retval != ERROR_OK)
					{
						target->state = TARGET_UNKNOWN;
						return retval;
					}
					isr_timed_out = ((timeval_ms() - t_start) > 500);
				} while (!((cortex_m3->dcb_dhcsr & S_HALT) || isr_timed_out));

				/* Remove the temporary breakpoint */
				breakpoint_remove(target, pc_value);

				if (isr_timed_out)
				{
					LOG_DEBUG("Interrupt handlers didn't complete within time, "
						"leaving target running");
				}
				else
				{
					/* Step over next instruction with interrupts disabled */
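					/* C_MASKINTS is changed in its own write while C_HALT
					 * stays set, and only then does the step clear C_HALT;
					 * the two bits are never toggled in the same DHCSR
					 * write (see the erratum notes earlier in this file).
					 */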
					cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
					cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts */
					cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
				}
			}
		}
	}

	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m3->armv7m.core_cache);

	if (breakpoint)
		cortex_m3_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);

	retval = cortex_m3_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);

	return ERROR_OK;
}

static int cortex_m3_assert_reset(struct target *target)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;
	enum cortex_m3_soft_reset_config reset_config = cortex_m3->soft_reset_config;

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m3->armv7m.core_cache);
		target->state = TARGET_RESET;
		return ERROR_OK;
	}

	/* Enable debug requests */
	int retval;
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;
	if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN))
	{
		retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	if (!target->reset_halt)
	{
		/* Set/Clear C_MASKINTS in a separate operation */
		if (cortex_m3->dcb_dhcsr & C_MASKINTS)
		{
			retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
					DBGKEY | C_DEBUGEN | C_HALT);
			if (retval != ERROR_OK)
				return retval;
		}

		/* clear any debug flags before resuming */
		cortex_m3_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
	}
	else
	{
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries.  Should this include MMERR or
		 * other flags too?
		 */
		retval = mem_ap_write_atomic_u32(swjdp, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK)
			return retval;
	}

	if (jtag_reset_config & RESET_HAS_SRST)
	{
		/* default to asserting srst */
		if (jtag_reset_config & RESET_SRST_PULLS_TRST)
		{
			jtag_add_reset(1, 1);
		}
		else
		{
			jtag_add_reset(0, 1);
		}
	}
	else
	{
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECTRESET as it is supported on all current cores.
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M3_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("Using Cortex-M3 %s", (reset_config == CORTEX_M3_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M3_RESET_VECTRESET) {
			LOG_WARNING("Only resetting the Cortex-M3 core, use a reset-init event "
				"handler to reset any peripherals");
		}

		{
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			retval = mem_ap_read_atomic_u32(swjdp, NVIC_AIRCR, &tmp);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	target->state = TARGET_RESET;
	jtag_add_sleep(50000);

	register_cache_invalidate(cortex_m3->armv7m.core_cache);

	if (target->reset_halt)
	{
		if ((retval = target_halt(target)) != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}

static int cortex_m3_deassert_reset(struct target *target)
{
	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	/* deassert reset lines */
	jtag_add_reset(0, 0);

	return ERROR_OK;
}

static int
cortex_m3_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	int fp_num = 0;
	uint32_t hilo;
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;

	if (breakpoint->set)
	{
		LOG_WARNING("breakpoint (BPID: %d) already set", breakpoint->unique_id);
		return ERROR_OK;
	}

	if (cortex_m3->auto_bp_type)
	{
		breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
	}

	if (breakpoint->type == BKPT_HARD)
	{
		while (comparator_list[fp_num].used && (fp_num < cortex_m3->fp_num_code))
			fp_num++;
		if (fp_num >= cortex_m3->fp_num_code)
		{
			LOG_ERROR("Can not find free FPB Comparator!");
			return ERROR_FAIL;
		}
		breakpoint->set = fp_num + 1;
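		/* FPCR encoding: bits [28:2] hold the word address, the REPLACE
		 * field selects which halfword of that word matches, and bit 0
		 * enables the comparator.
		 */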
		hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
		comparator_list[fp_num].used = 1;
		comparator_list[fp_num].fpcr_value = (breakpoint->address & 0x1FFFFFFC) | hilo | 1;
		target_write_u32(target, comparator_list[fp_num].fpcr_address, comparator_list[fp_num].fpcr_value);
		LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "", fp_num, comparator_list[fp_num].fpcr_value);
		if (!cortex_m3->fpb_enabled)
		{
			LOG_DEBUG("FPB wasn't enabled, do it now");
			target_write_u32(target, FP_CTRL, 3);
		}
	}
	else if (breakpoint->type == BKPT_SOFT)
	{
		uint8_t code[4];

		/* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
		 * semihosting; don't use that.  Otherwise the BKPT
		 * parameter is arbitrary.
		 */
		buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
		retval = target_read_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1,
				code);
		if (retval != ERROR_OK)
			return retval;
		breakpoint->set = true;
	}

	LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
		breakpoint->unique_id,
		(int)(breakpoint->type),
		breakpoint->address,
		breakpoint->length,
		breakpoint->set);

	return ERROR_OK;
}

static int
cortex_m3_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;

	if (!breakpoint->set)
	{
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
		breakpoint->unique_id,
		(int)(breakpoint->type),
		breakpoint->address,
		breakpoint->length,
		breakpoint->set);

	if (breakpoint->type == BKPT_HARD)
	{
		int fp_num = breakpoint->set - 1;
		if ((fp_num < 0) || (fp_num >= cortex_m3->fp_num_code))
		{
			LOG_DEBUG("Invalid FP Comparator number in breakpoint");
			return ERROR_OK;
		}
		comparator_list[fp_num].used = 0;
		comparator_list[fp_num].fpcr_value = 0;
		target_write_u32(target, comparator_list[fp_num].fpcr_address, comparator_list[fp_num].fpcr_value);
	}
	else
	{
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4)
		{
			if ((retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
		}
		else
		{
			if ((retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
			{
				return retval;
			}
		}
	}
	breakpoint->set = false;

	return ERROR_OK;
}

static int
cortex_m3_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);

	if (cortex_m3->auto_bp_type)
	{
		breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
#ifdef ARMV7_GDB_HACKS
		if (breakpoint->length != 2) {
			/* XXX Hack: Replace all breakpoints with length != 2 with
			 * a hardware breakpoint. */
			breakpoint->type = BKPT_HARD;
			breakpoint->length = 2;
		}
#endif
	}

	if (breakpoint->type != BKPT_TYPE_BY_ADDR(breakpoint->address)) {
		if (breakpoint->type == BKPT_HARD)
		{
			LOG_INFO("flash patch comparator requested outside code memory region");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}

		if (breakpoint->type == BKPT_SOFT)
		{
			LOG_INFO("soft breakpoint requested in code (flash) memory region");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
	}

	if ((breakpoint->type == BKPT_HARD) && (cortex_m3->fp_code_available < 1))
	{
		LOG_INFO("no flash patch comparator unit available for hardware breakpoint");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if ((breakpoint->length != 2))
	{
		LOG_INFO("only breakpoints of two bytes length supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		cortex_m3->fp_code_available--;

	return cortex_m3_set_breakpoint(target, breakpoint);
}

static int
cortex_m3_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);

	/* REVISIT why check? FPB can be updated with core running ... */
	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (cortex_m3->auto_bp_type)
	{
		breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
	}

	if (breakpoint->set)
	{
		cortex_m3_unset_breakpoint(target, breakpoint);
	}

	if (breakpoint->type == BKPT_HARD)
		cortex_m3->fp_code_available++;

	return ERROR_OK;
}

static int
cortex_m3_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	int dwt_num = 0;
	uint32_t mask, temp;
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);

	/* watchpoint params were validated earlier */
	mask = 0;
	temp = watchpoint->length;
	while (temp) {
		temp >>= 1;
		mask++;
	}
	mask--;
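
	/* mask now holds log2(length); the DWT comparator ignores that many
	 * low-order address bits, so the match covers the whole range.
	 */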

	/* REVISIT Don't fully trust these "not used" records ... users
	 * may set up breakpoints by hand, e.g. dual-address data value
	 * watchpoint using comparator #1; comparator #0 matching cycle
	 * count; send data trace info through ITM and TPIU; etc
	 */
	struct cortex_m3_dwt_comparator *comparator;

	for (comparator = cortex_m3->dwt_comparator_list;
			comparator->used && dwt_num < cortex_m3->dwt_num_comp;
			comparator++, dwt_num++)
		continue;
	if (dwt_num >= cortex_m3->dwt_num_comp)
	{
		LOG_ERROR("Can not find free DWT Comparator");
		return ERROR_FAIL;
	}
	comparator->used = 1;
	watchpoint->set = dwt_num + 1;

	comparator->comp = watchpoint->address;
	target_write_u32(target, comparator->dwt_comparator_address + 0,
		comparator->comp);

	comparator->mask = mask;
	target_write_u32(target, comparator->dwt_comparator_address + 4,
		comparator->mask);
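
	/* DWT FUNCTION values 5, 6 and 7 select a watchpoint on read, write,
	 * or either kind of data access respectively (Cortex-M3 encoding).
	 */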
	switch (watchpoint->rw) {
	case WPT_READ:
		comparator->function = 5;
		break;
	case WPT_WRITE:
		comparator->function = 6;
		break;
	case WPT_ACCESS:
		comparator->function = 7;
		break;
	}
	target_write_u32(target, comparator->dwt_comparator_address + 8,
		comparator->function);

	LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
		watchpoint->unique_id, dwt_num,
		(unsigned) comparator->comp,
		(unsigned) comparator->mask,
		(unsigned) comparator->function);
	return ERROR_OK;
}

static int
cortex_m3_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct cortex_m3_dwt_comparator *comparator;
	int dwt_num;

	if (!watchpoint->set)
	{
		LOG_WARNING("watchpoint (wpid: %d) not set",
			watchpoint->unique_id);
		return ERROR_OK;
	}

	dwt_num = watchpoint->set - 1;

	LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
		watchpoint->unique_id, dwt_num,
		(unsigned) watchpoint->address);

	if ((dwt_num < 0) || (dwt_num >= cortex_m3->dwt_num_comp))
	{
		LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
		return ERROR_OK;
	}

	comparator = cortex_m3->dwt_comparator_list + dwt_num;
	comparator->used = 0;
	comparator->function = 0;
	target_write_u32(target, comparator->dwt_comparator_address + 8,
		comparator->function);

	watchpoint->set = false;

	return ERROR_OK;
}

static int
cortex_m3_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);

	if (cortex_m3->dwt_comp_available < 1)
	{
		LOG_DEBUG("no comparators?");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* hardware doesn't support data value masking */
	if (watchpoint->mask != ~(uint32_t)0) {
		LOG_DEBUG("watchpoint value masks not supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* hardware allows address masks of up to 32K */
	unsigned mask;

	for (mask = 0; mask < 16; mask++) {
		if ((1u << mask) == watchpoint->length)
			break;
	}
	if (mask == 16) {
		LOG_DEBUG("unsupported watchpoint length");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}
	if (watchpoint->address & ((1 << mask) - 1)) {
		LOG_DEBUG("watchpoint address is unaligned");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* Caller doesn't seem to be able to describe watching for data
	 * values of zero; that flags "no value".
	 *
	 * REVISIT This DWT may well be able to watch for specific data
	 * values.  Requires comparator #1 to set DATAVMATCH and match
	 * the data, and another comparator (DATAVADDR0) matching addr.
	 */
	if (watchpoint->value) {
		LOG_DEBUG("data value watchpoint not YET supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	cortex_m3->dwt_comp_available--;
	LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);

	return ERROR_OK;
}

static int
cortex_m3_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);

	/* REVISIT why check? DWT can be updated with core running ... */
	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (watchpoint->set)
	{
		cortex_m3_unset_watchpoint(target, watchpoint);
	}

	cortex_m3->dwt_comp_available++;
	LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);

	return ERROR_OK;
}

static void cortex_m3_enable_watchpoints(struct target *target)
{
	struct watchpoint *watchpoint = target->watchpoints;

	/* set any pending watchpoints */
	while (watchpoint)
	{
		if (!watchpoint->set)
			cortex_m3_set_watchpoint(target, watchpoint);
		watchpoint = watchpoint->next;
	}
}

static int cortex_m3_load_core_reg_u32(struct target *target,
		enum armv7m_regtype type, uint32_t num, uint32_t *value)
{
	int retval;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = &armv7m->dap;

	/* NOTE: we "know" here that the register identifiers used
	 * in the v7m header match the Cortex-M3 Debug Core Register
	 * Selector values for R0..R15, xPSR, MSP, and PSP.
	 */
	switch (num) {
	case 0 ... 18:
		/* read a normal core register */
		retval = cortexm3_dap_read_coreregister_u32(swjdp, value, num);

		if (retval != ERROR_OK)
		{
			LOG_ERROR("JTAG failure %i", retval);
			return ERROR_JTAG_DEVICE_ERROR;
		}
		LOG_DEBUG("load from core reg %i value 0x%" PRIx32 "", (int)num, *value);
		break;

	case ARMV7M_PRIMASK:
	case ARMV7M_BASEPRI:
	case ARMV7M_FAULTMASK:
	case ARMV7M_CONTROL:
		/* Cortex-M3 packages these four registers as bitfields
		 * in one Debug Core register.  So say r0 and r2 docs;
		 * it was removed from r1 docs, but still works.
		 */
		cortexm3_dap_read_coreregister_u32(swjdp, value, 20);

		switch (num)
		{
			case ARMV7M_PRIMASK:
				*value = buf_get_u32((uint8_t *)value, 0, 1);
				break;

			case ARMV7M_BASEPRI:
				*value = buf_get_u32((uint8_t *)value, 8, 8);
				break;

			case ARMV7M_FAULTMASK:
				*value = buf_get_u32((uint8_t *)value, 16, 1);
				break;

			case ARMV7M_CONTROL:
				*value = buf_get_u32((uint8_t *)value, 24, 2);
				break;
		}

		LOG_DEBUG("load from special reg %i value 0x%" PRIx32 "", (int)num, *value);
		break;

	default:
		return ERROR_INVALID_ARGUMENTS;
	}

	return ERROR_OK;
}

static int cortex_m3_store_core_reg_u32(struct target *target,
		enum armv7m_regtype type, uint32_t num, uint32_t value)
{
	int retval;
	uint32_t reg;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = &armv7m->dap;

#ifdef ARMV7_GDB_HACKS
	/* If the LR register is being modified, make sure it will put us
	 * in "thumb" mode, or an INVSTATE exception will occur. This is a
	 * hack to deal with the fact that gdb will sometimes "forge"
	 * return addresses, and doesn't set the LSB correctly (i.e., when
	 * printing expressions containing function calls, it sets LR = 0.)
	 * Valid exception return codes have bit 0 set too.
	 */
	if (num == ARMV7M_R14)
		value |= 0x01;
#endif

	/* NOTE: we "know" here that the register identifiers used
	 * in the v7m header match the Cortex-M3 Debug Core Register
	 * Selector values for R0..R15, xPSR, MSP, and PSP.
	 */
	switch (num) {
	case 0 ... 18:
		retval = cortexm3_dap_write_coreregister_u32(swjdp, value, num);
		if (retval != ERROR_OK)
		{
			struct reg *r;

			LOG_ERROR("JTAG failure");
			r = armv7m->core_cache->reg_list + num;
			r->dirty = r->valid;
			return ERROR_JTAG_DEVICE_ERROR;
		}
		LOG_DEBUG("write core reg %i value 0x%" PRIx32 "", (int)num, value);
		break;

	case ARMV7M_PRIMASK:
	case ARMV7M_BASEPRI:
	case ARMV7M_FAULTMASK:
	case ARMV7M_CONTROL:
		/* Cortex-M3 packages these four registers as bitfields
		 * in one Debug Core register.  So say r0 and r2 docs;
		 * it was removed from r1 docs, but still works.
		 */
		cortexm3_dap_read_coreregister_u32(swjdp, &reg, 20);

		switch (num)
		{
			case ARMV7M_PRIMASK:
				buf_set_u32((uint8_t *)&reg, 0, 1, value);
				break;

			case ARMV7M_BASEPRI:
				buf_set_u32((uint8_t *)&reg, 8, 8, value);
				break;

			case ARMV7M_FAULTMASK:
				buf_set_u32((uint8_t *)&reg, 16, 1, value);
				break;

			case ARMV7M_CONTROL:
				buf_set_u32((uint8_t *)&reg, 24, 2, value);
				break;
		}

		cortexm3_dap_write_coreregister_u32(swjdp, reg, 20);
		LOG_DEBUG("write special reg %i value 0x%" PRIx32 " ", (int)num, value);
		break;

	default:
		return ERROR_INVALID_ARGUMENTS;
	}

	return ERROR_OK;
}

static int cortex_m3_read_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = &armv7m->dap;
	int retval = ERROR_INVALID_ARGUMENTS;

	/* cortex_m3 handles unaligned memory access */
	if (count && buffer) {
		switch (size) {
		case 4:
			retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
			break;
		case 2:
			retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
			break;
		case 1:
			retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
			break;
		}
	}

	return retval;
}

static int cortex_m3_write_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, const uint8_t *buffer)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = &armv7m->dap;
	int retval = ERROR_INVALID_ARGUMENTS;

	if (count && buffer) {
		switch (size) {
		case 4:
			retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
			break;
		case 2:
			retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
			break;
		case 1:
			retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
			break;
		}
	}

	return retval;
}

static int cortex_m3_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, const uint8_t *buffer)
{
	return cortex_m3_write_memory(target, address, 4, count, buffer);
}

static int cortex_m3_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	armv7m_build_reg_cache(target);
	return ERROR_OK;
}
  1489. /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
  1490. * on r/w if the core is not running, and clear on resume or reset ... or
  1491. * at least, in a post_restore_context() method.
  1492. */
  1493. struct dwt_reg_state {
  1494. struct target *target;
  1495. uint32_t addr;
  1496. uint32_t value; /* scratch/cache */
  1497. };
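/* Fetch a DWT register from the target into its cached value. */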
  1498. static int cortex_m3_dwt_get_reg(struct reg *reg)
  1499. {
  1500. struct dwt_reg_state *state = reg->arch_info;
  1501. return target_read_u32(state->target, state->addr, &state->value);
  1502. }
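/* Write a DWT register on the target from the supplied buffer. */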
  1503. static int cortex_m3_dwt_set_reg(struct reg *reg, uint8_t *buf)
  1504. {
  1505. struct dwt_reg_state *state = reg->arch_info;
  1506. return target_write_u32(state->target, state->addr,
  1507. buf_get_u32(buf, 0, reg->size));
  1508. }
  1509. struct dwt_reg {
  1510. uint32_t addr;
  1511. char *name;
  1512. unsigned size;
  1513. };
  1514. static struct dwt_reg dwt_base_regs[] = {
  1515. { DWT_CTRL, "dwt_ctrl", 32, },
  1516. /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
  1517. * increments while the core is asleep.
  1518. */
  1519. { DWT_CYCCNT, "dwt_cyccnt", 32, },
  1520. /* plus some 8 bit counters, useful for profiling with TPIU */
  1521. };
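/* Each DWT comparator occupies a 16-byte bank holding its COMP, MASK
 * and FUNCTION registers. */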
  1522. static struct dwt_reg dwt_comp[] = {
  1523. #define DWT_COMPARATOR(i) \
  1524. { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
  1525. { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
  1526. { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
  1527. DWT_COMPARATOR(0),
  1528. DWT_COMPARATOR(1),
  1529. DWT_COMPARATOR(2),
  1530. DWT_COMPARATOR(3),
  1531. #undef DWT_COMPARATOR
  1532. };
  1533. static const struct reg_arch_type dwt_reg_type = {
  1534. .get = cortex_m3_dwt_get_reg,
  1535. .set = cortex_m3_dwt_set_reg,
  1536. };
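/* Bind one DWT register into an OpenOCD reg cache entry, allocating the
 * per-register state that remembers its address and cached value. */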
  1537. static void
  1538. cortex_m3_dwt_addreg(struct target *t, struct reg *r, struct dwt_reg *d)
  1539. {
  1540. struct dwt_reg_state *state;
  1541. state = calloc(1, sizeof *state);
  1542. if (!state)
  1543. return;
  1544. state->addr = d->addr;
  1545. state->target = t;
  1546. r->name = d->name;
  1547. r->size = d->size;
  1548. r->value = &state->value;
  1549. r->arch_info = state;
  1550. r->type = &dwt_reg_type;
  1551. }
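/* Discover the DWT block: read DWT_CTRL, count the comparators, and expose
 * DWT_CTRL, DWT_CYCCNT and each comparator's registers through a reg cache. */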
  1552. static void
  1553. cortex_m3_dwt_setup(struct cortex_m3_common *cm3, struct target *target)
  1554. {
  1555. uint32_t dwtcr;
  1556. struct reg_cache *cache;
  1557. struct cortex_m3_dwt_comparator *comparator;
  1558. int reg, i;
  1559. target_read_u32(target, DWT_CTRL, &dwtcr);
  1560. if (!dwtcr) {
  1561. LOG_DEBUG("no DWT");
  1562. return;
  1563. }
  1564. cm3->dwt_num_comp = (dwtcr >> 28) & 0xF;
  1565. cm3->dwt_comp_available = cm3->dwt_num_comp;
  1566. cm3->dwt_comparator_list = calloc(cm3->dwt_num_comp,
  1567. sizeof(struct cortex_m3_dwt_comparator));
  1568. if (!cm3->dwt_comparator_list) {
  1569. fail0:
  1570. cm3->dwt_num_comp = 0;
  1571. LOG_ERROR("out of mem");
  1572. return;
  1573. }
  1574. cache = calloc(1, sizeof *cache);
  1575. if (!cache) {
  1576. fail1:
  1577. free(cm3->dwt_comparator_list);
  1578. goto fail0;
  1579. }
  1580. cache->name = "cortex-m3 dwt registers";
  1581. cache->num_regs = 2 + cm3->dwt_num_comp * 3;
  1582. cache->reg_list = calloc(cache->num_regs, sizeof *cache->reg_list);
  1583. if (!cache->reg_list) {
  1584. free(cache);
  1585. goto fail1;
  1586. }
  1587. for (reg = 0; reg < 2; reg++)
  1588. cortex_m3_dwt_addreg(target, cache->reg_list + reg,
  1589. dwt_base_regs + reg);
  1590. comparator = cm3->dwt_comparator_list;
  1591. for (i = 0; i < cm3->dwt_num_comp; i++, comparator++) {
  1592. int j;
  1593. comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
  1594. for (j = 0; j < 3; j++, reg++)
  1595. cortex_m3_dwt_addreg(target, cache->reg_list + reg,
  1596. dwt_comp + 3 * i + j);
  1597. }
  1598. *register_get_last_cache_p(&target->reg_cache) = cache;
  1599. cm3->dwt_cache = cache;
  1600. LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
  1601. dwtcr, cm3->dwt_num_comp,
  1602. (dwtcr & (0xf << 24)) ? " only" : "/trigger");
  1603. /* REVISIT: if num_comp > 1, check whether comparator #1 can
  1604. * implement single-address data value watchpoints ... so we
  1605. * won't need to check it later, when asked to set one up.
  1606. */
  1607. }
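/* Examine the target: initialize the debug port, identify the core from
 * CPUID, and enumerate the optional FPB and DWT units. */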
  1608. static int cortex_m3_examine(struct target *target)
  1609. {
  1610. int retval;
  1611. uint32_t cpuid, fpcr;
  1612. int i;
  1613. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  1614. struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;
  1615. if ((retval = ahbap_debugport_init(swjdp)) != ERROR_OK)
  1616. return retval;
  1617. if (!target_was_examined(target))
  1618. {
  1619. target_set_examined(target);
  1620. /* Read from Device Identification Registers */
  1621. retval = target_read_u32(target, CPUID, &cpuid);
  1622. if (retval != ERROR_OK)
  1623. return retval;
  1624. if (((cpuid >> 4) & 0xc3f) == 0xc23)
  1625. LOG_DEBUG("Cortex-M3 r%" PRId8 "p%" PRId8 " processor detected",
  1626. (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
  1627. LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);
  1628. /* NOTE: FPB and DWT are both optional. */
  1629. /* Setup FPB */
  1630. target_read_u32(target, FP_CTRL, &fpcr);
  1631. cortex_m3->auto_bp_type = 1;
  1632. cortex_m3->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF); /* bits [14:12] and [7:4] */
  1633. cortex_m3->fp_num_lit = (fpcr >> 8) & 0xF;
  1634. cortex_m3->fp_code_available = cortex_m3->fp_num_code;
  1635. cortex_m3->fp_comparator_list = calloc(cortex_m3->fp_num_code + cortex_m3->fp_num_lit, sizeof(struct cortex_m3_fp_comparator));
  1636. cortex_m3->fpb_enabled = fpcr & 1;
  1637. for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++)
  1638. {
  1639. cortex_m3->fp_comparator_list[i].type = (i < cortex_m3->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
  1640. cortex_m3->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
  1641. }
  1642. LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i", fpcr, cortex_m3->fp_num_code, cortex_m3->fp_num_lit);
  1643. /* Setup DWT */
  1644. cortex_m3_dwt_setup(cortex_m3, target);
  1645. /* These FPB hardware breakpoints only match code-region addresses (typically flash)! */
  1646. LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
  1647. target_name(target),
  1648. cortex_m3->fp_num_code,
  1649. cortex_m3->dwt_num_comp);
  1650. }
  1651. return ERROR_OK;
  1652. }
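/* Read one data byte plus control flags from the emulated DCC channel in
 * DCB_DCRDR, then write zero back to acknowledge so the target can send more. */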
  1653. static int cortex_m3_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
  1654. {
  1655. uint16_t dcrdr;
  1656. int retval;
  1657. retval = mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
  if (retval != ERROR_OK)
  return retval;
  1658. *ctrl = (uint8_t)dcrdr;
  1659. *value = (uint8_t)(dcrdr >> 8);
  1660. LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
  1661. /* write ack back to software dcc register
  1662. * signify we have read data */
  1663. if (dcrdr & (1 << 0))
  1664. {
  1665. dcrdr = 0;
  1666. retval = mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
  1667. if (retval != ERROR_OK)
  1668. return retval;
  1669. }
  1670. return ERROR_OK;
  1671. }
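/* Pull 'size' 32-bit words of debug-message payload from the target,
 * one DCC byte at a time. */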
  1672. static int cortex_m3_target_request_data(struct target *target,
  1673. uint32_t size, uint8_t *buffer)
  1674. {
  1675. struct armv7m_common *armv7m = target_to_armv7m(target);
  1676. struct adiv5_dap *swjdp = &armv7m->dap;
  1677. uint8_t data;
  1678. uint8_t ctrl;
  1679. uint32_t i;
  1680. for (i = 0; i < (size * 4); i++)
  1681. {
  1682. cortex_m3_dcc_read(swjdp, &data, &ctrl);
  1683. buffer[i] = data;
  1684. }
  1685. return ERROR_OK;
  1686. }
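/* Periodic timer callback: while the target runs with debug messages enabled,
 * poll the DCC channel and forward any complete 32-bit request to target_request(). */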
  1687. static int cortex_m3_handle_target_request(void *priv)
  1688. {
  1689. struct target *target = priv;
  1690. if (!target_was_examined(target))
  1691. return ERROR_OK;
  1692. struct armv7m_common *armv7m = target_to_armv7m(target);
  1693. struct adiv5_dap *swjdp = &armv7m->dap;
  1694. if (!target->dbg_msg_enabled)
  1695. return ERROR_OK;
  1696. if (target->state == TARGET_RUNNING)
  1697. {
  1698. uint8_t data;
  1699. uint8_t ctrl;
  1700. cortex_m3_dcc_read(swjdp, &data, &ctrl);
  1701. /* check if we have data */
  1702. if (ctrl & (1 << 0))
  1703. {
  1704. uint32_t request;
  1705. /* we assume target is quick enough */
  1706. request = data;
  1707. cortex_m3_dcc_read(swjdp, &data, &ctrl);
  1708. request |= (data << 8);
  1709. cortex_m3_dcc_read(swjdp, &data, &ctrl);
  1710. request |= (data << 16);
  1711. cortex_m3_dcc_read(swjdp, &data, &ctrl);
  1712. request |= (data << 24);
  1713. target_request(target, request);
  1714. }
  1715. }
  1716. return ERROR_OK;
  1717. }
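/* One-time setup of the per-target Cortex-M3/ARMv7-M state: JTAG info, DAP
 * parameters, arch-specific callbacks and the DCC polling timer. */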
  1718. static int cortex_m3_init_arch_info(struct target *target,
  1719. struct cortex_m3_common *cortex_m3, struct jtag_tap *tap)
  1720. {
  1721. int retval;
  1722. struct armv7m_common *armv7m = &cortex_m3->armv7m;
  1723. armv7m_init_arch_info(target, armv7m);
  1724. /* prepare JTAG information for the new target */
  1725. cortex_m3->jtag_info.tap = tap;
  1726. cortex_m3->jtag_info.scann_size = 4;
  1727. /* The default reset mode is to use srst if it is fitted;
  1728. * otherwise fall back to CORTEX_M3_RESET_VECTRESET. */
  1729. cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
  1730. armv7m->arm.dap = &armv7m->dap;
  1731. /* Leave (only) generic DAP stuff for debugport_init(); */
  1732. armv7m->dap.jtag_info = &cortex_m3->jtag_info;
  1733. armv7m->dap.memaccess_tck = 8;
  1734. /* Cortex-M3 has 4096 bytes autoincrement range */
  1735. armv7m->dap.tar_autoincr_block = (1 << 12);
  1736. /* register arch-specific functions */
  1737. armv7m->examine_debug_reason = cortex_m3_examine_debug_reason;
  1738. armv7m->post_debug_entry = NULL;
  1739. armv7m->pre_restore_context = NULL;
  1740. armv7m->load_core_reg_u32 = cortex_m3_load_core_reg_u32;
  1741. armv7m->store_core_reg_u32 = cortex_m3_store_core_reg_u32;
  1742. target_register_timer_callback(cortex_m3_handle_target_request, 1, 1, target);
  1743. if ((retval = arm_jtag_setup_connection(&cortex_m3->jtag_info)) != ERROR_OK)
  1744. {
  1745. return retval;
  1746. }
  1747. return ERROR_OK;
  1748. }
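/* Allocate and initialize the Cortex-M3 private state for a new target. */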
  1749. static int cortex_m3_target_create(struct target *target, Jim_Interp *interp)
  1750. {
  1751. struct cortex_m3_common *cortex_m3 = calloc(1, sizeof(struct cortex_m3_common));
  1752. cortex_m3->common_magic = CORTEX_M3_COMMON_MAGIC;
  1753. cortex_m3_init_arch_info(target, cortex_m3, target->tap);
  1754. return ERROR_OK;
  1755. }
  1756. /*--------------------------------------------------------------------------*/
  1757. static int cortex_m3_verify_pointer(struct command_context *cmd_ctx,
  1758. struct cortex_m3_common *cm3)
  1759. {
  1760. if (cm3->common_magic != CORTEX_M3_COMMON_MAGIC) {
  1761. command_print(cmd_ctx, "target is not a Cortex-M3");
  1762. return ERROR_TARGET_INVALID;
  1763. }
  1764. return ERROR_OK;
  1765. }
  1766. /*
  1767. * Only stuff below this line should need to verify that its target
  1768. * is a Cortex-M3. Everything else should have indirected through the
  1769. * cortexm3_target structure, which is only used with CM3 targets.
  1770. */
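/* Map user-visible vector catch names to their DCB_DEMCR VC_* bits. */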
  1771. static const struct {
  1772. char name[10];
  1773. unsigned mask;
  1774. } vec_ids[] = {
  1775. { "hard_err", VC_HARDERR, },
  1776. { "int_err", VC_INTERR, },
  1777. { "bus_err", VC_BUSERR, },
  1778. { "state_err", VC_STATERR, },
  1779. { "chk_err", VC_CHKERR, },
  1780. { "nocp_err", VC_NOCPERR, },
  1781. { "mm_err", VC_MMERR, },
  1782. { "reset", VC_CORERESET, },
  1783. };
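/* 'cortex_m3 vector_catch' handler: with arguments it updates DEMCR
 * (e.g. "cortex_m3 vector_catch hard_err reset", or "all"/"none"),
 * then it always prints the catch/ignore state of every vector. */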
  1784. COMMAND_HANDLER(handle_cortex_m3_vector_catch_command)
  1785. {
  1786. struct target *target = get_current_target(CMD_CTX);
  1787. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  1788. struct armv7m_common *armv7m = &cortex_m3->armv7m;
  1789. struct adiv5_dap *swjdp = &armv7m->dap;
  1790. uint32_t demcr = 0;
  1791. int retval;
  1792. retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
  1793. if (retval != ERROR_OK)
  1794. return retval;
  1795. retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
  1796. if (retval != ERROR_OK)
  1797. return retval;
  1798. if (CMD_ARGC > 0) {
  1799. unsigned catch = 0;
  1800. if (CMD_ARGC == 1) {
  1801. if (strcmp(CMD_ARGV[0], "all") == 0) {
  1802. catch = VC_HARDERR | VC_INTERR | VC_BUSERR
  1803. | VC_STATERR | VC_CHKERR | VC_NOCPERR
  1804. | VC_MMERR | VC_CORERESET;
  1805. goto write;
  1806. } else if (strcmp(CMD_ARGV[0], "none") == 0) {
  1807. goto write;
  1808. }
  1809. }
  1810. while (CMD_ARGC-- > 0) {
  1811. unsigned i;
  1812. for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
  1813. if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
  1814. continue;
  1815. catch |= vec_ids[i].mask;
  1816. break;
  1817. }
  1818. if (i == ARRAY_SIZE(vec_ids)) {
  1819. LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
  1820. return ERROR_INVALID_ARGUMENTS;
  1821. }
  1822. }
  1823. write:
  1824. /* For now, armv7m->demcr only stores vector catch flags. */
  1825. armv7m->demcr = catch;
  1826. demcr &= ~0xffff;
  1827. demcr |= catch;
  1828. /* Write, then read DEMCR back so we report the bits the hardware actually accepted. */
  1829. retval = mem_ap_write_u32(swjdp, DCB_DEMCR, demcr);
  1830. if (retval != ERROR_OK)
  1831. return retval;
  1832. retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
  1833. if (retval != ERROR_OK)
  1834. return retval;
  1835. /* FIXME be sure to clear DEMCR on clean server shutdown.
  1836. * Otherwise the vector catch hardware could fire when there's
  1837. * no debugger hooked up, causing much confusion...
  1838. */
  1839. }
  1840. for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++)
  1841. {
  1842. command_print(CMD_CTX, "%9s: %s", vec_ids[i].name,
  1843. (demcr & vec_ids[i].mask) ? "catch" : "ignore");
  1844. }
  1845. return ERROR_OK;
  1846. }
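/* 'cortex_m3 maskisr' handler: e.g. "cortex_m3 maskisr on" sets the C_MASKINTS
 * halt-mode bit so interrupts stay masked; the target must be halted first. */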
  1847. COMMAND_HANDLER(handle_cortex_m3_mask_interrupts_command)
  1848. {
  1849. struct target *target = get_current_target(CMD_CTX);
  1850. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  1851. int retval;
  1852. static const Jim_Nvp nvp_maskisr_modes[] = {
  1853. { .name = "auto", .value = CORTEX_M3_ISRMASK_AUTO },
  1854. { .name = "off" , .value = CORTEX_M3_ISRMASK_OFF },
  1855. { .name = "on" , .value = CORTEX_M3_ISRMASK_ON },
  1856. { .name = NULL , .value = -1 },
  1857. };
  1858. const Jim_Nvp *n;
  1859. retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
  1860. if (retval != ERROR_OK)
  1861. return retval;
  1862. if (target->state != TARGET_HALTED)
  1863. {
  1864. command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
  1865. return ERROR_OK;
  1866. }
  1867. if (CMD_ARGC > 0)
  1868. {
  1869. n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
  1870. if (n->name == NULL)
  1871. {
  1872. return ERROR_COMMAND_SYNTAX_ERROR;
  1873. }
  1874. cortex_m3->isrmasking_mode = n->value;
  1875. if (cortex_m3->isrmasking_mode == CORTEX_M3_ISRMASK_ON)
  1876. {
  1877. cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
  1878. }
  1879. else
  1880. {
  1881. cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
  1882. }
  1883. }
  1884. n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_m3->isrmasking_mode);
  1885. command_print(CMD_CTX, "cortex_m3 interrupt mask %s", n->name);
  1886. return ERROR_OK;
  1887. }
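/* 'cortex_m3 reset_config' handler: selects which soft reset request is used
 * (SYSRESETREQ or VECTRESET), e.g. "cortex_m3 reset_config sysresetreq". */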
  1888. COMMAND_HANDLER(handle_cortex_m3_reset_config_command)
  1889. {
  1890. struct target *target = get_current_target(CMD_CTX);
  1891. struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
  1892. int retval;
  1893. char *reset_config;
  1894. retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
  1895. if (retval != ERROR_OK)
  1896. return retval;
  1897. if (CMD_ARGC > 0)
  1898. {
  1899. if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
  1900. cortex_m3->soft_reset_config = CORTEX_M3_RESET_SYSRESETREQ;
  1901. else if (strcmp(*CMD_ARGV, "vectreset") == 0)
  1902. cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
  1903. }
  1904. switch (cortex_m3->soft_reset_config)
  1905. {
  1906. case CORTEX_M3_RESET_SYSRESETREQ:
  1907. reset_config = "sysresetreq";
  1908. break;
  1909. case CORTEX_M3_RESET_VECTRESET:
  1910. reset_config = "vectreset";
  1911. break;
  1912. default:
  1913. reset_config = "unknown";
  1914. break;
  1915. }
  1916. command_print(CMD_CTX, "cortex_m3 reset_config %s", reset_config);
  1917. return ERROR_OK;
  1918. }
  1919. static const struct command_registration cortex_m3_exec_command_handlers[] = {
  1920. {
  1921. .name = "maskisr",
  1922. .handler = handle_cortex_m3_mask_interrupts_command,
  1923. .mode = COMMAND_EXEC,
  1924. .help = "mask cortex_m3 interrupts",
  1925. .usage = "['auto'|'on'|'off']",
  1926. },
  1927. {
  1928. .name = "vector_catch",
  1929. .handler = handle_cortex_m3_vector_catch_command,
  1930. .mode = COMMAND_EXEC,
  1931. .help = "configure hardware vectors to trigger debug entry",
  1932. .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
  1933. },
  1934. {
  1935. .name = "reset_config",
  1936. .handler = handle_cortex_m3_reset_config_command,
  1937. .mode = COMMAND_ANY,
  1938. .help = "configure software reset handling",
  1939. .usage = "['srst'|'sysresetreq'|'vectreset']",
  1940. },
  1941. COMMAND_REGISTRATION_DONE
  1942. };
  1943. static const struct command_registration cortex_m3_command_handlers[] = {
  1944. {
  1945. .chain = armv7m_command_handlers,
  1946. },
  1947. {
  1948. .name = "cortex_m3",
  1949. .mode = COMMAND_EXEC,
  1950. .help = "Cortex-M3 command group",
  1951. .chain = cortex_m3_exec_command_handlers,
  1952. },
  1953. COMMAND_REGISTRATION_DONE
  1954. };
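/* OpenOCD target driver entry points for the "cortex_m3" target type. */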
  1955. struct target_type cortexm3_target =
  1956. {
  1957. .name = "cortex_m3",
  1958. .poll = cortex_m3_poll,
  1959. .arch_state = armv7m_arch_state,
  1960. .target_request_data = cortex_m3_target_request_data,
  1961. .halt = cortex_m3_halt,
  1962. .resume = cortex_m3_resume,
  1963. .step = cortex_m3_step,
  1964. .assert_reset = cortex_m3_assert_reset,
  1965. .deassert_reset = cortex_m3_deassert_reset,
  1966. .soft_reset_halt = cortex_m3_soft_reset_halt,
  1967. .get_gdb_reg_list = armv7m_get_gdb_reg_list,
  1968. .read_memory = cortex_m3_read_memory,
  1969. .write_memory = cortex_m3_write_memory,
  1970. .bulk_write_memory = cortex_m3_bulk_write_memory,
  1971. .checksum_memory = armv7m_checksum_memory,
  1972. .blank_check_memory = armv7m_blank_check_memory,
  1973. .run_algorithm = armv7m_run_algorithm,
  1974. .start_algorithm = armv7m_start_algorithm,
  1975. .wait_algorithm = armv7m_wait_algorithm,
  1976. .add_breakpoint = cortex_m3_add_breakpoint,
  1977. .remove_breakpoint = cortex_m3_remove_breakpoint,
  1978. .add_watchpoint = cortex_m3_add_watchpoint,
  1979. .remove_watchpoint = cortex_m3_remove_watchpoint,
  1980. .commands = cortex_m3_command_handlers,
  1981. .target_create = cortex_m3_target_create,
  1982. .init_target = cortex_m3_init_target,
  1983. .examine = cortex_m3_examine,
  1984. };