You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

3208 lines
86 KiB

  1. /***************************************************************************
  2. * Copyright (C) 2015 by David Ung *
  3. * *
  4. * This program is free software; you can redistribute it and/or modify *
  5. * it under the terms of the GNU General Public License as published by *
  6. * the Free Software Foundation; either version 2 of the License, or *
  7. * (at your option) any later version. *
  8. * *
  9. * This program is distributed in the hope that it will be useful, *
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  12. * GNU General Public License for more details. *
  13. * *
  14. * You should have received a copy of the GNU General Public License *
  15. * along with this program; if not, write to the *
  16. * Free Software Foundation, Inc., *
  17. * *
  18. ***************************************************************************/
  19. #ifdef HAVE_CONFIG_H
  20. #include "config.h"
  21. #endif
  22. #include "breakpoints.h"
  23. #include "aarch64.h"
  24. #include "a64_disassembler.h"
  25. #include "register.h"
  26. #include "target_request.h"
  27. #include "target_type.h"
  28. #include "armv8_opcodes.h"
  29. #include "armv8_cache.h"
  30. #include "arm_semihosting.h"
  31. #include "jtag/interface.h"
  32. #include "smp.h"
  33. #include <helper/time_support.h>
/* How a restart request is carried out: lazily (just trigger the CTI
 * event) or synchronously (also wait until the PE has actually left
 * the halted state). */
enum restart_mode {
	RESTART_LAZY,
	RESTART_SYNC,
};
/* How a halt request is carried out: lazily (just trigger the CTI
 * event) or synchronously (also wait until the PE reports halted). */
enum halt_mode {
	HALT_LAZY,
	HALT_SYNC,
};
/* Per-target configuration collected at target-create time: the ADIv5
 * DAP/AP settings plus the Cross Trigger Interface instance used for
 * halt/restart signalling. */
struct aarch64_private_config {
	struct adiv5_private_config adiv5_config;
	struct arm_cti *cti;
};
  46. static int aarch64_poll(struct target *target);
  47. static int aarch64_debug_entry(struct target *target);
  48. static int aarch64_restore_context(struct target *target, bool bpwp);
  49. static int aarch64_set_breakpoint(struct target *target,
  50. struct breakpoint *breakpoint, uint8_t matchmode);
  51. static int aarch64_set_context_breakpoint(struct target *target,
  52. struct breakpoint *breakpoint, uint8_t matchmode);
  53. static int aarch64_set_hybrid_breakpoint(struct target *target,
  54. struct breakpoint *breakpoint);
  55. static int aarch64_unset_breakpoint(struct target *target,
  56. struct breakpoint *breakpoint);
  57. static int aarch64_mmu(struct target *target, int *enabled);
  58. static int aarch64_virt2phys(struct target *target,
  59. target_addr_t virt, target_addr_t *phys);
  60. static int aarch64_read_cpu_memory(struct target *target,
  61. uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
/*
 * Write the cached SCTLR value back to the core if it differs from the
 * value last written (system_control_reg vs. system_control_reg_curr).
 * The MSR/MCR encoding is selected by the current core mode; an EL0
 * core is temporarily switched to EL1H because SCTLR_EL1 is not
 * accessible from EL0.
 *
 * Clobbers r0/x0 through the DPM write helper, so callers must restore
 * the register context afterwards (see aarch64_restore_one()).
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;

		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* SCTLR_EL1 is not accessible from EL0: switch up first */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
		case ARM_MODE_HYP:
		case ARM_MODE_SYS:
			/* AArch32 modes: write SCTLR via coprocessor 15 */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
					armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
/* modify system_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr = 0;

	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if (!(aarch64->system_control_reg_curr & 0x1U))
			aarch64->system_control_reg_curr |= 0x1U;
	} else {
		/* When turning the MMU off, also clear SCTLR.C (bit 2) and
		 * flush the data cache first so dirty lines are not lost once
		 * accesses become non-cacheable. */
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active */
			aarch64->system_control_reg_curr &= ~0x4U;
			/* flush data cache armv8 function to be called */
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		if ((aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr &= ~0x1U;
		}
	}

	/* Select the SCTLR write encoding for the current core mode:
	 * MSR for AArch64 ELx, MCR p15 for AArch32 modes. */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not accessible from EL0: switch up first */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_SYS:
		instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
		break;

	default:
		/* NOTE(review): on an unknown mode, instr stays 0 and is still
		 * handed to instr_write_data_r0() below instead of bailing out
		 * early — confirm this is intentional. */
		LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_mode);
		break;
	}
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
				aarch64->system_control_reg_curr);

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	return retval;
}
/*
 * Basic debug access, very low level assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG("%s", target_name(target));

	/* Unlock the OS Lock so the external debugger can access the
	 * debug registers at all. */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}
  220. /* Write to memory mapped registers directly with no cache or mmu handling */
  221. static int aarch64_dap_write_memap_register_u32(struct target *target,
  222. target_addr_t address,
  223. uint32_t value)
  224. {
  225. int retval;
  226. struct armv8_common *armv8 = target_to_armv8(target);
  227. retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
  228. return retval;
  229. }
  230. static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
  231. {
  232. struct arm_dpm *dpm = &a8->armv8_common.dpm;
  233. int retval;
  234. dpm->arm = &a8->armv8_common.arm;
  235. dpm->didr = debug;
  236. retval = armv8_dpm_setup(dpm);
  237. if (retval == ERROR_OK)
  238. retval = armv8_dpm_initialize(dpm);
  239. return retval;
  240. }
  241. static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
  242. {
  243. struct armv8_common *armv8 = target_to_armv8(target);
  244. return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
  245. }
  246. static int aarch64_check_state_one(struct target *target,
  247. uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
  248. {
  249. struct armv8_common *armv8 = target_to_armv8(target);
  250. uint32_t prsr;
  251. int retval;
  252. retval = mem_ap_read_atomic_u32(armv8->debug_ap,
  253. armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
  254. if (retval != ERROR_OK)
  255. return retval;
  256. if (p_prsr)
  257. *p_prsr = prsr;
  258. if (p_result)
  259. *p_result = (prsr & mask) == (val & mask);
  260. return ERROR_OK;
  261. }
  262. static int aarch64_wait_halt_one(struct target *target)
  263. {
  264. int retval = ERROR_OK;
  265. uint32_t prsr;
  266. int64_t then = timeval_ms();
  267. for (;;) {
  268. int halted;
  269. retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
  270. if (retval != ERROR_OK || halted)
  271. break;
  272. if (timeval_ms() > then + 1000) {
  273. retval = ERROR_TARGET_TIMEOUT;
  274. LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
  275. break;
  276. }
  277. }
  278. return retval;
  279. }
/*
 * Prepare every running, examined PE in the SMP group for halting:
 * enable halting debug mode and open the CTI gate for channel 0 so
 * halt requests propagate through the trigger matrix.
 *
 * exc_target: when true, skip the calling target itself.
 * p_first:    optional out; receives the first prepared target (or the
 *             calling target when exc_target is false or none matched).
 */
static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head = target->head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	while (head) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);
		head = head->next;

		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (!first)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}
/*
 * Halt a single PE by pulsing CTI channel 0. In HALT_SYNC mode wait
 * (up to one second) for PRSR to confirm the core actually halted;
 * in HALT_LAZY mode return right after triggering the event.
 */
static int aarch64_halt_one(struct target *target, enum halt_mode mode)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	/* allow Halting Debug Mode */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 0);
	if (retval != ERROR_OK)
		return retval;

	if (mode == HALT_SYNC) {
		retval = aarch64_wait_halt_one(target);
		if (retval != ERROR_OK) {
			if (retval == ERROR_TARGET_TIMEOUT)
				LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
			return retval;
		}
	}

	return ERROR_OK;
}
/*
 * Halt the whole SMP group: prepare all PEs, halt one of them, and
 * then wait for the halt to propagate through the CTM to the rest.
 *
 * exc_target: when true, exclude the calling target (used when it is
 * already halted and only the others need to be stopped).
 */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->head) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
/*
 * Bring the rest of the SMP group in sync with a halt event: halt any
 * still-running siblings, then poll them so their state and register
 * caches are updated. The target serving GDB is polled last so GDB
 * sees a consistent view of the whole group.
 */
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_DEBUG("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}
/*
 * Aarch64 Run control
 */

/*
 * Poll the core's halt status. On a running->halted transition, enter
 * debug state, propagate the halt to the SMP group, give semihosting a
 * chance to handle the stop, and fire the matching halted event.
 */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* debug_reason captured before aarch64_debug_entry()
			 * overwrites it; used to decide whether siblings still
			 * need to be halted. */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			if (arm_semihosting(target, &retval) != 0)
				return retval;

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
  467. static int aarch64_halt(struct target *target)
  468. {
  469. struct armv8_common *armv8 = target_to_armv8(target);
  470. armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
  471. if (target->smp)
  472. return aarch64_halt_smp(target, false);
  473. return aarch64_halt_one(target, HALT_SYNC);
  474. }
/*
 * Restore a single core's state in preparation for resume.
 *
 * current != 0: resume at the current PC (written back via *address);
 * otherwise resume at *address. The PC is adjusted for the core state
 * (word-aligned for ARM/AArch64, bit 0 set for Thumb), then SCTLR and
 * the full register context are restored to the core.
 */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
/**
 * Prepare a single target for restart: sanity-check DSCR, acknowledge
 * any pending CTI halt event, configure the CTI gates (channel 1 open
 * for restart propagation, channel 0 closed to block halt events),
 * keep DSCR.HDE set, and clear the sticky PRSR bits so PRSR.SDR can
 * later confirm the restart.
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval == ERROR_OK) {
		/* clear sticky bits in PRSR, SDR is now 0 */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
	}

	return retval;
}
  567. static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
  568. {
  569. struct armv8_common *armv8 = target_to_armv8(target);
  570. int retval;
  571. LOG_DEBUG("%s", target_name(target));
  572. /* trigger an event on channel 1, generates a restart request to the PE */
  573. retval = arm_cti_pulse_channel(armv8->cti, 1);
  574. if (retval != ERROR_OK)
  575. return retval;
  576. if (mode == RESTART_SYNC) {
  577. int64_t then = timeval_ms();
  578. for (;;) {
  579. int resumed;
  580. /*
  581. * if PRSR.SDR is set now, the target did restart, even
  582. * if it's now already halted again (e.g. due to breakpoint)
  583. */
  584. retval = aarch64_check_state_one(target,
  585. PRSR_SDR, PRSR_SDR, &resumed, NULL);
  586. if (retval != ERROR_OK || resumed)
  587. break;
  588. if (timeval_ms() > then + 1000) {
  589. LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
  590. retval = ERROR_TARGET_TIMEOUT;
  591. break;
  592. }
  593. }
  594. }
  595. if (retval != ERROR_OK)
  596. return retval;
  597. target->debug_reason = DBG_REASON_NOTHALTED;
  598. target->state = TARGET_RUNNING;
  599. return ERROR_OK;
  600. }
  601. static int aarch64_restart_one(struct target *target, enum restart_mode mode)
  602. {
  603. int retval;
  604. LOG_DEBUG("%s", target_name(target));
  605. retval = aarch64_prepare_restart_one(target);
  606. if (retval == ERROR_OK)
  607. retval = aarch64_do_restart_one(target, mode);
  608. return retval;
  609. }
/*
 * prepare all but the current target for restart
 */

/*
 * Restore the register context of every halted sibling in the SMP
 * group and set up its CTI gates for restart. p_first (optional)
 * receives the first successfully prepared sibling, or NULL when
 * there was none.
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;

	foreach_smp_target(head, target->head) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (!first)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}
/*
 * After a single-step, restart the rest of the SMP group (lazily, via
 * the first prepared sibling) and wait up to one second for every
 * sibling to report PRSR.SDR, firing TARGET_EVENT_RESUMED as each one
 * comes up.
 */
static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	if (first)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->head) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;
			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr,
					PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to resume one core in
		 * each cluster explicitly. So if we find that a core has not
		 * resumed yet, we trigger an explicit resume for the second
		 * cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
/*
 * Resume execution, optionally at a new address.
 *
 * current != 0: resume at the current PC; otherwise at 'address'.
 * For SMP groups, all siblings are prepared first, the calling target
 * is restarted synchronously, and then the function waits (1s timeout)
 * for every sibling to report PRSR.SDR. With debug_execution set, the
 * target enters TARGET_DEBUG_RUNNING and the DEBUG_RESUMED event is
 * fired instead of RESUMED.
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				 debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->head) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
/*
 * Enter debug state after a halt: clear sticky errors, acknowledge the
 * CTI halt event, switch opcode/register-access tables to the detected
 * core state (AArch32 vs AArch64), determine the debug reason from
 * DSCR, capture the watchpoint address (EDWAR) when applicable, and
 * read the current register set. Finishes by invoking the
 * post_debug_entry hook if one is installed.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save the memory address that triggered the watchpoint */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDWAR0, &tmp);
		if (retval != ERROR_OK)
			return retval;
		target_addr_t edwar = tmp;

		/* EDWAR[63:32] has unknown content in aarch32 state */
		if (core_state == ARM_STATE_AARCH64) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_EDWAR1, &tmp);
			if (retval != ERROR_OK)
				return retval;
			edwar |= ((target_addr_t)tmp) << 32;
		}

		armv8->dpm.wp_addr = edwar;
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
/*
 * Hook run after debug entry: read SCTLR for the current mode (MRS for
 * AArch64 ELx, MRC p15 for AArch32; EL0 is bumped to EL1H first), cache
 * it, identify the cache geometry and MPIDR on first entry, and mirror
 * the MMU (bit 0), D-cache (bit 2) and I-cache (bit 12) enable bits
 * into the armv8_mmu bookkeeping.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not accessible from EL0: switch up first */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_SYS:
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
				armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* info == -1 means the cache geometry has not been identified yet */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
  901. /*
  902. * single-step a target
  903. */
  904. static int aarch64_step(struct target *target, int current, target_addr_t address,
  905. int handle_breakpoints)
  906. {
  907. struct armv8_common *armv8 = target_to_armv8(target);
  908. struct aarch64_common *aarch64 = target_to_aarch64(target);
  909. int saved_retval = ERROR_OK;
  910. int retval;
  911. uint32_t edecr;
  912. armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
  913. if (target->state != TARGET_HALTED) {
  914. LOG_WARNING("target not halted");
  915. return ERROR_TARGET_NOT_HALTED;
  916. }
  917. retval = mem_ap_read_atomic_u32(armv8->debug_ap,
  918. armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
  919. /* make sure EDECR.SS is not set when restoring the register */
  920. if (retval == ERROR_OK) {
  921. edecr &= ~0x4;
  922. /* set EDECR.SS to enter hardware step mode */
  923. retval = mem_ap_write_atomic_u32(armv8->debug_ap,
  924. armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
  925. }
  926. /* disable interrupts while stepping */
  927. if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
  928. retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
  929. /* bail out if stepping setup has failed */
  930. if (retval != ERROR_OK)
  931. return retval;
  932. if (target->smp && (current == 1)) {
  933. /*
  934. * isolate current target so that it doesn't get resumed
  935. * together with the others
  936. */
  937. retval = arm_cti_gate_channel(armv8->cti, 1);
  938. /* resume all other targets in the group */
  939. if (retval == ERROR_OK)
  940. retval = aarch64_step_restart_smp(target);
  941. if (retval != ERROR_OK) {
  942. LOG_ERROR("Failed to restart non-stepping targets in SMP group");
  943. return retval;
  944. }
  945. LOG_DEBUG("Restarted all non-stepping targets in SMP group");
  946. }
  947. /* all other targets running, restore and restart the current target */
  948. retval = aarch64_restore_one(target, current, &address, 0, 0);
  949. if (retval == ERROR_OK)
  950. retval = aarch64_restart_one(target, RESTART_LAZY);
  951. if (retval != ERROR_OK)
  952. return retval;
  953. LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
  954. if (!handle_breakpoints)
  955. target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
  956. int64_t then = timeval_ms();
  957. for (;;) {
  958. int stepped;
  959. uint32_t prsr;
  960. retval = aarch64_check_state_one(target,
  961. PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
  962. if (retval != ERROR_OK || stepped)
  963. break;
  964. if (timeval_ms() > then + 100) {
  965. LOG_ERROR("timeout waiting for target %s halt after step",
  966. target_name(target));
  967. retval = ERROR_TARGET_TIMEOUT;
  968. break;
  969. }
  970. }
  971. /*
  972. * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
  973. * causes a timeout. The core takes the step but doesn't complete it and so
  974. * debug state is never entered. However, you can manually halt the core
  975. * as an external debug even is also a WFI wakeup event.
  976. */
  977. if (retval == ERROR_TARGET_TIMEOUT)
  978. saved_retval = aarch64_halt_one(target, HALT_SYNC);
  979. /* restore EDECR */
  980. retval = mem_ap_write_atomic_u32(armv8->debug_ap,
  981. armv8->debug_base + CPUV8_DBG_EDECR, edecr);
  982. if (retval != ERROR_OK)
  983. return retval;
  984. /* restore interrupts */
  985. if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
  986. retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
  987. if (retval != ERROR_OK)
  988. return ERROR_OK;
  989. }
  990. if (saved_retval != ERROR_OK)
  991. return saved_retval;
  992. return ERROR_OK;
  993. }
  994. static int aarch64_restore_context(struct target *target, bool bpwp)
  995. {
  996. struct armv8_common *armv8 = target_to_armv8(target);
  997. struct arm *arm = &armv8->arm;
  998. int retval;
  999. LOG_DEBUG("%s", target_name(target));
  1000. if (armv8->pre_restore_context)
  1001. armv8->pre_restore_context(target);
  1002. retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
  1003. if (retval == ERROR_OK) {
  1004. /* registers are now invalid */
  1005. register_cache_invalidate(arm->core_cache);
  1006. register_cache_invalidate(arm->core_cache->next);
  1007. }
  1008. return retval;
  1009. }
  1010. /*
  1011. * Cortex-A8 Breakpoint and watchpoint functions
  1012. */
  1013. /* Setup hardware Breakpoint Register Pair */
  1014. static int aarch64_set_breakpoint(struct target *target,
  1015. struct breakpoint *breakpoint, uint8_t matchmode)
  1016. {
  1017. int retval;
  1018. int brp_i = 0;
  1019. uint32_t control;
  1020. uint8_t byte_addr_select = 0x0F;
  1021. struct aarch64_common *aarch64 = target_to_aarch64(target);
  1022. struct armv8_common *armv8 = &aarch64->armv8_common;
  1023. struct aarch64_brp *brp_list = aarch64->brp_list;
  1024. if (breakpoint->set) {
  1025. LOG_WARNING("breakpoint already set");
  1026. return ERROR_OK;
  1027. }
  1028. if (breakpoint->type == BKPT_HARD) {
  1029. int64_t bpt_value;
  1030. while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
  1031. brp_i++;
  1032. if (brp_i >= aarch64->brp_num) {
  1033. LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
  1034. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1035. }
  1036. breakpoint->set = brp_i + 1;
  1037. if (breakpoint->length == 2)
  1038. byte_addr_select = (3 << (breakpoint->address & 0x02));
  1039. control = ((matchmode & 0x7) << 20)
  1040. | (1 << 13)
  1041. | (byte_addr_select << 5)
  1042. | (3 << 1) | 1;
  1043. brp_list[brp_i].used = 1;
  1044. brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
  1045. brp_list[brp_i].control = control;
  1046. bpt_value = brp_list[brp_i].value;
  1047. retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
  1048. + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
  1049. (uint32_t)(bpt_value & 0xFFFFFFFF));
  1050. if (retval != ERROR_OK)
  1051. return retval;
  1052. retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
  1053. + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
  1054. (uint32_t)(bpt_value >> 32));
  1055. if (retval != ERROR_OK)
  1056. return retval;
  1057. retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
  1058. + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
  1059. brp_list[brp_i].control);
  1060. if (retval != ERROR_OK)
  1061. return retval;
  1062. LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
  1063. brp_list[brp_i].control,
  1064. brp_list[brp_i].value);
  1065. } else if (breakpoint->type == BKPT_SOFT) {
  1066. uint32_t opcode;
  1067. uint8_t code[4];
  1068. if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
  1069. opcode = ARMV8_HLT(11);
  1070. if (breakpoint->length != 4)
  1071. LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
  1072. } else {
  1073. /**
  1074. * core_state is ARM_STATE_ARM
  1075. * in that case the opcode depends on breakpoint length:
  1076. * - if length == 4 => A32 opcode
  1077. * - if length == 2 => T32 opcode
  1078. * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
  1079. * in that case the length should be changed from 3 to 4 bytes
  1080. **/
  1081. opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
  1082. (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
  1083. if (breakpoint->length == 3)
  1084. breakpoint->length = 4;
  1085. }
  1086. buf_set_u32(code, 0, 32, opcode);
  1087. retval = target_read_memory(target,
  1088. breakpoint->address & 0xFFFFFFFFFFFFFFFE,
  1089. breakpoint->length, 1,
  1090. breakpoint->orig_instr);
  1091. if (retval != ERROR_OK)
  1092. return retval;
  1093. armv8_cache_d_inner_flush_virt(armv8,
  1094. breakpoint->address & 0xFFFFFFFFFFFFFFFE,
  1095. breakpoint->length);
  1096. retval = target_write_memory(target,
  1097. breakpoint->address & 0xFFFFFFFFFFFFFFFE,
  1098. breakpoint->length, 1, code);
  1099. if (retval != ERROR_OK)
  1100. return retval;
  1101. armv8_cache_d_inner_flush_virt(armv8,
  1102. breakpoint->address & 0xFFFFFFFFFFFFFFFE,
  1103. breakpoint->length);
  1104. armv8_cache_i_inner_inval_virt(armv8,
  1105. breakpoint->address & 0xFFFFFFFFFFFFFFFE,
  1106. breakpoint->length);
  1107. breakpoint->set = 0x11; /* Any nice value but 0 */
  1108. }
  1109. /* Ensure that halting debug mode is enable */
  1110. retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
  1111. if (retval != ERROR_OK) {
  1112. LOG_DEBUG("Failed to set DSCR.HDE");
  1113. return retval;
  1114. }
  1115. return ERROR_OK;
  1116. }
  1117. static int aarch64_set_context_breakpoint(struct target *target,
  1118. struct breakpoint *breakpoint, uint8_t matchmode)
  1119. {
  1120. int retval = ERROR_FAIL;
  1121. int brp_i = 0;
  1122. uint32_t control;
  1123. uint8_t byte_addr_select = 0x0F;
  1124. struct aarch64_common *aarch64 = target_to_aarch64(target);
  1125. struct armv8_common *armv8 = &aarch64->armv8_common;
  1126. struct aarch64_brp *brp_list = aarch64->brp_list;
  1127. if (breakpoint->set) {
  1128. LOG_WARNING("breakpoint already set");
  1129. return retval;
  1130. }
  1131. /*check available context BRPs*/
  1132. while ((brp_list[brp_i].used ||
  1133. (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
  1134. brp_i++;
  1135. if (brp_i >= aarch64->brp_num) {
  1136. LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
  1137. return ERROR_FAIL;
  1138. }
  1139. breakpoint->set = brp_i + 1;
  1140. control = ((matchmode & 0x7) << 20)
  1141. | (1 << 13)
  1142. | (byte_addr_select << 5)
  1143. | (3 << 1) | 1;
  1144. brp_list[brp_i].used = 1;
  1145. brp_list[brp_i].value = (breakpoint->asid);
  1146. brp_list[brp_i].control = control;
  1147. retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
  1148. + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
  1149. brp_list[brp_i].value);
  1150. if (retval != ERROR_OK)
  1151. return retval;
  1152. retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
  1153. + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
  1154. brp_list[brp_i].control);
  1155. if (retval != ERROR_OK)
  1156. return retval;
  1157. LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
  1158. brp_list[brp_i].control,
  1159. brp_list[brp_i].value);
  1160. return ERROR_OK;
  1161. }
  1162. static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
  1163. {
  1164. int retval = ERROR_FAIL;
  1165. int brp_1 = 0; /* holds the contextID pair */
  1166. int brp_2 = 0; /* holds the IVA pair */
  1167. uint32_t control_ctx, control_iva;
  1168. uint8_t ctx_byte_addr_select = 0x0F;
  1169. uint8_t iva_byte_addr_select = 0x0F;
  1170. uint8_t ctx_machmode = 0x03;
  1171. uint8_t iva_machmode = 0x01;
  1172. struct aarch64_common *aarch64 = target_to_aarch64(target);
  1173. struct armv8_common *armv8 = &aarch64->armv8_common;
  1174. struct aarch64_brp *brp_list = aarch64->brp_list;
  1175. if (breakpoint->set) {
  1176. LOG_WARNING("breakpoint already set");
  1177. return retval;
  1178. }
  1179. /*check available context BRPs*/
  1180. while ((brp_list[brp_1].used ||
  1181. (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
  1182. brp_1++;
  1183. LOG_DEBUG("brp(CTX) found num: %d", brp_1);
  1184. if (brp_1 >= aarch64->brp_num) {
  1185. LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
  1186. return ERROR_FAIL;
  1187. }
  1188. while ((brp_list[brp_2].used ||
  1189. (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
  1190. brp_2++;
  1191. LOG_DEBUG("brp(IVA) found num: %d", brp_2);
  1192. if (brp_2 >= aarch64->brp_num) {
  1193. LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
  1194. return ERROR_FAIL;
  1195. }
  1196. breakpoint->set = brp_1 + 1;
  1197. breakpoint->linked_brp = brp_2;
  1198. control_ctx = ((ctx_machmode & 0x7) << 20)
  1199. | (brp_2 << 16)
  1200. | (0 << 14)
  1201. | (ctx_byte_addr_select << 5)
  1202. | (3 << 1) | 1;
  1203. brp_list[brp_1].used = 1;
  1204. brp_list[brp_1].value = (breakpoint->asid);
  1205. brp_list[brp_1].control = control_ctx;
  1206. retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
  1207. + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].brpn,
  1208. brp_list[brp_1].value);
  1209. if (retval != ERROR_OK)
  1210. return retval;
  1211. retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
  1212. + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].brpn,
  1213. brp_list[brp_1].control);
  1214. if (retval != ERROR_OK)
  1215. return retval;
  1216. control_iva = ((iva_machmode & 0x7) << 20)
  1217. | (brp_1 << 16)
  1218. | (1 << 13)
  1219. | (iva_byte_addr_select << 5)
  1220. | (3 << 1) | 1;
  1221. brp_list[brp_2].used = 1;
  1222. brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
  1223. brp_list[brp_2].control = control_iva;
  1224. retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
  1225. + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].brpn,
  1226. brp_list[brp_2].value & 0xFFFFFFFF);
  1227. if (retval != ERROR_OK)
  1228. return retval;
  1229. retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
  1230. + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].brpn,
  1231. brp_list[brp_2].value >> 32);
  1232. if (retval != ERROR_OK)
  1233. return retval;
  1234. retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
  1235. + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].brpn,
  1236. brp_list[brp_2].control);
  1237. if (retval != ERROR_OK)
  1238. return retval;
  1239. return ERROR_OK;
  1240. }
/*
 * Undo a breakpoint set by aarch64_set_breakpoint()/set_hybrid_breakpoint():
 * clears the BRP registers for hardware breakpoints (including the linked
 * pair of a hybrid breakpoint), or restores the original instruction for
 * software breakpoints.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* address and asid both non-zero => hybrid breakpoint using
		 * two linked BRPs; clear both */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_brp;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable first (BCR), then clear both BVR words;
			 * value is already 0 so the casts below just write 0
			 * to the low and high words */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].brpn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_brp = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain address-match hardware breakpoint: one BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
		/* flush D-cache and invalidate I-cache so the core fetches
		 * the restored instruction */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
  1363. static int aarch64_add_breakpoint(struct target *target,
  1364. struct breakpoint *breakpoint)
  1365. {
  1366. struct aarch64_common *aarch64 = target_to_aarch64(target);
  1367. if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
  1368. LOG_INFO("no hardware breakpoint available");
  1369. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1370. }
  1371. if (breakpoint->type == BKPT_HARD)
  1372. aarch64->brp_num_available--;
  1373. return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
  1374. }
  1375. static int aarch64_add_context_breakpoint(struct target *target,
  1376. struct breakpoint *breakpoint)
  1377. {
  1378. struct aarch64_common *aarch64 = target_to_aarch64(target);
  1379. if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
  1380. LOG_INFO("no hardware breakpoint available");
  1381. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1382. }
  1383. if (breakpoint->type == BKPT_HARD)
  1384. aarch64->brp_num_available--;
  1385. return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
  1386. }
  1387. static int aarch64_add_hybrid_breakpoint(struct target *target,
  1388. struct breakpoint *breakpoint)
  1389. {
  1390. struct aarch64_common *aarch64 = target_to_aarch64(target);
  1391. if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
  1392. LOG_INFO("no hardware breakpoint available");
  1393. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1394. }
  1395. if (breakpoint->type == BKPT_HARD)
  1396. aarch64->brp_num_available--;
  1397. return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
  1398. }
  1399. static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
  1400. {
  1401. struct aarch64_common *aarch64 = target_to_aarch64(target);
  1402. #if 0
  1403. /* It is perfectly possible to remove breakpoints while the target is running */
  1404. if (target->state != TARGET_HALTED) {
  1405. LOG_WARNING("target not halted");
  1406. return ERROR_TARGET_NOT_HALTED;
  1407. }
  1408. #endif
  1409. if (breakpoint->set) {
  1410. aarch64_unset_breakpoint(target, breakpoint);
  1411. if (breakpoint->type == BKPT_HARD)
  1412. aarch64->brp_num_available++;
  1413. }
  1414. return ERROR_OK;
  1415. }
  1416. /* Setup hardware Watchpoint Register Pair */
  1417. static int aarch64_set_watchpoint(struct target *target,
  1418. struct watchpoint *watchpoint)
  1419. {
  1420. int retval;
  1421. int wp_i = 0;
  1422. uint32_t control, offset, length;
  1423. struct aarch64_common *aarch64 = target_to_aarch64(target);
  1424. struct armv8_common *armv8 = &aarch64->armv8_common;
  1425. struct aarch64_brp *wp_list = aarch64->wp_list;
  1426. if (watchpoint->set) {
  1427. LOG_WARNING("watchpoint already set");
  1428. return ERROR_OK;
  1429. }
  1430. while (wp_list[wp_i].used && (wp_i < aarch64->wp_num))
  1431. wp_i++;
  1432. if (wp_i >= aarch64->wp_num) {
  1433. LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
  1434. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1435. }
  1436. control = (1 << 0) /* enable */
  1437. | (3 << 1) /* both user and privileged access */
  1438. | (1 << 13); /* higher mode control */
  1439. switch (watchpoint->rw) {
  1440. case WPT_READ:
  1441. control |= 1 << 3;
  1442. break;
  1443. case WPT_WRITE:
  1444. control |= 2 << 3;
  1445. break;
  1446. case WPT_ACCESS:
  1447. control |= 3 << 3;
  1448. break;
  1449. }
  1450. /* Match up to 8 bytes. */
  1451. offset = watchpoint->address & 7;
  1452. length = watchpoint->length;
  1453. if (offset + length > sizeof(uint64_t)) {
  1454. length = sizeof(uint64_t) - offset;
  1455. LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
  1456. }
  1457. for (; length > 0; offset++, length--)
  1458. control |= (1 << offset) << 5;
  1459. wp_list[wp_i].value = watchpoint->address & 0xFFFFFFFFFFFFFFF8ULL;
  1460. wp_list[wp_i].control = control;
  1461. retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
  1462. + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
  1463. (uint32_t)(wp_list[wp_i].value & 0xFFFFFFFF));
  1464. if (retval != ERROR_OK)
  1465. return retval;
  1466. retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
  1467. + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
  1468. (uint32_t)(wp_list[wp_i].value >> 32));
  1469. if (retval != ERROR_OK)
  1470. return retval;
  1471. retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
  1472. + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
  1473. control);
  1474. if (retval != ERROR_OK)
  1475. return retval;
  1476. LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, wp_i,
  1477. wp_list[wp_i].control, wp_list[wp_i].value);
  1478. /* Ensure that halting debug mode is enable */
  1479. retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
  1480. if (retval != ERROR_OK) {
  1481. LOG_DEBUG("Failed to set DSCR.HDE");
  1482. return retval;
  1483. }
  1484. wp_list[wp_i].used = 1;
  1485. watchpoint->set = wp_i + 1;
  1486. return ERROR_OK;
  1487. }
/* Clear hardware Watchpoint Register Pair */
static int aarch64_unset_watchpoint(struct target *target,
	struct watchpoint *watchpoint)
{
	int retval, wp_i;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *wp_list = aarch64->wp_list;

	if (!watchpoint->set) {
		LOG_WARNING("watchpoint not set");
		return ERROR_OK;
	}

	/* watchpoint->set stores WP index + 1 */
	wp_i = watchpoint->set - 1;
	if ((wp_i < 0) || (wp_i >= aarch64->wp_num)) {
		LOG_DEBUG("Invalid WP number in watchpoint");
		return ERROR_OK;
	}
	LOG_DEBUG("rwp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, wp_i,
		wp_list[wp_i].control, wp_list[wp_i].value);
	wp_list[wp_i].used = 0;
	wp_list[wp_i].value = 0;
	wp_list[wp_i].control = 0;
	/* disable via WCR first, then clear both words of WVR; value was
	 * just zeroed, so the final (uint32_t) cast simply writes 0 to the
	 * high word as well */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
			wp_list[wp_i].control);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
			wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
			(uint32_t)wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;
	watchpoint->set = 0;

	return ERROR_OK;
}
  1528. static int aarch64_add_watchpoint(struct target *target,
  1529. struct watchpoint *watchpoint)
  1530. {
  1531. int retval;
  1532. struct aarch64_common *aarch64 = target_to_aarch64(target);
  1533. if (aarch64->wp_num_available < 1) {
  1534. LOG_INFO("no hardware watchpoint available");
  1535. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1536. }
  1537. retval = aarch64_set_watchpoint(target, watchpoint);
  1538. if (retval == ERROR_OK)
  1539. aarch64->wp_num_available--;
  1540. return retval;
  1541. }
  1542. static int aarch64_remove_watchpoint(struct target *target,
  1543. struct watchpoint *watchpoint)
  1544. {
  1545. struct aarch64_common *aarch64 = target_to_aarch64(target);
  1546. if (watchpoint->set) {
  1547. aarch64_unset_watchpoint(target, watchpoint);
  1548. aarch64->wp_num_available++;
  1549. }
  1550. return ERROR_OK;
  1551. }
  1552. /**
  1553. * find out which watchpoint hits
  1554. * get exception address and compare the address to watchpoints
  1555. */
  1556. int aarch64_hit_watchpoint(struct target *target,
  1557. struct watchpoint **hit_watchpoint)
  1558. {
  1559. if (target->debug_reason != DBG_REASON_WATCHPOINT)
  1560. return ERROR_FAIL;
  1561. struct armv8_common *armv8 = target_to_armv8(target);
  1562. target_addr_t exception_address;
  1563. struct watchpoint *wp;
  1564. exception_address = armv8->dpm.wp_addr;
  1565. if (exception_address == 0xFFFFFFFF)
  1566. return ERROR_FAIL;
  1567. for (wp = target->watchpoints; wp; wp = wp->next)
  1568. if (exception_address >= wp->address && exception_address < (wp->address + wp->length)) {
  1569. *hit_watchpoint = wp;
  1570. return ERROR_OK;
  1571. }
  1572. return ERROR_FAIL;
  1573. }
  1574. /*
  1575. * Cortex-A8 Reset functions
  1576. */
  1577. static int aarch64_enable_reset_catch(struct target *target, bool enable)
  1578. {
  1579. struct armv8_common *armv8 = target_to_armv8(target);
  1580. uint32_t edecr;
  1581. int retval;
  1582. retval = mem_ap_read_atomic_u32(armv8->debug_ap,
  1583. armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
  1584. LOG_DEBUG("EDECR = 0x%08" PRIx32 ", enable=%d", edecr, enable);
  1585. if (retval != ERROR_OK)
  1586. return retval;
  1587. if (enable)
  1588. edecr |= ECR_RCE;
  1589. else
  1590. edecr &= ~ECR_RCE;
  1591. return mem_ap_write_atomic_u32(armv8->debug_ap,
  1592. armv8->debug_base + CPUV8_DBG_EDECR, edecr);
  1593. }
  1594. static int aarch64_clear_reset_catch(struct target *target)
  1595. {
  1596. struct armv8_common *armv8 = target_to_armv8(target);
  1597. uint32_t edesr;
  1598. int retval;
  1599. bool was_triggered;
  1600. /* check if Reset Catch debug event triggered as expected */
  1601. retval = mem_ap_read_atomic_u32(armv8->debug_ap,
  1602. armv8->debug_base + CPUV8_DBG_EDESR, &edesr);
  1603. if (retval != ERROR_OK)
  1604. return retval;
  1605. was_triggered = !!(edesr & ESR_RC);
  1606. LOG_DEBUG("Reset Catch debug event %s",
  1607. was_triggered ? "triggered" : "NOT triggered!");
  1608. if (was_triggered) {
  1609. /* clear pending Reset Catch debug event */
  1610. edesr &= ~ESR_RC;
  1611. retval = mem_ap_write_atomic_u32(armv8->debug_ap,
  1612. armv8->debug_base + CPUV8_DBG_EDESR, edesr);
  1613. if (retval != ERROR_OK)
  1614. return retval;
  1615. }
  1616. return ERROR_OK;
  1617. }
/*
 * Assert a warm reset: prefer a user-provided RESET_ASSERT event handler,
 * otherwise drive SRST. When reset_halt is requested on an examined
 * target, a Reset Catch debug event is armed so the CPU halts right after
 * reset. Invalidates the register caches and moves the target to
 * TARGET_RESET.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	enum reset_types reset_config = jtag_get_reset_config();
	int retval;

	LOG_DEBUG(" ");

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (reset_config & RESET_HAS_SRST) {
		bool srst_asserted = false;

		if (target->reset_halt) {
			if (target_was_examined(target)) {

				if (reset_config & RESET_SRST_NO_GATING) {
					/*
					 * SRST needs to be asserted *before* Reset Catch
					 * debug event can be set up.
					 */
					adapter_assert_reset();
					srst_asserted = true;

					/* make sure to clear all sticky errors
					 * (best-effort: return value deliberately ignored,
					 * we are about to reset anyway) */
					mem_ap_write_atomic_u32(armv8->debug_ap,
							armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
				}

				/* set up Reset Catch debug event to halt the CPU after reset */
				retval = aarch64_enable_reset_catch(target, true);
				if (retval != ERROR_OK)
					LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
							target_name(target));
			} else {
				LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
						target_name(target));
			}
		}

		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}
/*
 * Deassert SRST and bring the target back under debug control.
 *
 * Re-initializes debug access and polls the core state.  When reset_halt
 * was requested, tears down the Reset Catch debug event armed by
 * aarch64_assert_reset() (clear pending event, then disable it) and, if
 * the core still managed to run, issues an explicit halt.
 */
static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	adapter_deassert_reset();

	/* debug registers are not reachable on an unexamined target */
	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_init_debug_access(target);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		/* clear pending Reset Catch debug event */
		retval = aarch64_clear_reset_catch(target);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Clearing Reset Catch debug event failed",
					target_name(target));

		/* disable Reset Catch debug event */
		retval = aarch64_enable_reset_catch(target, false);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Disabling Reset Catch debug event failed",
					target_name(target));

		/* the core escaped the catch; halt it the ordinary way */
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
					target_name(target));
			retval = target_halt(target);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return ERROR_OK;
}
/*
 * Element-wise memory write through the DCC in Normal (non-memory) mode.
 *
 * Writes 'count' elements of 'size' bytes (1/2/4) from 'buffer'.  The
 * target address must already be loaded in X0/R0 by the caller
 * (aarch64_write_cpu_memory()); X1/R1 is used as data scratch and is
 * marked dirty so it is restored on resume.  'dscr' is the caller's
 * cached DSCR value and is updated if the DCC mode is changed.
 */
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is clobbered as data scratch below */
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX (in target byte order) */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* have the core move DTRRX into X1/R1 */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		/* store X1/R1 to the current address with the size-matched opcode */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
/*
 * Fast bulk word write: with the DCC in memory-access mode, each write
 * to DBGDTRRX stores a word through the preloaded address in X0/R0.
 * Requires a 4-byte aligned start address already in X0/R0 (set up by
 * aarch64_write_cpu_memory()).  Step numbers refer to the algorithm in
 * ARM DDI 0487A.g, chapter J9.1.  'dscr' is updated to track the DCC
 * mode changes made here.
 */
static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is clobbered by the memory-access mode transfer */
	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
			buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
/*
 * Write target memory through the core (APB-AP + DCC) while halted.
 *
 * Puts the DCC into Normal mode, loads the start address into X0/R0,
 * then dispatches to the fast path (word transfers in memory-access
 * mode) for aligned 32-bit accesses or to the element-wise slow path
 * otherwise.  X0 is marked dirty so it is restored on resume.  On any
 * transfer failure, memory-access mode is switched off again; sticky
 * abort flags in DSCR are checked last and reported via the DPM
 * exception handler.
 */
static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}
	if (retval != ERROR_OK)
		return retval;

	if (size == 4 && (address % 4) == 0)
		retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (retval != ERROR_OK) {
		/* Unset DTR mode */
		mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
  1854. static int aarch64_read_cpu_memory_slow(struct target *target,
  1855. uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
  1856. {
  1857. struct armv8_common *armv8 = target_to_armv8(target);
  1858. struct arm_dpm *dpm = &armv8->dpm;
  1859. struct arm *arm = &armv8->arm;
  1860. int retval;
  1861. armv8_reg_current(arm, 1)->dirty = true;
  1862. /* change DCC to normal mode (if necessary) */
  1863. if (*dscr & DSCR_MA) {
  1864. *dscr &= DSCR_MA;
  1865. retval = mem_ap_write_atomic_u32(armv8->debug_ap,
  1866. armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
  1867. if (retval != ERROR_OK)
  1868. return retval;
  1869. }
  1870. while (count) {
  1871. uint32_t opcode, data;
  1872. if (size == 1)
  1873. opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
  1874. else if (size == 2)
  1875. opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
  1876. else
  1877. opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
  1878. retval = dpm->instr_execute(dpm, opcode);
  1879. if (retval != ERROR_OK)
  1880. return retval;
  1881. if (arm->core_state == ARM_STATE_AARCH64)
  1882. retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
  1883. else
  1884. retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
  1885. if (retval != ERROR_OK)
  1886. return retval;
  1887. retval = mem_ap_read_atomic_u32(armv8->debug_ap,
  1888. armv8->debug_base + CPUV8_DBG_DTRTX, &data);
  1889. if (retval != ERROR_OK)
  1890. return retval;
  1891. if (size == 1)
  1892. *buffer = (uint8_t)data;
  1893. else if (size == 2)
  1894. target_buffer_set_u16(target, buffer, (uint16_t)data);
  1895. else
  1896. target_buffer_set_u32(target, buffer, data);
  1897. /* Advance */
  1898. buffer += size;
  1899. --count;
  1900. }
  1901. return ERROR_OK;
  1902. }
/*
 * Fast bulk word read: with the DCC in memory-access mode, each read of
 * DBGDTRTX makes the core load a word from [X0] and increment X0 by 4.
 * Requires a 4-byte aligned start address already in X0/R0 and
 * count >= 1 (set up by aarch64_read_cpu_memory()).  The final word is
 * read only after switching back to Normal mode, hence the count-1
 * transfer in the middle.  Step numbers refer to ARM DDI 0487A.g,
 * chapter J9.1.
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* last word is transferred after leaving memory mode (Step 3.b) */
	count--;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */
	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
				armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	target_buffer_set_u32(target, buffer + count * 4, value);

	return retval;
}
/*
 * Read target memory through the core (APB-AP + DCC) while halted.
 *
 * Puts the DCC into Normal mode, loads the start address into X0/R0,
 * then dispatches to the fast path (word transfers in memory-access
 * mode) for aligned 32-bit accesses or to the element-wise slow path
 * otherwise.  X0 is marked dirty so it is restored on resume.
 * Memory-access mode is always switched off afterwards, even on
 * failure; sticky abort flags in DSCR are checked last and reported via
 * the DPM exception handler.  Algorithm from ARM DDI 0487A.g, ch. J9.1.
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}
	if (retval != ERROR_OK)
		return retval;

	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	/* leave the DCC in Normal mode even if the transfer failed */
	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
  2036. static int aarch64_read_phys_memory(struct target *target,
  2037. target_addr_t address, uint32_t size,
  2038. uint32_t count, uint8_t *buffer)
  2039. {
  2040. int retval = ERROR_COMMAND_SYNTAX_ERROR;
  2041. if (count && buffer) {
  2042. /* read memory through APB-AP */
  2043. retval = aarch64_mmu_modify(target, 0);
  2044. if (retval != ERROR_OK)
  2045. return retval;
  2046. retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
  2047. }
  2048. return retval;
  2049. }
  2050. static int aarch64_read_memory(struct target *target, target_addr_t address,
  2051. uint32_t size, uint32_t count, uint8_t *buffer)
  2052. {
  2053. int mmu_enabled = 0;
  2054. int retval;
  2055. /* determine if MMU was enabled on target stop */
  2056. retval = aarch64_mmu(target, &mmu_enabled);
  2057. if (retval != ERROR_OK)
  2058. return retval;
  2059. if (mmu_enabled) {
  2060. /* enable MMU as we could have disabled it for phys access */
  2061. retval = aarch64_mmu_modify(target, 1);
  2062. if (retval != ERROR_OK)
  2063. return retval;
  2064. }
  2065. return aarch64_read_cpu_memory(target, address, size, count, buffer);
  2066. }
  2067. static int aarch64_write_phys_memory(struct target *target,
  2068. target_addr_t address, uint32_t size,
  2069. uint32_t count, const uint8_t *buffer)
  2070. {
  2071. int retval = ERROR_COMMAND_SYNTAX_ERROR;
  2072. if (count && buffer) {
  2073. /* write memory through APB-AP */
  2074. retval = aarch64_mmu_modify(target, 0);
  2075. if (retval != ERROR_OK)
  2076. return retval;
  2077. return aarch64_write_cpu_memory(target, address, size, count, buffer);
  2078. }
  2079. return retval;
  2080. }
  2081. static int aarch64_write_memory(struct target *target, target_addr_t address,
  2082. uint32_t size, uint32_t count, const uint8_t *buffer)
  2083. {
  2084. int mmu_enabled = 0;
  2085. int retval;
  2086. /* determine if MMU was enabled on target stop */
  2087. retval = aarch64_mmu(target, &mmu_enabled);
  2088. if (retval != ERROR_OK)
  2089. return retval;
  2090. if (mmu_enabled) {
  2091. /* enable MMU as we could have disabled it for phys access */
  2092. retval = aarch64_mmu_modify(target, 1);
  2093. if (retval != ERROR_OK)
  2094. return retval;
  2095. }
  2096. return aarch64_write_cpu_memory(target, address, size, count, buffer);
  2097. }
  2098. static int aarch64_handle_target_request(void *priv)
  2099. {
  2100. struct target *target = priv;
  2101. struct armv8_common *armv8 = target_to_armv8(target);
  2102. int retval;
  2103. if (!target_was_examined(target))
  2104. return ERROR_OK;
  2105. if (!target->dbg_msg_enabled)
  2106. return ERROR_OK;
  2107. if (target->state == TARGET_RUNNING) {
  2108. uint32_t request;
  2109. uint32_t dscr;
  2110. retval = mem_ap_read_atomic_u32(armv8->debug_ap,
  2111. armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
  2112. /* check if we have data */
  2113. while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
  2114. retval = mem_ap_read_atomic_u32(armv8->debug_ap,
  2115. armv8->debug_base + CPUV8_DBG_DTRTX, &request);
  2116. if (retval == ERROR_OK) {
  2117. target_request(target, request);
  2118. retval = mem_ap_read_atomic_u32(armv8->debug_ap,
  2119. armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
  2120. }
  2121. }
  2122. }
  2123. return ERROR_OK;
  2124. }
  2125. static int aarch64_examine_first(struct target *target)
  2126. {
  2127. struct aarch64_common *aarch64 = target_to_aarch64(target);
  2128. struct armv8_common *armv8 = &aarch64->armv8_common;
  2129. struct adiv5_dap *swjdp = armv8->arm.dap;
  2130. struct aarch64_private_config *pc = target->private_config;
  2131. int i;
  2132. int retval = ERROR_OK;
  2133. uint64_t debug, ttypr;
  2134. uint32_t cpuid;
  2135. uint32_t tmp0, tmp1, tmp2, tmp3;
  2136. debug = ttypr = cpuid = 0;
  2137. if (!pc)
  2138. return ERROR_FAIL;
  2139. if (pc->adiv5_config.ap_num == DP_APSEL_INVALID) {
  2140. /* Search for the APB-AB */
  2141. retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
  2142. if (retval != ERROR_OK) {
  2143. LOG_ERROR("Could not find APB-AP for debug access");
  2144. return retval;
  2145. }
  2146. } else {
  2147. armv8->debug_ap = dap_ap(swjdp, pc->adiv5_config.ap_num);
  2148. }
  2149. retval = mem_ap_init(armv8->debug_ap);
  2150. if (retval != ERROR_OK) {
  2151. LOG_ERROR("Could not initialize the APB-AP");
  2152. return retval;
  2153. }
  2154. armv8->debug_ap->memaccess_tck = 10;
  2155. if (!target->dbgbase_set) {
  2156. target_addr_t dbgbase;
  2157. /* Get ROM Table base */
  2158. uint32_t apid;
  2159. int32_t coreidx = target->coreid;
  2160. retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
  2161. if (retval != ERROR_OK)
  2162. return retval;
  2163. /* Lookup 0x15 -- Processor DAP */
  2164. retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
  2165. &armv8->debug_base, &coreidx);
  2166. if (retval != ERROR_OK)
  2167. return retval;
  2168. LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT
  2169. " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
  2170. } else
  2171. armv8->debug_base = target->dbgbase;
  2172. retval = mem_ap_write_atomic_u32(armv8->debug_ap,
  2173. armv8->debug_base + CPUV8_DBG_OSLAR, 0);
  2174. if (retval != ERROR_OK) {
  2175. LOG_DEBUG("Examine %s failed", "oslock");
  2176. return retval;
  2177. }
  2178. retval = mem_ap_read_u32(armv8->debug_ap,
  2179. armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
  2180. if (retval != ERROR_OK) {
  2181. LOG_DEBUG("Examine %s failed", "CPUID");
  2182. return retval;
  2183. }
  2184. retval = mem_ap_read_u32(armv8->debug_ap,
  2185. armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
  2186. retval += mem_ap_read_u32(armv8->debug_ap,
  2187. armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
  2188. if (retval != ERROR_OK) {
  2189. LOG_DEBUG("Examine %s failed", "Memory Model Type");
  2190. return retval;
  2191. }
  2192. retval = mem_ap_read_u32(armv8->debug_ap,
  2193. armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
  2194. retval += mem_ap_read_u32(armv8->debug_ap,
  2195. armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
  2196. if (retval != ERROR_OK) {
  2197. LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
  2198. return retval;
  2199. }
  2200. retval = dap_run(armv8->debug_ap->dap);
  2201. if (retval != ERROR_OK) {
  2202. LOG_ERROR("%s: examination failed\n", target_name(target));
  2203. return retval;
  2204. }
  2205. ttypr |= tmp1;
  2206. ttypr = (ttypr << 32) | tmp0;
  2207. debug |= tmp3;
  2208. debug = (debug << 32) | tmp2;
  2209. LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
  2210. LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
  2211. LOG_DEBUG("debug = 0x%08" PRIx64, debug);
  2212. if (!pc->cti)
  2213. return ERROR_FAIL;
  2214. armv8->cti = pc->cti;
  2215. retval = aarch64_dpm_setup(aarch64, debug);
  2216. if (retval != ERROR_OK)
  2217. return retval;
  2218. /* Setup Breakpoint Register Pairs */
  2219. aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
  2220. aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
  2221. aarch64->brp_num_available = aarch64->brp_num;
  2222. aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
  2223. for (i = 0; i < aarch64->brp_num; i++) {
  2224. aarch64->brp_list[i].used = 0;
  2225. if (i < (aarch64->brp_num-aarch64->brp_num_context))
  2226. aarch64->brp_list[i].type = BRP_NORMAL;
  2227. else
  2228. aarch64->brp_list[i].type = BRP_CONTEXT;
  2229. aarch64->brp_list[i].value = 0;
  2230. aarch64->brp_list[i].control = 0;
  2231. aarch64->brp_list[i].brpn = i;
  2232. }
  2233. /* Setup Watchpoint Register Pairs */
  2234. aarch64->wp_num = (uint32_t)((debug >> 20) & 0x0F) + 1;
  2235. aarch64->wp_num_available = aarch64->wp_num;
  2236. aarch64->wp_list = calloc(aarch64->wp_num, sizeof(struct aarch64_brp));
  2237. for (i = 0; i < aarch64->wp_num; i++) {
  2238. aarch64->wp_list[i].used = 0;
  2239. aarch64->wp_list[i].type = BRP_NORMAL;
  2240. aarch64->wp_list[i].value = 0;
  2241. aarch64->wp_list[i].control = 0;
  2242. aarch64->wp_list[i].brpn = i;
  2243. }
  2244. LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
  2245. aarch64->brp_num, aarch64->wp_num);
  2246. target->state = TARGET_UNKNOWN;
  2247. target->debug_reason = DBG_REASON_NOTHALTED;
  2248. aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
  2249. target_set_examined(target);
  2250. return ERROR_OK;
  2251. }
  2252. static int aarch64_examine(struct target *target)
  2253. {
  2254. int retval = ERROR_OK;
  2255. /* don't re-probe hardware after each reset */
  2256. if (!target_was_examined(target))
  2257. retval = aarch64_examine_first(target);
  2258. /* Configure core debug access */
  2259. if (retval == ERROR_OK)
  2260. retval = aarch64_init_debug_access(target);
  2261. return retval;
  2262. }
/*
 * AArch64 target creation and initialization
 */
  2266. static int aarch64_init_target(struct command_context *cmd_ctx,
  2267. struct target *target)
  2268. {
  2269. /* examine_first() does a bunch of this */
  2270. arm_semihosting_init(target);
  2271. return ERROR_OK;
  2272. }
  2273. static int aarch64_init_arch_info(struct target *target,
  2274. struct aarch64_common *aarch64, struct adiv5_dap *dap)
  2275. {
  2276. struct armv8_common *armv8 = &aarch64->armv8_common;
  2277. /* Setup struct aarch64_common */
  2278. aarch64->common_magic = AARCH64_COMMON_MAGIC;
  2279. armv8->arm.dap = dap;
  2280. /* register arch-specific functions */
  2281. armv8->examine_debug_reason = NULL;
  2282. armv8->post_debug_entry = aarch64_post_debug_entry;
  2283. armv8->pre_restore_context = NULL;
  2284. armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
  2285. armv8_init_arch_info(target, armv8);
  2286. target_register_timer_callback(aarch64_handle_target_request, 1,
  2287. TARGET_TIMER_TYPE_PERIODIC, target);
  2288. return ERROR_OK;
  2289. }
  2290. static int aarch64_target_create(struct target *target, Jim_Interp *interp)
  2291. {
  2292. struct aarch64_private_config *pc = target->private_config;
  2293. struct aarch64_common *aarch64;
  2294. if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
  2295. return ERROR_FAIL;
  2296. aarch64 = calloc(1, sizeof(struct aarch64_common));
  2297. if (!aarch64) {
  2298. LOG_ERROR("Out of memory");
  2299. return ERROR_FAIL;
  2300. }
  2301. return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
  2302. }
  2303. static void aarch64_deinit_target(struct target *target)
  2304. {
  2305. struct aarch64_common *aarch64 = target_to_aarch64(target);
  2306. struct armv8_common *armv8 = &aarch64->armv8_common;
  2307. struct arm_dpm *dpm = &armv8->dpm;
  2308. armv8_free_reg_cache(target);
  2309. free(aarch64->brp_list);
  2310. free(dpm->dbp);
  2311. free(dpm->dwp);
  2312. free(target->private_config);
  2313. free(aarch64);
  2314. }
  2315. static int aarch64_mmu(struct target *target, int *enabled)
  2316. {
  2317. if (target->state != TARGET_HALTED) {
  2318. LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
  2319. return ERROR_TARGET_INVALID;
  2320. }
  2321. *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
  2322. return ERROR_OK;
  2323. }
  2324. static int aarch64_virt2phys(struct target *target, target_addr_t virt,
  2325. target_addr_t *phys)
  2326. {
  2327. return armv8_mmu_translate_va_pa(target, virt, phys, 1);
  2328. }
  2329. /*
  2330. * private target configuration items
  2331. */
/* Keys for the aarch64-specific target configure/cget options. */
enum aarch64_cfg_param {
	CFG_CTI,
};

/* Name/value table used by aarch64_jim_configure() to recognize
 * aarch64-specific options; terminated by a NULL name. */
static const struct jim_nvp nvp_config_opts[] = {
	{ .name = "-cti", .value = CFG_CTI },
	{ .name = NULL, .value = -1 }
};
/*
 * Parse aarch64-specific target configure/cget options ("-cti"), after
 * letting adiv5_jim_configure() consume the common DAP options.
 * Returns JIM_OK when an option was handled, JIM_CONTINUE when the
 * topmost option is not ours, or JIM_ERR on evaluation errors.
 */
static int aarch64_jim_configure(struct target *target, struct jim_getopt_info *goi)
{
	struct aarch64_private_config *pc;
	struct jim_nvp *n;
	int e;

	pc = (struct aarch64_private_config *)target->private_config;
	if (!pc) {
		/* first option seen for this target: allocate private config lazily */
		pc = calloc(1, sizeof(struct aarch64_private_config));
		pc->adiv5_config.ap_num = DP_APSEL_INVALID;
		target->private_config = pc;
	}

	/*
	 * Call adiv5_jim_configure() to parse the common DAP options
	 * It will return JIM_CONTINUE if it didn't find any known
	 * options, JIM_OK if it correctly parsed the topmost option
	 * and JIM_ERR if an error occurred during parameter evaluation.
	 * For JIM_CONTINUE, we check our own params.
	 *
	 * adiv5_jim_configure() assumes 'private_config' to point to
	 * 'struct adiv5_private_config'. Override 'private_config'!
	 */
	target->private_config = &pc->adiv5_config;
	e = adiv5_jim_configure(target, goi);
	target->private_config = pc;
	if (e != JIM_CONTINUE)
		return e;

	/* parse config or cget options ... */
	if (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);

		/* check first if topmost item is for us */
		e = jim_nvp_name2value_obj(goi->interp, nvp_config_opts,
				goi->argv[0], &n);
		if (e != JIM_OK)
			return JIM_CONTINUE;

		e = jim_getopt_obj(goi, NULL);
		if (e != JIM_OK)
			return e;

		switch (n->value) {
		case CFG_CTI: {
			if (goi->isconfigure) {
				/* "configure -cti <name>": look up and attach the CTI */
				Jim_Obj *o_cti;
				struct arm_cti *cti;
				e = jim_getopt_obj(goi, &o_cti);
				if (e != JIM_OK)
					return e;
				cti = cti_instance_by_jim_obj(goi->interp, o_cti);
				if (!cti) {
					Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
					return JIM_ERR;
				}
				pc->cti = cti;
			} else {
				/* "cget -cti": report the configured CTI name */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}

				if (!pc || !pc->cti) {
					Jim_SetResultString(goi->interp, "CTI not configured", -1);
					return JIM_ERR;
				}
				Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
			}
			break;
		}

		default:
			return JIM_CONTINUE;
		}
	}

	return JIM_OK;
}
  2411. COMMAND_HANDLER(aarch64_handle_cache_info_command)
  2412. {
  2413. struct target *target = get_current_target(CMD_CTX);
  2414. struct armv8_common *armv8 = target_to_armv8(target);
  2415. return armv8_handle_cache_info_command(CMD,
  2416. &armv8->armv8_mmu.armv8_cache);
  2417. }
  2418. COMMAND_HANDLER(aarch64_handle_dbginit_command)
  2419. {
  2420. struct target *target = get_current_target(CMD_CTX);
  2421. if (!target_was_examined(target)) {
  2422. LOG_ERROR("target not examined yet");
  2423. return ERROR_FAIL;
  2424. }
  2425. return aarch64_init_debug_access(target);
  2426. }
  2427. COMMAND_HANDLER(aarch64_handle_disassemble_command)
  2428. {
  2429. struct target *target = get_current_target(CMD_CTX);
  2430. if (!target) {
  2431. LOG_ERROR("No target selected");
  2432. return ERROR_FAIL;
  2433. }
  2434. struct aarch64_common *aarch64 = target_to_aarch64(target);
  2435. if (aarch64->common_magic != AARCH64_COMMON_MAGIC) {
  2436. command_print(CMD, "current target isn't an AArch64");
  2437. return ERROR_FAIL;
  2438. }
  2439. int count = 1;
  2440. target_addr_t address;
  2441. switch (CMD_ARGC) {
  2442. case 2:
  2443. COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
  2444. /* FALL THROUGH */
  2445. case 1:
  2446. COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
  2447. break;
  2448. default:
  2449. return ERROR_COMMAND_SYNTAX_ERROR;
  2450. }
  2451. return a64_disassemble(CMD, target, address, count);
  2452. }
  2453. COMMAND_HANDLER(aarch64_mask_interrupts_command)
  2454. {
  2455. struct target *target = get_current_target(CMD_CTX);
  2456. struct aarch64_common *aarch64 = target_to_aarch64(target);
  2457. static const struct jim_nvp nvp_maskisr_modes[] = {
  2458. { .name = "off", .value = AARCH64_ISRMASK_OFF },
  2459. { .name = "on", .value = AARCH64_ISRMASK_ON },
  2460. { .name = NULL, .value = -1 },
  2461. };
  2462. const struct jim_nvp *n;
  2463. if (CMD_ARGC > 0) {
  2464. n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
  2465. if (!n->name) {
  2466. LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
  2467. return ERROR_COMMAND_SYNTAX_ERROR;
  2468. }
  2469. aarch64->isrmasking_mode = n->value;
  2470. }
  2471. n = jim_nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
  2472. command_print(CMD, "aarch64 interrupt mask %s", n->name);
  2473. return ERROR_OK;
  2474. }
  2475. static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
  2476. {
  2477. struct command *c = jim_to_command(interp);
  2478. struct command_context *context;
  2479. struct target *target;
  2480. struct arm *arm;
  2481. int retval;
  2482. bool is_mcr = false;
  2483. int arg_cnt = 0;
  2484. if (!strcmp(c->name, "mcr")) {
  2485. is_mcr = true;
  2486. arg_cnt = 7;
  2487. } else {
  2488. arg_cnt = 6;
  2489. }
  2490. context = current_command_context(interp);
  2491. assert(context);
  2492. target = get_current_target(context);
  2493. if (!target) {
  2494. LOG_ERROR("%s: no current target", __func__);
  2495. return JIM_ERR;
  2496. }
  2497. if (!target_was_examined(target)) {
  2498. LOG_ERROR("%s: not yet examined", target_name(target));
  2499. return JIM_ERR;
  2500. }
  2501. arm = target_to_arm(target);
  2502. if (!is_arm(arm)) {
  2503. LOG_ERROR("%s: not an ARM", target_name(target));
  2504. return JIM_ERR;
  2505. }
  2506. if (target->state != TARGET_HALTED)
  2507. return ERROR_TARGET_NOT_HALTED;
  2508. if (arm->core_state == ARM_STATE_AARCH64) {
  2509. LOG_ERROR("%s: not 32-bit arm target", target_name(target));
  2510. return JIM_ERR;
  2511. }
  2512. if (argc != arg_cnt) {
  2513. LOG_ERROR("%s: wrong number of arguments", __func__);
  2514. return JIM_ERR;
  2515. }
  2516. int cpnum;
  2517. uint32_t op1;
  2518. uint32_t op2;
  2519. uint32_t crn;
  2520. uint32_t crm;
  2521. uint32_t value;
  2522. long l;
  2523. /* NOTE: parameter sequence matches ARM instruction set usage:
  2524. * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
  2525. * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
  2526. * The "rX" is necessarily omitted; it uses Tcl mechanisms.
  2527. */
  2528. retval = Jim_GetLong(interp, argv[1], &l);
  2529. if (retval != JIM_OK)
  2530. return retval;
  2531. if (l & ~0xf) {
  2532. LOG_ERROR("%s: %s %d out of range", __func__,
  2533. "coprocessor", (int) l);
  2534. return JIM_ERR;
  2535. }
  2536. cpnum = l;
  2537. retval = Jim_GetLong(interp, argv[2], &l);
  2538. if (retval != JIM_OK)
  2539. return retval;
  2540. if (l & ~0x7) {
  2541. LOG_ERROR("%s: %s %d out of range", __func__,
  2542. "op1", (int) l);
  2543. return JIM_ERR;
  2544. }
  2545. op1 = l;
  2546. retval = Jim_GetLong(interp, argv[3], &l);
  2547. if (retval != JIM_OK)
  2548. return retval;
  2549. if (l & ~0xf) {
  2550. LOG_ERROR("%s: %s %d out of range", __func__,
  2551. "CRn", (int) l);
  2552. return JIM_ERR;
  2553. }
  2554. crn = l;
  2555. retval = Jim_GetLong(interp, argv[4], &l);
  2556. if (retval != JIM_OK)
  2557. return retval;
  2558. if (l & ~0xf) {
  2559. LOG_ERROR("%s: %s %d out of range", __func__,
  2560. "CRm", (int) l);
  2561. return JIM_ERR;
  2562. }
  2563. crm = l;
  2564. retval = Jim_GetLong(interp, argv[5], &l);
  2565. if (retval != JIM_OK)
  2566. return retval;
  2567. if (l & ~0x7) {
  2568. LOG_ERROR("%s: %s %d out of range", __func__,
  2569. "op2", (int) l);
  2570. return JIM_ERR;
  2571. }
  2572. op2 = l;
  2573. value = 0;
  2574. if (is_mcr == true) {
  2575. retval = Jim_GetLong(interp, argv[6], &l);
  2576. if (retval != JIM_OK)
  2577. return retval;
  2578. value = l;
  2579. /* NOTE: parameters reordered! */
  2580. /* ARMV4_5_MCR(cpnum, op1, 0, crn, crm, op2) */
  2581. retval = arm->mcr(target, cpnum, op1, op2, crn, crm, value);
  2582. if (retval != ERROR_OK)
  2583. return JIM_ERR;
  2584. } else {
  2585. /* NOTE: parameters reordered! */
  2586. /* ARMV4_5_MRC(cpnum, op1, 0, crn, crm, op2) */
  2587. retval = arm->mrc(target, cpnum, op1, op2, crn, crm, &value);
  2588. if (retval != ERROR_OK)
  2589. return JIM_ERR;
  2590. Jim_SetResult(interp, Jim_NewIntObj(interp, value));
  2591. }
  2592. return JIM_OK;
  2593. }
/* Sub-commands registered under the "aarch64" command group. */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "disassemble",
		.handler = aarch64_handle_disassemble_command,
		.mode = COMMAND_EXEC,
		.help = "Disassemble instructions",
		.usage = "address [count]",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	/* mcr/mrc share one Jim handler; it dispatches on the command name */
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	/* pull in the generic "smp" commands shared by SMP-capable targets */
	{
		.chain = smp_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Semihosting commands are defined in semihosting_common.c */
extern const struct command_registration semihosting_common_handlers[];

/* Top-level command groups exported by this target type. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM Command Group",
		.usage = "",
		.chain = semihosting_common_handlers
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* OpenOCD target driver vtable for ARMv8-A (AArch64) cores. */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = armv8_get_gdb_arch,
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = aarch64_add_watchpoint,
	.remove_watchpoint = aarch64_remove_watchpoint,
	.hit_watchpoint = aarch64_hit_watchpoint,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,

	/* physical-address access and address translation hooks */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};