You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

2292 lines
66 KiB

  1. /***************************************************************************
  2. * Copyright (C) 2013-2015,2019-2020 Synopsys, Inc. *
  3. * Frank Dols <frank.dols@synopsys.com> *
  4. * Mischa Jonker <mischa.jonker@synopsys.com> *
  5. * Anton Kolesov <anton.kolesov@synopsys.com> *
  6. * Evgeniy Didin <didin@synopsys.com> *
  7. * *
  8. * SPDX-License-Identifier: GPL-2.0-or-later *
  9. ***************************************************************************/
  10. #ifdef HAVE_CONFIG_H
  11. #include "config.h"
  12. #endif
  13. #include "arc.h"
  14. /*
  15. * ARC architecture specific details.
  16. *
  17. * ARC has two types of registers:
  18. * 1) core registers(e.g. r0,r1..) [is_core = true]
  19. * 2) Auxiliary registers [is_core = false]..
  20. *
  21. * Auxiliary registers at the same time can be divided into
  22. * read-only BCR(build configuration regs, e.g. isa_config, mpu_build) and
  23. * R/RW non-BCR ("control" register, e.g. pc, status32_t, debug).
  24. *
  25. * The way of accessing to Core and AUX registers differs on Jtag level.
* BCR/non-BCR indicates whether the register is immutable and whether
* reading a nonexistent register is a safe read-as-zero (RAZ), rather than an error.
  28. * Note, core registers cannot be BCR.
  29. *
  30. * In arc/cpu/ tcl files all registers are defined as core, non-BCR aux
  31. * and BCR aux, in "add-reg" command they are passed to three lists
  32. * respectively: core_reg_descriptions, aux_reg_descriptions,
  33. * bcr_reg_descriptions.
  34. *
  35. * Due to the specifics of accessing to BCR/non-BCR registers there are two
  36. * register caches:
  37. * 1) core_and_aux_cache - includes registers described in
  38. * core_reg_descriptions and aux_reg_descriptions lists.
  39. * Used during save/restore context step.
  40. * 2) bcr_cache - includes registers described bcr_reg_descriptions.
  41. * Currently used internally during configure step.
  42. */
  43. static int arc_remove_watchpoint(struct target *target,
  44. struct watchpoint *watchpoint);
  45. void arc_reg_data_type_add(struct target *target,
  46. struct arc_reg_data_type *data_type)
  47. {
  48. LOG_DEBUG("Adding %s reg_data_type", data_type->data_type.id);
  49. struct arc_common *arc = target_to_arc(target);
  50. assert(arc);
  51. list_add_tail(&data_type->list, &arc->reg_data_types);
  52. }
  53. /**
  54. * Private implementation of register_get_by_name() for ARC that
  55. * doesn't skip not [yet] existing registers. Used in many places
  56. * for iteration through registers and even for marking required registers as
  57. * existing.
  58. */
  59. struct reg *arc_reg_get_by_name(struct reg_cache *first,
  60. const char *name, bool search_all)
  61. {
  62. unsigned int i;
  63. struct reg_cache *cache = first;
  64. while (cache) {
  65. for (i = 0; i < cache->num_regs; i++) {
  66. if (!strcmp(cache->reg_list[i].name, name))
  67. return &(cache->reg_list[i]);
  68. }
  69. if (search_all)
  70. cache = cache->next;
  71. else
  72. break;
  73. }
  74. return NULL;
  75. }
  76. /**
  77. * Reset internal states of caches. Must be called when entering debugging.
  78. *
  79. * @param target Target for which to reset caches states.
  80. */
  81. int arc_reset_caches_states(struct target *target)
  82. {
  83. struct arc_common *arc = target_to_arc(target);
  84. LOG_DEBUG("Resetting internal variables of caches states");
  85. /* Reset caches states. */
  86. arc->dcache_flushed = false;
  87. arc->l2cache_flushed = false;
  88. arc->icache_invalidated = false;
  89. arc->dcache_invalidated = false;
  90. arc->l2cache_invalidated = false;
  91. return ERROR_OK;
  92. }
/* Initialize the arc_common structure that is attached to the OpenOCD target
 * instance as arch_info. Validates the TAP, sets cache-handling defaults,
 * registers the standard GDB data types and resets all register-description
 * bookkeeping. Returns ERROR_OK, or ERROR_FAIL on bad IR length / OOM. */
static int arc_init_arch_info(struct target *target, struct arc_common *arc,
	struct jtag_tap *tap)
{
	arc->common_magic = ARC_COMMON_MAGIC;
	/* arch_info is attached before validation below; on failure the target
	 * still points at this (partially initialized) structure. */
	target->arch_info = arc;
	arc->jtag_info.tap = tap;
	/* The only allowed ir_length is 4 for ARC jtag. */
	if (tap->ir_length != 4) {
		LOG_ERROR("ARC jtag instruction length should be equal to 4");
		return ERROR_FAIL;
	}
	/* On most ARC targets there is a dcache, so we enable its flushing
	 * by default. If there no dcache, there will be no error, just a slight
	 * performance penalty from unnecessary JTAG operations. */
	arc->has_dcache = true;
	arc->has_icache = true;
	/* L2$ is not available in a target by default. */
	arc->has_l2cache = false;
	arc_reset_caches_states(target);
	/* Add standard GDB data types. The calloc'd array is linked into
	 * arc->reg_data_types node-by-node and is never freed here — it lives
	 * for the lifetime of the target (NOTE(review): presumably released
	 * with the target; confirm in deinit path). */
	INIT_LIST_HEAD(&arc->reg_data_types);
	struct arc_reg_data_type *std_types = calloc(ARRAY_SIZE(standard_gdb_types),
		sizeof(*std_types));
	if (!std_types) {
		LOG_ERROR("Unable to allocate memory");
		return ERROR_FAIL;
	}
	for (unsigned int i = 0; i < ARRAY_SIZE(standard_gdb_types); i++) {
		std_types[i].data_type.type = standard_gdb_types[i].type;
		std_types[i].data_type.id = standard_gdb_types[i].id;
		arc_reg_data_type_add(target, &(std_types[i]));
	}
	/* Fields related to target descriptions */
	INIT_LIST_HEAD(&arc->core_reg_descriptions);
	INIT_LIST_HEAD(&arc->aux_reg_descriptions);
	INIT_LIST_HEAD(&arc->bcr_reg_descriptions);
	arc->num_regs = 0;
	arc->num_core_regs = 0;
	arc->num_aux_regs = 0;
	arc->num_bcr_regs = 0;
	/* ULONG_MAX means "not assigned yet"; arc_build_reg_cache relies on
	 * these sentinels to detect missing pc/debug registers. */
	arc->last_general_reg = ULONG_MAX;
	arc->pc_index_in_cache = ULONG_MAX;
	arc->debug_index_in_cache = ULONG_MAX;
	return ERROR_OK;
}
  139. int arc_reg_add(struct target *target, struct arc_reg_desc *arc_reg,
  140. const char * const type_name, const size_t type_name_len)
  141. {
  142. assert(target);
  143. assert(arc_reg);
  144. struct arc_common *arc = target_to_arc(target);
  145. assert(arc);
  146. /* Find register type */
  147. {
  148. struct arc_reg_data_type *type;
  149. list_for_each_entry(type, &arc->reg_data_types, list)
  150. if (!strncmp(type->data_type.id, type_name, type_name_len)) {
  151. arc_reg->data_type = &(type->data_type);
  152. break;
  153. }
  154. if (!arc_reg->data_type)
  155. return ERROR_ARC_REGTYPE_NOT_FOUND;
  156. }
  157. if (arc_reg->is_core) {
  158. list_add_tail(&arc_reg->list, &arc->core_reg_descriptions);
  159. arc->num_core_regs += 1;
  160. } else if (arc_reg->is_bcr) {
  161. list_add_tail(&arc_reg->list, &arc->bcr_reg_descriptions);
  162. arc->num_bcr_regs += 1;
  163. } else {
  164. list_add_tail(&arc_reg->list, &arc->aux_reg_descriptions);
  165. arc->num_aux_regs += 1;
  166. }
  167. arc->num_regs += 1;
  168. LOG_DEBUG(
  169. "added register {name=%s, num=0x%" PRIx32 ", type=%s%s%s%s}",
  170. arc_reg->name, arc_reg->arch_num, arc_reg->data_type->id,
  171. arc_reg->is_core ? ", core" : "", arc_reg->is_bcr ? ", bcr" : "",
  172. arc_reg->is_general ? ", general" : ""
  173. );
  174. return ERROR_OK;
  175. }
  176. /* Reading core or aux register */
  177. static int arc_get_register(struct reg *reg)
  178. {
  179. assert(reg);
  180. struct arc_reg_desc *desc = reg->arch_info;
  181. struct target *target = desc->target;
  182. struct arc_common *arc = target_to_arc(target);
  183. uint32_t value;
  184. if (reg->valid) {
  185. LOG_DEBUG("Get register (cached) gdb_num=%" PRIu32 ", name=%s, value=0x%" PRIx32,
  186. reg->number, desc->name, target_buffer_get_u32(target, reg->value));
  187. return ERROR_OK;
  188. }
  189. if (desc->is_core) {
  190. /* Accessing to R61/R62 registers causes Jtag hang */
  191. if (desc->arch_num == ARC_R61 || desc->arch_num == ARC_R62) {
  192. LOG_ERROR("It is forbidden to read core registers 61 and 62.");
  193. return ERROR_FAIL;
  194. }
  195. CHECK_RETVAL(arc_jtag_read_core_reg_one(&arc->jtag_info, desc->arch_num,
  196. &value));
  197. } else {
  198. CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, desc->arch_num,
  199. &value));
  200. }
  201. target_buffer_set_u32(target, reg->value, value);
  202. /* If target is unhalted all register reads should be uncached. */
  203. if (target->state == TARGET_HALTED)
  204. reg->valid = true;
  205. else
  206. reg->valid = false;
  207. reg->dirty = false;
  208. LOG_DEBUG("Get register gdb_num=%" PRIu32 ", name=%s, value=0x%" PRIx32,
  209. reg->number, desc->name, value);
  210. return ERROR_OK;
  211. }
  212. /* Writing core or aux register */
  213. static int arc_set_register(struct reg *reg, uint8_t *buf)
  214. {
  215. struct arc_reg_desc *desc = reg->arch_info;
  216. struct target *target = desc->target;
  217. uint32_t value = target_buffer_get_u32(target, buf);
  218. /* Unlike "get" function "set" is supported only if target
  219. * is in halt mode. Async writes are not supported yet. */
  220. if (target->state != TARGET_HALTED)
  221. return ERROR_TARGET_NOT_HALTED;
  222. /* Accessing to R61/R62 registers causes Jtag hang */
  223. if (desc->is_core && (desc->arch_num == ARC_R61 ||
  224. desc->arch_num == ARC_R62)) {
  225. LOG_ERROR("It is forbidden to write core registers 61 and 62.");
  226. return ERROR_FAIL;
  227. }
  228. target_buffer_set_u32(target, reg->value, value);
  229. LOG_DEBUG("Set register gdb_num=%" PRIu32 ", name=%s, value=0x%08" PRIx32,
  230. reg->number, desc->name, value);
  231. reg->valid = true;
  232. reg->dirty = true;
  233. return ERROR_OK;
  234. }
/* Register access vtable shared by all ARC core/aux/BCR registers. */
const struct reg_arch_type arc_reg_type = {
	.get = arc_get_register,
	.set = arc_set_register,
};
/* GDB register groups. For now we support only general and "empty" */
static const char * const reg_group_general = "general";
static const char * const reg_group_other = "";
  242. /* Common code to initialize `struct reg` for different registers: core, aux, bcr. */
  243. static int arc_init_reg(struct target *target, struct reg *reg,
  244. struct arc_reg_desc *reg_desc, unsigned long number)
  245. {
  246. assert(target);
  247. assert(reg);
  248. assert(reg_desc);
  249. struct arc_common *arc = target_to_arc(target);
  250. /* Initialize struct reg */
  251. reg->name = reg_desc->name;
  252. reg->size = 32; /* All register in ARC are 32-bit */
  253. reg->value = reg_desc->reg_value;
  254. reg->type = &arc_reg_type;
  255. reg->arch_info = reg_desc;
  256. reg->caller_save = true; /* @todo should be configurable. */
  257. reg->reg_data_type = reg_desc->data_type;
  258. reg->feature = &reg_desc->feature;
  259. reg->feature->name = reg_desc->gdb_xml_feature;
  260. /* reg->number is used by OpenOCD as value for @regnum. Thus when setting
  261. * value of a register GDB will use it as a number of register in
  262. * P-packet. OpenOCD gdbserver will then use number of register in
  263. * P-packet as an array index in the reg_list returned by
  264. * arc_regs_get_gdb_reg_list. So to ensure that registers are assigned
  265. * correctly it would be required to either sort registers in
  266. * arc_regs_get_gdb_reg_list or to assign numbers sequentially here and
  267. * according to how registers will be sorted in
  268. * arc_regs_get_gdb_reg_list. Second options is much more simpler. */
  269. reg->number = number;
  270. if (reg_desc->is_general) {
  271. arc->last_general_reg = reg->number;
  272. reg->group = reg_group_general;
  273. } else {
  274. reg->group = reg_group_other;
  275. }
  276. return ERROR_OK;
  277. }
/* Build the combined core+aux reg_cache and link it into the target's cache
 * chain. Also records the cache indices of the mandatory `pc' and `debug'
 * registers. Returns ERROR_FAIL if descriptions are missing or duplicated. */
static int arc_build_reg_cache(struct target *target)
{
	unsigned long i = 0;
	struct arc_reg_desc *reg_desc;
	/* get pointers to arch-specific information */
	struct arc_common *arc = target_to_arc(target);
	const unsigned long num_regs = arc->num_core_regs + arc->num_aux_regs;
	struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
	struct reg_cache *cache = calloc(1, sizeof(*cache));
	struct reg *reg_list = calloc(num_regs, sizeof(*reg_list));
	if (!cache || !reg_list) {
		LOG_ERROR("Not enough memory");
		goto fail;
	}
	/* Build the process context cache */
	cache->name = "arc registers";
	cache->next = NULL;
	cache->reg_list = reg_list;
	cache->num_regs = num_regs;
	arc->core_and_aux_cache = cache;
	/* NOTE(review): the cache is linked into target->reg_cache here, before
	 * the loops below can still fail. On the `fail' path the cache is
	 * freed but this link (and core_and_aux_cache) still point at it —
	 * confirm callers treat a failure here as fatal for the target. */
	(*cache_p) = cache;
	if (list_empty(&arc->core_reg_descriptions)) {
		LOG_ERROR("No core registers were defined");
		goto fail;
	}
	/* Core registers occupy the first num_core_regs slots. */
	list_for_each_entry(reg_desc, &arc->core_reg_descriptions, list) {
		/* NOTE(review): CHECK_RETVAL returns directly on error,
		 * bypassing the `fail' cleanup — verify arc_init_reg cannot
		 * fail in practice. */
		CHECK_RETVAL(arc_init_reg(target, &reg_list[i], reg_desc, i));
		LOG_DEBUG("reg n=%3li name=%3s group=%s feature=%s", i,
			reg_list[i].name, reg_list[i].group,
			reg_list[i].feature->name);
		i += 1;
	}
	if (list_empty(&arc->aux_reg_descriptions)) {
		LOG_ERROR("No aux registers were defined");
		goto fail;
	}
	/* Aux registers follow the core ones; numbering continues with i. */
	list_for_each_entry(reg_desc, &arc->aux_reg_descriptions, list) {
		CHECK_RETVAL(arc_init_reg(target, &reg_list[i], reg_desc, i));
		LOG_DEBUG("reg n=%3li name=%3s group=%s feature=%s", i,
			reg_list[i].name, reg_list[i].group,
			reg_list[i].feature->name);
		/* PC and DEBUG are essential so we search for them. */
		if (!strcmp("pc", reg_desc->name)) {
			if (arc->pc_index_in_cache != ULONG_MAX) {
				LOG_ERROR("Double definition of PC in configuration");
				goto fail;
			}
			arc->pc_index_in_cache = i;
		} else if (!strcmp("debug", reg_desc->name)) {
			if (arc->debug_index_in_cache != ULONG_MAX) {
				LOG_ERROR("Double definition of DEBUG in configuration");
				goto fail;
			}
			arc->debug_index_in_cache = i;
		}
		i += 1;
	}
	if (arc->pc_index_in_cache == ULONG_MAX
			|| arc->debug_index_in_cache == ULONG_MAX) {
		LOG_ERROR("`pc' and `debug' registers must be present in target description.");
		goto fail;
	}
	assert(i == (arc->num_core_regs + arc->num_aux_regs));
	arc->core_aux_cache_built = true;
	return ERROR_OK;
fail:
	free(cache);
	free(reg_list);
	return ERROR_FAIL;
}
  349. /* Build bcr reg_cache.
  350. * This function must be called only after arc_build_reg_cache */
  351. static int arc_build_bcr_reg_cache(struct target *target)
  352. {
  353. /* get pointers to arch-specific information */
  354. struct arc_common *arc = target_to_arc(target);
  355. const unsigned long num_regs = arc->num_bcr_regs;
  356. struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
  357. struct reg_cache *cache = malloc(sizeof(*cache));
  358. struct reg *reg_list = calloc(num_regs, sizeof(*reg_list));
  359. struct arc_reg_desc *reg_desc;
  360. unsigned long i = 0;
  361. unsigned long gdb_regnum = arc->core_and_aux_cache->num_regs;
  362. if (!cache || !reg_list) {
  363. LOG_ERROR("Unable to allocate memory");
  364. goto fail;
  365. }
  366. /* Build the process context cache */
  367. cache->name = "arc.bcr";
  368. cache->next = NULL;
  369. cache->reg_list = reg_list;
  370. cache->num_regs = num_regs;
  371. arc->bcr_cache = cache;
  372. (*cache_p) = cache;
  373. if (list_empty(&arc->bcr_reg_descriptions)) {
  374. LOG_ERROR("No BCR registers are defined");
  375. goto fail;
  376. }
  377. list_for_each_entry(reg_desc, &arc->bcr_reg_descriptions, list) {
  378. CHECK_RETVAL(arc_init_reg(target, &reg_list[i], reg_desc, gdb_regnum));
  379. /* BCRs always semantically, they are just read-as-zero, if there is
  380. * not real register. */
  381. reg_list[i].exist = true;
  382. LOG_DEBUG("reg n=%3li name=%3s group=%s feature=%s", i,
  383. reg_list[i].name, reg_list[i].group,
  384. reg_list[i].feature->name);
  385. i += 1;
  386. gdb_regnum += 1;
  387. }
  388. assert(i == arc->num_bcr_regs);
  389. arc->bcr_cache_built = true;
  390. return ERROR_OK;
  391. fail:
  392. free(cache);
  393. free(reg_list);
  394. return ERROR_FAIL;
  395. }
/* Produce the register list GDB asks for. For REG_CLASS_ALL every register
 * from every cache is returned (including !exist ones); for REG_CLASS_GENERAL
 * only existing registers with GDB numbers up to last_general_reg are
 * returned. The caller owns (frees) the returned array of pointers. */
static int arc_get_gdb_reg_list(struct target *target, struct reg **reg_list[],
	int *reg_list_size, enum target_register_class reg_class)
{
	assert(target->reg_cache);
	struct arc_common *arc = target_to_arc(target);
	/* get pointers to arch-specific information storage */
	*reg_list_size = arc->num_regs;
	*reg_list = calloc(*reg_list_size, sizeof(struct reg *));
	if (!*reg_list) {
		LOG_ERROR("Unable to allocate memory");
		return ERROR_FAIL;
	}
	/* OpenOCD gdb_server API seems to be inconsistent here: when it generates
	 * XML tdesc it filters out !exist registers, however when creating a
	 * g-packet it doesn't do so. REG_CLASS_ALL is used in first case, and
	 * REG_CLASS_GENERAL used in the latter one. Due to this we had to filter
	 * out !exist register for "general", but not for "all". Attempts to filter out
	 * !exist for "all" as well will cause a failed check in OpenOCD GDB
	 * server. */
	if (reg_class == REG_CLASS_ALL) {
		/* Flatten all caches into one array, in cache order. */
		unsigned long i = 0;
		struct reg_cache *reg_cache = target->reg_cache;
		while (reg_cache) {
			for (unsigned j = 0; j < reg_cache->num_regs; j++, i++)
				(*reg_list)[i] = &reg_cache->reg_list[j];
			reg_cache = reg_cache->next;
		}
		assert(i == arc->num_regs);
		LOG_DEBUG("REG_CLASS_ALL: number of regs=%i", *reg_list_size);
	} else {
		/* i counts emitted registers; gdb_reg_number tracks the GDB
		 * number of the register being considered, which differs from
		 * i once !exist registers have been skipped. */
		unsigned long i = 0;
		unsigned long gdb_reg_number = 0;
		struct reg_cache *reg_cache = target->reg_cache;
		while (reg_cache) {
			/* NOTE(review): if no register was marked general,
			 * last_general_reg stays ULONG_MAX and this bound never
			 * trips, so ALL existing registers are returned —
			 * confirm that is the intended fallback. */
			for (unsigned j = 0;
				j < reg_cache->num_regs && gdb_reg_number <= arc->last_general_reg;
				j++) {
				if (reg_cache->reg_list[j].exist) {
					(*reg_list)[i] = &reg_cache->reg_list[j];
					i++;
				}
				gdb_reg_number += 1;
			}
			reg_cache = reg_cache->next;
		}
		/* Shrink the reported size to the number actually emitted. */
		*reg_list_size = i;
		LOG_DEBUG("REG_CLASS_GENERAL: number of regs=%i", *reg_list_size);
	}
	return ERROR_OK;
}
  446. /* Reading field of struct_type register */
  447. int arc_reg_get_field(struct target *target, const char *reg_name,
  448. const char *field_name, uint32_t *value_ptr)
  449. {
  450. struct reg_data_type_struct_field *field;
  451. LOG_DEBUG("getting register field (reg_name=%s, field_name=%s)", reg_name, field_name);
  452. /* Get register */
  453. struct reg *reg = arc_reg_get_by_name(target->reg_cache, reg_name, true);
  454. if (!reg) {
  455. LOG_ERROR("Requested register `%s' doesn't exist.", reg_name);
  456. return ERROR_ARC_REGISTER_NOT_FOUND;
  457. }
  458. if (reg->reg_data_type->type != REG_TYPE_ARCH_DEFINED
  459. || reg->reg_data_type->type_class != REG_TYPE_CLASS_STRUCT)
  460. return ERROR_ARC_REGISTER_IS_NOT_STRUCT;
  461. /* Get field in a register */
  462. struct reg_data_type_struct *reg_struct =
  463. reg->reg_data_type->reg_type_struct;
  464. for (field = reg_struct->fields;
  465. field;
  466. field = field->next) {
  467. if (!strcmp(field->name, field_name))
  468. break;
  469. }
  470. if (!field)
  471. return ERROR_ARC_REGISTER_FIELD_NOT_FOUND;
  472. if (!field->use_bitfields)
  473. return ERROR_ARC_FIELD_IS_NOT_BITFIELD;
  474. if (!reg->valid)
  475. CHECK_RETVAL(reg->type->get(reg));
  476. /* First do endianness-safe read of register value
  477. * then convert it to binary buffer for further
  478. * field extraction */
  479. *value_ptr = buf_get_u32(reg->value, field->bitfield->start,
  480. field->bitfield->end - field->bitfield->start + 1);
  481. return ERROR_OK;
  482. }
  483. static int arc_get_register_value(struct target *target, const char *reg_name,
  484. uint32_t *value_ptr)
  485. {
  486. LOG_DEBUG("reg_name=%s", reg_name);
  487. struct reg *reg = arc_reg_get_by_name(target->reg_cache, reg_name, true);
  488. if (!reg)
  489. return ERROR_ARC_REGISTER_NOT_FOUND;
  490. if (!reg->valid)
  491. CHECK_RETVAL(reg->type->get(reg));
  492. *value_ptr = target_buffer_get_u32(target, reg->value);
  493. return ERROR_OK;
  494. }
  495. static int arc_set_register_value(struct target *target, const char *reg_name,
  496. uint32_t value)
  497. {
  498. LOG_DEBUG("reg_name=%s value=0x%08" PRIx32, reg_name, value);
  499. if (!(target && reg_name)) {
  500. LOG_ERROR("Arguments cannot be NULL.");
  501. return ERROR_FAIL;
  502. }
  503. struct reg *reg = arc_reg_get_by_name(target->reg_cache, reg_name, true);
  504. if (!reg)
  505. return ERROR_ARC_REGISTER_NOT_FOUND;
  506. uint8_t value_buf[4];
  507. buf_set_u32(value_buf, 0, 32, value);
  508. CHECK_RETVAL(reg->type->set(reg, value_buf));
  509. return ERROR_OK;
  510. }
  511. /* Configure DCCM's */
  512. static int arc_configure_dccm(struct target *target)
  513. {
  514. struct arc_common *arc = target_to_arc(target);
  515. uint32_t dccm_build_version, dccm_build_size0, dccm_build_size1;
  516. CHECK_RETVAL(arc_reg_get_field(target, "dccm_build", "version",
  517. &dccm_build_version));
  518. CHECK_RETVAL(arc_reg_get_field(target, "dccm_build", "size0",
  519. &dccm_build_size0));
  520. CHECK_RETVAL(arc_reg_get_field(target, "dccm_build", "size1",
  521. &dccm_build_size1));
  522. /* There is no yet support of configurable number of cycles,
  523. * So there is no difference between v3 and v4 */
  524. if ((dccm_build_version == 3 || dccm_build_version == 4) && dccm_build_size0 > 0) {
  525. CHECK_RETVAL(arc_get_register_value(target, "aux_dccm", &(arc->dccm_start)));
  526. uint32_t dccm_size = 0x100;
  527. dccm_size <<= dccm_build_size0;
  528. if (dccm_build_size0 == 0xF)
  529. dccm_size <<= dccm_build_size1;
  530. arc->dccm_end = arc->dccm_start + dccm_size;
  531. LOG_DEBUG("DCCM detected start=0x%" PRIx32 " end=0x%" PRIx32,
  532. arc->dccm_start, arc->dccm_end);
  533. }
  534. return ERROR_OK;
  535. }
/* Detect ICCM0/ICCM1 regions from ICCM_BUILD and record their address ranges
 * in the arc_common structure. */
static int arc_configure_iccm(struct target *target)
{
	struct arc_common *arc = target_to_arc(target);
	/* ICCM0 */
	uint32_t iccm_build_version, iccm_build_size00, iccm_build_size01;
	uint32_t aux_iccm = 0;
	CHECK_RETVAL(arc_reg_get_field(target, "iccm_build", "version",
		&iccm_build_version));
	CHECK_RETVAL(arc_reg_get_field(target, "iccm_build", "iccm0_size0",
		&iccm_build_size00));
	CHECK_RETVAL(arc_reg_get_field(target, "iccm_build", "iccm0_size1",
		&iccm_build_size01));
	if (iccm_build_version == 4 && iccm_build_size00 > 0) {
		CHECK_RETVAL(arc_get_register_value(target, "aux_iccm", &aux_iccm));
		/* size = 0x100 << size0; the 0xF escape extends with size1. */
		uint32_t iccm0_size = 0x100;
		iccm0_size <<= iccm_build_size00;
		if (iccm_build_size00 == 0xF)
			iccm0_size <<= iccm_build_size01;
		/* iccm0 start is located in highest 4 bits of aux_iccm */
		arc->iccm0_start = aux_iccm & 0xF0000000;
		arc->iccm0_end = arc->iccm0_start + iccm0_size;
		LOG_DEBUG("ICCM0 detected start=0x%" PRIx32 " end=0x%" PRIx32,
			arc->iccm0_start, arc->iccm0_end);
	}
	/* ICCM1 */
	uint32_t iccm_build_size10, iccm_build_size11;
	CHECK_RETVAL(arc_reg_get_field(target, "iccm_build", "iccm1_size0",
		&iccm_build_size10));
	CHECK_RETVAL(arc_reg_get_field(target, "iccm_build", "iccm1_size1",
		&iccm_build_size11));
	if (iccm_build_version == 4 && iccm_build_size10 > 0) {
		/* Use value read for ICCM0.
		 * NOTE(review): this guard also re-reads when aux_iccm was
		 * legitimately read as 0 for ICCM0 — harmless extra read, but
		 * confirm 0 can never be a valid cached value worth keeping. */
		if (!aux_iccm)
			CHECK_RETVAL(arc_get_register_value(target, "aux_iccm", &aux_iccm));
		uint32_t iccm1_size = 0x100;
		iccm1_size <<= iccm_build_size10;
		if (iccm_build_size10 == 0xF)
			iccm1_size <<= iccm_build_size11;
		/* presumably ICCM1 start lives in bits 27:24 of aux_iccm —
		 * TODO confirm against the ARC documentation. */
		arc->iccm1_start = aux_iccm & 0x0F000000;
		arc->iccm1_end = arc->iccm1_start + iccm1_size;
		LOG_DEBUG("ICCM1 detected start=0x%" PRIx32 " end=0x%" PRIx32,
			arc->iccm1_start, arc->iccm1_end);
	}
	return ERROR_OK;
}
  582. /* Configure some core features, depending on BCRs. */
  583. static int arc_configure(struct target *target)
  584. {
  585. LOG_DEBUG("Configuring ARC ICCM and DCCM");
  586. /* Configuring DCCM if DCCM_BUILD and AUX_DCCM are known registers. */
  587. if (arc_reg_get_by_name(target->reg_cache, "dccm_build", true) &&
  588. arc_reg_get_by_name(target->reg_cache, "aux_dccm", true))
  589. CHECK_RETVAL(arc_configure_dccm(target));
  590. /* Configuring ICCM if ICCM_BUILD and AUX_ICCM are known registers. */
  591. if (arc_reg_get_by_name(target->reg_cache, "iccm_build", true) &&
  592. arc_reg_get_by_name(target->reg_cache, "aux_iccm", true))
  593. CHECK_RETVAL(arc_configure_iccm(target));
  594. return ERROR_OK;
  595. }
  596. /* arc_examine is function, which is used for all arc targets*/
  597. static int arc_examine(struct target *target)
  598. {
  599. uint32_t status;
  600. struct arc_common *arc = target_to_arc(target);
  601. CHECK_RETVAL(arc_jtag_startup(&arc->jtag_info));
  602. if (!target_was_examined(target)) {
  603. CHECK_RETVAL(arc_jtag_status(&arc->jtag_info, &status));
  604. if (status & ARC_JTAG_STAT_RU)
  605. target->state = TARGET_RUNNING;
  606. else
  607. target->state = TARGET_HALTED;
  608. /* Read BCRs and configure optional registers. */
  609. CHECK_RETVAL(arc_configure(target));
  610. target_set_examined(target);
  611. }
  612. return ERROR_OK;
  613. }
  614. static int arc_halt(struct target *target)
  615. {
  616. uint32_t value, irq_state;
  617. struct arc_common *arc = target_to_arc(target);
  618. LOG_DEBUG("target->state: %s", target_state_name(target));
  619. if (target->state == TARGET_HALTED) {
  620. LOG_DEBUG("target was already halted");
  621. return ERROR_OK;
  622. }
  623. if (target->state == TARGET_UNKNOWN)
  624. LOG_WARNING("target was in unknown state when halt was requested");
  625. if (target->state == TARGET_RESET) {
  626. if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
  627. LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
  628. return ERROR_TARGET_FAILURE;
  629. } else {
  630. target->debug_reason = DBG_REASON_DBGRQ;
  631. }
  632. }
  633. /* Break (stop) processor.
  634. * Do read-modify-write sequence, or DEBUG.UB will be reset unintentionally.
  635. * We do not use here arc_get/set_core_reg functions here because they imply
  636. * that the processor is already halted. */
  637. CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_DEBUG_REG, &value));
  638. value |= SET_CORE_FORCE_HALT; /* set the HALT bit */
  639. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DEBUG_REG, value));
  640. alive_sleep(1);
  641. /* Save current IRQ state */
  642. CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, &irq_state));
  643. if (irq_state & AUX_STATUS32_REG_IE_BIT)
  644. arc->irq_state = 1;
  645. else
  646. arc->irq_state = 0;
  647. /* update state and notify gdb*/
  648. target->state = TARGET_HALTED;
  649. CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
  650. /* some more debug information */
  651. if (debug_level >= LOG_LVL_DEBUG) {
  652. LOG_DEBUG("core stopped (halted) DEGUB-REG: 0x%08" PRIx32, value);
  653. CHECK_RETVAL(arc_get_register_value(target, "status32", &value));
  654. LOG_DEBUG("core STATUS32: 0x%08" PRIx32, value);
  655. }
  656. return ERROR_OK;
  657. }
  658. /**
  659. * Read registers that are used in GDB g-packet. We don't read them one-by-one,
  660. * but do that in one batch operation to improve speed. Calls to JTAG layer are
  661. * expensive so it is better to make one big call that reads all necessary
  662. * registers, instead of many calls, one for one register.
  663. */
  664. static int arc_save_context(struct target *target)
  665. {
  666. int retval = ERROR_OK;
  667. unsigned int i;
  668. struct arc_common *arc = target_to_arc(target);
  669. struct reg *reg_list = arc->core_and_aux_cache->reg_list;
  670. LOG_DEBUG("Saving aux and core registers values");
  671. assert(reg_list);
  672. /* It is assumed that there is at least one AUX register in the list, for
  673. * example PC. */
  674. const uint32_t core_regs_size = arc->num_core_regs * sizeof(uint32_t);
  675. /* last_general_reg is inclusive number. To get count of registers it is
  676. * required to do +1. */
  677. const uint32_t regs_to_scan =
  678. MIN(arc->last_general_reg + 1, arc->num_regs);
  679. const uint32_t aux_regs_size = arc->num_aux_regs * sizeof(uint32_t);
  680. uint32_t *core_values = malloc(core_regs_size);
  681. uint32_t *aux_values = malloc(aux_regs_size);
  682. uint32_t *core_addrs = malloc(core_regs_size);
  683. uint32_t *aux_addrs = malloc(aux_regs_size);
  684. unsigned int core_cnt = 0;
  685. unsigned int aux_cnt = 0;
  686. if (!core_values || !core_addrs || !aux_values || !aux_addrs) {
  687. LOG_ERROR("Unable to allocate memory");
  688. retval = ERROR_FAIL;
  689. goto exit;
  690. }
  691. memset(core_values, 0xff, core_regs_size);
  692. memset(core_addrs, 0xff, core_regs_size);
  693. memset(aux_values, 0xff, aux_regs_size);
  694. memset(aux_addrs, 0xff, aux_regs_size);
  695. for (i = 0; i < MIN(arc->num_core_regs, regs_to_scan); i++) {
  696. struct reg *reg = &(reg_list[i]);
  697. struct arc_reg_desc *arc_reg = reg->arch_info;
  698. if (!reg->valid && reg->exist) {
  699. core_addrs[core_cnt] = arc_reg->arch_num;
  700. core_cnt += 1;
  701. }
  702. }
  703. for (i = arc->num_core_regs; i < regs_to_scan; i++) {
  704. struct reg *reg = &(reg_list[i]);
  705. struct arc_reg_desc *arc_reg = reg->arch_info;
  706. if (!reg->valid && reg->exist) {
  707. aux_addrs[aux_cnt] = arc_reg->arch_num;
  708. aux_cnt += 1;
  709. }
  710. }
  711. /* Read data from target. */
  712. if (core_cnt > 0) {
  713. retval = arc_jtag_read_core_reg(&arc->jtag_info, core_addrs, core_cnt, core_values);
  714. if (retval != ERROR_OK) {
  715. LOG_ERROR("Attempt to read core registers failed.");
  716. retval = ERROR_FAIL;
  717. goto exit;
  718. }
  719. }
  720. if (aux_cnt > 0) {
  721. retval = arc_jtag_read_aux_reg(&arc->jtag_info, aux_addrs, aux_cnt, aux_values);
  722. if (retval != ERROR_OK) {
  723. LOG_ERROR("Attempt to read aux registers failed.");
  724. retval = ERROR_FAIL;
  725. goto exit;
  726. }
  727. }
  728. /* Parse core regs */
  729. core_cnt = 0;
  730. for (i = 0; i < MIN(arc->num_core_regs, regs_to_scan); i++) {
  731. struct reg *reg = &(reg_list[i]);
  732. struct arc_reg_desc *arc_reg = reg->arch_info;
  733. if (!reg->valid && reg->exist) {
  734. target_buffer_set_u32(target, reg->value, core_values[core_cnt]);
  735. core_cnt += 1;
  736. reg->valid = true;
  737. reg->dirty = false;
  738. LOG_DEBUG("Get core register regnum=%u, name=%s, value=0x%08" PRIx32,
  739. i, arc_reg->name, core_values[core_cnt]);
  740. }
  741. }
  742. /* Parse aux regs */
  743. aux_cnt = 0;
  744. for (i = arc->num_core_regs; i < regs_to_scan; i++) {
  745. struct reg *reg = &(reg_list[i]);
  746. struct arc_reg_desc *arc_reg = reg->arch_info;
  747. if (!reg->valid && reg->exist) {
  748. target_buffer_set_u32(target, reg->value, aux_values[aux_cnt]);
  749. aux_cnt += 1;
  750. reg->valid = true;
  751. reg->dirty = false;
  752. LOG_DEBUG("Get aux register regnum=%u, name=%s, value=0x%08" PRIx32,
  753. i, arc_reg->name, aux_values[aux_cnt]);
  754. }
  755. }
  756. exit:
  757. free(core_values);
  758. free(core_addrs);
  759. free(aux_values);
  760. free(aux_addrs);
  761. return retval;
  762. }
  763. /**
  764. * Finds an actionpoint that triggered last actionpoint event, as specified by
  765. * DEBUG.ASR.
  766. *
  767. * @param target
  768. * @param actionpoint Pointer to be set to last active actionpoint. Pointer
  769. * will be set to NULL if DEBUG.AH is 0.
  770. */
  771. static int get_current_actionpoint(struct target *target,
  772. struct arc_actionpoint **actionpoint)
  773. {
  774. assert(target);
  775. assert(actionpoint);
  776. uint32_t debug_ah;
  777. /* Check if actionpoint caused halt */
  778. CHECK_RETVAL(arc_reg_get_field(target, "debug", "ah",
  779. &debug_ah));
  780. if (debug_ah) {
  781. struct arc_common *arc = target_to_arc(target);
  782. unsigned int ap;
  783. uint32_t debug_asr;
  784. CHECK_RETVAL(arc_reg_get_field(target, "debug",
  785. "asr", &debug_asr));
  786. for (ap = 0; debug_asr > 1; debug_asr >>= 1)
  787. ap += 1;
  788. assert(ap < arc->actionpoints_num);
  789. *actionpoint = &(arc->actionpoints_list[ap]);
  790. } else {
  791. *actionpoint = NULL;
  792. }
  793. return ERROR_OK;
  794. }
  795. static int arc_examine_debug_reason(struct target *target)
  796. {
  797. uint32_t debug_bh;
  798. /* Only check for reason if don't know it already. */
  799. /* BTW After singlestep at this point core is not marked as halted, so
  800. * reading from memory to get current instruction wouldn't work anyway. */
  801. if (target->debug_reason == DBG_REASON_DBGRQ ||
  802. target->debug_reason == DBG_REASON_SINGLESTEP) {
  803. return ERROR_OK;
  804. }
  805. CHECK_RETVAL(arc_reg_get_field(target, "debug", "bh",
  806. &debug_bh));
  807. if (debug_bh) {
  808. /* DEBUG.BH is set if core halted due to BRK instruction. */
  809. target->debug_reason = DBG_REASON_BREAKPOINT;
  810. } else {
  811. struct arc_actionpoint *actionpoint = NULL;
  812. CHECK_RETVAL(get_current_actionpoint(target, &actionpoint));
  813. if (actionpoint) {
  814. if (!actionpoint->used)
  815. LOG_WARNING("Target halted by an unused actionpoint.");
  816. if (actionpoint->type == ARC_AP_BREAKPOINT)
  817. target->debug_reason = DBG_REASON_BREAKPOINT;
  818. else if (actionpoint->type == ARC_AP_WATCHPOINT)
  819. target->debug_reason = DBG_REASON_WATCHPOINT;
  820. else
  821. LOG_WARNING("Unknown type of actionpoint.");
  822. }
  823. }
  824. return ERROR_OK;
  825. }
/* Common entry path once a halt has been detected: refresh the register
 * cache from the target, drop stale cache-state bookkeeping, then classify
 * the halt reason. */
static int arc_debug_entry(struct target *target)
{
	/* Pull all core/aux register values from the target into the cache. */
	CHECK_RETVAL(arc_save_context(target));

	/* TODO: reset internal indicators of caches states, otherwise D$/I$
	 * will not be flushed/invalidated when required. */
	CHECK_RETVAL(arc_reset_caches_states(target));

	/* Sets target->debug_reason from the DEBUG register fields. */
	CHECK_RETVAL(arc_examine_debug_reason(target));

	return ERROR_OK;
}
/* Periodic poll callback: derive the OpenOCD target state from the JTAG
 * status register (cross-checked against STATUS32.H) and perform debug
 * entry when a fresh halt is detected. */
static int arc_poll(struct target *target)
{
	uint32_t status, value;
	struct arc_common *arc = target_to_arc(target);

	/* gdb calls continuously through this arc_poll() function */
	CHECK_RETVAL(arc_jtag_status(&arc->jtag_info, &status));

	/* check for processor halted */
	if (status & ARC_JTAG_STAT_RU) {
		/* Core reports "running": resynchronize our notion of state. */
		if (target->state != TARGET_RUNNING) {
			LOG_WARNING("target is still running!");
			target->state = TARGET_RUNNING;
		}
		return ERROR_OK;
	}

	/* In some cases JTAG status register indicates that
	 * processor is in halt mode, but processor is still running.
	 * We check halt bit of AUX STATUS32 register for setting correct state. */
	if ((target->state == TARGET_RUNNING) || (target->state == TARGET_RESET)) {
		CHECK_RETVAL(arc_get_register_value(target, "status32", &value));
		if (value & AUX_STATUS32_REG_HALT_BIT) {
			LOG_DEBUG("ARC core in halt or reset state.");
			/* Save context if target was not in reset state */
			if (target->state == TARGET_RUNNING)
				CHECK_RETVAL(arc_debug_entry(target));
			target->state = TARGET_HALTED;
			CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));
		} else {
			LOG_DEBUG("Discrepancy of STATUS32[0] HALT bit and ARC_JTAG_STAT_RU, "
				"target is still running");
		}
	} else if (target->state == TARGET_DEBUG_RUNNING) {
		/* Halt while in debug-run: report via the DEBUG_HALTED event. */
		target->state = TARGET_HALTED;
		LOG_DEBUG("ARC core is in debug running mode");
		CHECK_RETVAL(arc_debug_entry(target));
		CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED));
	}

	return ERROR_OK;
}
/* Assert reset on the target: either delegate entirely to a user-provided
 * TARGET_EVENT_RESET_ASSERT handler, or drive the srst/trst lines directly
 * according to the configured reset topology, then honor reset_halt. */
static int arc_assert_reset(struct target *target)
{
	struct arc_common *arc = target_to_arc(target);
	enum reset_types jtag_reset_config = jtag_get_reset_config();
	bool srst_asserted = false;

	LOG_DEBUG("target->state: %s", target_state_name(target));

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		/* Whatever the script did, cached register values are stale now. */
		register_cache_invalidate(arc->core_and_aux_cache);
		/* An ARC target might be in halt state after reset, so
		 * if script requested processor to resume, then it must
		 * be manually started to ensure that this request
		 * is satisfied. */
		if (target->state == TARGET_HALTED && !target->reset_halt) {
			/* Resume the target and continue from the current
			 * PC register value. */
			LOG_DEBUG("Starting CPU execution after reset");
			CHECK_RETVAL(target_resume(target, 1, 0, 0, 0));
		}
		target->state = TARGET_RESET;
		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode if it has been configured */
	if (!(jtag_reset_config & RESET_SRST_PULLS_TRST) &&
			(jtag_reset_config & RESET_SRST_NO_GATING)) {
		jtag_add_reset(0, 1);
		srst_asserted = true;
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* should issue a srst only, but we may have to assert trst as well */
		if (jtag_reset_config & RESET_SRST_PULLS_TRST)
			jtag_add_reset(1, 1);
		else if (!srst_asserted)
			jtag_add_reset(0, 1);
	}

	target->state = TARGET_RESET;
	/* Let the reset line settle before any further JTAG activity. */
	jtag_add_sleep(50000);

	register_cache_invalidate(arc->core_and_aux_cache);

	if (target->reset_halt)
		CHECK_RETVAL(target_halt(target));

	return ERROR_OK;
}
/* Counterpart of arc_assert_reset(): release both trst and srst lines. */
static int arc_deassert_reset(struct target *target)
{
	LOG_DEBUG("target->state: %s", target_state_name(target));

	/* deassert reset lines */
	jtag_add_reset(0, 0);

	return ERROR_OK;
}
  924. static int arc_arch_state(struct target *target)
  925. {
  926. uint32_t pc_value;
  927. if (debug_level < LOG_LVL_DEBUG)
  928. return ERROR_OK;
  929. CHECK_RETVAL(arc_get_register_value(target, "pc", &pc_value));
  930. LOG_DEBUG("target state: %s; PC at: 0x%08" PRIx32,
  931. target_state_name(target),
  932. pc_value);
  933. return ERROR_OK;
  934. }
/**
 * See arc_save_context() for reason why we want to dump all regs at once.
 * This however means that if there are dependencies between registers they
 * will not be observable until target will be resumed.
 */
static int arc_restore_context(struct target *target)
{
	int retval = ERROR_OK;
	unsigned int i;
	struct arc_common *arc = target_to_arc(target);
	struct reg *reg_list = arc->core_and_aux_cache->reg_list;

	LOG_DEBUG("Restoring registers values");
	assert(reg_list);

	/* Scratch address/value pair arrays, one set per register class. */
	const uint32_t core_regs_size = arc->num_core_regs * sizeof(uint32_t);
	const uint32_t aux_regs_size = arc->num_aux_regs * sizeof(uint32_t);
	uint32_t *core_values = malloc(core_regs_size);
	uint32_t *aux_values = malloc(aux_regs_size);
	uint32_t *core_addrs = malloc(core_regs_size);
	uint32_t *aux_addrs = malloc(aux_regs_size);
	unsigned int core_cnt = 0;
	unsigned int aux_cnt = 0;
	if (!core_values || !core_addrs || !aux_values || !aux_addrs) {
		LOG_ERROR("Unable to allocate memory");
		retval = ERROR_FAIL;
		goto exit;
	}

	/* 0xff fill makes any unused entries stand out when debugging. */
	memset(core_values, 0xff, core_regs_size);
	memset(core_addrs, 0xff, core_regs_size);
	memset(aux_values, 0xff, aux_regs_size);
	memset(aux_addrs, 0xff, aux_regs_size);

	/* Gather only registers modified while halted (valid && dirty). */
	for (i = 0; i < arc->num_core_regs; i++) {
		struct reg *reg = &(reg_list[i]);
		struct arc_reg_desc *arc_reg = reg->arch_info;
		if (reg->valid && reg->exist && reg->dirty) {
			LOG_DEBUG("Will write regnum=%u", i);
			core_addrs[core_cnt] = arc_reg->arch_num;
			core_values[core_cnt] = target_buffer_get_u32(target, reg->value);
			core_cnt += 1;
		}
	}

	/* Aux registers follow the core registers in the combined cache. */
	for (i = 0; i < arc->num_aux_regs; i++) {
		struct reg *reg = &(reg_list[arc->num_core_regs + i]);
		struct arc_reg_desc *arc_reg = reg->arch_info;
		if (reg->valid && reg->exist && reg->dirty) {
			LOG_DEBUG("Will write regnum=%lu", arc->num_core_regs + i);
			aux_addrs[aux_cnt] = arc_reg->arch_num;
			aux_values[aux_cnt] = target_buffer_get_u32(target, reg->value);
			aux_cnt += 1;
		}
	}

	/* Write data to target.
	 * Check before write, if aux and core count is greater than 0. */
	if (core_cnt > 0) {
		retval = arc_jtag_write_core_reg(&arc->jtag_info, core_addrs, core_cnt, core_values);
		if (retval != ERROR_OK) {
			LOG_ERROR("Attempt to write to core registers failed.");
			retval = ERROR_FAIL;
			goto exit;
		}
	}

	if (aux_cnt > 0) {
		retval = arc_jtag_write_aux_reg(&arc->jtag_info, aux_addrs, aux_cnt, aux_values);
		if (retval != ERROR_OK) {
			LOG_ERROR("Attempt to write to aux registers failed.");
			retval = ERROR_FAIL;
			goto exit;
		}
	}

exit:
	/* free(NULL) is a no-op, so partial allocation failures are fine. */
	free(core_values);
	free(core_addrs);
	free(aux_values);
	free(aux_addrs);

	return retval;
}
  1010. static int arc_enable_interrupts(struct target *target, int enable)
  1011. {
  1012. uint32_t value;
  1013. struct arc_common *arc = target_to_arc(target);
  1014. CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, &value));
  1015. if (enable) {
  1016. /* enable interrupts */
  1017. value |= SET_CORE_ENABLE_INTERRUPTS;
  1018. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, value));
  1019. LOG_DEBUG("interrupts enabled");
  1020. } else {
  1021. /* disable interrupts */
  1022. value &= ~SET_CORE_ENABLE_INTERRUPTS;
  1023. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, value));
  1024. LOG_DEBUG("interrupts disabled");
  1025. }
  1026. return ERROR_OK;
  1027. }
/* Resume execution, either from the current PC (current != 0) or from the
 * given address. Dirty cached registers are flushed to the target first,
 * then the core is released by clearing the STATUS32 halt bit. */
static int arc_resume(struct target *target, int current, target_addr_t address,
	int handle_breakpoints, int debug_execution)
{
	struct arc_common *arc = target_to_arc(target);
	uint32_t resume_pc = 0;
	uint32_t value;
	struct reg *pc = &arc->core_and_aux_cache->reg_list[arc->pc_index_in_cache];

	LOG_DEBUG("current:%i, address:0x%08" TARGET_PRIxADDR ", handle_breakpoints(not supported yet):%i,"
		" debug_execution:%i", current, address, handle_breakpoints, debug_execution);

	/* We need to reset ARC cache variables so caches
	 * would be invalidated and actual data
	 * would be fetched from memory. */
	CHECK_RETVAL(arc_reset_caches_states(target));

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current PC, otherwise continue at <address> */
	if (!current) {
		/* Stage the new PC in the register cache; flushed below. */
		target_buffer_set_u32(target, pc->value, address);
		pc->dirty = 1;
		pc->valid = 1;
		LOG_DEBUG("Changing the value of current PC to 0x%08" TARGET_PRIxADDR, address);
	}

	if (!current)
		resume_pc = address;
	else
		resume_pc = target_buffer_get_u32(target, pc->value);

	/* Write all dirty cached registers back to the target. */
	CHECK_RETVAL(arc_restore_context(target));

	LOG_DEBUG("Target resumes from PC=0x%" PRIx32 ", pc.dirty=%i, pc.valid=%i",
		resume_pc, pc->dirty, pc->valid);

	/* check if GDB tells to set our PC where to continue from */
	if ((pc->valid == 1) && (resume_pc == target_buffer_get_u32(target, pc->value))) {
		value = target_buffer_get_u32(target, pc->value);
		LOG_DEBUG("resume Core (when start-core) with PC @:0x%08" PRIx32, value);
		CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_PC_REG, value));
	}

	/* Restore IRQ state if not in debug_execution*/
	/* NOTE(review): in the else branch !debug_execution is necessarily 0,
	 * i.e. interrupts are disabled during debug execution. */
	if (!debug_execution)
		CHECK_RETVAL(arc_enable_interrupts(target, arc->irq_state));
	else
		CHECK_RETVAL(arc_enable_interrupts(target, !debug_execution));

	target->debug_reason = DBG_REASON_NOTHALTED;

	/* ready to get us going again */
	target->state = TARGET_RUNNING;
	/* Clear STATUS32.H to let the core run. */
	CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, &value));
	value &= ~SET_CORE_HALT_BIT;	/* clear the HALT bit */
	CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG, value));
	LOG_DEBUG("Core started to run");

	/* registers are now invalid */
	register_cache_invalidate(arc->core_and_aux_cache);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));
		LOG_DEBUG("target resumed at 0x%08" PRIx32, resume_pc);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED));
		LOG_DEBUG("target debug resumed at 0x%08" PRIx32, resume_pc);
	}

	return ERROR_OK;
}
/* init_target callback: build both register caches (core+aux and BCR) and
 * seed the initial debug reason. cmd_ctx is unused here. */
static int arc_init_target(struct command_context *cmd_ctx, struct target *target)
{
	CHECK_RETVAL(arc_build_reg_cache(target));
	CHECK_RETVAL(arc_build_bcr_reg_cache(target));
	target->debug_reason = DBG_REASON_DBGRQ;
	return ERROR_OK;
}
/* Free a register cache: only the reg_list array and the cache struct
 * itself are released here. */
static void arc_free_reg_cache(struct reg_cache *cache)
{
	free(cache->reg_list);
	free(cache);
}
/* Release all per-target allocations made at creation time: register
 * caches, TCL-defined register descriptions, register data types and the
 * actionpoint list. */
static void arc_deinit_target(struct target *target)
{
	struct arc_common *arc = target_to_arc(target);

	LOG_DEBUG("deinitialization of target");

	/* Caches are only freed if the corresponding build step completed. */
	if (arc->core_aux_cache_built)
		arc_free_reg_cache(arc->core_and_aux_cache);
	if (arc->bcr_cache_built)
		arc_free_reg_cache(arc->bcr_cache);

	struct arc_reg_data_type *type, *n;
	struct arc_reg_desc *desc, *k;

	/* Free arc-specific reg_data_types allocations*/
	list_for_each_entry_safe_reverse(type, n, &arc->reg_data_types, list) {
		if (type->data_type.type_class == REG_TYPE_CLASS_STRUCT) {
			free(type->reg_type_struct_field);
			free(type->bitfields);
			free(type);
		} else if (type->data_type.type_class == REG_TYPE_CLASS_FLAGS) {
			free(type->reg_type_flags_field);
			free(type->bitfields);
			free(type);
		}
	}

	/* Free standard_gdb_types reg_data_types allocations */
	/* NOTE(review): this assumes the standard types were allocated as one
	 * block whose first element heads the list - confirm against the
	 * allocation in arc_init_arch_info(). */
	type = list_first_entry(&arc->reg_data_types, struct arc_reg_data_type, list);
	free(type);

	list_for_each_entry_safe(desc, k, &arc->aux_reg_descriptions, list)
		free_reg_desc(desc);

	list_for_each_entry_safe(desc, k, &arc->core_reg_descriptions, list)
		free_reg_desc(desc);

	list_for_each_entry_safe(desc, k, &arc->bcr_reg_descriptions, list)
		free_reg_desc(desc);

	free(arc->actionpoints_list);
	free(arc);
}
  1136. static int arc_target_create(struct target *target, Jim_Interp *interp)
  1137. {
  1138. struct arc_common *arc = calloc(1, sizeof(*arc));
  1139. if (!arc) {
  1140. LOG_ERROR("Unable to allocate memory");
  1141. return ERROR_FAIL;
  1142. }
  1143. LOG_DEBUG("Entering");
  1144. CHECK_RETVAL(arc_init_arch_info(target, arc, target->tap));
  1145. return ERROR_OK;
  1146. }
  1147. /**
  1148. * Write 4-byte instruction to memory. This is like target_write_u32, however
  1149. * in case of little endian ARC instructions are in middle endian format, not
  1150. * little endian, so different type of conversion should be done.
  1151. * Middle endian: instruction "aabbccdd", stored as "bbaaddcc"
  1152. */
  1153. int arc_write_instruction_u32(struct target *target, uint32_t address,
  1154. uint32_t instr)
  1155. {
  1156. uint8_t value_buf[4];
  1157. if (!target_was_examined(target)) {
  1158. LOG_ERROR("Target not examined yet");
  1159. return ERROR_FAIL;
  1160. }
  1161. LOG_DEBUG("Address: 0x%08" PRIx32 ", value: 0x%08" PRIx32, address,
  1162. instr);
  1163. if (target->endianness == TARGET_LITTLE_ENDIAN)
  1164. arc_h_u32_to_me(value_buf, instr);
  1165. else
  1166. h_u32_to_be(value_buf, instr);
  1167. CHECK_RETVAL(target_write_buffer(target, address, 4, value_buf));
  1168. return ERROR_OK;
  1169. }
  1170. /**
  1171. * Read 32-bit instruction from memory. It is like target_read_u32, however in
  1172. * case of little endian ARC instructions are in middle endian format, so
  1173. * different type of conversion should be done.
  1174. */
  1175. int arc_read_instruction_u32(struct target *target, uint32_t address,
  1176. uint32_t *value)
  1177. {
  1178. uint8_t value_buf[4];
  1179. if (!target_was_examined(target)) {
  1180. LOG_ERROR("Target not examined yet");
  1181. return ERROR_FAIL;
  1182. }
  1183. *value = 0;
  1184. CHECK_RETVAL(target_read_buffer(target, address, 4, value_buf));
  1185. if (target->endianness == TARGET_LITTLE_ENDIAN)
  1186. *value = arc_me_to_h_u32(value_buf);
  1187. else
  1188. *value = be_to_h_u32(value_buf);
  1189. LOG_DEBUG("Address: 0x%08" PRIx32 ", value: 0x%08" PRIx32, address,
  1190. *value);
  1191. return ERROR_OK;
  1192. }
  1193. /* Actionpoint mechanism allows to setup HW breakpoints
  1194. * and watchpoints. Each actionpoint is controlled by
  1195. * 3 aux registers: Actionpoint(AP) match mask(AP_AMM), AP match value(AP_AMV)
  1196. * and AP control(AC).
  1197. * This function is for setting/unsetting actionpoints:
  1198. * at - actionpoint target: trigger on mem/reg access
  1199. * tt - transaction type : trigger on r/w. */
  1200. static int arc_configure_actionpoint(struct target *target, uint32_t ap_num,
  1201. uint32_t match_value, uint32_t control_tt, uint32_t control_at)
  1202. {
  1203. struct arc_common *arc = target_to_arc(target);
  1204. if (control_tt != AP_AC_TT_DISABLE) {
  1205. if (arc->actionpoints_num_avail < 1) {
  1206. LOG_ERROR("No free actionpoints, maximum amount is %u",
  1207. arc->actionpoints_num);
  1208. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1209. }
  1210. /* Names of register to set - 24 chars should be enough. Looks a little
  1211. * bit out-of-place for C code, but makes it aligned to the bigger
  1212. * concept of "ARC registers are defined in TCL" as far as possible.
  1213. */
  1214. char ap_amv_reg_name[24], ap_amm_reg_name[24], ap_ac_reg_name[24];
  1215. snprintf(ap_amv_reg_name, 24, "ap_amv%" PRIu32, ap_num);
  1216. snprintf(ap_amm_reg_name, 24, "ap_amm%" PRIu32, ap_num);
  1217. snprintf(ap_ac_reg_name, 24, "ap_ac%" PRIu32, ap_num);
  1218. CHECK_RETVAL(arc_set_register_value(target, ap_amv_reg_name,
  1219. match_value));
  1220. CHECK_RETVAL(arc_set_register_value(target, ap_amm_reg_name, 0));
  1221. CHECK_RETVAL(arc_set_register_value(target, ap_ac_reg_name,
  1222. control_tt | control_at));
  1223. arc->actionpoints_num_avail--;
  1224. } else {
  1225. char ap_ac_reg_name[24];
  1226. snprintf(ap_ac_reg_name, 24, "ap_ac%" PRIu32, ap_num);
  1227. CHECK_RETVAL(arc_set_register_value(target, ap_ac_reg_name,
  1228. AP_AC_TT_DISABLE));
  1229. arc->actionpoints_num_avail++;
  1230. }
  1231. return ERROR_OK;
  1232. }
/* Install a breakpoint. BKPT_SOFT plants an SDBBP instruction (32- or
 * 16-bit) and verifies the write by reading it back; BKPT_HARD claims a
 * free actionpoint configured to match the instruction address. */
static int arc_set_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_SOFT) {
		LOG_DEBUG("bpid: %" PRIu32, breakpoint->unique_id);

		if (breakpoint->length == 4) {
			uint32_t verify = 0xffffffff;

			/* Save the original instruction for restoration on unset. */
			CHECK_RETVAL(target_read_buffer(target, breakpoint->address, breakpoint->length,
					breakpoint->orig_instr));
			CHECK_RETVAL(arc_write_instruction_u32(target, breakpoint->address,
					ARC_SDBBP_32));

			/* Read back to confirm the memory was actually writable. */
			CHECK_RETVAL(arc_read_instruction_u32(target, breakpoint->address, &verify));
			if (verify != ARC_SDBBP_32) {
				LOG_ERROR("Unable to set 32bit breakpoint at address @0x%" TARGET_PRIxADDR
					" - check that memory is read/writable", breakpoint->address);
				return ERROR_FAIL;
			}
		} else if (breakpoint->length == 2) {
			uint16_t verify = 0xffff;

			CHECK_RETVAL(target_read_buffer(target, breakpoint->address, breakpoint->length,
					breakpoint->orig_instr));
			CHECK_RETVAL(target_write_u16(target, breakpoint->address, ARC_SDBBP_16));

			CHECK_RETVAL(target_read_u16(target, breakpoint->address, &verify));
			if (verify != ARC_SDBBP_16) {
				LOG_ERROR("Unable to set 16bit breakpoint at address @0x%" TARGET_PRIxADDR
					" - check that memory is read/writable", breakpoint->address);
				return ERROR_FAIL;
			}
		} else {
			LOG_ERROR("Invalid breakpoint length: target supports only 2 or 4");
			return ERROR_COMMAND_ARGUMENT_INVALID;
		}

		breakpoint->set = 64; /* Any nice value but 0 */
	} else if (breakpoint->type == BKPT_HARD) {
		struct arc_common *arc = target_to_arc(target);
		struct arc_actionpoint *ap_list = arc->actionpoints_list;
		unsigned int bp_num;

		/* Find the first unused actionpoint slot. */
		for (bp_num = 0; bp_num < arc->actionpoints_num; bp_num++) {
			if (!ap_list[bp_num].used)
				break;
		}

		if (bp_num >= arc->actionpoints_num) {
			LOG_ERROR("No free actionpoints, maximum amount is %u",
					arc->actionpoints_num);
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}

		int retval = arc_configure_actionpoint(target, bp_num,
				breakpoint->address, AP_AC_TT_READWRITE, AP_AC_AT_INST_ADDR);

		if (retval == ERROR_OK) {
			/* breakpoint->set stores slot index + 1 (0 means unset). */
			breakpoint->set = bp_num + 1;
			ap_list[bp_num].used = 1;
			ap_list[bp_num].bp_value = breakpoint->address;
			ap_list[bp_num].type = ARC_AP_BREAKPOINT;

			LOG_DEBUG("bpid: %" PRIu32 ", bp_num %u bp_value 0x%" PRIx32,
					breakpoint->unique_id, bp_num, ap_list[bp_num].bp_value);
		}
	} else {
		LOG_DEBUG("ERROR: setting unknown breakpoint type");
		return ERROR_FAIL;
	}

	/* core instruction cache is now invalid. */
	CHECK_RETVAL(arc_cache_invalidate(target));

	return ERROR_OK;
}
  1301. static int arc_unset_breakpoint(struct target *target,
  1302. struct breakpoint *breakpoint)
  1303. {
  1304. int retval = ERROR_OK;
  1305. if (!breakpoint->set) {
  1306. LOG_WARNING("breakpoint not set");
  1307. return ERROR_OK;
  1308. }
  1309. if (breakpoint->type == BKPT_SOFT) {
  1310. /* restore original instruction (kept in target endianness) */
  1311. LOG_DEBUG("bpid: %" PRIu32, breakpoint->unique_id);
  1312. if (breakpoint->length == 4) {
  1313. uint32_t current_instr;
  1314. /* check that user program has not modified breakpoint instruction */
  1315. CHECK_RETVAL(arc_read_instruction_u32(target, breakpoint->address, &current_instr));
  1316. if (current_instr == ARC_SDBBP_32) {
  1317. retval = target_write_buffer(target, breakpoint->address,
  1318. breakpoint->length, breakpoint->orig_instr);
  1319. if (retval != ERROR_OK)
  1320. return retval;
  1321. } else {
  1322. LOG_WARNING("Software breakpoint @0x%" TARGET_PRIxADDR
  1323. " has been overwritten outside of debugger."
  1324. "Expected: @0x%x, got: @0x%" PRIx32,
  1325. breakpoint->address, ARC_SDBBP_32, current_instr);
  1326. }
  1327. } else if (breakpoint->length == 2) {
  1328. uint16_t current_instr;
  1329. /* check that user program has not modified breakpoint instruction */
  1330. CHECK_RETVAL(target_read_u16(target, breakpoint->address, &current_instr));
  1331. if (current_instr == ARC_SDBBP_16) {
  1332. retval = target_write_buffer(target, breakpoint->address,
  1333. breakpoint->length, breakpoint->orig_instr);
  1334. if (retval != ERROR_OK)
  1335. return retval;
  1336. } else {
  1337. LOG_WARNING("Software breakpoint @0x%" TARGET_PRIxADDR
  1338. " has been overwritten outside of debugger. "
  1339. "Expected: 0x%04x, got: 0x%04" PRIx16,
  1340. breakpoint->address, ARC_SDBBP_16, current_instr);
  1341. }
  1342. } else {
  1343. LOG_ERROR("Invalid breakpoint length: target supports only 2 or 4");
  1344. return ERROR_COMMAND_ARGUMENT_INVALID;
  1345. }
  1346. breakpoint->set = 0;
  1347. } else if (breakpoint->type == BKPT_HARD) {
  1348. struct arc_common *arc = target_to_arc(target);
  1349. struct arc_actionpoint *ap_list = arc->actionpoints_list;
  1350. unsigned int bp_num = breakpoint->set - 1;
  1351. if ((breakpoint->set == 0) || (bp_num >= arc->actionpoints_num)) {
  1352. LOG_DEBUG("Invalid actionpoint ID: %u in breakpoint: %" PRIu32,
  1353. bp_num, breakpoint->unique_id);
  1354. return ERROR_OK;
  1355. }
  1356. retval = arc_configure_actionpoint(target, bp_num,
  1357. breakpoint->address, AP_AC_TT_DISABLE, AP_AC_AT_INST_ADDR);
  1358. if (retval == ERROR_OK) {
  1359. breakpoint->set = 0;
  1360. ap_list[bp_num].used = 0;
  1361. ap_list[bp_num].bp_value = 0;
  1362. LOG_DEBUG("bpid: %" PRIu32 " - released actionpoint ID: %i",
  1363. breakpoint->unique_id, bp_num);
  1364. }
  1365. } else {
  1366. LOG_DEBUG("ERROR: unsetting unknown breakpoint type");
  1367. return ERROR_FAIL;
  1368. }
  1369. /* core instruction cache is now invalid. */
  1370. CHECK_RETVAL(arc_cache_invalidate(target));
  1371. return retval;
  1372. }
  1373. static int arc_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
  1374. {
  1375. if (target->state == TARGET_HALTED) {
  1376. return arc_set_breakpoint(target, breakpoint);
  1377. } else {
  1378. LOG_WARNING(" > core was not halted, please try again.");
  1379. return ERROR_TARGET_NOT_HALTED;
  1380. }
  1381. }
  1382. static int arc_remove_breakpoint(struct target *target,
  1383. struct breakpoint *breakpoint)
  1384. {
  1385. if (target->state == TARGET_HALTED) {
  1386. if (breakpoint->set)
  1387. CHECK_RETVAL(arc_unset_breakpoint(target, breakpoint));
  1388. } else {
  1389. LOG_WARNING("target not halted");
  1390. return ERROR_TARGET_NOT_HALTED;
  1391. }
  1392. return ERROR_OK;
  1393. }
  1394. void arc_reset_actionpoints(struct target *target)
  1395. {
  1396. struct arc_common *arc = target_to_arc(target);
  1397. struct arc_actionpoint *ap_list = arc->actionpoints_list;
  1398. struct breakpoint *next_b;
  1399. struct watchpoint *next_w;
  1400. while (target->breakpoints) {
  1401. next_b = target->breakpoints->next;
  1402. arc_remove_breakpoint(target, target->breakpoints);
  1403. free(target->breakpoints->orig_instr);
  1404. free(target->breakpoints);
  1405. target->breakpoints = next_b;
  1406. }
  1407. while (target->watchpoints) {
  1408. next_w = target->watchpoints->next;
  1409. arc_remove_watchpoint(target, target->watchpoints);
  1410. free(target->watchpoints);
  1411. target->watchpoints = next_w;
  1412. }
  1413. for (unsigned int i = 0; i < arc->actionpoints_num; i++) {
  1414. if ((ap_list[i].used) && (ap_list[i].reg_address))
  1415. arc_remove_auxreg_actionpoint(target, ap_list[i].reg_address);
  1416. }
  1417. }
  1418. int arc_set_actionpoints_num(struct target *target, uint32_t ap_num)
  1419. {
  1420. LOG_DEBUG("target=%s actionpoints=%" PRIu32, target_name(target), ap_num);
  1421. struct arc_common *arc = target_to_arc(target);
  1422. /* Make sure that there are no enabled actionpoints in target. */
  1423. arc_reset_actionpoints(target);
  1424. /* Assume that all points have been removed from target. */
  1425. free(arc->actionpoints_list);
  1426. arc->actionpoints_num_avail = ap_num;
  1427. arc->actionpoints_num = ap_num;
  1428. /* calloc can be safely called when ncount == 0. */
  1429. arc->actionpoints_list = calloc(ap_num, sizeof(struct arc_actionpoint));
  1430. if (!arc->actionpoints_list) {
  1431. LOG_ERROR("Unable to allocate memory");
  1432. return ERROR_FAIL;
  1433. }
  1434. return ERROR_OK;
  1435. }
  1436. int arc_add_auxreg_actionpoint(struct target *target,
  1437. uint32_t auxreg_addr, uint32_t transaction)
  1438. {
  1439. unsigned int ap_num = 0;
  1440. int retval = ERROR_OK;
  1441. if (target->state != TARGET_HALTED)
  1442. return ERROR_TARGET_NOT_HALTED;
  1443. struct arc_common *arc = target_to_arc(target);
  1444. struct arc_actionpoint *ap_list = arc->actionpoints_list;
  1445. while (ap_list[ap_num].used)
  1446. ap_num++;
  1447. if (ap_num >= arc->actionpoints_num) {
  1448. LOG_ERROR("No actionpoint free, maximum amount is %u",
  1449. arc->actionpoints_num);
  1450. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1451. }
  1452. retval = arc_configure_actionpoint(target, ap_num,
  1453. auxreg_addr, transaction, AP_AC_AT_AUXREG_ADDR);
  1454. if (retval == ERROR_OK) {
  1455. ap_list[ap_num].used = 1;
  1456. ap_list[ap_num].reg_address = auxreg_addr;
  1457. }
  1458. return retval;
  1459. }
  1460. int arc_remove_auxreg_actionpoint(struct target *target, uint32_t auxreg_addr)
  1461. {
  1462. int retval = ERROR_OK;
  1463. bool ap_found = false;
  1464. unsigned int ap_num = 0;
  1465. if (target->state != TARGET_HALTED)
  1466. return ERROR_TARGET_NOT_HALTED;
  1467. struct arc_common *arc = target_to_arc(target);
  1468. struct arc_actionpoint *ap_list = arc->actionpoints_list;
  1469. while ((ap_list[ap_num].used) && (ap_num < arc->actionpoints_num)) {
  1470. if (ap_list[ap_num].reg_address == auxreg_addr) {
  1471. ap_found = true;
  1472. break;
  1473. }
  1474. ap_num++;
  1475. }
  1476. if (ap_found) {
  1477. retval = arc_configure_actionpoint(target, ap_num,
  1478. auxreg_addr, AP_AC_TT_DISABLE, AP_AC_AT_AUXREG_ADDR);
  1479. if (retval == ERROR_OK) {
  1480. ap_list[ap_num].used = 0;
  1481. ap_list[ap_num].bp_value = 0;
  1482. }
  1483. } else {
  1484. LOG_ERROR("Register actionpoint not found");
  1485. }
  1486. return retval;
  1487. }
  1488. static int arc_set_watchpoint(struct target *target,
  1489. struct watchpoint *watchpoint)
  1490. {
  1491. unsigned int wp_num;
  1492. struct arc_common *arc = target_to_arc(target);
  1493. struct arc_actionpoint *ap_list = arc->actionpoints_list;
  1494. if (watchpoint->set) {
  1495. LOG_WARNING("watchpoint already set");
  1496. return ERROR_OK;
  1497. }
  1498. for (wp_num = 0; wp_num < arc->actionpoints_num; wp_num++) {
  1499. if (!ap_list[wp_num].used)
  1500. break;
  1501. }
  1502. if (wp_num >= arc->actionpoints_num) {
  1503. LOG_ERROR("No free actionpoints, maximum amount is %u",
  1504. arc->actionpoints_num);
  1505. return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
  1506. }
  1507. if (watchpoint->length != 4) {
  1508. LOG_ERROR("Only watchpoints of length 4 are supported");
  1509. return ERROR_TARGET_UNALIGNED_ACCESS;
  1510. }
  1511. int enable = AP_AC_TT_DISABLE;
  1512. switch (watchpoint->rw) {
  1513. case WPT_READ:
  1514. enable = AP_AC_TT_READ;
  1515. break;
  1516. case WPT_WRITE:
  1517. enable = AP_AC_TT_WRITE;
  1518. break;
  1519. case WPT_ACCESS:
  1520. enable = AP_AC_TT_READWRITE;
  1521. break;
  1522. default:
  1523. LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
  1524. return ERROR_FAIL;
  1525. }
  1526. int retval = arc_configure_actionpoint(target, wp_num,
  1527. watchpoint->address, enable, AP_AC_AT_MEMORY_ADDR);
  1528. if (retval == ERROR_OK) {
  1529. watchpoint->set = wp_num + 1;
  1530. ap_list[wp_num].used = 1;
  1531. ap_list[wp_num].bp_value = watchpoint->address;
  1532. ap_list[wp_num].type = ARC_AP_WATCHPOINT;
  1533. LOG_DEBUG("wpid: %" PRIu32 ", wp_num %u wp_value 0x%" PRIx32,
  1534. watchpoint->unique_id, wp_num, ap_list[wp_num].bp_value);
  1535. }
  1536. return retval;
  1537. }
  1538. static int arc_unset_watchpoint(struct target *target,
  1539. struct watchpoint *watchpoint)
  1540. {
  1541. /* get pointers to arch-specific information */
  1542. struct arc_common *arc = target_to_arc(target);
  1543. struct arc_actionpoint *ap_list = arc->actionpoints_list;
  1544. if (!watchpoint->set) {
  1545. LOG_WARNING("watchpoint not set");
  1546. return ERROR_OK;
  1547. }
  1548. unsigned int wp_num = watchpoint->set - 1;
  1549. if ((watchpoint->set == 0) || (wp_num >= arc->actionpoints_num)) {
  1550. LOG_DEBUG("Invalid actionpoint ID: %u in watchpoint: %" PRIu32,
  1551. wp_num, watchpoint->unique_id);
  1552. return ERROR_OK;
  1553. }
  1554. int retval = arc_configure_actionpoint(target, wp_num,
  1555. watchpoint->address, AP_AC_TT_DISABLE, AP_AC_AT_MEMORY_ADDR);
  1556. if (retval == ERROR_OK) {
  1557. watchpoint->set = 0;
  1558. ap_list[wp_num].used = 0;
  1559. ap_list[wp_num].bp_value = 0;
  1560. LOG_DEBUG("wpid: %" PRIu32 " - releasing actionpoint ID: %u",
  1561. watchpoint->unique_id, wp_num);
  1562. }
  1563. return retval;
  1564. }
  1565. static int arc_add_watchpoint(struct target *target,
  1566. struct watchpoint *watchpoint)
  1567. {
  1568. if (target->state != TARGET_HALTED) {
  1569. LOG_WARNING("target not halted");
  1570. return ERROR_TARGET_NOT_HALTED;
  1571. }
  1572. CHECK_RETVAL(arc_set_watchpoint(target, watchpoint));
  1573. return ERROR_OK;
  1574. }
  1575. static int arc_remove_watchpoint(struct target *target,
  1576. struct watchpoint *watchpoint)
  1577. {
  1578. if (target->state != TARGET_HALTED) {
  1579. LOG_WARNING("target not halted");
  1580. return ERROR_TARGET_NOT_HALTED;
  1581. }
  1582. if (watchpoint->set)
  1583. CHECK_RETVAL(arc_unset_watchpoint(target, watchpoint));
  1584. return ERROR_OK;
  1585. }
  1586. static int arc_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
  1587. {
  1588. assert(target);
  1589. assert(hit_watchpoint);
  1590. struct arc_actionpoint *actionpoint = NULL;
  1591. CHECK_RETVAL(get_current_actionpoint(target, &actionpoint));
  1592. if (actionpoint) {
  1593. if (!actionpoint->used)
  1594. LOG_WARNING("Target halted by unused actionpoint.");
  1595. /* If this check fails - that is some sort of an error in OpenOCD. */
  1596. if (actionpoint->type != ARC_AP_WATCHPOINT)
  1597. LOG_WARNING("Target halted by breakpoint, but is treated as a watchpoint.");
  1598. for (struct watchpoint *watchpoint = target->watchpoints;
  1599. watchpoint;
  1600. watchpoint = watchpoint->next) {
  1601. if (actionpoint->bp_value == watchpoint->address) {
  1602. *hit_watchpoint = watchpoint;
  1603. LOG_DEBUG("Hit watchpoint, wpid: %" PRIu32 ", watchpoint num: %i",
  1604. watchpoint->unique_id, watchpoint->set - 1);
  1605. return ERROR_OK;
  1606. }
  1607. }
  1608. }
  1609. return ERROR_FAIL;
  1610. }
  1611. /* Helper function which switches core to single_step mode by
  1612. * doing aux r/w operations. */
  1613. int arc_config_step(struct target *target, int enable_step)
  1614. {
  1615. uint32_t value;
  1616. struct arc_common *arc = target_to_arc(target);
  1617. /* enable core debug step mode */
  1618. if (enable_step) {
  1619. CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG,
  1620. &value));
  1621. value &= ~SET_CORE_AE_BIT; /* clear the AE bit */
  1622. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_STATUS32_REG,
  1623. value));
  1624. LOG_DEBUG(" [status32:0x%08" PRIx32 "]", value);
  1625. /* Doing read-modify-write, because DEBUG might contain manually set
  1626. * bits like UB or ED, which should be preserved. */
  1627. CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info,
  1628. AUX_DEBUG_REG, &value));
  1629. value |= SET_CORE_SINGLE_INSTR_STEP; /* set the IS bit */
  1630. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DEBUG_REG,
  1631. value));
  1632. LOG_DEBUG("core debug step mode enabled [debug-reg:0x%08" PRIx32 "]", value);
  1633. } else { /* disable core debug step mode */
  1634. CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_DEBUG_REG,
  1635. &value));
  1636. value &= ~SET_CORE_SINGLE_INSTR_STEP; /* clear the IS bit */
  1637. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DEBUG_REG,
  1638. value));
  1639. LOG_DEBUG("core debug step mode disabled");
  1640. }
  1641. return ERROR_OK;
  1642. }
/* target_type callback: execute a single instruction.
 *
 * current: non-zero means step from the current PC, zero means write
 *   ADDRESS into the cached PC first.
 * handle_breakpoints: when non-zero, a breakpoint at the step PC is
 *   temporarily removed so it does not immediately re-trigger, and
 *   re-armed after the step.
 *
 * NOTE(review): the statement order below is load-bearing — context must
 * be restored before stepping, and the register cache invalidated before
 * arc_debug_entry() re-reads state. */
int arc_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	/* get pointers to arch-specific information */
	struct arc_common *arc = target_to_arc(target);
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = &(arc->core_and_aux_cache->reg_list[arc->pc_index_in_cache]);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current) {
		/* Update the cached PC; it is written back by arc_restore_context(). */
		buf_set_u32(pc->value, 0, 32, address);
		pc->dirty = 1;
		pc->valid = 1;
	}

	LOG_DEBUG("Target steps one instruction from PC=0x%" PRIx32,
		buf_get_u32(pc->value, 0, 32));

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, buf_get_u32(pc->value, 0, 32));
		if (breakpoint)
			CHECK_RETVAL(arc_unset_breakpoint(target, breakpoint));
	}

	/* restore context */
	CHECK_RETVAL(arc_restore_context(target));

	target->debug_reason = DBG_REASON_SINGLESTEP;

	CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_RESUMED));

	/* disable interrupts while stepping */
	CHECK_RETVAL(arc_enable_interrupts(target, 0));

	/* do a single step */
	CHECK_RETVAL(arc_config_step(target, 1));

	/* make sure we done our step */
	alive_sleep(1);

	/* registers are now invalid */
	register_cache_invalidate(arc->core_and_aux_cache);

	/* Re-arm the breakpoint that was removed for the step. */
	if (breakpoint)
		CHECK_RETVAL(arc_set_breakpoint(target, breakpoint));

	LOG_DEBUG("target stepped ");

	target->state = TARGET_HALTED;

	/* Saving context */
	CHECK_RETVAL(arc_debug_entry(target));
	CHECK_RETVAL(target_call_event_callbacks(target, TARGET_EVENT_HALTED));

	return ERROR_OK;
}
  1689. /* This function invalidates icache. */
  1690. static int arc_icache_invalidate(struct target *target)
  1691. {
  1692. uint32_t value;
  1693. struct arc_common *arc = target_to_arc(target);
  1694. /* Don't waste time if already done. */
  1695. if (!arc->has_icache || arc->icache_invalidated)
  1696. return ERROR_OK;
  1697. LOG_DEBUG("Invalidating I$.");
  1698. value = IC_IVIC_INVALIDATE; /* invalidate I$ */
  1699. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_IC_IVIC_REG, value));
  1700. arc->icache_invalidated = true;
  1701. return ERROR_OK;
  1702. }
  1703. /* This function invalidates dcache */
  1704. static int arc_dcache_invalidate(struct target *target)
  1705. {
  1706. uint32_t value, dc_ctrl_value;
  1707. struct arc_common *arc = target_to_arc(target);
  1708. if (!arc->has_dcache || arc->dcache_invalidated)
  1709. return ERROR_OK;
  1710. LOG_DEBUG("Invalidating D$.");
  1711. CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, &value));
  1712. dc_ctrl_value = value;
  1713. value &= ~DC_CTRL_IM;
  1714. /* set DC_CTRL invalidate mode to invalidate-only (no flushing!!) */
  1715. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, value));
  1716. value = DC_IVDC_INVALIDATE; /* invalidate D$ */
  1717. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_IVDC_REG, value));
  1718. /* restore DC_CTRL invalidate mode */
  1719. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, dc_ctrl_value));
  1720. arc->dcache_invalidated = true;
  1721. return ERROR_OK;
  1722. }
  1723. /* This function invalidates l2 cache. */
  1724. static int arc_l2cache_invalidate(struct target *target)
  1725. {
  1726. uint32_t value, slc_ctrl_value;
  1727. struct arc_common *arc = target_to_arc(target);
  1728. if (!arc->has_l2cache || arc->l2cache_invalidated)
  1729. return ERROR_OK;
  1730. LOG_DEBUG("Invalidating L2$.");
  1731. CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_CTRL, &value));
  1732. slc_ctrl_value = value;
  1733. value &= ~L2_CTRL_IM;
  1734. /* set L2_CTRL invalidate mode to invalidate-only (no flushing!!) */
  1735. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_CTRL, value));
  1736. /* invalidate L2$ */
  1737. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_INV, L2_INV_IV));
  1738. /* Wait until invalidate operation ends */
  1739. do {
  1740. LOG_DEBUG("Waiting for invalidation end.");
  1741. CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_CTRL, &value));
  1742. } while (value & L2_CTRL_BS);
  1743. /* restore L2_CTRL invalidate mode */
  1744. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_CTRL, slc_ctrl_value));
  1745. arc->l2cache_invalidated = true;
  1746. return ERROR_OK;
  1747. }
  1748. int arc_cache_invalidate(struct target *target)
  1749. {
  1750. CHECK_RETVAL(arc_icache_invalidate(target));
  1751. CHECK_RETVAL(arc_dcache_invalidate(target));
  1752. CHECK_RETVAL(arc_l2cache_invalidate(target));
  1753. return ERROR_OK;
  1754. }
  1755. /* Flush data cache. This function is cheap to call and return quickly if D$
  1756. * already has been flushed since target had been halted. JTAG debugger reads
  1757. * values directly from memory, bypassing cache, so if there are unflushed
  1758. * lines debugger will read invalid values, which will cause a lot of troubles.
  1759. * */
  1760. int arc_dcache_flush(struct target *target)
  1761. {
  1762. uint32_t value, dc_ctrl_value;
  1763. bool has_to_set_dc_ctrl_im;
  1764. struct arc_common *arc = target_to_arc(target);
  1765. /* Don't waste time if already done. */
  1766. if (!arc->has_dcache || arc->dcache_flushed)
  1767. return ERROR_OK;
  1768. LOG_DEBUG("Flushing D$.");
  1769. /* Store current value of DC_CTRL */
  1770. CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, &dc_ctrl_value));
  1771. /* Set DC_CTRL invalidate mode to flush (if not already set) */
  1772. has_to_set_dc_ctrl_im = (dc_ctrl_value & DC_CTRL_IM) == 0;
  1773. if (has_to_set_dc_ctrl_im) {
  1774. value = dc_ctrl_value | DC_CTRL_IM;
  1775. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, value));
  1776. }
  1777. /* Flush D$ */
  1778. value = DC_IVDC_INVALIDATE;
  1779. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_IVDC_REG, value));
  1780. /* Restore DC_CTRL invalidate mode (even of flush failed) */
  1781. if (has_to_set_dc_ctrl_im)
  1782. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, AUX_DC_CTRL_REG, dc_ctrl_value));
  1783. arc->dcache_flushed = true;
  1784. return ERROR_OK;
  1785. }
  1786. /* This function flushes l2cache. */
  1787. static int arc_l2cache_flush(struct target *target)
  1788. {
  1789. uint32_t value;
  1790. struct arc_common *arc = target_to_arc(target);
  1791. /* Don't waste time if already done. */
  1792. if (!arc->has_l2cache || arc->l2cache_flushed)
  1793. return ERROR_OK;
  1794. LOG_DEBUG("Flushing L2$.");
  1795. /* Flush L2 cache */
  1796. CHECK_RETVAL(arc_jtag_write_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_FLUSH, L2_FLUSH_FL));
  1797. /* Wait until flush operation ends */
  1798. do {
  1799. LOG_DEBUG("Waiting for flushing end.");
  1800. CHECK_RETVAL(arc_jtag_read_aux_reg_one(&arc->jtag_info, SLC_AUX_CACHE_CTRL, &value));
  1801. } while (value & L2_CTRL_BS);
  1802. arc->l2cache_flushed = true;
  1803. return ERROR_OK;
  1804. }
  1805. int arc_cache_flush(struct target *target)
  1806. {
  1807. CHECK_RETVAL(arc_dcache_flush(target));
  1808. CHECK_RETVAL(arc_l2cache_flush(target));
  1809. return ERROR_OK;
  1810. }
/* ARC v2 target */
/* target_type callback table binding OpenOCD's generic target layer to the
 * ARC-specific implementations in this file. NULL entries are features that
 * are unsupported or not yet implemented for ARC. */
struct target_type arcv2_target = {
	.name = "arcv2",

	.poll = arc_poll,

	.arch_state = arc_arch_state,

	/* TODO That seems like something similar to metaware hostlink, so perhaps
	 * we can exploit this in the future. */
	.target_request_data = NULL,

	/* Run control. */
	.halt = arc_halt,
	.resume = arc_resume,
	.step = arc_step,

	.assert_reset = arc_assert_reset,
	.deassert_reset = arc_deassert_reset,

	/* TODO Implement soft_reset_halt */
	.soft_reset_halt = NULL,

	.get_gdb_reg_list = arc_get_gdb_reg_list,

	/* Memory access. */
	.read_memory = arc_mem_read,
	.write_memory = arc_mem_write,
	.checksum_memory = NULL,
	.blank_check_memory = NULL,

	/* Breakpoints and watchpoints (backed by ARC actionpoints). */
	.add_breakpoint = arc_add_breakpoint,
	.add_context_breakpoint = NULL,
	.add_hybrid_breakpoint = NULL,
	.remove_breakpoint = arc_remove_breakpoint,
	.add_watchpoint = arc_add_watchpoint,
	.remove_watchpoint = arc_remove_watchpoint,
	.hit_watchpoint = arc_hit_watchpoint,

	/* No target-resident algorithm execution support. */
	.run_algorithm = NULL,
	.start_algorithm = NULL,
	.wait_algorithm = NULL,

	.commands = arc_monitor_command_handlers,
	.target_create = arc_target_create,
	.init_target = arc_init_target,
	.deinit_target = arc_deinit_target,
	.examine = arc_examine,

	/* MMU/physical-memory callbacks are not provided. */
	.virt2phys = NULL,
	.read_phys_memory = NULL,
	.write_phys_memory = NULL,
	.mmu = NULL,
};