You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 
 
 

1253 lines
42 KiB

  1. /*
  2. * Copyright(c) 2013-2016 Intel Corporation.
  3. *
  4. * Adrian Burns (adrian.burns@intel.com)
  5. * Thomas Faust (thomas.faust@intel.com)
  6. * Ivan De Cesaris (ivan.de.cesaris@intel.com)
  7. * Julien Carreno (julien.carreno@intel.com)
  8. * Jeffrey Maxwell (jeffrey.r.maxwell@intel.com)
  9. * Jessica Gomez (jessica.gomez.hernandez@intel.com)
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License as published by
  13. * the Free Software Foundation; either version 2 of the License, or
  14. * (at your option) any later version.
  15. *
  16. * This program is distributed in the hope that it will be useful, but
  17. * WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  19. * General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU General Public License
  22. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  23. *
  24. * Contact Information:
  25. * Intel Corporation
  26. */
  27. /*
  28. * @file
  29. * This implements the probemode operations for Lakemont 1 (LMT1).
  30. */
  31. #ifdef HAVE_CONFIG_H
  32. #include "config.h"
  33. #endif
  34. #include <helper/log.h>
  35. #include "target.h"
  36. #include "target_type.h"
  37. #include "lakemont.h"
  38. #include "register.h"
  39. #include "breakpoints.h"
  40. #include "x86_32_common.h"
/* Low-level JTAG scan helpers (IR/DR scans against the Lakemont tap). */
static int irscan(struct target *t, uint8_t *out,
		uint8_t *in, uint8_t ir_len);
static int drscan(struct target *t, uint8_t *out, uint8_t *in, uint8_t len);
/* Register cache <-> core shadow SRAM synchronization at halt/resume. */
static int save_context(struct target *target);
static int restore_context(struct target *target);
/* Probemode entry/exit and the halt/resume state machine. */
static uint32_t get_tapstatus(struct target *t);
static int enter_probemode(struct target *t);
static int exit_probemode(struct target *t);
static int halt_prep(struct target *t);
static int do_halt(struct target *t);
static int do_resume(struct target *t);
/* Bulk and individual hardware register access via probemode. */
static int read_all_core_hw_regs(struct target *t);
static int write_all_core_hw_regs(struct target *t);
static int read_hw_reg(struct target *t,
		int reg, uint32_t *regval, uint8_t cache);
static int write_hw_reg(struct target *t,
		int reg, uint32_t regval, uint8_t cache);
static struct reg_cache *lakemont_build_reg_cache
		(struct target *target);
/* Probemode Instruction Register (PIR) submission helpers. */
static int submit_reg_pir(struct target *t, int num);
static int submit_instruction_pir(struct target *t, int num);
static int submit_pir(struct target *t, uint64_t op);
/* reg_cache per-register get/set callbacks. */
static int lakemont_get_core_reg(struct reg *reg);
static int lakemont_set_core_reg(struct reg *reg, uint8_t *buf);
/* Shared scan buffer used by every IR/DR scan in this file. */
static struct scan_blk scan;
/* registers and opcodes for register access, pm_idx is used to identify the
 * registers that are modified for lakemont probemode specific operations
 */
static const struct {
	uint8_t id;		/* register identifier (index constants from lakemont.h) */
	const char *name;	/* register name reported to gdb */
	uint64_t op;		/* PIR opcode addressing the register in core shadow SRAM */
	uint8_t pm_idx;		/* pm_regs[] slot for probemode-modified regs,
				 * or NOT_PMREG / NOT_AVAIL_REG */
	unsigned bits;		/* register width in bits */
	enum reg_type type;	/* gdb register type */
	const char *group;	/* gdb register group */
	const char *feature;	/* gdb target-description feature name */
} regs[] = {
	/* general purpose registers */
	{ EAX, "eax", 0x000000D01D660000, 0, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ECX, "ecx", 0x000000501D660000, 1, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ EDX, "edx", 0x000000901D660000, 2, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ EBX, "ebx", 0x000000101D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ESP, "esp", 0x000000E01D660000, NOT_PMREG, 32, REG_TYPE_DATA_PTR, "general", "org.gnu.gdb.i386.core" },
	{ EBP, "ebp", 0x000000601D660000, NOT_PMREG, 32, REG_TYPE_DATA_PTR, "general", "org.gnu.gdb.i386.core" },
	{ ESI, "esi", 0x000000A01D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ EDI, "edi", 0x000000201D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	/* instruction pointer & flags */
	{ EIP, "eip", 0x000000C01D660000, 3, 32, REG_TYPE_CODE_PTR, "general", "org.gnu.gdb.i386.core" },
	{ EFLAGS, "eflags", 0x000000401D660000, 4, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	/* segment registers */
	{ CS, "cs", 0x000000281D660000, 5, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ SS, "ss", 0x000000C81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ DS, "ds", 0x000000481D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ES, "es", 0x000000A81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FS, "fs", 0x000000881D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ GS, "gs", 0x000000081D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	/* floating point unit registers - not accessible via JTAG - here to satisfy GDB */
	{ ST0, "st0", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST1, "st1", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST2, "st2", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST3, "st3", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST4, "st4", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST5, "st5", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST6, "st6", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST7, "st7", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FCTRL, "fctrl", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FSTAT, "fstat", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FTAG, "ftag", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FISEG, "fiseg", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FIOFF, "fioff", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FOSEG, "foseg", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FOOFF, "fooff", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FOP, "fop", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	/* control registers */
	{ CR0, "cr0", 0x000000001D660000, 6, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ CR2, "cr2", 0x000000BC1D660000, 7, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ CR3, "cr3", 0x000000801D660000, 8, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ CR4, "cr4", 0x0000002C1D660000, 9, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	/* debug registers */
	{ DR0, "dr0", 0x0000007C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DR1, "dr1", 0x000000FC1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DR2, "dr2", 0x000000021D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DR3, "dr3", 0x000000821D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DR6, "dr6", 0x000000301D660000, 10, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DR7, "dr7", 0x000000B01D660000, 11, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	/* descriptor tables */
	{ IDTB, "idtbase", 0x000000581D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ IDTL, "idtlimit", 0x000000D81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ IDTAR, "idtar", 0x000000981D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GDTB, "gdtbase", 0x000000B81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GDTL, "gdtlimit", 0x000000781D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GDTAR, "gdtar", 0x000000381D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ TR, "tr", 0x000000701D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ LDTR, "ldtr", 0x000000F01D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ LDTB, "ldbase", 0x000000041D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ LDTL, "ldlimit", 0x000000841D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ LDTAR, "ldtar", 0x000000F81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	/* segment registers */
	{ CSB, "csbase", 0x000000F41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ CSL, "cslimit", 0x0000000C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ CSAR, "csar", 0x000000741D660000, 12, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DSB, "dsbase", 0x000000941D660000, 13, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DSL, "dslimit", 0x000000541D660000, 14, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DSAR, "dsar", 0x000000141D660000, 15, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ ESB, "esbase", 0x0000004C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ ESL, "eslimit", 0x000000CC1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ ESAR, "esar", 0x0000008C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ FSB, "fsbase", 0x000000641D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ FSL, "fslimit", 0x000000E41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ FSAR, "fsar", 0x000000A41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GSB, "gsbase", 0x000000C41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GSL, "gslimit", 0x000000241D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GSAR, "gsar", 0x000000441D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ SSB, "ssbase", 0x000000341D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ SSL, "sslimit", 0x000000B41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ SSAR, "ssar", 0x000000D41D660000, 16, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ TSSB, "tssbase", 0x000000E81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ TSSL, "tsslimit", 0x000000181D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ TSSAR, "tssar", 0x000000681D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	/* probemode control register */
	{ PMCR, "pmcr", 0x000000421D660000, 17, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
};
/* Probemode Instruction Register (PIR) opcodes for memory/IO access and
 * cache maintenance, submitted through submit_instruction_pir().
 */
static const struct {
	uint8_t id;		/* instruction identifier (constants from lakemont.h) */
	const char *name;	/* mnemonic used in debug logging */
	uint64_t op;		/* 64-bit PIR opcode */
} instructions[] = {
	/* memory read/write */
	{ MEMRDB32, "MEMRDB32", 0x0909090909090851 },
	{ MEMRDB16, "MEMRDB16", 0x09090909090851E6 },
	{ MEMRDH32, "MEMRDH32", 0x090909090908D166 },
	{ MEMRDH16, "MEMRDH16", 0x090909090908D1E6 },
	{ MEMRDW32, "MEMRDW32", 0x09090909090908D1 },
	{ MEMRDW16, "MEMRDW16", 0x0909090908D1E666 },
	{ MEMWRB32, "MEMWRB32", 0x0909090909090811 },
	{ MEMWRB16, "MEMWRB16", 0x09090909090811E6 },
	{ MEMWRH32, "MEMWRH32", 0x0909090909089166 },
	{ MEMWRH16, "MEMWRH16", 0x09090909090891E6 },
	{ MEMWRW32, "MEMWRW32", 0x0909090909090891 },
	{ MEMWRW16, "MEMWRW16", 0x090909090891E666 },
	/* IO read/write */
	{ IORDB32, "IORDB32", 0x0909090909090937 },
	{ IORDB16, "IORDB16", 0x09090909090937E6 },
	{ IORDH32, "IORDH32", 0x090909090909B766 },
	{ IORDH16, "IORDH16", 0x090909090909B7E6 },
	{ IORDW32, "IORDW32", 0x09090909090909B7 },
	{ IORDW16, "IORDW16", 0x0909090909B7E666 },
	{ IOWRB32, "IOWRB32", 0x0909090909090977 },
	{ IOWRB16, "IOWRB16", 0x09090909090977E6 },
	{ IOWRH32, "IOWRH32", 0x090909090909F766 },
	{ IOWRH16, "IOWRH16", 0x090909090909F7E6 },
	{ IOWRW32, "IOWRW32", 0x09090909090909F7 },
	{ IOWRW16, "IOWRW16", 0x0909090909F7E666 },
	/* lakemont1 core shadow ram access opcodes */
	{ SRAMACCESS, "SRAMACCESS", 0x0000000E9D660000 },
	{ SRAM2PDR, "SRAM2PDR", 0x4CF0000000000000 },
	{ PDR2SRAM, "PDR2SRAM", 0x0CF0000000000000 },
	{ WBINVD, "WBINVD", 0x09090909090990F0 },
};
  201. bool check_not_halted(const struct target *t)
  202. {
  203. bool halted = t->state == TARGET_HALTED;
  204. if (!halted)
  205. LOG_ERROR("target running, halt it first");
  206. return !halted;
  207. }
  208. static int irscan(struct target *t, uint8_t *out,
  209. uint8_t *in, uint8_t ir_len)
  210. {
  211. int retval = ERROR_OK;
  212. struct x86_32_common *x86_32 = target_to_x86_32(t);
  213. if (!t->tap) {
  214. retval = ERROR_FAIL;
  215. LOG_ERROR("%s invalid target tap", __func__);
  216. return retval;
  217. }
  218. if (ir_len != t->tap->ir_length) {
  219. retval = ERROR_FAIL;
  220. if (t->tap->enabled)
  221. LOG_ERROR("%s tap enabled but tap irlen=%d",
  222. __func__, t->tap->ir_length);
  223. else
  224. LOG_ERROR("%s tap not enabled and irlen=%d",
  225. __func__, t->tap->ir_length);
  226. return retval;
  227. }
  228. struct scan_field *fields = &scan.field;
  229. fields->num_bits = ir_len;
  230. fields->out_value = out;
  231. fields->in_value = in;
  232. jtag_add_ir_scan(x86_32->curr_tap, fields, TAP_IDLE);
  233. if (x86_32->flush) {
  234. retval = jtag_execute_queue();
  235. if (retval != ERROR_OK)
  236. LOG_ERROR("%s failed to execute queue", __func__);
  237. }
  238. return retval;
  239. }
  240. static int drscan(struct target *t, uint8_t *out, uint8_t *in, uint8_t len)
  241. {
  242. int retval = ERROR_OK;
  243. uint64_t data = 0;
  244. struct x86_32_common *x86_32 = target_to_x86_32(t);
  245. if (!t->tap) {
  246. retval = ERROR_FAIL;
  247. LOG_ERROR("%s invalid target tap", __func__);
  248. return retval;
  249. }
  250. if (len > MAX_SCAN_SIZE || 0 == len) {
  251. retval = ERROR_FAIL;
  252. LOG_ERROR("%s data len is %d bits, max is %d bits",
  253. __func__, len, MAX_SCAN_SIZE);
  254. return retval;
  255. }
  256. struct scan_field *fields = &scan.field;
  257. fields->out_value = out;
  258. fields->in_value = in;
  259. fields->num_bits = len;
  260. jtag_add_dr_scan(x86_32->curr_tap, 1, fields, TAP_IDLE);
  261. if (x86_32->flush) {
  262. retval = jtag_execute_queue();
  263. if (retval != ERROR_OK) {
  264. LOG_ERROR("%s drscan failed to execute queue", __func__);
  265. return retval;
  266. }
  267. }
  268. if (in) {
  269. if (len >= 8) {
  270. for (int n = (len / 8) - 1 ; n >= 0; n--)
  271. data = (data << 8) + *(in+n);
  272. } else
  273. LOG_DEBUG("dr in 0x%02" PRIx8, *in);
  274. } else {
  275. LOG_ERROR("%s no drscan data", __func__);
  276. retval = ERROR_FAIL;
  277. }
  278. return retval;
  279. }
  280. static int save_context(struct target *t)
  281. {
  282. int err;
  283. /* read core registers from lakemont sram */
  284. err = read_all_core_hw_regs(t);
  285. if (err != ERROR_OK) {
  286. LOG_ERROR("%s error reading regs", __func__);
  287. return err;
  288. }
  289. return ERROR_OK;
  290. }
  291. static int restore_context(struct target *t)
  292. {
  293. int err = ERROR_OK;
  294. uint32_t i;
  295. struct x86_32_common *x86_32 = target_to_x86_32(t);
  296. /* write core regs into the core PM SRAM from the reg_cache */
  297. err = write_all_core_hw_regs(t);
  298. if (err != ERROR_OK) {
  299. LOG_ERROR("%s error writing regs", __func__);
  300. return err;
  301. }
  302. for (i = 0; i < (x86_32->cache->num_regs); i++) {
  303. x86_32->cache->reg_list[i].dirty = false;
  304. x86_32->cache->reg_list[i].valid = false;
  305. }
  306. return err;
  307. }
/*
 * We keep reg_cache in sync with hardware at halt/resume time; we avoid
 * writing to real hardware here because pm_regs reflects the hardware
 * while we are halted, and reg_cache then syncs with hw on resume.
 * TODO - for "reg eip force" to work, it assumes get/set read and
 * write from hardware; there may be other reasons too, since other
 * openocd targets generally read/write hardware in get/set - watch this!
 */
  316. static int lakemont_get_core_reg(struct reg *reg)
  317. {
  318. int retval = ERROR_OK;
  319. struct lakemont_core_reg *lakemont_reg = reg->arch_info;
  320. struct target *t = lakemont_reg->target;
  321. if (check_not_halted(t))
  322. return ERROR_TARGET_NOT_HALTED;
  323. LOG_DEBUG("reg=%s, value=0x%08" PRIx32, reg->name,
  324. buf_get_u32(reg->value, 0, 32));
  325. return retval;
  326. }
  327. static int lakemont_set_core_reg(struct reg *reg, uint8_t *buf)
  328. {
  329. struct lakemont_core_reg *lakemont_reg = reg->arch_info;
  330. struct target *t = lakemont_reg->target;
  331. uint32_t value = buf_get_u32(buf, 0, 32);
  332. LOG_DEBUG("reg=%s, newval=0x%08" PRIx32, reg->name, value);
  333. if (check_not_halted(t))
  334. return ERROR_TARGET_NOT_HALTED;
  335. buf_set_u32(reg->value, 0, 32, value);
  336. reg->dirty = true;
  337. reg->valid = true;
  338. return ERROR_OK;
  339. }
static const struct reg_arch_type lakemont_reg_type = {
	/* these get called if reg_cache doesn't have a "valid" value
	 * of an individual reg eg "reg eip" but not for "reg" block
	 */
	.get = lakemont_get_core_reg,
	.set = lakemont_set_core_reg,
};
  347. struct reg_cache *lakemont_build_reg_cache(struct target *t)
  348. {
  349. struct x86_32_common *x86_32 = target_to_x86_32(t);
  350. int num_regs = ARRAY_SIZE(regs);
  351. struct reg_cache **cache_p = register_get_last_cache_p(&t->reg_cache);
  352. struct reg_cache *cache = malloc(sizeof(struct reg_cache));
  353. struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
  354. struct lakemont_core_reg *arch_info = malloc(sizeof(struct lakemont_core_reg) * num_regs);
  355. struct reg_feature *feature;
  356. int i;
  357. if (!cache || !reg_list || !arch_info) {
  358. free(cache);
  359. free(reg_list);
  360. free(arch_info);
  361. LOG_ERROR("%s out of memory", __func__);
  362. return NULL;
  363. }
  364. /* Build the process context cache */
  365. cache->name = "lakemont registers";
  366. cache->next = NULL;
  367. cache->reg_list = reg_list;
  368. cache->num_regs = num_regs;
  369. (*cache_p) = cache;
  370. x86_32->cache = cache;
  371. for (i = 0; i < num_regs; i++) {
  372. arch_info[i].target = t;
  373. arch_info[i].x86_32_common = x86_32;
  374. arch_info[i].op = regs[i].op;
  375. arch_info[i].pm_idx = regs[i].pm_idx;
  376. reg_list[i].name = regs[i].name;
  377. reg_list[i].size = 32;
  378. reg_list[i].value = calloc(1, 4);
  379. reg_list[i].dirty = false;
  380. reg_list[i].valid = false;
  381. reg_list[i].type = &lakemont_reg_type;
  382. reg_list[i].arch_info = &arch_info[i];
  383. reg_list[i].group = regs[i].group;
  384. reg_list[i].number = i;
  385. reg_list[i].exist = true;
  386. reg_list[i].caller_save = true; /* gdb defaults to true */
  387. feature = calloc(1, sizeof(struct reg_feature));
  388. if (feature) {
  389. feature->name = regs[i].feature;
  390. reg_list[i].feature = feature;
  391. } else
  392. LOG_ERROR("%s unable to allocate feature list", __func__);
  393. reg_list[i].reg_data_type = calloc(1, sizeof(struct reg_data_type));
  394. if (reg_list[i].reg_data_type)
  395. reg_list[i].reg_data_type->type = regs[i].type;
  396. else
  397. LOG_ERROR("%s unable to allocate reg type list", __func__);
  398. }
  399. return cache;
  400. }
  401. static uint32_t get_tapstatus(struct target *t)
  402. {
  403. scan.out[0] = TAPSTATUS;
  404. if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
  405. return 0;
  406. if (drscan(t, NULL, scan.out, TS_SIZE) != ERROR_OK)
  407. return 0;
  408. return buf_get_u32(scan.out, 0, 32);
  409. }
/* Request probemode entry and poll the tap status until the core is in.
 * Returns ERROR_OK if the core is (or already was) in probemode,
 * ERROR_FAIL on scan errors or if entry is not observed within the
 * retry budget.
 */
static int enter_probemode(struct target *t)
{
	uint32_t tapstatus = 0;
	int retries = 100;

	tapstatus = get_tapstatus(t);
	LOG_DEBUG("TS before PM enter = 0x%08" PRIx32, tapstatus);
	if (tapstatus & TS_PM_BIT) {
		LOG_DEBUG("core already in probemode");
		return ERROR_OK;
	}
	/* select the PROBEMODE IR and shift in a single 1 bit to request entry */
	scan.out[0] = PROBEMODE;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	scan.out[0] = 1;
	if (drscan(t, scan.out, scan.in, 1) != ERROR_OK)
		return ERROR_FAIL;
	/* entered when PM is set and the enter-PM request bit has cleared */
	while (retries--) {
		tapstatus = get_tapstatus(t);
		LOG_DEBUG("TS after PM enter = 0x%08" PRIx32, tapstatus);
		if ((tapstatus & TS_PM_BIT) && (!(tapstatus & TS_EN_PM_BIT)))
			return ERROR_OK;
	}
	LOG_ERROR("%s PM enter error, tapstatus = 0x%08" PRIx32
			, __func__, tapstatus);
	return ERROR_FAIL;
}
  436. static int exit_probemode(struct target *t)
  437. {
  438. uint32_t tapstatus = get_tapstatus(t);
  439. LOG_DEBUG("TS before PM exit = 0x%08" PRIx32, tapstatus);
  440. if (!(tapstatus & TS_PM_BIT)) {
  441. LOG_USER("core not in PM");
  442. return ERROR_OK;
  443. }
  444. scan.out[0] = PROBEMODE;
  445. if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
  446. return ERROR_FAIL;
  447. scan.out[0] = 0;
  448. if (drscan(t, scan.out, scan.in, 1) != ERROR_OK)
  449. return ERROR_FAIL;
  450. return ERROR_OK;
  451. }
/* do what's needed to properly enter probemode for debug on lakemont */
static int halt_prep(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);

	/* install probemode DS/CS descriptor values and DR7 so memory can be
	 * accessed regardless of the interrupted context
	 */
	if (write_hw_reg(t, DSB, PM_DSB, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSB].name, PM_DSB);
	if (write_hw_reg(t, DSL, PM_DSL, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSL].name, PM_DSL);
	if (write_hw_reg(t, DSAR, PM_DSAR, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write DSAR 0x%08" PRIx32, PM_DSAR);
	if (write_hw_reg(t, CSB, PM_DSB, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[CSB].name, PM_DSB);
	if (write_hw_reg(t, CSL, PM_DSL, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[CSL].name, PM_DSL);
	if (write_hw_reg(t, DR7, PM_DR7, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write DR7 0x%08" PRIx32, PM_DR7);

	/* values below were captured into reg_cache by save_context() */
	uint32_t eflags = buf_get_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32);
	uint32_t csar = buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32);
	uint32_t ssar = buf_get_u32(x86_32->cache->reg_list[SSAR].value, 0, 32);
	uint32_t cr0 = buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32);

	/* clear VM86 and IF bits if they are set */
	LOG_DEBUG("EFLAGS = 0x%08" PRIx32 ", VM86 = %d, IF = %d", eflags,
			eflags & EFLAGS_VM86 ? 1 : 0,
			eflags & EFLAGS_IF ? 1 : 0);
	if ((eflags & EFLAGS_VM86) || (eflags & EFLAGS_IF)) {
		x86_32->pm_regs[I(EFLAGS)] = eflags & ~(EFLAGS_VM86 | EFLAGS_IF);
		if (write_hw_reg(t, EFLAGS, x86_32->pm_regs[I(EFLAGS)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("EFLAGS now = 0x%08" PRIx32 ", VM86 = %d, IF = %d",
				x86_32->pm_regs[I(EFLAGS)],
				x86_32->pm_regs[I(EFLAGS)] & EFLAGS_VM86 ? 1 : 0,
				x86_32->pm_regs[I(EFLAGS)] & EFLAGS_IF ? 1 : 0);
	}

	/* set CPL to 0 for memory access */
	if (csar & CSAR_DPL) {
		x86_32->pm_regs[I(CSAR)] = csar & ~CSAR_DPL;
		if (write_hw_reg(t, CSAR, x86_32->pm_regs[I(CSAR)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("write CSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(CSAR)]);
	}
	if (ssar & SSAR_DPL) {
		x86_32->pm_regs[I(SSAR)] = ssar & ~SSAR_DPL;
		if (write_hw_reg(t, SSAR, x86_32->pm_regs[I(SSAR)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("write SSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(SSAR)]);
	}

	/* if caches are enabled, disable and flush, depending on the core version */
	if (!(x86_32->core_type == LMT3_5) && !(cr0 & CR0_CD)) {
		LOG_DEBUG("caching enabled CR0 = 0x%08" PRIx32, cr0);
		if (cr0 & CR0_PG) {
			/* paging is turned off while the flush runs */
			x86_32->pm_regs[I(CR0)] = cr0 & ~CR0_PG;
			if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("cleared paging CR0_PG = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
			/* submit wbinvd to flush cache */
			/* NOTE(review): WBINVD is an instructions[] id, but it is
			 * submitted via submit_reg_pir(), which indexes the regs[]
			 * table for logging and opcode lookup - verify this is
			 * intentional and not meant to be submit_instruction_pir().
			 */
			if (submit_reg_pir(t, WBINVD) != ERROR_OK)
				return ERROR_FAIL;
			x86_32->pm_regs[I(CR0)] =
				x86_32->pm_regs[I(CR0)] | (CR0_CD | CR0_NW | CR0_PG);
			if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("set CD, NW and PG, CR0 = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
		}
	}
	return ERROR_OK;
}
  524. static int do_halt(struct target *t)
  525. {
  526. /* needs proper handling later if doing a halt errors out */
  527. t->state = TARGET_DEBUG_RUNNING;
  528. if (enter_probemode(t) != ERROR_OK)
  529. return ERROR_FAIL;
  530. return lakemont_update_after_probemode_entry(t);
  531. }
  532. /* we need to expose the update to be able to complete the reset at SoC level */
  533. int lakemont_update_after_probemode_entry(struct target *t)
  534. {
  535. if (save_context(t) != ERROR_OK)
  536. return ERROR_FAIL;
  537. if (halt_prep(t) != ERROR_OK)
  538. return ERROR_FAIL;
  539. t->state = TARGET_HALTED;
  540. return target_call_event_callbacks(t, TARGET_EVENT_HALTED);
  541. }
  542. static int do_resume(struct target *t)
  543. {
  544. /* needs proper handling later */
  545. t->state = TARGET_DEBUG_RUNNING;
  546. if (restore_context(t) != ERROR_OK)
  547. return ERROR_FAIL;
  548. if (exit_probemode(t) != ERROR_OK)
  549. return ERROR_FAIL;
  550. t->state = TARGET_RUNNING;
  551. t->debug_reason = DBG_REASON_NOTHALTED;
  552. LOG_USER("target running");
  553. return target_call_event_callbacks(t, TARGET_EVENT_RESUMED);
  554. }
  555. static int read_all_core_hw_regs(struct target *t)
  556. {
  557. int err;
  558. uint32_t regval;
  559. unsigned i;
  560. struct x86_32_common *x86_32 = target_to_x86_32(t);
  561. for (i = 0; i < (x86_32->cache->num_regs); i++) {
  562. if (NOT_AVAIL_REG == regs[i].pm_idx)
  563. continue;
  564. err = read_hw_reg(t, regs[i].id, &regval, 1);
  565. if (err != ERROR_OK) {
  566. LOG_ERROR("%s error saving reg %s",
  567. __func__, x86_32->cache->reg_list[i].name);
  568. return err;
  569. }
  570. }
  571. LOG_DEBUG("read_all_core_hw_regs read %u registers ok", i);
  572. return ERROR_OK;
  573. }
/* Write every JTAG-accessible core register from the reg cache back into
 * shadow SRAM (cache flag = 1 makes write_hw_reg source the value from
 * the cache); registers marked NOT_AVAIL_REG are skipped.
 */
static int write_all_core_hw_regs(struct target *t)
{
	int err;
	unsigned i;
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	for (i = 0; i < (x86_32->cache->num_regs); i++) {
		if (NOT_AVAIL_REG == regs[i].pm_idx)
			continue;
		/* NOTE(review): passes the loop index i where the read path
		 * (read_all_core_hw_regs) passes regs[i].id - this assumes
		 * each entry's id equals its table index; confirm.
		 */
		err = write_hw_reg(t, i, 0, 1);
		if (err != ERROR_OK) {
			LOG_ERROR("%s error restoring reg %s",
					__func__, x86_32->cache->reg_list[i].name);
			return err;
		}
	}
	LOG_DEBUG("write_all_core_hw_regs wrote %u registers ok", i);
	return ERROR_OK;
}
/* read reg from lakemont core shadow ram, update reg cache if needed
 *
 * Batches the PIR submissions (register address, SRAMACCESS, SRAM2PDR)
 * with flush disabled, then re-enables flushing so the final PDR read
 * executes the whole queued JTAG sequence in one go.
 */
static int read_hw_reg(struct target *t, int reg, uint32_t *regval, uint8_t cache)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	struct lakemont_core_reg *arch_info;
	arch_info = x86_32->cache->reg_list[reg].arch_info;
	x86_32->flush = 0; /* don't flush scans till we have a batch */
	if (submit_reg_pir(t, reg) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_instruction_pir(t, SRAMACCESS) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_instruction_pir(t, SRAM2PDR) != ERROR_OK)
		return ERROR_FAIL;
	x86_32->flush = 1; /* the next scan executes the queued batch */
	scan.out[0] = RDWRPDR;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	if (drscan(t, NULL, scan.out, PDR_SIZE) != ERROR_OK)
		return ERROR_FAIL;
	jtag_add_sleep(DELAY_SUBMITPIR);
	*regval = buf_get_u32(scan.out, 0, 32);
	if (cache) {
		/* mirror the freshly read hardware value into the reg cache */
		buf_set_u32(x86_32->cache->reg_list[reg].value, 0, 32, *regval);
		x86_32->cache->reg_list[reg].valid = true;
		x86_32->cache->reg_list[reg].dirty = false;
	}
	LOG_DEBUG("reg=%s, op=0x%016" PRIx64 ", val=0x%08" PRIx32,
			x86_32->cache->reg_list[reg].name,
			arch_info->op,
			*regval);
	return ERROR_OK;
}
/* write lakemont core shadow ram reg, update reg cache if needed
 *
 * When cache is non-zero, the value written comes from the reg cache
 * (regval is ignored) and the cache flags are reset afterwards.
 * JTAG scans are batched with flush disabled; the final PDR2SRAM
 * submission executes the whole queued sequence.
 */
static int write_hw_reg(struct target *t, int reg, uint32_t regval, uint8_t cache)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	struct lakemont_core_reg *arch_info;
	arch_info = x86_32->cache->reg_list[reg].arch_info;

	uint8_t reg_buf[4];
	if (cache)
		regval = buf_get_u32(x86_32->cache->reg_list[reg].value, 0, 32);
	buf_set_u32(reg_buf, 0, 32, regval);
	LOG_DEBUG("reg=%s, op=0x%016" PRIx64 ", val=0x%08" PRIx32,
			x86_32->cache->reg_list[reg].name,
			arch_info->op,
			regval);

	x86_32->flush = 0; /* don't flush scans till we have a batch */
	if (submit_reg_pir(t, reg) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_instruction_pir(t, SRAMACCESS) != ERROR_OK)
		return ERROR_FAIL;
	/* load the new value into the PDR, then store it to shadow SRAM */
	scan.out[0] = RDWRPDR;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	if (drscan(t, reg_buf, scan.out, PDR_SIZE) != ERROR_OK)
		return ERROR_FAIL;
	x86_32->flush = 1; /* PDR2SRAM executes the queued batch */
	if (submit_instruction_pir(t, PDR2SRAM) != ERROR_OK)
		return ERROR_FAIL;
	/* we are writing from the cache so ensure we reset flags */
	if (cache) {
		x86_32->cache->reg_list[reg].dirty = false;
		x86_32->cache->reg_list[reg].valid = false;
	}
	return ERROR_OK;
}
  658. static bool is_paging_enabled(struct target *t)
  659. {
  660. struct x86_32_common *x86_32 = target_to_x86_32(t);
  661. if (x86_32->pm_regs[I(CR0)] & CR0_PG)
  662. return true;
  663. else
  664. return false;
  665. }
  666. static uint8_t get_num_user_regs(struct target *t)
  667. {
  668. struct x86_32_common *x86_32 = target_to_x86_32(t);
  669. return x86_32->cache->num_regs;
  670. }
  671. /* value of the CR0.PG (paging enabled) bit influences memory reads/writes */
  672. static int disable_paging(struct target *t)
  673. {
  674. struct x86_32_common *x86_32 = target_to_x86_32(t);
  675. x86_32->pm_regs[I(CR0)] = x86_32->pm_regs[I(CR0)] & ~CR0_PG;
  676. int err = x86_32->write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0);
  677. if (err != ERROR_OK) {
  678. LOG_ERROR("%s error disabling paging", __func__);
  679. return err;
  680. }
  681. return err;
  682. }
  683. static int enable_paging(struct target *t)
  684. {
  685. struct x86_32_common *x86_32 = target_to_x86_32(t);
  686. x86_32->pm_regs[I(CR0)] = (x86_32->pm_regs[I(CR0)] | CR0_PG);
  687. int err = x86_32->write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0);
  688. if (err != ERROR_OK) {
  689. LOG_ERROR("%s error enabling paging", __func__);
  690. return err;
  691. }
  692. return err;
  693. }
  694. static bool sw_bpts_supported(struct target *t)
  695. {
  696. uint32_t tapstatus = get_tapstatus(t);
  697. if (tapstatus & TS_SBP_BIT)
  698. return true;
  699. else
  700. return false;
  701. }
  702. static int transaction_status(struct target *t)
  703. {
  704. uint32_t tapstatus = get_tapstatus(t);
  705. if ((TS_EN_PM_BIT | TS_PRDY_BIT) & tapstatus) {
  706. LOG_ERROR("%s transaction error tapstatus = 0x%08" PRIx32
  707. , __func__, tapstatus);
  708. return ERROR_FAIL;
  709. } else {
  710. return ERROR_OK;
  711. }
  712. }
  713. static int submit_instruction(struct target *t, int num)
  714. {
  715. int err = submit_instruction_pir(t, num);
  716. if (err != ERROR_OK) {
  717. LOG_ERROR("%s error submitting pir", __func__);
  718. return err;
  719. }
  720. return err;
  721. }
  722. static int submit_reg_pir(struct target *t, int num)
  723. {
  724. LOG_DEBUG("reg %s op=0x%016" PRIx64, regs[num].name, regs[num].op);
  725. int err = submit_pir(t, regs[num].op);
  726. if (err != ERROR_OK) {
  727. LOG_ERROR("%s error submitting pir", __func__);
  728. return err;
  729. }
  730. return err;
  731. }
  732. static int submit_instruction_pir(struct target *t, int num)
  733. {
  734. LOG_DEBUG("%s op=0x%016" PRIx64, instructions[num].name,
  735. instructions[num].op);
  736. int err = submit_pir(t, instructions[num].op);
  737. if (err != ERROR_OK) {
  738. LOG_ERROR("%s error submitting pir", __func__);
  739. return err;
  740. }
  741. return err;
  742. }
  743. /*
  744. * PIR (Probe Mode Instruction Register), SUBMITPIR is an "IR only" TAP
  745. * command; there is no corresponding data register
  746. */
/* Write a 64-bit probe mode instruction into the PIR and trigger it.
 * Queues WRPIR (IR) + the 64-bit opcode (DR), then SUBMITPIR (IR only).
 * The caller's flush setting is suppressed for the first two scans and
 * restored for the final one so the whole sequence can be batched. */
static int submit_pir(struct target *t, uint64_t op)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	uint8_t op_buf[8];
	buf_set_u64(op_buf, 0, 64, op);
	/* remember the flush policy; only the SUBMITPIR scan honours it */
	int flush = x86_32->flush;
	x86_32->flush = 0;
	scan.out[0] = WRPIR;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	if (drscan(t, op_buf, scan.out, PIR_SIZE) != ERROR_OK)
		return ERROR_FAIL;
	scan.out[0] = SUBMITPIR;
	x86_32->flush = flush;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	/* give the core time to consume the submitted instruction */
	jtag_add_sleep(DELAY_SUBMITPIR);
	return ERROR_OK;
}
/* Target init: build the register cache and assume the core is running. */
int lakemont_init_target(struct command_context *cmd_ctx, struct target *t)
{
	lakemont_build_reg_cache(t);
	t->state = TARGET_RUNNING;
	t->debug_reason = DBG_REASON_NOTHALTED;
	return ERROR_OK;
}
  773. int lakemont_init_arch_info(struct target *t, struct x86_32_common *x86_32)
  774. {
  775. x86_32->submit_instruction = submit_instruction;
  776. x86_32->transaction_status = transaction_status;
  777. x86_32->read_hw_reg = read_hw_reg;
  778. x86_32->write_hw_reg = write_hw_reg;
  779. x86_32->sw_bpts_supported = sw_bpts_supported;
  780. x86_32->get_num_user_regs = get_num_user_regs;
  781. x86_32->is_paging_enabled = is_paging_enabled;
  782. x86_32->disable_paging = disable_paging;
  783. x86_32->enable_paging = enable_paging;
  784. return ERROR_OK;
  785. }
/* Periodic poll: detect probe mode entry and classify the halt reason
 * (hardware breakpoint/watchpoint via DR6/DR7, software breakpoint, or
 * unknown). Fires TARGET_EVENT_HALTED when a halt is recognized. */
int lakemont_poll(struct target *t)
{
	/* LMT1 PMCR register currently allows code breakpoints, data breakpoints,
	 * single stepping and shutdowns to be redirected to PM but does not allow
	 * redirecting into PM as a result of SMM enter and SMM exit
	 */
	uint32_t ts = get_tapstatus(t);
	if (ts == 0xFFFFFFFF && t->state != TARGET_DEBUG_RUNNING) {
		/* something is wrong here */
		LOG_ERROR("tapstatus invalid - scan_chain serialization or locked JTAG access issues");
		/* TODO: Give a hint that unlocking is wrong or maybe a
		 * 'jtag arp_init' helps
		 */
		t->state = TARGET_DEBUG_RUNNING;
		return ERROR_OK;
	}
	/* PM bit clear while we believe the target is halted: resync state */
	if (t->state == TARGET_HALTED && (!(ts & TS_PM_BIT))) {
		LOG_INFO("target running for unknown reason");
		t->state = TARGET_RUNNING;
	}
	if (t->state == TARGET_RUNNING &&
		t->state != TARGET_DEBUG_RUNNING) {
		/* entered probe mode via a PMCR redirect: halt and classify */
		if ((ts & TS_PM_BIT) && (ts & TS_PMCR_BIT)) {
			LOG_DEBUG("redirect to PM, tapstatus=0x%08" PRIx32, get_tapstatus(t));
			t->state = TARGET_DEBUG_RUNNING;
			if (save_context(t) != ERROR_OK)
				return ERROR_FAIL;
			if (halt_prep(t) != ERROR_OK)
				return ERROR_FAIL;
			t->state = TARGET_HALTED;
			t->debug_reason = DBG_REASON_UNDEFINED;
			struct x86_32_common *x86_32 = target_to_x86_32(t);
			uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
			uint32_t dr6 = buf_get_u32(x86_32->cache->reg_list[DR6].value, 0, 32);
			/* find which, if any, of the four debug registers triggered */
			uint32_t hwbreakpoint = (uint32_t)-1;
			if (dr6 & DR6_BRKDETECT_0)
				hwbreakpoint = 0;
			if (dr6 & DR6_BRKDETECT_1)
				hwbreakpoint = 1;
			if (dr6 & DR6_BRKDETECT_2)
				hwbreakpoint = 2;
			if (dr6 & DR6_BRKDETECT_3)
				hwbreakpoint = 3;
			if (hwbreakpoint != (uint32_t)-1) {
				/* DR7 R/W field distinguishes execute bp from watchpoint */
				uint32_t dr7 = buf_get_u32(x86_32->cache->reg_list[DR7].value, 0, 32);
				uint32_t type = dr7 & (0x03 << (DR7_RW_SHIFT + hwbreakpoint*DR7_RW_LEN_SIZE));
				if (type == DR7_BP_EXECUTE) {
					LOG_USER("hit hardware breakpoint (hwreg=%" PRIu32 ") at 0x%08" PRIx32, hwbreakpoint, eip);
				} else {
					/* watchpoint: report the DRx address that matched */
					uint32_t address = 0;
					switch (hwbreakpoint) {
					default:
					case 0:
						address = buf_get_u32(x86_32->cache->reg_list[DR0].value, 0, 32);
					break;
					case 1:
						address = buf_get_u32(x86_32->cache->reg_list[DR1].value, 0, 32);
					break;
					case 2:
						address = buf_get_u32(x86_32->cache->reg_list[DR2].value, 0, 32);
					break;
					case 3:
						address = buf_get_u32(x86_32->cache->reg_list[DR3].value, 0, 32);
					break;
					}
					LOG_USER("hit '%s' watchpoint for 0x%08" PRIx32 " (hwreg=%" PRIu32 ") at 0x%08" PRIx32,
							type == DR7_BP_WRITE ? "write" : "access", address,
							hwbreakpoint, eip);
				}
				t->debug_reason = DBG_REASON_BREAKPOINT;
			} else {
				/* Check if the target hit a software breakpoint.
				 * ! Watch out: EIP is currently pointing after the breakpoint opcode
				 */
				struct breakpoint *bp = NULL;
				bp = breakpoint_find(t, eip-1);
				if (bp) {
					t->debug_reason = DBG_REASON_BREAKPOINT;
					if (bp->type == BKPT_SOFT) {
						/* The EIP is now pointing the next byte after the
						 * breakpoint instruction. This needs to be corrected.
						 */
						buf_set_u32(x86_32->cache->reg_list[EIP].value, 0, 32, eip-1);
						x86_32->cache->reg_list[EIP].dirty = true;
						x86_32->cache->reg_list[EIP].valid = true;
						LOG_USER("hit software breakpoint at 0x%08" PRIx32, eip-1);
					} else {
						/* it's not a hardware breakpoint (checked already in DR6 state)
						 * and it's also not a software breakpoint ...
						 */
						LOG_USER("hit unknown breakpoint at 0x%08" PRIx32, eip);
					}
				} else {
					/* There is also the case that we hit an breakpoint instruction,
					 * which was not set by us. This needs to be handled be the
					 * application that introduced the breakpoint.
					 */
					LOG_USER("unknown break reason at 0x%08" PRIx32, eip);
				}
			}
			return target_call_event_callbacks(t, TARGET_EVENT_HALTED);
		}
	}
	return ERROR_OK;
}
  891. int lakemont_arch_state(struct target *t)
  892. {
  893. struct x86_32_common *x86_32 = target_to_x86_32(t);
  894. LOG_USER("target halted due to %s at 0x%08" PRIx32 " in %s mode",
  895. debug_reason_name(t),
  896. buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32),
  897. (buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32) & CR0_PE) ? "protected" : "real");
  898. return ERROR_OK;
  899. }
  900. int lakemont_halt(struct target *t)
  901. {
  902. if (t->state == TARGET_RUNNING) {
  903. t->debug_reason = DBG_REASON_DBGRQ;
  904. if (do_halt(t) != ERROR_OK)
  905. return ERROR_FAIL;
  906. return ERROR_OK;
  907. } else {
  908. LOG_ERROR("%s target not running", __func__);
  909. return ERROR_FAIL;
  910. }
  911. }
/* Resume execution. If EIP sits on a breakpoint, single-step over it
 * first; if any breakpoint/watchpoint is active, arm PMCR so the hit
 * redirects the core back into probe mode.
 * NOTE(review): current/address/handle_breakpoints/debug_execution are
 * not used here — resume is always from the current context. */
int lakemont_resume(struct target *t, int current, target_addr_t address,
			int handle_breakpoints, int debug_execution)
{
	struct breakpoint *bp = NULL;
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	if (check_not_halted(t))
		return ERROR_TARGET_NOT_HALTED;
	/* TODO lakemont_enable_breakpoints(t); */
	if (t->state == TARGET_HALTED) {
		/* running away for a software breakpoint needs some special handling */
		uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
		bp = breakpoint_find(t, eip);
		if (bp /*&& bp->type == BKPT_SOFT*/) {
			/* the step will step over the breakpoint */
			if (lakemont_step(t, 0, 0, 1) != ERROR_OK) {
				LOG_ERROR("%s stepping over a software breakpoint at 0x%08" PRIx32 " "
						"failed to resume the target", __func__, eip);
				return ERROR_FAIL;
			}
		}
		/* if breakpoints are enabled, we need to redirect these into probe mode */
		struct breakpoint *activeswbp = t->breakpoints;
		while (activeswbp && activeswbp->set == 0)
			activeswbp = activeswbp->next;
		struct watchpoint *activehwbp = t->watchpoints;
		while (activehwbp && activehwbp->set == 0)
			activehwbp = activehwbp->next;
		if (activeswbp || activehwbp)
			buf_set_u32(x86_32->cache->reg_list[PMCR].value, 0, 32, 1);
		if (do_resume(t) != ERROR_OK)
			return ERROR_FAIL;
	} else {
		LOG_USER("target not halted");
		return ERROR_FAIL;
	}
	return ERROR_OK;
}
/* Single-step one instruction: temporarily remove a breakpoint at EIP,
 * set EFLAGS.TF/RF and PMCR, leave probe mode, then wait for re-entry
 * and re-apply the breakpoint.
 * NOTE(review): current/address/handle_breakpoints are unused here. */
int lakemont_step(struct target *t, int current,
			target_addr_t address, int handle_breakpoints)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	uint32_t eflags = buf_get_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32);
	uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
	uint32_t pmcr = buf_get_u32(x86_32->cache->reg_list[PMCR].value, 0, 32);
	struct breakpoint *bp = NULL;
	int retval = ERROR_OK;
	uint32_t tapstatus = 0;
	if (check_not_halted(t))
		return ERROR_TARGET_NOT_HALTED;
	bp = breakpoint_find(t, eip);
	if (retval == ERROR_OK && bp/*&& bp->type == BKPT_SOFT*/) {
		/* TODO: This should only be done for software breakpoints.
		 * Stepping from hardware breakpoints should be possible with the resume flag
		 * Needs testing.
		 */
		retval = x86_32_common_remove_breakpoint(t, bp);
	}
	/* Set EFLAGS[TF] and PMCR[IR], exit pm and wait for PRDY# */
	LOG_DEBUG("modifying PMCR = 0x%08" PRIx32 " and EFLAGS = 0x%08" PRIx32, pmcr, eflags);
	/* TF traps after one instruction; RF suppresses a re-trigger of a
	 * code breakpoint on the instruction being stepped */
	eflags = eflags | (EFLAGS_TF | EFLAGS_RF);
	buf_set_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32, eflags);
	buf_set_u32(x86_32->cache->reg_list[PMCR].value, 0, 32, 1);
	LOG_DEBUG("EFLAGS [TF] [RF] bits set=0x%08" PRIx32 ", PMCR=0x%08" PRIx32 ", EIP=0x%08" PRIx32,
			eflags, pmcr, eip);
	tapstatus = get_tapstatus(t);
	t->debug_reason = DBG_REASON_SINGLESTEP;
	t->state = TARGET_DEBUG_RUNNING;
	/* write back the modified register cache before leaving probe mode */
	if (restore_context(t) != ERROR_OK)
		return ERROR_FAIL;
	if (exit_probemode(t) != ERROR_OK)
		return ERROR_FAIL;
	target_call_event_callbacks(t, TARGET_EVENT_RESUMED);
	tapstatus = get_tapstatus(t);
	if (tapstatus & (TS_PM_BIT | TS_EN_PM_BIT | TS_PRDY_BIT | TS_PMCR_BIT)) {
		/* target has stopped */
		if (save_context(t) != ERROR_OK)
			return ERROR_FAIL;
		if (halt_prep(t) != ERROR_OK)
			return ERROR_FAIL;
		t->state = TARGET_HALTED;
		LOG_USER("step done from EIP 0x%08" PRIx32 " to 0x%08" PRIx32, eip,
				buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32));
		target_call_event_callbacks(t, TARGET_EVENT_HALTED);
	} else {
		/* target didn't stop
		 * I hope the poll() will catch it, but the deleted breakpoint is gone
		 */
		LOG_ERROR("%s target didn't stop after executing a single step", __func__);
		t->state = TARGET_RUNNING;
		return ERROR_FAIL;
	}
	/* try to re-apply the breakpoint, even of step failed
	 * TODO: When a bp was set, we should try to stop the target - fix the return above
	 */
	if (bp/*&& bp->type == BKPT_SOFT*/) {
		/* TODO: This should only be done for software breakpoints.
		 * Stepping from hardware breakpoints should be possible with the resume flag
		 * Needs testing.
		 */
		retval = x86_32_common_add_breakpoint(t, bp);
	}
	return retval;
}
/* Arm a reset-break: program the CLTAP (second TAP in the chain) so the
 * core raises PREQ on reset, i.e. halts into probe mode right after a
 * 0xcf9 reset. Temporarily retargets scans at the CLTAP and restores
 * x86_32->curr_tap on every exit path. */
static int lakemont_reset_break(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	struct jtag_tap *saved_tap = x86_32->curr_tap;
	struct scan_field *fields = &scan.field;
	int retval = ERROR_OK;
	LOG_DEBUG("issuing port 0xcf9 reset");
	/* prepare resetbreak setting the proper bits in CLTAPC_CPU_VPREQ */
	x86_32->curr_tap = jtag_tap_by_position(1);
	if (!x86_32->curr_tap) {
		x86_32->curr_tap = saved_tap;
		LOG_ERROR("%s could not select quark_x10xx.cltap", __func__);
		return ERROR_FAIL;
	}
	fields->in_value = NULL;
	fields->num_bits = 8;
	/* select CLTAPC_CPU_VPREQ instruction*/
	scan.out[0] = 0x51;
	fields->out_value = ((uint8_t *)scan.out);
	jtag_add_ir_scan(x86_32->curr_tap, fields, TAP_IDLE);
	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		x86_32->curr_tap = saved_tap;
		LOG_ERROR("%s irscan failed to execute queue", __func__);
		return retval;
	}
	/* set enable_preq_on_reset & enable_preq_on_reset2 bits*/
	scan.out[0] = 0x06;
	fields->out_value = ((uint8_t *)scan.out);
	jtag_add_dr_scan(x86_32->curr_tap, 1, fields, TAP_IDLE);
	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("%s drscan failed to execute queue", __func__);
		x86_32->curr_tap = saved_tap;
		return retval;
	}
	/* restore current tap */
	x86_32->curr_tap = saved_tap;
	return ERROR_OK;
}
  1055. /*
  1056. * If we ever get an adapter with support for PREQ# and PRDY#, we should
  1057. * update this function to add support for using those two signals.
  1058. *
  1059. * Meanwhile, we're assuming that we only support reset break.
  1060. */
/* Assert reset by writing 0x6 to I/O port 0xcf9. The target must be
 * halted to perform the I/O write; if we forced that halt and no halt
 * after reset was requested, resume again afterwards. When reset_halt
 * is requested, a reset-break is armed first so the core re-enters
 * probe mode immediately after the reset. */
int lakemont_reset_assert(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	/* write 0x6 to I/O port 0xcf9 to cause the reset */
	uint8_t cf9_reset_val = 0x6;
	int retval;
	LOG_DEBUG(" ");
	if (t->state != TARGET_HALTED) {
		LOG_DEBUG("target must be halted first");
		retval = lakemont_halt(t);
		if (retval != ERROR_OK) {
			LOG_ERROR("could not halt target");
			return retval;
		}
		/* remember we halted only for the sake of this reset */
		x86_32->forced_halt_for_reset = true;
	}
	if (t->reset_halt) {
		retval = lakemont_reset_break(t);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = x86_32_common_write_io(t, 0xcf9, BYTE, &cf9_reset_val);
	if (retval != ERROR_OK) {
		LOG_ERROR("could not write to port 0xcf9");
		return retval;
	}
	if (!t->reset_halt && x86_32->forced_halt_for_reset) {
		x86_32->forced_halt_for_reset = false;
		retval = lakemont_resume(t, true, 0x00, false, true);
		if (retval != ERROR_OK)
			return retval;
	}
	/* remove breakpoints and watchpoints */
	x86_32_common_reset_breakpoints_watchpoints(t);
	return ERROR_OK;
}
/* Deassert reset: poll to pick up the post-reset state and, when a halt
 * after reset was requested, refresh the probemode context; if the core
 * ran anyway, try to halt it again. */
int lakemont_reset_deassert(struct target *t)
{
	int retval;
	LOG_DEBUG(" ");
	if (target_was_examined(t)) {
		retval = lakemont_poll(t);
		if (retval != ERROR_OK)
			return retval;
	}
	if (t->reset_halt) {
		/* entered PM after reset, update the state */
		retval = lakemont_update_after_probemode_entry(t);
		if (retval != ERROR_OK) {
			LOG_ERROR("could not update state after probemode entry");
			return retval;
		}
		if (t->state != TARGET_HALTED) {
			/* reset-break did not hold the core; best-effort halt */
			LOG_WARNING("%s: ran after reset and before halt ...",
					target_name(t));
			if (target_was_examined(t)) {
				retval = target_halt(t);
				if (retval != ERROR_OK)
					return retval;
			} else {
				t->state = TARGET_UNKNOWN;
			}
		}
	}
	return ERROR_OK;
}