You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 
 
 

1112 lines
38 KiB

  1. /*
  2. * Copyright(c) 2013 Intel Corporation.
  3. *
  4. * Adrian Burns (adrian.burns@intel.com)
  5. * Thomas Faust (thomas.faust@intel.com)
  6. * Ivan De Cesaris (ivan.de.cesaris@intel.com)
  7. * Julien Carreno (julien.carreno@intel.com)
  8. * Jeffrey Maxwell (jeffrey.r.maxwell@intel.com)
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of version 2 of the GNU General Public License as
  12. * published by the Free Software Foundation.
  13. *
  14. * This program is distributed in the hope that it will be useful, but
  15. * WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  22. *
  23. * Contact Information:
  24. * Intel Corporation
  25. */
  26. /*
  27. * @file
  28. * This implements the probemode operations for Lakemont 1 (LMT1).
  29. */
  30. #ifdef HAVE_CONFIG_H
  31. #include "config.h"
  32. #endif
  33. #include <helper/log.h>
  34. #include "target.h"
  35. #include "target_type.h"
  36. #include "lakemont.h"
  37. #include "register.h"
  38. #include "breakpoints.h"
  39. #include "x86_32_common.h"
/* Forward declarations for the file-local helpers below. */
static int irscan(struct target *t, uint8_t *out,
			uint8_t *in, uint8_t ir_len);
static int drscan(struct target *t, uint8_t *out, uint8_t *in, uint8_t len);
static int save_context(struct target *target);
static int restore_context(struct target *target);
static uint32_t get_tapstatus(struct target *t);
static int enter_probemode(struct target *t);
static int exit_probemode(struct target *t);
static int halt_prep(struct target *t);
static int do_halt(struct target *t);
static int do_resume(struct target *t);
static int read_all_core_hw_regs(struct target *t);
static int write_all_core_hw_regs(struct target *t);
static int read_hw_reg(struct target *t,
			int reg, uint32_t *regval, uint8_t cache);
static int write_hw_reg(struct target *t,
			int reg, uint32_t regval, uint8_t cache);
static struct reg_cache *lakemont_build_reg_cache
			(struct target *target);
static int submit_reg_pir(struct target *t, int num);
static int submit_instruction_pir(struct target *t, int num);
static int submit_pir(struct target *t, uint64_t op);
static int lakemont_get_core_reg(struct reg *reg);
static int lakemont_set_core_reg(struct reg *reg, uint8_t *buf);

/* Shared scratch buffer for all IR/DR scans in this file (single-threaded
 * use is assumed by the JTAG queueing model).
 */
static struct scan_blk scan;
  65. /* registers and opcodes for register access, pm_idx is used to identify the
  66. * registers that are modified for lakemont probemode specific operations
  67. */
/* Register description table: one entry per GDB-visible register.
 * - op: the 64-bit PIR opcode used to address this register in the core
 *   shadow RAM (0x0 for registers with no JTAG access);
 * - pm_idx: slot in x86_32_common.pm_regs for registers the probemode
 *   entry/exit code modifies, NOT_PMREG otherwise, NOT_AVAIL_REG when the
 *   register cannot be accessed via JTAG at all;
 * - group/feature: names reported to GDB via the target description.
 */
static const struct {
	uint8_t id;
	const char *name;
	uint64_t op;
	uint8_t pm_idx;
	unsigned bits;
	enum reg_type type;
	const char *group;
	const char *feature;
} regs[] = {
	/* general purpose registers */
	{ EAX, "eax", 0x000000D01D660000, 0, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ECX, "ecx", 0x000000501D660000, 1, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ EDX, "edx", 0x000000901D660000, 2, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ EBX, "ebx", 0x000000101D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ESP, "esp", 0x000000E01D660000, NOT_PMREG, 32, REG_TYPE_DATA_PTR, "general", "org.gnu.gdb.i386.core" },
	{ EBP, "ebp", 0x000000601D660000, NOT_PMREG, 32, REG_TYPE_DATA_PTR, "general", "org.gnu.gdb.i386.core" },
	{ ESI, "esi", 0x000000A01D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ EDI, "edi", 0x000000201D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	/* instruction pointer & flags */
	{ EIP, "eip", 0x000000C01D660000, 3, 32, REG_TYPE_CODE_PTR, "general", "org.gnu.gdb.i386.core" },
	{ EFLAGS, "eflags", 0x000000401D660000, 4, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	/* segment registers */
	{ CS, "cs", 0x000000281D660000, 5, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ SS, "ss", 0x000000C81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ DS, "ds", 0x000000481D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ES, "es", 0x000000A81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FS, "fs", 0x000000881D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ GS, "gs", 0x000000081D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	/* floating point unit registers - not accessible via JTAG - here to satisfy GDB */
	{ ST0, "st0", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST1, "st1", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST2, "st2", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST3, "st3", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST4, "st4", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST5, "st5", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST6, "st6", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ ST7, "st7", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FCTRL, "fctrl", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FSTAT, "fstat", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FTAG, "ftag", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FISEG, "fiseg", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FIOFF, "fioff", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FOSEG, "foseg", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FOOFF, "fooff", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	{ FOP, "fop", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
	/* control registers */
	{ CR0, "cr0", 0x000000001D660000, 6, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ CR2, "cr2", 0x000000BC1D660000, 7, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ CR3, "cr3", 0x000000801D660000, 8, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ CR4, "cr4", 0x0000002C1D660000, 9, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	/* debug registers */
	{ DR0, "dr0", 0x0000007C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DR1, "dr1", 0x000000FC1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DR2, "dr2", 0x000000021D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DR3, "dr3", 0x000000821D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DR6, "dr6", 0x000000301D660000, 10, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DR7, "dr7", 0x000000B01D660000, 11, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	/* descriptor tables */
	{ IDTB, "idtbase", 0x000000581D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ IDTL, "idtlimit", 0x000000D81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ IDTAR, "idtar", 0x000000981D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GDTB, "gdtbase", 0x000000B81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GDTL, "gdtlimit", 0x000000781D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GDTAR, "gdtar", 0x000000381D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ TR, "tr", 0x000000701D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ LDTR, "ldtr", 0x000000F01D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ LDTB, "ldbase", 0x000000041D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ LDTL, "ldlimit", 0x000000841D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ LDTAR, "ldtar", 0x000000F81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	/* segment registers */
	{ CSB, "csbase", 0x000000F41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ CSL, "cslimit", 0x0000000C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ CSAR, "csar", 0x000000741D660000, 12, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DSB, "dsbase", 0x000000941D660000, 13, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DSL, "dslimit", 0x000000541D660000, 14, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ DSAR, "dsar", 0x000000141D660000, 15, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ ESB, "esbase", 0x0000004C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ ESL, "eslimit", 0x000000CC1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ ESAR, "esar", 0x0000008C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ FSB, "fsbase", 0x000000641D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ FSL, "fslimit", 0x000000E41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ FSAR, "fsar", 0x000000A41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GSB, "gsbase", 0x000000C41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GSL, "gslimit", 0x000000241D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ GSAR, "gsar", 0x000000441D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ SSB, "ssbase", 0x000000341D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ SSL, "sslimit", 0x000000B41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ SSAR, "ssar", 0x000000D41D660000, 16, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ TSSB, "tssbase", 0x000000E81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ TSSL, "tsslimit", 0x000000181D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	{ TSSAR, "tssar", 0x000000681D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
	/* probemode control register */
	{ PMCR, "pmcr", 0x000000421D660000, 17, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
};
/* Probe-mode instruction table: 64-bit PIR opcodes submitted to the core
 * for memory/IO access and cache maintenance while halted (0x09 is a
 * NOP-style filler byte in the shorter encodings).
 */
static const struct {
	uint8_t id;
	const char *name;
	uint64_t op;
} instructions[] = {
	/* memory read/write */
	{ MEMRDB32, "MEMRDB32", 0x0909090909090851 },
	{ MEMRDB16, "MEMRDB16", 0x09090909090851E6 },
	{ MEMRDH32, "MEMRDH32", 0x090909090908D166 },
	{ MEMRDH16, "MEMRDH16", 0x090909090908D1E6 },
	{ MEMRDW32, "MEMRDW32", 0x09090909090908D1 },
	{ MEMRDW16, "MEMRDW16", 0x0909090908D1E666 },
	{ MEMWRB32, "MEMWRB32", 0x0909090909090811 },
	{ MEMWRB16, "MEMWRB16", 0x09090909090811E6 },
	{ MEMWRH32, "MEMWRH32", 0x0909090909089166 },
	{ MEMWRH16, "MEMWRH16", 0x09090909090891E6 },
	{ MEMWRW32, "MEMWRW32", 0x0909090909090891 },
	{ MEMWRW16, "MEMWRW16", 0x090909090891E666 },
	/* IO read/write */
	{ IORDB32, "IORDB32", 0x0909090909090937 },
	{ IORDB16, "IORDB16", 0x09090909090937E6 },
	{ IORDH32, "IORDH32", 0x090909090909B766 },
	{ IORDH16, "IORDH16", 0x090909090909B7E6 },
	{ IORDW32, "IORDW32", 0x09090909090909B7 },
	{ IORDW16, "IORDW16", 0x0909090909B7E666 },
	{ IOWRB32, "IOWRB32", 0x0909090909090977 },
	{ IOWRB16, "IOWRB16", 0x09090909090977E6 },
	{ IOWRH32, "IOWRH32", 0x090909090909F766 },
	{ IOWRH16, "IOWRH16", 0x090909090909F7E6 },
	{ IOWRW32, "IOWRW32", 0x09090909090909F7 },
	{ IOWRW16, "IOWRW16", 0x0909090909F7E666 },
	/* lakemont1 core shadow ram access opcodes */
	{ SRAMACCESS, "SRAMACCESS", 0x0000000E9D660000 },
	{ SRAM2PDR, "SRAM2PDR", 0x4CF0000000000000 },
	{ PDR2SRAM, "PDR2SRAM", 0x0CF0000000000000 },
	{ WBINVD, "WBINVD", 0x09090909090990F0 },
};
  200. bool check_not_halted(const struct target *t)
  201. {
  202. bool halted = t->state == TARGET_HALTED;
  203. if (!halted)
  204. LOG_ERROR("target running, halt it first");
  205. return !halted;
  206. }
  207. static int irscan(struct target *t, uint8_t *out,
  208. uint8_t *in, uint8_t ir_len)
  209. {
  210. int retval = ERROR_OK;
  211. struct x86_32_common *x86_32 = target_to_x86_32(t);
  212. if (NULL == t->tap) {
  213. retval = ERROR_FAIL;
  214. LOG_ERROR("%s invalid target tap", __func__);
  215. return retval;
  216. }
  217. if (ir_len != t->tap->ir_length) {
  218. retval = ERROR_FAIL;
  219. if (t->tap->enabled)
  220. LOG_ERROR("%s tap enabled but tap irlen=%d",
  221. __func__, t->tap->ir_length);
  222. else
  223. LOG_ERROR("%s tap not enabled and irlen=%d",
  224. __func__, t->tap->ir_length);
  225. return retval;
  226. }
  227. struct scan_field *fields = &scan.field;
  228. fields->num_bits = ir_len;
  229. fields->out_value = out;
  230. fields->in_value = in;
  231. jtag_add_ir_scan(x86_32->curr_tap, fields, TAP_IDLE);
  232. if (x86_32->flush) {
  233. retval = jtag_execute_queue();
  234. if (retval != ERROR_OK)
  235. LOG_ERROR("%s failed to execute queue", __func__);
  236. }
  237. return retval;
  238. }
  239. static int drscan(struct target *t, uint8_t *out, uint8_t *in, uint8_t len)
  240. {
  241. int retval = ERROR_OK;
  242. uint64_t data = 0;
  243. struct x86_32_common *x86_32 = target_to_x86_32(t);
  244. if (NULL == t->tap) {
  245. retval = ERROR_FAIL;
  246. LOG_ERROR("%s invalid target tap", __func__);
  247. return retval;
  248. }
  249. if (len > MAX_SCAN_SIZE || 0 == len) {
  250. retval = ERROR_FAIL;
  251. LOG_ERROR("%s data len is %d bits, max is %d bits",
  252. __func__, len, MAX_SCAN_SIZE);
  253. return retval;
  254. }
  255. struct scan_field *fields = &scan.field;
  256. fields->out_value = out;
  257. fields->in_value = in;
  258. fields->num_bits = len;
  259. jtag_add_dr_scan(x86_32->curr_tap, 1, fields, TAP_IDLE);
  260. if (x86_32->flush) {
  261. retval = jtag_execute_queue();
  262. if (retval != ERROR_OK) {
  263. LOG_ERROR("%s drscan failed to execute queue", __func__);
  264. return retval;
  265. }
  266. }
  267. if (in != NULL) {
  268. if (len >= 8) {
  269. for (int n = (len / 8) - 1 ; n >= 0; n--)
  270. data = (data << 8) + *(in+n);
  271. } else
  272. LOG_DEBUG("dr in 0x%02" PRIx8, *in);
  273. } else {
  274. LOG_ERROR("%s no drscan data", __func__);
  275. retval = ERROR_FAIL;
  276. }
  277. return retval;
  278. }
  279. static int save_context(struct target *t)
  280. {
  281. int err;
  282. /* read core registers from lakemont sram */
  283. err = read_all_core_hw_regs(t);
  284. if (err != ERROR_OK) {
  285. LOG_ERROR("%s error reading regs", __func__);
  286. return err;
  287. }
  288. return ERROR_OK;
  289. }
  290. static int restore_context(struct target *t)
  291. {
  292. int err = ERROR_OK;
  293. uint32_t i;
  294. struct x86_32_common *x86_32 = target_to_x86_32(t);
  295. /* write core regs into the core PM SRAM from the reg_cache */
  296. err = write_all_core_hw_regs(t);
  297. if (err != ERROR_OK) {
  298. LOG_ERROR("%s error writing regs", __func__);
  299. return err;
  300. }
  301. for (i = 0; i < (x86_32->cache->num_regs); i++) {
  302. x86_32->cache->reg_list[i].dirty = 0;
  303. x86_32->cache->reg_list[i].valid = 0;
  304. }
  305. return err;
  306. }
/*
 * We keep reg_cache in sync with the hardware at halt/resume time; we avoid
 * writing to real hardware here because pm_regs reflects the hardware
 * while we are halted, and reg_cache then syncs with the hardware on resume.
 * TODO - for "reg eip force" to work, it assumes get/set reads and writes
 * from hardware; there may be other reasons too, because other openocd
 * targets generally read/write from hardware in get/set - watch this!
 */
  315. static int lakemont_get_core_reg(struct reg *reg)
  316. {
  317. int retval = ERROR_OK;
  318. struct lakemont_core_reg *lakemont_reg = reg->arch_info;
  319. struct target *t = lakemont_reg->target;
  320. if (check_not_halted(t))
  321. return ERROR_TARGET_NOT_HALTED;
  322. LOG_DEBUG("reg=%s, value=%08" PRIx32, reg->name,
  323. buf_get_u32(reg->value, 0, 32));
  324. return retval;
  325. }
  326. static int lakemont_set_core_reg(struct reg *reg, uint8_t *buf)
  327. {
  328. struct lakemont_core_reg *lakemont_reg = reg->arch_info;
  329. struct target *t = lakemont_reg->target;
  330. uint32_t value = buf_get_u32(buf, 0, 32);
  331. LOG_DEBUG("reg=%s, newval=%08" PRIx32, reg->name, value);
  332. if (check_not_halted(t))
  333. return ERROR_TARGET_NOT_HALTED;
  334. buf_set_u32(reg->value, 0, 32, value);
  335. reg->dirty = 1;
  336. reg->valid = 1;
  337. return ERROR_OK;
  338. }
/* Accessors invoked by the register framework when the reg_cache does not
 * hold a "valid" value for an individual register (e.g. "reg eip");
 * they are not used for the whole-block "reg" command.
 */
static const struct reg_arch_type lakemont_reg_type = {
	.get = lakemont_get_core_reg,
	.set = lakemont_set_core_reg,
};
/* Allocate and register the Lakemont register cache for target t.
 * One struct reg per entry in regs[]; each reg gets its own 4-byte value
 * buffer, feature descriptor and data type for the GDB target description.
 * Returns the new cache (also linked into t->reg_cache and x86_32->cache),
 * or NULL on out-of-memory.
 * NOTE(review): per-reg calloc() results for value/feature/reg_data_type
 * are only partially checked - failures are logged but the cache is still
 * returned; presumably acceptable because OOM here is considered fatal
 * elsewhere - confirm.
 */
struct reg_cache *lakemont_build_reg_cache(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	int num_regs = ARRAY_SIZE(regs);
	struct reg_cache **cache_p = register_get_last_cache_p(&t->reg_cache);
	struct reg_cache *cache = malloc(sizeof(struct reg_cache));
	struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
	struct lakemont_core_reg *arch_info = malloc(sizeof(struct lakemont_core_reg) * num_regs);
	struct reg_feature *feature;
	int i;

	/* all three top-level allocations must have succeeded */
	if (cache == NULL || reg_list == NULL || arch_info == NULL) {
		free(cache);
		free(reg_list);
		free(arch_info);
		LOG_ERROR("%s out of memory", __func__);
		return NULL;
	}

	/* Build the process context cache */
	cache->name = "lakemont registers";
	cache->next = NULL;
	cache->reg_list = reg_list;
	cache->num_regs = num_regs;
	(*cache_p) = cache;
	x86_32->cache = cache;

	for (i = 0; i < num_regs; i++) {
		arch_info[i].target = t;
		arch_info[i].x86_32_common = x86_32;
		arch_info[i].op = regs[i].op;
		arch_info[i].pm_idx = regs[i].pm_idx;
		reg_list[i].name = regs[i].name;
		reg_list[i].size = 32;
		reg_list[i].value = calloc(1, 4);
		reg_list[i].dirty = 0;
		reg_list[i].valid = 0;
		reg_list[i].type = &lakemont_reg_type;
		reg_list[i].arch_info = &arch_info[i];
		reg_list[i].group = regs[i].group;
		reg_list[i].number = i;
		reg_list[i].exist = true;
		reg_list[i].caller_save = true; /* gdb defaults to true */
		/* per-register feature name for the GDB target description */
		feature = calloc(1, sizeof(struct reg_feature));
		if (feature) {
			feature->name = regs[i].feature;
			reg_list[i].feature = feature;
		} else
			LOG_ERROR("%s unable to allocate feature list", __func__);
		reg_list[i].reg_data_type = calloc(1, sizeof(struct reg_data_type));
		if (reg_list[i].reg_data_type)
			reg_list[i].reg_data_type->type = regs[i].type;
		else
			LOG_ERROR("%s unable to allocate reg type list", __func__);
	}
	return cache;
}
  400. static uint32_t get_tapstatus(struct target *t)
  401. {
  402. scan.out[0] = TAPSTATUS;
  403. if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
  404. return 0;
  405. if (drscan(t, NULL, scan.out, TS_SIZE) != ERROR_OK)
  406. return 0;
  407. return buf_get_u32(scan.out, 0, 32);
  408. }
  409. static int enter_probemode(struct target *t)
  410. {
  411. uint32_t tapstatus = 0;
  412. tapstatus = get_tapstatus(t);
  413. LOG_DEBUG("TS before PM enter = %08" PRIx32, tapstatus);
  414. if (tapstatus & TS_PM_BIT) {
  415. LOG_DEBUG("core already in probemode");
  416. return ERROR_OK;
  417. }
  418. scan.out[0] = PROBEMODE;
  419. if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
  420. return ERROR_FAIL;
  421. scan.out[0] = 1;
  422. if (drscan(t, scan.out, scan.in, 1) != ERROR_OK)
  423. return ERROR_FAIL;
  424. tapstatus = get_tapstatus(t);
  425. LOG_DEBUG("TS after PM enter = %08" PRIx32, tapstatus);
  426. if ((tapstatus & TS_PM_BIT) && (!(tapstatus & TS_EN_PM_BIT)))
  427. return ERROR_OK;
  428. else {
  429. LOG_ERROR("%s PM enter error, tapstatus = %08" PRIx32
  430. , __func__, tapstatus);
  431. return ERROR_FAIL;
  432. }
  433. }
  434. static int exit_probemode(struct target *t)
  435. {
  436. uint32_t tapstatus = get_tapstatus(t);
  437. LOG_DEBUG("TS before PM exit = %08" PRIx32, tapstatus);
  438. if (!(tapstatus & TS_PM_BIT)) {
  439. LOG_USER("core not in PM");
  440. return ERROR_OK;
  441. }
  442. scan.out[0] = PROBEMODE;
  443. if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
  444. return ERROR_FAIL;
  445. scan.out[0] = 0;
  446. if (drscan(t, scan.out, scan.in, 1) != ERROR_OK)
  447. return ERROR_FAIL;
  448. return ERROR_OK;
  449. }
/* Do whats needed to properly enter probemode for debug on lakemont:
 * point DS at the probemode scratch area, arm DR7, drop privilege/VM86/IF
 * state that would interfere with debug memory access, and disable+flush
 * the caches. The original register values stay in the reg cache and are
 * restored on resume; the modified working copies live in pm_regs.
 * The write order below is significant - do not reorder.
 */
static int halt_prep(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	/* set up the probemode data segment (base/limit/access rights) */
	if (write_hw_reg(t, DSB, PM_DSB, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s %08" PRIx32, regs[DSB].name, PM_DSB);
	if (write_hw_reg(t, DSL, PM_DSL, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s %08" PRIx32, regs[DSL].name, PM_DSL);
	if (write_hw_reg(t, DSAR, PM_DSAR, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write DSAR %08" PRIx32, PM_DSAR);
	if (write_hw_reg(t, DR7, PM_DR7, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write DR7 %08" PRIx32, PM_DR7);
	/* values as captured by save_context() at halt time */
	uint32_t eflags = buf_get_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32);
	uint32_t csar = buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32);
	uint32_t ssar = buf_get_u32(x86_32->cache->reg_list[SSAR].value, 0, 32);
	uint32_t cr0 = buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32);
	/* clear VM86 and IF bits if they are set */
	LOG_DEBUG("EFLAGS = %08" PRIx32 ", VM86 = %d, IF = %d", eflags,
			eflags & EFLAGS_VM86 ? 1 : 0,
			eflags & EFLAGS_IF ? 1 : 0);
	if (eflags & EFLAGS_VM86
		|| eflags & EFLAGS_IF) {
		x86_32->pm_regs[I(EFLAGS)] = eflags & ~(EFLAGS_VM86 | EFLAGS_IF);
		if (write_hw_reg(t, EFLAGS, x86_32->pm_regs[I(EFLAGS)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("EFLAGS now = %08" PRIx32 ", VM86 = %d, IF = %d",
				x86_32->pm_regs[I(EFLAGS)],
				x86_32->pm_regs[I(EFLAGS)] & EFLAGS_VM86 ? 1 : 0,
				x86_32->pm_regs[I(EFLAGS)] & EFLAGS_IF ? 1 : 0);
	}
	/* set CPL to 0 for memory access */
	if (csar & CSAR_DPL) {
		x86_32->pm_regs[I(CSAR)] = csar & ~CSAR_DPL;
		if (write_hw_reg(t, CSAR, x86_32->pm_regs[I(CSAR)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("write CSAR_CPL to 0 %08" PRIx32, x86_32->pm_regs[I(CSAR)]);
	}
	if (ssar & SSAR_DPL) {
		/* NOTE(review): clears the SSAR DPL field with the CSAR_DPL
		 * mask - presumably the DPL bits occupy the same positions in
		 * both access-rights registers; confirm against SSAR_DPL */
		x86_32->pm_regs[I(SSAR)] = ssar & ~CSAR_DPL;
		if (write_hw_reg(t, SSAR, x86_32->pm_regs[I(SSAR)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("write SSAR_CPL to 0 %08" PRIx32, x86_32->pm_regs[I(SSAR)]);
	}
	/* if cache's are enabled, disable and flush */
	if (!(cr0 & CR0_CD)) {
		LOG_DEBUG("caching enabled CR0 = %08" PRIx32, cr0);
		if (cr0 & CR0_PG) {
			/* paging must be off before the cache flush */
			x86_32->pm_regs[I(CR0)] = cr0 & ~CR0_PG;
			if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("cleared paging CR0_PG = %08" PRIx32, x86_32->pm_regs[I(CR0)]);
			/* submit wbinvd to flush cache */
			if (submit_reg_pir(t, WBINVD) != ERROR_OK)
				return ERROR_FAIL;
			/* cache disabled (CD/NW) and paging re-flagged in the
			 * working copy for the duration of the halt */
			x86_32->pm_regs[I(CR0)] =
				x86_32->pm_regs[I(CR0)] | (CR0_CD | CR0_NW | CR0_PG);
			if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("set CD, NW and PG, CR0 = %08" PRIx32, x86_32->pm_regs[I(CR0)]);
		}
	}
	return ERROR_OK;
}
  517. static int do_halt(struct target *t)
  518. {
  519. /* needs proper handling later if doing a halt errors out */
  520. t->state = TARGET_DEBUG_RUNNING;
  521. if (enter_probemode(t) != ERROR_OK)
  522. return ERROR_FAIL;
  523. if (save_context(t) != ERROR_OK)
  524. return ERROR_FAIL;
  525. if (halt_prep(t) != ERROR_OK)
  526. return ERROR_FAIL;
  527. t->state = TARGET_HALTED;
  528. return target_call_event_callbacks(t, TARGET_EVENT_HALTED);
  529. }
  530. static int do_resume(struct target *t)
  531. {
  532. /* needs proper handling later */
  533. t->state = TARGET_DEBUG_RUNNING;
  534. if (restore_context(t) != ERROR_OK)
  535. return ERROR_FAIL;
  536. if (exit_probemode(t) != ERROR_OK)
  537. return ERROR_FAIL;
  538. t->state = TARGET_RUNNING;
  539. t->debug_reason = DBG_REASON_NOTHALTED;
  540. LOG_USER("target running");
  541. return target_call_event_callbacks(t, TARGET_EVENT_RESUMED);
  542. }
  543. static int read_all_core_hw_regs(struct target *t)
  544. {
  545. int err;
  546. uint32_t regval, i;
  547. struct x86_32_common *x86_32 = target_to_x86_32(t);
  548. for (i = 0; i < (x86_32->cache->num_regs); i++) {
  549. if (NOT_AVAIL_REG == regs[i].pm_idx)
  550. continue;
  551. err = read_hw_reg(t, regs[i].id, &regval, 1);
  552. if (err != ERROR_OK) {
  553. LOG_ERROR("%s error saving reg %s",
  554. __func__, x86_32->cache->reg_list[i].name);
  555. return err;
  556. }
  557. }
  558. LOG_DEBUG("read_all_core_hw_regs read %d registers ok", i);
  559. return ERROR_OK;
  560. }
  561. static int write_all_core_hw_regs(struct target *t)
  562. {
  563. int err;
  564. uint32_t i;
  565. struct x86_32_common *x86_32 = target_to_x86_32(t);
  566. for (i = 0; i < (x86_32->cache->num_regs); i++) {
  567. if (NOT_AVAIL_REG == regs[i].pm_idx)
  568. continue;
  569. err = write_hw_reg(t, i, 0, 1);
  570. if (err != ERROR_OK) {
  571. LOG_ERROR("%s error restoring reg %s",
  572. __func__, x86_32->cache->reg_list[i].name);
  573. return err;
  574. }
  575. }
  576. LOG_DEBUG("write_all_core_hw_regs wrote %d registers ok", i);
  577. return ERROR_OK;
  578. }
/* read reg from lakemont core shadow ram, update reg cache if needed.
 * Sequence: select the register (PIR), set up SRAM access, copy
 * SRAM->PDR, then read the PDR over a DR scan. The first three scans are
 * batched (flush=0) and only the final PDR read flushes the queue.
 */
static int read_hw_reg(struct target *t, int reg, uint32_t *regval, uint8_t cache)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	struct lakemont_core_reg *arch_info;
	arch_info = x86_32->cache->reg_list[reg].arch_info;
	x86_32->flush = 0; /* dont flush scans till we have a batch */
	if (submit_reg_pir(t, reg) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_instruction_pir(t, SRAMACCESS) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_instruction_pir(t, SRAM2PDR) != ERROR_OK)
		return ERROR_FAIL;
	x86_32->flush = 1;
	scan.out[0] = RDWRPDR;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	if (drscan(t, NULL, scan.out, PDR_SIZE) != ERROR_OK)
		return ERROR_FAIL;
	/* give the PIR submission time to complete before the next op */
	jtag_add_sleep(DELAY_SUBMITPIR);
	*regval = buf_get_u32(scan.out, 0, 32);
	if (cache) {
		/* refresh the reg cache with the value just read from hw */
		buf_set_u32(x86_32->cache->reg_list[reg].value, 0, 32, *regval);
		x86_32->cache->reg_list[reg].valid = 1;
		x86_32->cache->reg_list[reg].dirty = 0;
	}
	LOG_DEBUG("reg=%s, op=0x%016" PRIx64 ", val=%08" PRIx32,
			x86_32->cache->reg_list[reg].name,
			arch_info->op,
			*regval);
	return ERROR_OK;
}
/* write lakemont core shadow ram reg, update reg cache if needed.
 * When cache is set, regval is ignored and the value is taken from the
 * reg cache instead. Sequence: write the PDR over a DR scan, select the
 * register (PIR), set up SRAM access, then copy PDR->SRAM; scans are
 * batched and the final PIR flushes the queue.
 */
static int write_hw_reg(struct target *t, int reg, uint32_t regval, uint8_t cache)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	struct lakemont_core_reg *arch_info;
	arch_info = x86_32->cache->reg_list[reg].arch_info;
	uint8_t reg_buf[4];
	if (cache)
		regval = buf_get_u32(x86_32->cache->reg_list[reg].value, 0, 32);
	buf_set_u32(reg_buf, 0, 32, regval);
	LOG_DEBUG("reg=%s, op=0x%016" PRIx64 ", val=%08" PRIx32,
			x86_32->cache->reg_list[reg].name,
			arch_info->op,
			regval);
	scan.out[0] = RDWRPDR;
	x86_32->flush = 0; /* dont flush scans till we have a batch */
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	if (drscan(t, reg_buf, scan.out, PDR_SIZE) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_reg_pir(t, reg) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_instruction_pir(t, SRAMACCESS) != ERROR_OK)
		return ERROR_FAIL;
	x86_32->flush = 1;
	if (submit_instruction_pir(t, PDR2SRAM) != ERROR_OK)
		return ERROR_FAIL;
	/* we are writing from the cache so ensure we reset flags */
	if (cache) {
		x86_32->cache->reg_list[reg].dirty = 0;
		x86_32->cache->reg_list[reg].valid = 0;
	}
	return ERROR_OK;
}
  645. static bool is_paging_enabled(struct target *t)
  646. {
  647. struct x86_32_common *x86_32 = target_to_x86_32(t);
  648. if (x86_32->pm_regs[I(CR0)] & CR0_PG)
  649. return true;
  650. else
  651. return false;
  652. }
  653. static uint8_t get_num_user_regs(struct target *t)
  654. {
  655. struct x86_32_common *x86_32 = target_to_x86_32(t);
  656. return x86_32->cache->num_regs;
  657. }
  658. /* value of the CR0.PG (paging enabled) bit influences memory reads/writes */
  659. static int disable_paging(struct target *t)
  660. {
  661. struct x86_32_common *x86_32 = target_to_x86_32(t);
  662. x86_32->pm_regs[I(CR0)] = x86_32->pm_regs[I(CR0)] & ~CR0_PG;
  663. int err = x86_32->write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0);
  664. if (err != ERROR_OK) {
  665. LOG_ERROR("%s error disabling paging", __func__);
  666. return err;
  667. }
  668. return err;
  669. }
  670. static int enable_paging(struct target *t)
  671. {
  672. struct x86_32_common *x86_32 = target_to_x86_32(t);
  673. x86_32->pm_regs[I(CR0)] = (x86_32->pm_regs[I(CR0)] | CR0_PG);
  674. int err = x86_32->write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0);
  675. if (err != ERROR_OK) {
  676. LOG_ERROR("%s error enabling paging", __func__);
  677. return err;
  678. }
  679. return err;
  680. }
  681. static bool sw_bpts_supported(struct target *t)
  682. {
  683. uint32_t tapstatus = get_tapstatus(t);
  684. if (tapstatus & TS_SBP_BIT)
  685. return true;
  686. else
  687. return false;
  688. }
  689. static int transaction_status(struct target *t)
  690. {
  691. uint32_t tapstatus = get_tapstatus(t);
  692. if ((TS_EN_PM_BIT | TS_PRDY_BIT) & tapstatus) {
  693. LOG_ERROR("%s transaction error tapstatus = %08" PRIx32
  694. , __func__, tapstatus);
  695. return ERROR_FAIL;
  696. } else {
  697. return ERROR_OK;
  698. }
  699. }
  700. static int submit_instruction(struct target *t, int num)
  701. {
  702. int err = submit_instruction_pir(t, num);
  703. if (err != ERROR_OK) {
  704. LOG_ERROR("%s error submitting pir", __func__);
  705. return err;
  706. }
  707. return err;
  708. }
  709. static int submit_reg_pir(struct target *t, int num)
  710. {
  711. LOG_DEBUG("reg %s op=0x%016" PRIx64, regs[num].name, regs[num].op);
  712. int err = submit_pir(t, regs[num].op);
  713. if (err != ERROR_OK) {
  714. LOG_ERROR("%s error submitting pir", __func__);
  715. return err;
  716. }
  717. return err;
  718. }
  719. static int submit_instruction_pir(struct target *t, int num)
  720. {
  721. LOG_DEBUG("%s op=0x%016" PRIx64, instructions[num].name,
  722. instructions[num].op);
  723. int err = submit_pir(t, instructions[num].op);
  724. if (err != ERROR_OK) {
  725. LOG_ERROR("%s error submitting pir", __func__);
  726. return err;
  727. }
  728. return err;
  729. }
/*
 * PIR (Probe Mode Instruction Register), SUBMITPIR is an "IR only" TAP
 * command; there is no corresponding data register
 *
 * Sequence: load WRPIR into the IR, shift the 64-bit opcode through the
 * DR, then load SUBMITPIR to execute it. The queue flush flag is
 * suppressed for the first two scans so only the final SUBMITPIR scan
 * flushes the JTAG queue (if flushing was enabled on entry).
 *
 * NOTE(review): on the two early ERROR_FAIL returns x86_32->flush is
 * left cleared rather than restored to its entry value — confirm this
 * is intentional for the error paths.
 */
static int submit_pir(struct target *t, uint64_t op)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	uint8_t op_buf[8];
	buf_set_u64(op_buf, 0, 64, op);
	/* remember caller's flush setting, defer flushing to the last scan */
	int flush = x86_32->flush;
	x86_32->flush = 0;
	scan.out[0] = WRPIR;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	if (drscan(t, op_buf, scan.out, PIR_SIZE) != ERROR_OK)
		return ERROR_FAIL;
	scan.out[0] = SUBMITPIR;
	x86_32->flush = flush;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	/* give the core time to execute the submitted instruction */
	jtag_add_sleep(DELAY_SUBMITPIR);
	return ERROR_OK;
}
  753. int lakemont_init_target(struct command_context *cmd_ctx, struct target *t)
  754. {
  755. lakemont_build_reg_cache(t);
  756. t->state = TARGET_RUNNING;
  757. t->debug_reason = DBG_REASON_NOTHALTED;
  758. return ERROR_OK;
  759. }
  760. int lakemont_init_arch_info(struct target *t, struct x86_32_common *x86_32)
  761. {
  762. x86_32->submit_instruction = submit_instruction;
  763. x86_32->transaction_status = transaction_status;
  764. x86_32->read_hw_reg = read_hw_reg;
  765. x86_32->write_hw_reg = write_hw_reg;
  766. x86_32->sw_bpts_supported = sw_bpts_supported;
  767. x86_32->get_num_user_regs = get_num_user_regs;
  768. x86_32->is_paging_enabled = is_paging_enabled;
  769. x86_32->disable_paging = disable_paging;
  770. x86_32->enable_paging = enable_paging;
  771. return ERROR_OK;
  772. }
/* Poll the core via TAPSTATUS. When the core has been redirected into
 * probe mode (PM), save its context, classify the halt reason (hardware
 * breakpoint/watchpoint via DR6/DR7, software breakpoint via the
 * breakpoint list, or unknown) and fire the HALTED event callbacks.
 */
int lakemont_poll(struct target *t)
{
	/* LMT1 PMCR register currently allows code breakpoints, data breakpoints,
	 * single stepping and shutdowns to be redirected to PM but does not allow
	 * redirecting into PM as a result of SMM enter and SMM exit
	 */
	uint32_t ts = get_tapstatus(t);
	if (ts == 0xFFFFFFFF && t->state != TARGET_DEBUG_RUNNING) {
		/* something is wrong here */
		LOG_ERROR("tapstatus invalid - scan_chain serialization or locked JTAG access issues");
		/* TODO: Give a hint that unlocking is wrong or maybe a
		 * 'jtag arp_init' helps
		 */
		t->state = TARGET_DEBUG_RUNNING;
		return ERROR_OK;
	}
	/* we thought the core was halted, but the PM bit says otherwise:
	 * it resumed behind our back */
	if (t->state == TARGET_HALTED && (!(ts & TS_PM_BIT))) {
		LOG_INFO("target running for unknown reason");
		t->state = TARGET_RUNNING;
	}
	/* NOTE(review): the second comparison is redundant — a state equal
	 * to TARGET_RUNNING can never equal TARGET_DEBUG_RUNNING */
	if (t->state == TARGET_RUNNING &&
		t->state != TARGET_DEBUG_RUNNING) {
		/* core entered probe mode through a PMCR redirection */
		if ((ts & TS_PM_BIT) && (ts & TS_PMCR_BIT)) {
			LOG_DEBUG("redirect to PM, tapstatus=%08" PRIx32, get_tapstatus(t));
			t->state = TARGET_DEBUG_RUNNING;
			if (save_context(t) != ERROR_OK)
				return ERROR_FAIL;
			if (halt_prep(t) != ERROR_OK)
				return ERROR_FAIL;
			t->state = TARGET_HALTED;
			t->debug_reason = DBG_REASON_UNDEFINED;
			struct x86_32_common *x86_32 = target_to_x86_32(t);
			uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
			uint32_t dr6 = buf_get_u32(x86_32->cache->reg_list[DR6].value, 0, 32);
			/* which of the four debug registers (if any) triggered? */
			uint32_t hwbreakpoint = (uint32_t)-1;
			if (dr6 & DR6_BRKDETECT_0)
				hwbreakpoint = 0;
			if (dr6 & DR6_BRKDETECT_1)
				hwbreakpoint = 1;
			if (dr6 & DR6_BRKDETECT_2)
				hwbreakpoint = 2;
			if (dr6 & DR6_BRKDETECT_3)
				hwbreakpoint = 3;
			if (hwbreakpoint != (uint32_t)-1) {
				/* DR7 R/W field distinguishes execute breakpoints
				 * from data watchpoints */
				uint32_t dr7 = buf_get_u32(x86_32->cache->reg_list[DR7].value, 0, 32);
				uint32_t type = dr7 & (0x03 << (DR7_RW_SHIFT + hwbreakpoint*DR7_RW_LEN_SIZE));
				if (type == DR7_BP_EXECUTE) {
					LOG_USER("hit hardware breakpoint (hwreg=%d) at 0x%08" PRIx32, hwbreakpoint, eip);
				} else {
					/* watchpoint: report the watched address from DRx */
					uint32_t address = 0;
					switch (hwbreakpoint) {
					default:
					case 0:
						address = buf_get_u32(x86_32->cache->reg_list[DR0].value, 0, 32);
					break;
					case 1:
						address = buf_get_u32(x86_32->cache->reg_list[DR1].value, 0, 32);
					break;
					case 2:
						address = buf_get_u32(x86_32->cache->reg_list[DR2].value, 0, 32);
					break;
					case 3:
						address = buf_get_u32(x86_32->cache->reg_list[DR3].value, 0, 32);
					break;
					}
					LOG_USER("hit '%s' watchpoint for 0x%08" PRIx32 " (hwreg=%d) at 0x%08" PRIx32,
							type == DR7_BP_WRITE ? "write" : "access", address,
							hwbreakpoint, eip);
				}
				t->debug_reason = DBG_REASON_BREAKPOINT;
			} else {
				/* Check if the target hit a software breakpoint.
				 * ! Watch out: EIP is currently pointing after the breakpoint opcode
				 */
				struct breakpoint *bp = NULL;
				bp = breakpoint_find(t, eip-1);
				if (bp != NULL) {
					t->debug_reason = DBG_REASON_BREAKPOINT;
					if (bp->type == BKPT_SOFT) {
						/* The EIP is now pointing to the next byte after the
						 * breakpoint instruction. This needs to be corrected.
						 */
						buf_set_u32(x86_32->cache->reg_list[EIP].value, 0, 32, eip-1);
						x86_32->cache->reg_list[EIP].dirty = 1;
						x86_32->cache->reg_list[EIP].valid = 1;
						LOG_USER("hit software breakpoint at 0x%08" PRIx32, eip-1);
					} else {
						/* it's not a hardware breakpoint (checked already in DR6 state)
						 * and it's also not a software breakpoint ...
						 */
						LOG_USER("hit unknown breakpoint at 0x%08" PRIx32, eip);
					}
				} else {
					/* There is also the case that we hit a breakpoint instruction,
					 * which was not set by us. This needs to be handled by the
					 * application that introduced the breakpoint.
					 */
					LOG_USER("unknown break reason at 0x%08" PRIx32, eip);
				}
			}
			return target_call_event_callbacks(t, TARGET_EVENT_HALTED);
		}
	}
	return ERROR_OK;
}
  878. int lakemont_arch_state(struct target *t)
  879. {
  880. struct x86_32_common *x86_32 = target_to_x86_32(t);
  881. LOG_USER("target halted due to %s at 0x%08" PRIx32 " in %s mode",
  882. debug_reason_name(t),
  883. buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32),
  884. (buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32) & CR0_PE) ? "protected" : "real");
  885. return ERROR_OK;
  886. }
  887. int lakemont_halt(struct target *t)
  888. {
  889. if (t->state == TARGET_RUNNING) {
  890. t->debug_reason = DBG_REASON_DBGRQ;
  891. if (do_halt(t) != ERROR_OK)
  892. return ERROR_FAIL;
  893. return ERROR_OK;
  894. } else {
  895. LOG_ERROR("%s target not running", __func__);
  896. return ERROR_FAIL;
  897. }
  898. }
/* Resume execution from a halted state. If EIP sits on one of our
 * breakpoints, first single-step over it; if any breakpoints or
 * watchpoints remain armed, set PMCR so the next hit redirects the
 * core back into probe mode.
 * NOTE(review): the current/address/handle_breakpoints/debug_execution
 * parameters are part of the target API but unused here — resume is
 * always from the current EIP.
 */
int lakemont_resume(struct target *t, int current, uint32_t address,
			int handle_breakpoints, int debug_execution)
{
	struct breakpoint *bp = NULL;
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	if (check_not_halted(t))
		return ERROR_TARGET_NOT_HALTED;
	/* TODO lakemont_enable_breakpoints(t); */
	if (t->state == TARGET_HALTED) {
		/* resuming away from a software breakpoint needs some special handling */
		uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
		bp = breakpoint_find(t, eip);
		if (bp != NULL /*&& bp->type == BKPT_SOFT*/) {
			/* the step will step over the breakpoint */
			if (lakemont_step(t, 0, 0, 1) != ERROR_OK) {
				LOG_ERROR("%s stepping over a software breakpoint at 0x%08" PRIx32 " "
						"failed to resume the target", __func__, eip);
				return ERROR_FAIL;
			}
		}
		/* if breakpoints are enabled, we need to redirect these into probe mode */
		struct breakpoint *activeswbp = t->breakpoints;
		while (activeswbp != NULL && activeswbp->set == 0)
			activeswbp = activeswbp->next;
		struct watchpoint *activehwbp = t->watchpoints;
		while (activehwbp != NULL && activehwbp->set == 0)
			activehwbp = activehwbp->next;
		if (activeswbp != NULL || activehwbp != NULL)
			buf_set_u32(x86_32->cache->reg_list[PMCR].value, 0, 32, 1);
		if (do_resume(t) != ERROR_OK)
			return ERROR_FAIL;
	} else {
		LOG_USER("target not halted");
		return ERROR_FAIL;
	}
	return ERROR_OK;
}
/* Single-step one instruction: temporarily remove any breakpoint at
 * EIP, set EFLAGS.TF (trap flag) plus EFLAGS.RF and arm PMCR, restore
 * context and leave probe mode, then verify via TAPSTATUS that the
 * core re-entered probe mode after one instruction. The removed
 * breakpoint is re-applied at the end.
 * NOTE(review): current/address/handle_breakpoints are part of the
 * target API but unused here — the step is always from current EIP.
 */
int lakemont_step(struct target *t, int current,
			uint32_t address, int handle_breakpoints)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	uint32_t eflags = buf_get_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32);
	uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
	uint32_t pmcr = buf_get_u32(x86_32->cache->reg_list[PMCR].value, 0, 32);
	struct breakpoint *bp = NULL;
	int retval = ERROR_OK;
	uint32_t tapstatus = 0;
	if (check_not_halted(t))
		return ERROR_TARGET_NOT_HALTED;
	bp = breakpoint_find(t, eip);
	if (retval == ERROR_OK && bp != NULL/*&& bp->type == BKPT_SOFT*/) {
		/* TODO: This should only be done for software breakpoints.
		 * Stepping from hardware breakpoints should be possible with the resume flag
		 * Needs testing.
		 */
		retval = x86_32_common_remove_breakpoint(t, bp);
	}
	/* Set EFLAGS[TF] and PMCR[IR], exit pm and wait for PRDY# */
	LOG_DEBUG("modifying PMCR = %d and EFLAGS = %08" PRIx32, pmcr, eflags);
	eflags = eflags | (EFLAGS_TF | EFLAGS_RF);
	buf_set_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32, eflags);
	buf_set_u32(x86_32->cache->reg_list[PMCR].value, 0, 32, 1);
	LOG_DEBUG("EFLAGS [TF] [RF] bits set=%08" PRIx32 ", PMCR=%d, EIP=%08" PRIx32,
			eflags, pmcr, eip);
	tapstatus = get_tapstatus(t);
	t->debug_reason = DBG_REASON_SINGLESTEP;
	t->state = TARGET_DEBUG_RUNNING;
	if (restore_context(t) != ERROR_OK)
		return ERROR_FAIL;
	if (exit_probemode(t) != ERROR_OK)
		return ERROR_FAIL;
	target_call_event_callbacks(t, TARGET_EVENT_RESUMED);
	/* re-read TAPSTATUS: the trap after one instruction should have
	 * brought the core back into probe mode */
	tapstatus = get_tapstatus(t);
	if (tapstatus & (TS_PM_BIT | TS_EN_PM_BIT | TS_PRDY_BIT | TS_PMCR_BIT)) {
		/* target has stopped */
		if (save_context(t) != ERROR_OK)
			return ERROR_FAIL;
		if (halt_prep(t) != ERROR_OK)
			return ERROR_FAIL;
		t->state = TARGET_HALTED;
		LOG_USER("step done from EIP 0x%08" PRIx32 " to 0x%08" PRIx32, eip,
				buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32));
		target_call_event_callbacks(t, TARGET_EVENT_HALTED);
	} else {
		/* target didn't stop
		 * I hope the poll() will catch it, but the deleted breakpoint is gone
		 */
		LOG_ERROR("%s target didn't stop after executing a single step", __func__);
		t->state = TARGET_RUNNING;
		return ERROR_FAIL;
	}
	/* try to re-apply the breakpoint, even if the step failed
	 * TODO: When a bp was set, we should try to stop the target - fix the return above
	 */
	if (bp != NULL/*&& bp->type == BKPT_SOFT*/) {
		/* TODO: This should only be done for software breakpoints.
		 * Stepping from hardware breakpoints should be possible with the resume flag
		 * Needs testing.
		 */
		retval = x86_32_common_add_breakpoint(t, bp);
	}
	return retval;
}
/* TODO - implement resetbreak fully through CLTAP registers */
/* Reset-assert hook: currently a stub that only logs; no hardware
 * action is taken (see TODO above). */
int lakemont_reset_assert(struct target *t)
{
	LOG_DEBUG("-");
	return ERROR_OK;
}
/* Reset-deassert hook: currently a stub that only logs; no hardware
 * action is taken. */
int lakemont_reset_deassert(struct target *t)
{
	LOG_DEBUG("-");
	return ERROR_OK;
}