/* this will attempt to abort all the remote cores */
if (tm_handle_decode_fail(core) == -1) {
TM_ERR(core,Error,"Could not handle failed decode\n");
- return -1;
+ return ERR_STORE_FAIL;
}
/* we need to trigger a local abort */
return ERR_TRANS_FAULT_FAIL;
} else if (sto == ERR_STORE_MUST_ABORT) {
TM_DBG(core,EXIT,"aborting for some reason\n");
- v3_handle_trans_abort(core);
+ v3_handle_trans_abort(core, TM_ABORT_UNSPECIFIED, 0);
return TRANS_FAULT_OK;
}
} else if (sto == ERR_STORE_MUST_ABORT) {
TM_ERR(core,IFETCH,"decode failed, going out of single stepping\n");
- v3_handle_trans_abort(core);
+ v3_handle_trans_abort(core, TM_ABORT_UNSPECIFIED, 0);
return TRANS_FAULT_OK;
}
TM_DBG(core,EXIT,"we are in ABORT, call the abort handler\n");
tm->TM_ABORT = 0;
- v3_handle_trans_abort(core);
+ v3_handle_trans_abort(core, TM_ABORT_UNSPECIFIED, 0);
TM_DBG(core,EXIT,"RIP after abort: %p\n", ((void*)(core->rip)));
} else if (conflict == CHECK_IS_CONFLICT) {
TM_DBG(core,EXIT,"we have a conflict, aborting\n");
- v3_handle_trans_abort(core);
+ v3_handle_trans_abort(core, TM_ABORT_CONFLICT, 0);
return CHECK_MUST_ABORT;
}
}
+/*
+ * tm_set_abort_status
+ *
+ * Encode the reason a transaction aborted into the guest's EAX
+ * (core->vm_regs.rax), following the RTM abort-status layout:
+ * the XABORT imm8 goes in bits 31:24, and per-cause flag bits
+ * (ABORT_CONFLICT, ABORT_RETRY, ...) occupy the low bits.
+ *
+ * @core          guest core whose vm_regs.rax is written
+ * @cause         abort cause selector
+ * @xabort_reason imm8 operand; only meaningful for TM_ABORT_XABORT
+ */
+static void
+tm_set_abort_status (struct guest_info * core,
+                     tm_abrt_cause_t cause,
+                     uint8_t xabort_reason)
+{
+    core->vm_regs.rax = 0;
+
+    switch (cause) {
+    case TM_ABORT_XABORT:
+        // XABORT: reflect the imm8 into EAX bits 31:24; cause bits stay zero.
+        // NOTE(review): Intel SDM also sets EAX bit 0 for XABORT-triggered
+        // aborts -- confirm whether omitting it here is intentional.
+        // Cast before shifting: a promoted (int)reason << 24 overflows
+        // signed int (UB) whenever reason >= 0x80.
+        core->vm_regs.rax |= ((uint64_t)xabort_reason << 24);
+        break;
+    case TM_ABORT_CONFLICT:
+        // Conflict caused by another core: a retry may succeed, so set
+        // both the conflict bit and the may-retry bit.
+        core->vm_regs.rax |= (1 << ABORT_CONFLICT) | (1 << ABORT_RETRY);
+        break;
+    case TM_ABORT_INTERNAL:
+    case TM_ABORT_BKPT:
+        // These causes map directly to their EAX flag bit positions
+        // (presumably by enum value -- verify against tm_abrt_cause_t).
+        core->vm_regs.rax |= (1 << cause);
+        break;
+    default:
+        TM_ERR(core, ABORT, "invalid abort cause\n");
+        break;
+    }
+}
+
+
+// xabort_reason is only used for XABORT instruction
int
-v3_handle_trans_abort (struct guest_info * core)
+v3_handle_trans_abort (struct guest_info * core,
+ tm_abrt_cause_t cause,
+ uint8_t xabort_reason)
{
struct v3_trans_mem * tm = (struct v3_trans_mem *)v3_get_ext_core_state(core, "trans_mem");
v3_tm_inc_tnum(tm);
}
-
+ tm_set_abort_status(core, cause, xabort_reason);
+
// time to garbage collect
if (tm_hash_gc(tm) == -1) {
TM_ERR(core,GC,"could not gc!\n");
{
rdtscll(tm->exit_time);
- // Error checking! make sure that we have gotten here in a legitimate manner
+ /* XEND should raise a GPF when RTM mode is not on */
if (tm->TM_MODE != TM_ON) {
TM_ERR(core, UD, "Encountered XEND while not in a transactional region\n");
v3_free_staging_page(tm);
v3_clr_vtlb(core);
v3_clear_tm_lists(tm);
- v3_raise_exception(core, UD_EXCEPTION);
+ v3_raise_exception(core, GPF_EXCEPTION);
return 0;
}
*/
static int
tm_handle_xabort (struct guest_info * core,
- struct v3_trans_mem * tm)
+ struct v3_trans_mem * tm,
+ uchar_t * instr)
{
+ uint8_t reason;
+
+ // we must reflect the immediate back into EAX 31:24
+ reason = *(uint8_t*)(instr+2);
+
/* TODO: this probably needs to move somewhere else */
rdtscll(tm->exit_time);
}
// Handle the exit
- v3_handle_trans_abort(core);
+ v3_handle_trans_abort(core, TM_ABORT_XABORT, reason);
return 0;
}
uchar_t * instr)
{
sint32_t rel_addr = 0;
+ uint8_t out_of_bounds = 0;
+ uint8_t in_compat_no_long = 0;
if (tm->TM_MODE == TM_ON) {
- TM_ERR(core,UD,"We got here while already in a transactional region!");
+ /* TODO: this is actually an indication of nesting, we'll fix this later */
+ TM_ERR(core,UD,"We don't support nested transactions yet!\n");
v3_raise_exception(core, UD_EXCEPTION);
+ return -1;
+ }
+
+ // Save the fail_call address (first 2 bytes = opcode, last 4 = fail call addr)
+ rel_addr = *(sint32_t*)(instr+2);
+
+ /* raise a GPF if we're trying to set a fail call outside of code segment */
+ in_compat_no_long = (core->cpu_mode == LONG_32_COMPAT) || ((struct efer_64*)&(core->ctrl_regs.efer))->lma == 0;
+ out_of_bounds = (core->rip + rel_addr > core->segments.cs.base + core->segments.cs.limit ||
+ core->rip + rel_addr < core->segments.cs.base);
+
+ if (in_compat_no_long && out_of_bounds) {
+ v3_raise_exception(core, GPF_EXCEPTION);
+ return 0;
}
+ /* TODO: also raise GPF if we're in long mode and failcall isn't canonical */
+
+ /* TODO: put this elsewhere */
rdtscll(tm->entry_time);
tm->entry_exits = core->num_exits;
TM_DBG(core,UD,"Set the system in TM Mode, save fallback address");
- // Save the fail_call address (first 2 bytes = opcode, last 4 = fail call addr)
- rel_addr = *(sint32_t*)(instr+2);
+
tm->fail_call = core->rip + XBEGIN_INSTR_LEN + rel_addr;
TM_DBG(core,UD,"we set fail_call to %llx, rip is %llx, rel_addr is %x", (uint64_t)tm->fail_call,(uint64_t)core->rip,rel_addr);
tm_handle_xtest (struct guest_info * core,
struct v3_trans_mem * tm)
{
+ struct rflags * rf = (struct rflags*)&(core->ctrl_regs.rflags);
+
// if we are in tm mode, set zf to 0, otherwise 1
if (tm->TM_MODE == TM_ON) {
- core->ctrl_regs.rflags &= ~(1ULL << 6);
+ rf->zf = 0;
} else {
- core->ctrl_regs.rflags |= (1ULL << 6);
+ rf->zf = 1;
}
+ rf->cf = 0;
+ rf->of = 0;
+ rf->sf = 0;
+ rf->pf = 0;
+ rf->af = 0;
+
core->rip += XTEST_INSTR_LEN;
return 0;
TM_DBG(core, UD, "Encountered Haswell-specific XABORT %x %x %d at %llx\n", byte1, byte2, byte3, (uint64_t)core->rip);
- if (tm_handle_xabort(core, tm) == -1) {
+ if (tm_handle_xabort(core, tm, instr) == -1) {
TM_ERR(core, UD, "Problem handling XABORT\n");
return -1;
}
}
else {
TM_DBG(info,EXCP,"aborting due to DE exception\n");
- v3_handle_trans_abort(info);
+ v3_handle_trans_abort(info, TM_ABORT_UNSPECIFIED, 0);
}
break;
case SVM_EXIT_EXCP1:
}
else {
TM_DBG(info,EXCP,"aborting due to DB exception\n");
- v3_handle_trans_abort(info);
+ v3_handle_trans_abort(info, TM_ABORT_UNSPECIFIED, 0);
}
break;
case SVM_EXIT_EXCP3:
}
else {
TM_DBG(info,EXCP,"aborting due to BP exception\n");
- v3_handle_trans_abort(info);
+ v3_handle_trans_abort(info, TM_ABORT_UNSPECIFIED, 0);
}
break;
case SVM_EXIT_EXCP4:
}
else {
TM_DBG(info,EXCP,"aborting due to OF exception\n");
- v3_handle_trans_abort(info);
+ v3_handle_trans_abort(info, TM_ABORT_UNSPECIFIED, 0);
}
break;
case SVM_EXIT_EXCP5:
}
else {
TM_DBG(info,EXCP,"aborting due to BR exception\n");
- v3_handle_trans_abort(info);
+ v3_handle_trans_abort(info, TM_ABORT_UNSPECIFIED, 0);
}
break;
case SVM_EXIT_EXCP7:
}
else {
TM_DBG(info,EXCP,"aborting due to NM exception\n");
- v3_handle_trans_abort(info);
+ v3_handle_trans_abort(info, TM_ABORT_UNSPECIFIED, 0);
}
break;
case SVM_EXIT_EXCP10:
}
else {
TM_DBG(info,EXCP,"aborting due to TS exception\n");
- v3_handle_trans_abort(info);
+ v3_handle_trans_abort(info, TM_ABORT_UNSPECIFIED, 0);
}
break;
case SVM_EXIT_EXCP11:
}
else {
TM_DBG(info,EXCP,"aborting due to NP exception\n");
- v3_handle_trans_abort(info);
+ v3_handle_trans_abort(info, TM_ABORT_UNSPECIFIED, 0);
}
break;
case SVM_EXIT_EXCP12:
}
else {
TM_DBG(info,EXCP,"aborting due to SS exception\n");
- v3_handle_trans_abort(info);
+ v3_handle_trans_abort(info, TM_ABORT_UNSPECIFIED, 0);
}
break;
case SVM_EXIT_EXCP13:
}
else {
TM_DBG(info,EXCP,"aborting due to GPF exception\n");
- v3_handle_trans_abort(info);
+ v3_handle_trans_abort(info, TM_ABORT_UNSPECIFIED, 0);
}
break;
case SVM_EXIT_EXCP16:
}
else {
TM_DBG(info,EXCP,"aborting due to MF exception\n");
- v3_handle_trans_abort(info);
+ v3_handle_trans_abort(info, TM_ABORT_UNSPECIFIED, 0);
}
break;
case SVM_EXIT_EXCP17:
}
else {
TM_DBG(info,EXCP,"aborting due to AC exception\n");
- v3_handle_trans_abort(info);
+ v3_handle_trans_abort(info, TM_ABORT_UNSPECIFIED, 0);
}
break;
case SVM_EXIT_EXCP19:
}
else {
TM_DBG(info,EXCP,"aborting due to XF exception\n");
- v3_handle_trans_abort(info);
+ v3_handle_trans_abort(info, TM_ABORT_UNSPECIFIED, 0);
}
break;
v3_stgi();
TM_DBG(info,INTR,"we have a pending interrupt!\n");
- v3_handle_trans_abort(info);
+ v3_handle_trans_abort(info, TM_ABORT_UNSPECIFIED, 0);
// Copy new RIP state into arch dependent structure
guest_state->rip = info->rip;
TM_DBG(info,INTR,"currently guest state rip is %llx\n",(uint64_t)guest_state->rip);