v3_core_operating_mode_t core_run_state;
/* the logical cpu on which this core runs */
- uint32_t cpu_id;
+ uint32_t pcpu_id;
+
+ /* The virtual core # of this cpu (what the guest sees this core as) */
+ uint32_t vcpu_id;
};
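
Most of the rest of the patch is mechanical fallout from this split: guest-visible, per-virtual-core state (device arrays, shadow maps) is indexed by vcpu_id, while anything host-facing (thread placement, cross-CPU kicks) uses pcpu_id. A minimal user-space sketch of the convention — every name below is an illustrative stand-in, not a Palacios API:

    #include <stdint.h>
    #include <stdio.h>

    struct core_info {
        uint32_t pcpu_id;   /* host CPU the core's thread is pinned to */
        uint32_t vcpu_id;   /* core number as seen by the guest */
    };

    /* Guest-visible, per-core device state: index by vcpu_id. */
    static int apic_index(const struct core_info *core) {
        return core->vcpu_id;
    }

    /* Host-side notification (forcing a remote core to exit): target pcpu_id. */
    static void kick_core(const struct core_info *core) {
        printf("send host IPI to cpu %u (guest core %u)\n",
               core->pcpu_id, core->vcpu_id);
    }

    int main(void) {
        struct core_info c = { .pcpu_id = 3, .vcpu_id = 0 };
        printf("apics[%d]\n", apic_index(&c));
        kick_core(&c);
        return 0;
    }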
#include <palacios/vmm_types.h>
+#include <palacios/vmm_queue.h>
+#include <palacios/vmm_lock.h>
+
+/* The locking in this file is nasty.
+ * There are 3 different locking approaches that are taken, depending on the APIC operation:
+ * 1. Queue locks. Actual irq insertions are done via queueing irq ops at the dest apic.
+ * The destination apic's core is responsible for draining the queue, and actually
+ * setting the vector table.
+ * 2. State locks. This is a standard lock taken when internal apic state is read/written.
+ * When an irq's destination is determined, this lock is taken to examine the apic's
+ * addressability.
+ * 3. VM barrier lock. This is taken when actual VM core state is changed (via SIPI).
+ */
+
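
A user-space sketch of approach 1 may help; pthreads stand in for the Palacios lock primitive, and all names here are illustrative rather than the vmm_queue/vmm_lock API. Any core may enqueue a vector, but only the destination core's own thread drains the queue into its IRR, so the vector table itself never needs a lock:

    #include <pthread.h>
    #include <stdint.h>

    #define QLEN 64

    struct irq_queue {
        pthread_mutex_t lock;      /* stand-in for the per-APIC queue lock */
        uint8_t pending[QLEN];
        unsigned head, tail;
    };

    /* Called from any core: touches only the queue, never the IRR. */
    static void queue_irq(struct irq_queue *q, uint8_t vector) {
        pthread_mutex_lock(&q->lock);
        q->pending[q->tail++ % QLEN] = vector;
        pthread_mutex_unlock(&q->lock);
    }

    /* Called only by the owning core (e.g. from its intr_pending hook),
     * so the 256-bit IRR is written by exactly one thread. */
    static void drain_irq_queue(struct irq_queue *q, uint8_t irr[32]) {
        pthread_mutex_lock(&q->lock);
        while (q->head != q->tail) {
            uint8_t vec = q->pending[q->head++ % QLEN];
            irr[vec >> 3] |= 0x1 << (vec & 0x7);
        }
        pthread_mutex_unlock(&q->lock);
    }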
#ifndef V3_CONFIG_DEBUG_APIC
struct v3_timer * timer;
+ v3_lock_t state_lock;
+ struct v3_queue irq_queue;
+
uint32_t eoi;
apic->spec_eoi.val = 0x00000000;
+ v3_init_queue(&(apic->irq_queue));
+
}
static int read_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t * dst, void * priv_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
- PrintDebug("apic %u: core %u: MSR read\n", apic->lapic_id.val, core->cpu_id);
+ PrintDebug("apic %u: core %u: MSR read\n", apic->lapic_id.val, core->vcpu_id);
dst->value = apic->base_addr;
static int write_apic_msr(struct guest_info * core, uint_t msr, v3_msr_t src, void * priv_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)priv_data;
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
- struct v3_mem_region * old_reg = v3_get_mem_region(core->vm_info, core->cpu_id, apic->base_addr);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
+ struct v3_mem_region * old_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, apic->base_addr);
- PrintDebug("apic %u: core %u: MSR write\n", apic->lapic_id.val, core->cpu_id);
+ PrintDebug("apic %u: core %u: MSR write\n", apic->lapic_id.val, core->vcpu_id);
if (old_reg == NULL) {
// uh oh...
PrintError("apic %u: core %u: APIC Base address region does not exit...\n",
- apic->lapic_id.val, core->cpu_id);
+ apic->lapic_id.val, core->vcpu_id);
return -1;
}
apic->base_addr = src.value;
- if (v3_hook_full_mem(core->vm_info, core->cpu_id, apic->base_addr,
+ if (v3_hook_full_mem(core->vm_info, core->vcpu_id, apic->base_addr,
apic->base_addr + PAGE_SIZE_4KB,
apic_read, apic_write, apic_dev) == -1) {
PrintError("apic %u: core %u: Could not hook new APIC Base address\n",
- apic->lapic_id.val, core->cpu_id);
+ apic->lapic_id.val, core->vcpu_id);
return -1;
}
// irq_num is the bit offset into a 256-bit buffer...
-// return values
-// -1 = error
-// 0 = OK, no interrupt needed now
-// 1 = OK, interrupt needed now
static int activate_apic_irq(struct apic_state * apic, uint32_t irq_num) {
int major_offset = (irq_num & ~0x00000007) >> 3;
int minor_offset = irq_num & 0x00000007;
uint8_t flag = 0x1 << minor_offset;
- if (irq_num <= 15 || irq_num>255) {
+ if (irq_num <= 15 || irq_num > 255) {
PrintError("apic %u: core %d: Attempting to raise an invalid interrupt: %d\n",
- apic->lapic_id.val, apic->core->cpu_id, irq_num);
+ apic->lapic_id.val, apic->core->vcpu_id, irq_num);
return -1;
}
- PrintDebug("apic %u: core %d: Raising APIC IRQ %d\n", apic->lapic_id.val, apic->core->cpu_id, irq_num);
+ PrintDebug("apic %u: core %d: Raising APIC IRQ %d\n", apic->lapic_id.val, apic->core->vcpu_id, irq_num);
if (*req_location & flag) {
PrintDebug("Interrupt %d coallescing\n", irq_num);
return 1;
} else {
PrintDebug("apic %u: core %d: Interrupt not enabled... %.2x\n",
- apic->lapic_id.val, apic->core->cpu_id,*en_location);
+ apic->lapic_id.val, apic->core->vcpu_id, *en_location);
}
return 0;
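
The major/minor arithmetic above is plain byte/bit indexing into the 256-bit request register; a worked example (vector chosen arbitrarily for illustration):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint32_t irq_num = 0x31;                          /* vector 49 */
        int major_offset = (irq_num & ~0x00000007) >> 3;  /* byte 49/8 = 6 */
        int minor_offset = irq_num & 0x00000007;          /* bit  49%8 = 1 */
        uint8_t flag = 0x1 << minor_offset;               /* mask 0x02 */
        assert(major_offset == 6 && minor_offset == 1 && flag == 0x02);
        return 0;
    }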
((mda & 0x0f) & (dst_apic->log_dst.dst_log_id & 0x0f)) ) { /* I am in the set) */
PrintDebug("apic %u core %u: accepting clustered IRQ (mda 0x%x == log_dst 0x%x)\n",
- dst_apic->lapic_id.val, dst_core->cpu_id, mda,
+ dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
dst_apic->log_dst.dst_log_id);
return 1;
} else {
PrintDebug("apic %u core %u: rejecting clustered IRQ (mda 0x%x != log_dst 0x%x)\n",
- dst_apic->lapic_id.val, dst_core->cpu_id, mda,
+ dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
dst_apic->log_dst.dst_log_id);
return 0;
}
if (dst_apic->log_dst.dst_log_id & mda) { // I am in the set
PrintDebug("apic %u core %u: accepting flat IRQ (mda 0x%x == log_dst 0x%x)\n",
- dst_apic->lapic_id.val, dst_core->cpu_id, mda,
+ dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
dst_apic->log_dst.dst_log_id);
return 1;
} else {
PrintDebug("apic %u core %u: rejecting flat IRQ (mda 0x%x != log_dst 0x%x)\n",
- dst_apic->lapic_id.val, dst_core->cpu_id, mda,
+ dst_apic->lapic_id.val, dst_core->vcpu_id, mda,
dst_apic->log_dst.dst_log_id);
return 0;
}
} else {
PrintError("apic %u core %u: invalid destination format register value 0x%x for logical mode delivery.\n",
- dst_apic->lapic_id.val, dst_core->cpu_id, dst_apic->dst_fmt.model);
+ dst_apic->lapic_id.val, dst_core->vcpu_id, dst_apic->dst_fmt.model);
return -1;
}
}
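
Condensed, the two logical-destination models above reduce to the helper below. The DFR model constants follow the usual xAPIC encoding (0xF flat, 0x0 cluster); treat the helper itself as an illustrative sketch, not the function in this file:

    #include <stdint.h>

    #define APIC_DFR_FLAT    0xF   /* xAPIC flat model */
    #define APIC_DFR_CLUSTER 0x0   /* xAPIC cluster model */

    static int logical_match(uint8_t model, uint8_t mda, uint8_t log_dst) {
        if (model == APIC_DFR_FLAT) {
            /* flat: MDA is a bitmask over the 8 logical IDs */
            return (mda & log_dst) != 0;
        } else if (model == APIC_DFR_CLUSTER) {
            /* cluster: high nibble selects the cluster,
             * low nibble is a bitmask within it */
            return ((mda & 0xf0) == (log_dst & 0xf0)) &&
                   (((mda & 0x0f) & (log_dst & 0x0f)) != 0);
        }
        return -1;  /* invalid destination format */
    }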
int do_xcall;
- PrintDebug("delivering IRQ %d to core %u\n", vector, dst_core->cpu_id);
+ PrintDebug("delivering IRQ %d to core %u\n", vector, dst_core->vcpu_id);
do_xcall = activate_apic_irq(dst_apic, vector);
- if (do_xcall < 0) {
- PrintError("Failed to activate apic irq!\n");
- return -1;
- }
- if (do_xcall && (dst_apic != src_apic)) {
- // Assume core # is same as logical processor for now
- // TODO FIX THIS FIX THIS
- // THERE SHOULD BE: guestapicid->virtualapicid map,
- // cpu_id->logical processor map
- // host maitains logical proc->phsysical proc
+
+ if (dst_apic != src_apic) {
PrintDebug(" non-local core with new interrupt, forcing it to exit now\n");
#ifdef V3_CONFIG_MULTITHREAD_OS
- v3_interrupt_cpu(dst_core->vm_info, dst_core->cpu_id, 0);
-#else
- V3_ASSERT(0);
+ v3_interrupt_cpu(dst_core->vm_info, dst_core->pcpu_id, 0);
#endif
}
}
case APIC_INIT_DELIVERY: {
- PrintDebug(" INIT delivery to core %u\n", dst_core->cpu_id);
+ PrintDebug(" INIT delivery to core %u\n", dst_core->vcpu_id);
// TODO: any APIC reset on dest core (shouldn't be needed, but not sure...)
// Sanity check
if (dst_apic->ipi_state != INIT_ST) {
PrintError(" Warning: core %u is not in INIT state (mode = %d), ignored (assuming this is the deassert)\n",
- dst_core->cpu_id, dst_apic->ipi_state);
+ dst_core->vcpu_id, dst_apic->ipi_state);
// Only a warning, since an INIT-INIT-SIPI sequence is common
break;
}
// Sanity check
if (dst_apic->ipi_state != SIPI) {
PrintError(" core %u is not in SIPI state (mode = %d), ignored!\n",
- dst_core->cpu_id, dst_apic->ipi_state);
+ dst_core->vcpu_id, dst_apic->ipi_state);
break;
}
dst_core->segments.cs.base = vector << 12;
PrintDebug(" SIPI delivery (0x%x -> 0x%x:0x0) to core %u\n",
- vector, dst_core->segments.cs.selector, dst_core->cpu_id);
+ vector, dst_core->segments.cs.selector, dst_core->vcpu_id);
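(The start-address arithmetic follows the standard SIPI convention: the 8-bit vector selects a 4 KB-aligned real-mode entry point, so CS selector = vector << 8, CS base = vector << 12, IP = 0; e.g. vector 0x9A starts execution at 0x9A00:0x0000, physical 0x9A000.)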
// Maybe need to adjust the APIC?
// We transition the target core to SIPI state
{
int i;
- if (icr->dst >0 && icr->dst < apic_dev->num_apics) {
+ if ( (icr->dst > 0) && (icr->dst < apic_dev->num_apics) ) {
// see if it simply is the core id
if (apic_dev->apics[icr->dst].lapic_id.val == icr->dst) {
return &(apic_dev->apics[icr->dst]);
}
}
- for (i=0;i<apic_dev->num_apics;i++) {
+ for (i = 0; i < apic_dev->num_apics; i++) {
if (apic_dev->apics[i].lapic_id.val == icr->dst) {
return &(apic_dev->apics[i]);
}
case APIC_SHORTHAND_NONE: // no shorthand
if (icr->dst_mode == APIC_DEST_PHYSICAL) {
- dest_apic=find_physical_apic(apic_dev,icr);
+ dest_apic = find_physical_apic(apic_dev, icr);
- if (dest_apic==NULL) {
+ if (dest_apic == NULL) {
PrintError("apic: Attempted send to unregistered apic id=%u\n", icr->dst);
return -1;
}
}
- V3_Print("apic: done\n");
-
+ PrintDebug("apic: done\n");
} else if (icr->dst_mode == APIC_DEST_LOGICAL) {
- if (icr->del_mode!=APIC_LOWEST_DELIVERY ) {
+ if (icr->del_mode != APIC_LOWEST_DELIVERY) {
+ int i;
+ uint8_t mda = icr->dst;
+
// logical, but not lowest priority
// we immediately trigger
// fixed, smi, reserved, nmi, init, sipi, etc
- int i;
-
- uint8_t mda = icr->dst;
+
for (i = 0; i < apic_dev->num_apics; i++) {
+ int del_flag = 0;
dest_apic = &(apic_dev->apics[i]);
- int del_flag = should_deliver_ipi(dest_apic->core, dest_apic, mda);
+ del_flag = should_deliver_ipi(dest_apic->core, dest_apic, mda);
if (del_flag == -1) {
+
PrintError("apic: Error checking delivery mode\n");
return -1;
} else if (del_flag == 1) {
+
if (deliver_ipi(src_apic, dest_apic,
icr->vec, icr->del_mode) == -1) {
PrintError("apic: Error: Could not deliver IPI\n");
}
}
} else { //APIC_LOWEST_DELIVERY
- // logical, lowest priority
int i;
struct apic_state * cur_best_apic = NULL;
uint8_t mda = icr->dst;
+ // logical, lowest priority
+
for (i = 0; i < apic_dev->num_apics; i++) {
int del_flag = 0;
return -1;
} else if (del_flag == 1) {
// update priority for lowest priority scan
- if (!cur_best_apic) {
+ if (cur_best_apic == NULL) {
cur_best_apic = dest_apic;
} else if (dest_apic->task_prio.val < cur_best_apic->task_prio.val) {
cur_best_apic = dest_apic;
}
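
The arbitration loop above reduces to a minimum-scan over TPR values; schematically (types and field names are stand-ins for the structures in this file):

    struct apic_sketch { unsigned task_prio; };

    /* Among the APICs flagged deliverable by should_deliver_ipi(),
     * keep the one with the lowest task priority. */
    static struct apic_sketch * lowest_prio(struct apic_sketch *apics,
                                            const int *deliverable, int n) {
        struct apic_sketch *best = NULL;
        int i;
        for (i = 0; i < n; i++) {
            if (!deliverable[i])
                continue;
            if ((best == NULL) || (apics[i].task_prio < best->task_prio))
                best = &apics[i];
        }
        return best;
    }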
} else if (icr->dst_mode == APIC_DEST_LOGICAL) { /* logical delivery */
PrintError("apic: use of logical delivery in self (untested)\n");
+
if (deliver_ipi(src_apic, src_apic, icr->vec, icr->del_mode) == -1) {
PrintError("apic: Could not deliver IPI to self (logical)\n");
return -1;
// External function, expected to acquire lock on apic
static int apic_read(struct guest_info * core, addr_t guest_addr, void * dst, uint_t length, void * priv_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
addr_t reg_addr = guest_addr - apic->base_addr;
struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
uint32_t val = 0;
PrintDebug("apic %u: core %u: at %p: Read apic address space (%p)\n",
- apic->lapic_id.val, core->cpu_id, apic, (void *)guest_addr);
+ apic->lapic_id.val, core->vcpu_id, apic, (void *)guest_addr);
if (msr->apic_enable == 0) {
PrintError("apic %u: core %u: Read from APIC address space with disabled APIC, apic msr=0x%llx\n",
- apic->lapic_id.val, core->cpu_id, apic->base_addr_msr.value);
+ apic->lapic_id.val, core->vcpu_id, apic->base_addr_msr.value);
return -1;
}
default:
PrintError("apic %u: core %u: Read from Unhandled APIC Register: %x (getting zero)\n",
- apic->lapic_id.val, core->cpu_id, (uint32_t)reg_addr);
+ apic->lapic_id.val, core->vcpu_id, (uint32_t)reg_addr);
return -1;
}
} else {
PrintError("apic %u: core %u: Invalid apic read length (%d)\n",
- apic->lapic_id.val, core->cpu_id, length);
+ apic->lapic_id.val, core->vcpu_id, length);
return -1;
}
PrintDebug("apic %u: core %u: Read finished (val=%x)\n",
- apic->lapic_id.val, core->cpu_id, *(uint32_t *)dst);
+ apic->lapic_id.val, core->vcpu_id, *(uint32_t *)dst);
return length;
}
*/
static int apic_write(struct guest_info * core, addr_t guest_addr, void * src, uint_t length, void * priv_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
addr_t reg_addr = guest_addr - apic->base_addr;
struct apic_msr * msr = (struct apic_msr *)&(apic->base_addr_msr.value);
uint32_t op_val = *(uint32_t *)src;
PrintDebug("apic %u: core %u: at %p and priv_data is at %p\n",
- apic->lapic_id.val, core->cpu_id, apic, priv_data);
+ apic->lapic_id.val, core->vcpu_id, apic, priv_data);
PrintDebug("apic %u: core %u: write to address space (%p) (val=%x)\n",
- apic->lapic_id.val, core->cpu_id, (void *)guest_addr, *(uint32_t *)src);
+ apic->lapic_id.val, core->vcpu_id, (void *)guest_addr, *(uint32_t *)src);
if (msr->apic_enable == 0) {
PrintError("apic %u: core %u: Write to APIC address space with disabled APIC, apic msr=0x%llx\n",
- apic->lapic_id.val, core->cpu_id, apic->base_addr_msr.value);
+ apic->lapic_id.val, core->vcpu_id, apic->base_addr_msr.value);
return -1;
}
if (length != 4) {
PrintError("apic %u: core %u: Invalid apic write length (%d)\n",
- apic->lapic_id.val, length, core->cpu_id);
+ apic->lapic_id.val, core->vcpu_id, length);
return -1;
}
case EXT_APIC_FEATURE_OFFSET:
PrintError("apic %u: core %u: Attempting to write to read only register %p (error)\n",
- apic->lapic_id.val, core->cpu_id, (void *)reg_addr);
+ apic->lapic_id.val, core->vcpu_id, (void *)reg_addr);
break;
// Data registers
case APIC_ID_OFFSET:
//V3_Print("apic %u: core %u: my id is being changed to %u\n",
- // apic->lapic_id.val, core->cpu_id, op_val);
+ // apic->lapic_id.val, core->vcpu_id, op_val);
apic->lapic_id.val = op_val;
break;
break;
case LDR_OFFSET:
PrintDebug("apic %u: core %u: setting log_dst.val to 0x%x\n",
- apic->lapic_id.val, core->cpu_id, op_val);
+ apic->lapic_id.val, core->vcpu_id, op_val);
apic->log_dst.val = op_val;
break;
case DFR_OFFSET:
tmp_icr = apic->int_cmd;
// V3_Print("apic %u: core %u: sending cmd 0x%llx to apic %u\n",
- // apic->lapic_id.val, core->cpu_id,
+ // apic->lapic_id.val, core->vcpu_id,
// apic->int_cmd.val, apic->int_cmd.dst);
if (route_ipi(apic_dev, apic, &tmp_icr) == -1) {
PrintError("IPI Routing failure\n");
return -1;
}
+
break;
}
case INT_CMD_HI_OFFSET: {
apic->int_cmd.hi = op_val;
- V3_Print("apic %u: core %u: writing command high=0x%x\n", apic->lapic_id.val, core->cpu_id,apic->int_cmd.hi);
+ V3_Print("apic %u: core %u: writing command high=0x%x\n", apic->lapic_id.val, core->vcpu_id,apic->int_cmd.hi);
break;
}
case SEOI_OFFSET:
default:
PrintError("apic %u: core %u: Write to Unhandled APIC Register: %x (ignored)\n",
- apic->lapic_id.val, core->cpu_id, (uint32_t)reg_addr);
+ apic->lapic_id.val, core->vcpu_id, (uint32_t)reg_addr);
return -1;
}
- PrintDebug("apic %u: core %u: Write finished\n", apic->lapic_id.val, core->cpu_id);
+ PrintDebug("apic %u: core %u: Write finished\n", apic->lapic_id.val, core->vcpu_id);
return length;
static int apic_intr_pending(struct guest_info * core, void * private_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
+
+ // drain the irq queue
+
int req_irq = get_highest_irr(apic);
int svc_irq = get_highest_isr(apic);
- // PrintDebug("apic %u: core %u: req_irq=%d, svc_irq=%d\n",apic->lapic_id.val,info->cpu_id,req_irq,svc_irq);
+ // PrintDebug("apic %u: core %u: req_irq=%d, svc_irq=%d\n",apic->lapic_id.val,info->vcpu_id,req_irq,svc_irq);
if ((req_irq >= 0) &&
(req_irq > svc_irq)) {
static int apic_get_intr_number(struct guest_info * core, void * private_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
int req_irq = get_highest_irr(apic);
int svc_irq = get_highest_isr(apic);
static int apic_begin_irq(struct guest_info * core, void * private_data, int irq) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(private_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
int major_offset = (irq & ~0x00000007) >> 3;
int minor_offset = irq & 0x00000007;
uint8_t *req_location = apic->int_req_reg + major_offset;
} else {
// do nothing...
//PrintDebug("apic %u: core %u: begin irq for %d ignored since I don't own it\n",
- // apic->lapic_id.val, core->cpu_id, irq);
+ // apic->lapic_id.val, core->vcpu_id, irq);
}
return 0;
uint64_t cpu_cycles, uint64_t cpu_freq,
void * priv_data) {
struct apic_dev_state * apic_dev = (struct apic_dev_state *)(priv_data);
- struct apic_state * apic = &(apic_dev->apics[core->cpu_id]);
+ struct apic_state * apic = &(apic_dev->apics[core->vcpu_id]);
// The 32 bit GCC runtime is a pile of shit
#ifdef __V3_64BIT__
if ((apic->tmr_init_cnt == 0) ||
( (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_ONESHOT) &&
(apic->tmr_cur_cnt == 0))) {
- //PrintDebug("apic %u: core %u: APIC timer not yet initialized\n",apic->lapic_id.val,info->cpu_id);
+ //PrintDebug("apic %u: core %u: APIC timer not yet initialized\n",apic->lapic_id.val,info->vcpu_id);
return;
}
break;
default:
PrintError("apic %u: core %u: Invalid Timer Divider configuration\n",
- apic->lapic_id.val, core->cpu_id);
+ apic->lapic_id.val, core->vcpu_id);
return;
}
// raise irq
PrintDebug("apic %u: core %u: Raising APIC Timer interrupt (periodic=%d) (icnt=%d) (div=%d)\n",
- apic->lapic_id.val, core->cpu_id,
+ apic->lapic_id.val, core->vcpu_id,
apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt, shift_num);
if (apic_intr_pending(core, priv_data)) {
PrintDebug("apic %u: core %u: Overriding pending IRQ %d\n",
- apic->lapic_id.val, core->cpu_id,
+ apic->lapic_id.val, core->vcpu_id,
apic_get_intr_number(core, priv_data));
}
if (activate_internal_irq(apic, APIC_TMR_INT) == -1) {
PrintError("apic %u: core %u: Could not raise Timer interrupt\n",
- apic->lapic_id.val, core->cpu_id);
+ apic->lapic_id.val, core->vcpu_id);
}
if (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_PERIODIC) {
return -1;
}
- v3_hook_full_mem(vm, core->cpu_id, apic->base_addr, apic->base_addr + PAGE_SIZE_4KB, apic_read, apic_write, apic_dev);
+ v3_hook_full_mem(vm, core->vcpu_id, apic->base_addr, apic->base_addr + PAGE_SIZE_4KB, apic_read, apic_write, apic_dev);
PrintDebug("apic %u: (setup device): done, my id is %u\n", i, apic->lapic_id.val);
}
struct v3_inspector_state * vm_state = priv_data;
char core_name[50];
- snprintf(core_name, 50, "core.%d", core->cpu_id);
+ snprintf(core_name, 50, "core.%d", core->vcpu_id);
{
struct v3_mtree * core_node = v3_mtree_create_subtree(&(vm_state->state_tree), core_name);
if (khz) {
time_state->guest_cpu_freq = atoi(khz);
PrintDebug("Core %d CPU frequency requested at %d khz.\n",
- info->cpu_id, time_state->guest_cpu_freq);
+ info->pcpu_id, time_state->guest_cpu_freq);
}
if ( (khz == NULL) ||
pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
- struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->cpu_id, guest_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, guest_pa);
if (shdw_reg == NULL) {
// Inject a machine check in the guest
PrintDebug("Handling 4MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
- struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->cpu_id, guest_fault_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, guest_fault_pa);
if (shdw_reg == NULL) {
pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
- struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->cpu_id, guest_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, guest_pa);
if (shdw_reg == NULL) {
// Inject a machine check in the guest
if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
(shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, core->cpu_id, guest_pa);
+ addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, core->vcpu_id, guest_pa);
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
PrintDebug("Handling 4MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
- struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->cpu_id, guest_fault_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, guest_fault_pa);
if (shdw_reg == NULL) {
if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
(shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, core->cpu_id, guest_fault_pa);
+ addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, core->vcpu_id, guest_fault_pa);
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
PrintDebug("Handling PTE fault\n");
- struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->cpu_id, guest_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, guest_pa);
int inherited_ar_user = ((inherited_ar & PT_USER_MASK) == PT_USER_MASK) ? 1 : 0;
int inherited_ar_writable = ((inherited_ar & PT_WRITABLE_MASK) == PT_WRITABLE_MASK) ? 1 : 0;
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, core->cpu_id, guest_pa);
+ addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, core->vcpu_id, guest_pa);
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
PrintDebug("Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
- struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->cpu_id, guest_fault_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, guest_fault_pa);
int fixed = 0;
int write_pt = 0;
if ((shdw_reg->host_type == SHDW_REGION_ALLOCATED) ||
(shdw_reg->host_type == SHDW_REGION_WRITE_HOOK)) {
- addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, core->cpu_id, guest_fault_pa);
+ addr_t shadow_pa = v3_get_shadow_addr(shdw_reg, core->vcpu_id, guest_fault_pa);
shadow_pte->page_base_addr = PAGE_BASE_ADDR(shadow_pa);
pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
- struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_pa);
if (shdw_reg == NULL) {
// Inject a machine check in the guest
PrintDebug("Handling 4MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
- struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
if (shdw_reg == NULL) {
PrintDebug("Handling PTE fault\n");
- struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_pa);
PrintDebug("Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
- struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
if (shdw_reg == NULL) {
pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
- struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_pa);
if (shdw_reg == NULL) {
// Inject a machine check in the guest
PrintDebug("Handling 4MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
- struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
if (shdw_reg == NULL) {
PrintDebug("Handling 4MB fault with large page (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("LargeShadowPDE=%p, LargeGuestPDE=%p\n", large_shadow_pde, large_guest_pde);
- struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
if (shdw_reg == NULL) {
PrintDebug("Handling PTE fault\n");
- struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_pa);
PrintDebug("Handling 2MB fault with large page (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("LargeShadowPDE=%p, LargeGuestPDE=%p\n", large_shadow_pde, large_guest_pde);
- struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
if (shdw_reg == NULL) {
// Inject a machine check in the guest
PrintDebug("Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
- struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->cpu_id, guest_fault_pa);
+ struct v3_mem_region * shdw_reg = v3_get_mem_region(info->vm_info, info->vcpu_id, guest_fault_pa);
if (shdw_reg == NULL) {
// vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
// vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
- PrintDebug("Starting SVM core %u\n", info->cpu_id);
+ PrintDebug("Starting SVM core %u (on logical core %u)\n", info->vcpu_id, info->pcpu_id);
- if (info->cpu_id == 0) {
+ if (info->vcpu_id == 0) {
info->core_run_state = CORE_RUNNING;
info->vm_info->run_state = VM_RUNNING;
} else {
- PrintDebug("SVM core %u: Waiting for core initialization\n", info->cpu_id);
+ PrintDebug("SVM core %u (on %u): Waiting for core initialization\n", info->vcpu_id, info->pcpu_id);
while (info->core_run_state == CORE_STOPPED) {
v3_yield(info);
- //PrintDebug("SVM core %u: still waiting for INIT\n",info->cpu_id);
+ //PrintDebug("SVM core %u: still waiting for INIT\n", info->vcpu_id);
}
- PrintDebug("SVM core %u initialized\n", info->cpu_id);
+ PrintDebug("SVM core %u(on %u) initialized\n", info->vcpu_id, info->pcpu_id);
}
- PrintDebug("SVM core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
- info->cpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
+ PrintDebug("SVM core %u(on %u): I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
+ info->vcpu_id, info->pcpu_id,
+ info->segments.cs.selector, (void *)(info->segments.cs.base),
info->segments.cs.limit, (void *)(info->rip));
- PrintDebug("SVM core %u: Launching SVM VM (vmcb=%p)\n", info->cpu_id, (void *)info->vmm_data);
+ PrintDebug("SVM core %u: Launching SVM VM (vmcb=%p) (on cpu %u)\n",
+ info->vcpu_id, (void *)info->vmm_data, info->pcpu_id);
//PrintDebugVMCB((vmcb_t*)(info->vmm_data));
v3_start_time(info);
info->vm_info->run_state = VM_ERROR;
- V3_Print("SVM core %u: SVM ERROR!!\n", info->cpu_id);
+ V3_Print("SVM core %u: SVM ERROR!!\n", info->vcpu_id);
v3_print_guest_state(info);
- V3_Print("SVM core %u: SVM Exit Code: %p\n", info->cpu_id, (void *)(addr_t)guest_ctrl->exit_code);
+ V3_Print("SVM core %u: SVM Exit Code: %p\n", info->vcpu_id, (void *)(addr_t)guest_ctrl->exit_code);
- V3_Print("SVM core %u: exit_info1 low = 0x%.8x\n", info->cpu_id, *(uint_t*)&(guest_ctrl->exit_info1));
- V3_Print("SVM core %u: exit_info1 high = 0x%.8x\n", info->cpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
+ V3_Print("SVM core %u: exit_info1 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info1));
+ V3_Print("SVM core %u: exit_info1 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
- V3_Print("SVM core %u: exit_info2 low = 0x%.8x\n", info->cpu_id, *(uint_t*)&(guest_ctrl->exit_info2));
- V3_Print("SVM core %u: exit_info2 high = 0x%.8x\n", info->cpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
+ V3_Print("SVM core %u: exit_info2 low = 0x%.8x\n", info->vcpu_id, *(uint_t*)&(guest_ctrl->exit_info2));
+ V3_Print("SVM core %u: exit_info2 high = 0x%.8x\n", info->vcpu_id, *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
v3_gva_to_hva(info, linear_addr, &host_addr);
}
- V3_Print("SVM core %u: Host Address of rip = 0x%p\n", info->cpu_id, (void *)host_addr);
+ V3_Print("SVM core %u: Host Address of rip = 0x%p\n", info->vcpu_id, (void *)host_addr);
- V3_Print("SVM core %u: Instr (15 bytes) at %p:\n", info->cpu_id, (void *)host_addr);
+ V3_Print("SVM core %u: Instr (15 bytes) at %p:\n", info->vcpu_id, (void *)host_addr);
v3_dump_mem((uint8_t *)host_addr, 15);
v3_print_stack(info);
}
int v3_gpa_to_hpa(struct guest_info * info, addr_t gpa, addr_t * hpa) {
- struct v3_mem_region * reg = v3_get_mem_region(info->vm_info, info->cpu_id, gpa);
+ struct v3_mem_region * reg = v3_get_mem_region(info->vm_info, info->vcpu_id, gpa);
if (reg == NULL) {
PrintError("In GPA->HPA: Could not find address in shadow map (addr=%p) (NULL REGION)\n",
struct guest_info * core = (struct guest_info *)p;
- PrintDebug("virtual core %u: in start_core (RIP=%p)\n",
- core->cpu_id, (void *)(addr_t)core->rip);
+ PrintDebug("virtual core %u (on logical core %u): in start_core (RIP=%p)\n",
+ core->vcpu_id, core->pcpu_id, (void *)(addr_t)core->rip);
switch (v3_cpu_types[0]) {
#ifdef V3_CONFIG_SVM
i--; // We reset the logical core idx. Not strictly necessary I guess...
} else {
- /* This assumes that the core 0 thread has been mapped to physical core 0 */
if (i == V3_Get_CPU()) {
// We skip the local CPU because it is reserved for vcore 0
continue;
core_idx, start_core, core, core->exec_name);
// TODO: actually manage these threads instead of just launching them
+ core->pcpu_id = core_idx;
core_thread = V3_CREATE_THREAD_ON_CPU(core_idx, start_core, core, core->exec_name);
if (core_thread == NULL) {
sprintf(vm->cores[0].exec_name, "%s", vm->name);
+ vm->cores[0].pcpu_id = V3_Get_CPU();
+
if (start_core(&(vm->cores[0])) != 0) {
PrintError("Error starting VM core 0\n");
v3_stop_vm(vm);
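
Placement bookkeeping in miniature: vcore 0 runs in the caller's thread on the local host CPU, every other vcore gets a thread pinned to a remote host CPU, and pcpu_id records the choice so later host IPIs (v3_interrupt_cpu above) hit the right core. A sketch under those assumptions, with all helpers as stand-ins for V3_Get_CPU / V3_CREATE_THREAD_ON_CPU and the core body:

    #include <stdint.h>

    struct core_sketch { uint32_t vcpu_id; uint32_t pcpu_id; };

    /* Stand-ins; not Palacios APIs. */
    extern uint32_t host_cpu_id(void);
    extern uint32_t pick_remote_cpu(int vcore);
    extern void spawn_on_cpu(uint32_t cpu, void (*fn)(struct core_sketch *),
                             struct core_sketch *arg);
    extern void run_core(struct core_sketch *core);

    static void start_cores(struct core_sketch *cores, int n) {
        int i;

        /* vcores 1..n-1 each get a thread pinned to a remote host CPU;
         * pcpu_id records the placement so host IPIs can find them later. */
        for (i = 1; i < n; i++) {
            cores[i].pcpu_id = pick_remote_cpu(i);
            spawn_on_cpu(cores[i].pcpu_id, run_core, &cores[i]);
        }

        /* vcore 0 runs in the caller's thread on the local CPU. */
        cores[0].pcpu_id = host_cpu_id();
        run_core(&cores[0]);
    }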
for (i = 0; i < vm->num_cores; i++) {
struct guest_info * info = &(vm->cores[i]);
- info->cpu_id = i;
+ info->vcpu_id = i;
info->vm_info = vm;
info->core_cfg_data = per_core_cfg;
int pde_index = PDE32_INDEX(fault_addr);
int pte_index = PTE32_INDEX(fault_addr);
- struct v3_mem_region * region = v3_get_mem_region(info->vm_info, info->cpu_id, fault_addr);
+ struct v3_mem_region * region = v3_get_mem_region(info->vm_info, info->vcpu_id, fault_addr);
if (region == NULL) {
PrintError("Invalid region in passthrough page fault 32, addr=%p\n",
int pde_index = PDE32PAE_INDEX(fault_addr);
int pte_index = PTE32PAE_INDEX(fault_addr);
- struct v3_mem_region * region = v3_get_mem_region(info->vm_info, info->cpu_id, fault_addr);
+ struct v3_mem_region * region = v3_get_mem_region(info->vm_info, info->vcpu_id, fault_addr);
if (region == NULL) {
PrintError("Invalid region in passthrough page fault 32PAE, addr=%p\n",
int pde_index = PDE64_INDEX(fault_addr);
int pte_index = PTE64_INDEX(fault_addr);
- struct v3_mem_region * region = v3_get_mem_region(core->vm_info, core->cpu_id, fault_addr);
+ struct v3_mem_region * region = v3_get_mem_region(core->vm_info, core->vcpu_id, fault_addr);
int page_size = PAGE_SIZE_4KB;
if (region == NULL) {
if ((ext->impl) && (ext->impl->core_init)) {
if (ext->impl->core_init(core, ext->priv_data) == -1) {
PrintError("Error configuring per core extension %s on core %d\n",
- ext->impl->name, core->cpu_id);
+ ext->impl->name, core->vcpu_id);
return -1;
}
}
pg_start = PAGE_ADDR_4MB(page_addr);
pg_end = (pg_start + PAGE_SIZE_4MB);
- reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+ reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);
if ((reg) && ((reg->host_addr % PAGE_SIZE_4MB) == 0)) {
page_size = PAGE_SIZE_4MB;
pg_start = PAGE_ADDR_2MB(page_addr);
pg_end = (pg_start + PAGE_SIZE_2MB);
- reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+ reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);
if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
page_size = PAGE_SIZE_2MB;
pg_start = PAGE_ADDR_1GB(page_addr);
pg_end = (pg_start + PAGE_SIZE_1GB);
- reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+ reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);
if ((reg) && ((reg->host_addr % PAGE_SIZE_1GB) == 0)) {
page_size = PAGE_SIZE_1GB;
pg_start = PAGE_ADDR_2MB(page_addr);
pg_end = (pg_start + PAGE_SIZE_2MB);
- reg = get_overlapping_region(core->vm_info, core->cpu_id, pg_start, pg_end);
+ reg = get_overlapping_region(core->vm_info, core->vcpu_id, pg_start, pg_end);
if ((reg) && ((reg->host_addr % PAGE_SIZE_2MB) == 0)) {
page_size = PAGE_SIZE_2MB;
src_reg = reg;
} else {
// Note that this should only trigger for string operations
- src_reg = v3_get_mem_region(core->vm_info, core->cpu_id, src_mem_op_gpa);
+ src_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, src_mem_op_gpa);
}
if (src_reg == NULL) {
// We don't check whether the region is a hook here because it doesn't yet matter.
// These hva calculations will be true regardless
if (src_reg->flags.alloced == 0) {
- src_mem_op_hva = (addr_t)(hooks->hook_hvas_1 + (PAGE_SIZE * core->cpu_id));
+ src_mem_op_hva = (addr_t)(hooks->hook_hvas_1 + (PAGE_SIZE * core->vcpu_id));
} else {
// We already have the region so we can do the conversion ourselves
src_mem_op_hva = (addr_t)V3_VAddr((void *)((src_mem_op_gpa - src_reg->guest_start) + src_reg->host_addr));
dst_reg = reg;
} else {
// Note that this should only trigger for string operations
- dst_reg = v3_get_mem_region(core->vm_info, core->cpu_id, dst_mem_op_gpa);
+ dst_reg = v3_get_mem_region(core->vm_info, core->vcpu_id, dst_mem_op_gpa);
}
if (dst_reg == NULL) {
// We don't check whether the region is a hook here because it doesn't yet matter.
// These hva calculations will be true regardless
if (dst_reg->flags.alloced == 0) {
- dst_mem_op_hva = (addr_t)(hooks->hook_hvas_2 + (PAGE_SIZE * core->cpu_id));
+ dst_mem_op_hva = (addr_t)(hooks->hook_hvas_2 + (PAGE_SIZE * core->vcpu_id));
} else {
// We already have the region so we can do the conversion ourselves
dst_mem_op_hva = (addr_t)V3_VAddr((void *)((dst_mem_op_gpa - dst_reg->guest_start) + dst_reg->host_addr));
if (global_state->active == 1) {
// unmap page
- struct v3_mem_region * old_reg = v3_get_mem_region(core->vm_info, core->cpu_id,
+ struct v3_mem_region * old_reg = v3_get_mem_region(core->vm_info, core->vcpu_id,
(addr_t)global_state->global_guest_pa);
if (old_reg == NULL) {
if (local_state->active == 1) {
// unmap page
- struct v3_mem_region * old_reg = v3_get_mem_region(core->vm_info, core->cpu_id,
+ struct v3_mem_region * old_reg = v3_get_mem_region(core->vm_info, core->vcpu_id,
(addr_t)local_state->local_guest_pa);
if (old_reg == NULL) {
local_state->active = 1;
// map page
- v3_add_shadow_mem(core->vm_info, core->cpu_id, (addr_t)local_state->local_guest_pa,
+ v3_add_shadow_mem(core->vm_info, core->vcpu_id, (addr_t)local_state->local_guest_pa,
(addr_t)(local_state->local_guest_pa + PAGE_SIZE_4KB - 1),
local_state->local_page_pa);
} else {
state->local_page = (struct v3_symspy_local_page *)V3_VAddr((void *)state->local_page_pa);
memset(state->local_page, 0, PAGE_SIZE_4KB);
- snprintf((uint8_t *)&(state->local_page->magic), 8, "V3V.%d", core->cpu_id);
+ snprintf((uint8_t *)&(state->local_page->magic), 8, "V3V.%d", core->vcpu_id);
return 0;
}
struct exit_event * evt = NULL;
struct rb_node * node = v3_rb_first(&(core->core_telem.exit_root));
- V3_Print("Exit information for Core %d\n", core->cpu_id);
+ V3_Print("Exit information for Core %d\n", core->vcpu_id);
if (!node) {
V3_Print("No information yet for this core\n");
if (khz) {
time_state->guest_cpu_freq = atoi(khz);
- PrintDebug("Core %d CPU frequency requested at %d khz.\n",
- info->cpu_id, time_state->guest_cpu_freq);
+ PrintDebug("Logical Core %d (vcpu=%d) CPU frequency requested at %d khz.\n",
+ info->pcpu_id, info->vcpu_id, time_state->guest_cpu_freq);
}
if ( (khz == NULL) ||
time_state->guest_cpu_freq = time_state->host_cpu_freq;
}
- PrintDebug("Core %d CPU frequency set to %d KHz (host CPU frequency = %d KHz).\n",
- info->cpu_id,
+ PrintDebug("Logical Core %d (vcpu=%d) CPU frequency set to %d KHz (host CPU frequency = %d KHz).\n",
+ info->pcpu_id, info->vcpu_id,
time_state->guest_cpu_freq,
time_state->host_cpu_freq);
v3_vmxassist_init(core, vmx_state);
} else if ((core->shdw_pg_mode == NESTED_PAGING) &&
- (v3_cpu_types[core->cpu_id] == V3_VMX_EPT_CPU)) {
+ (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_CPU)) {
#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
}
} else if ((core->shdw_pg_mode == NESTED_PAGING) &&
- (v3_cpu_types[core->cpu_id] == V3_VMX_EPT_UG_CPU)) {
+ (v3_cpu_types[core->pcpu_id] == V3_VMX_EPT_UG_CPU)) {
int i = 0;
// For now we will assume that unrestricted guest mode is assured w/ EPT
int v3_start_vmx_guest(struct guest_info * info) {
- PrintDebug("Starting VMX core %u\n", info->cpu_id);
+ PrintDebug("Starting VMX core %u\n", info->vcpu_id);
- if (info->cpu_id == 0) {
+ if (info->vcpu_id == 0) {
info->core_run_state = CORE_RUNNING;
info->vm_info->run_state = VM_RUNNING;
} else {
- PrintDebug("VMX core %u: Waiting for core initialization\n", info->cpu_id);
+ PrintDebug("VMX core %u: Waiting for core initialization\n", info->vcpu_id);
while (info->core_run_state == CORE_STOPPED) {
v3_yield(info);
- //PrintDebug("VMX core %u: still waiting for INIT\n",info->cpu_id);
+ //PrintDebug("VMX core %u: still waiting for INIT\n",info->vcpu_id);
}
- PrintDebug("VMX core %u initialized\n", info->cpu_id);
+ PrintDebug("VMX core %u initialized\n", info->vcpu_id);
}
PrintDebug("VMX core %u: I am starting at CS=0x%x (base=0x%p, limit=0x%x), RIP=0x%p\n",
- info->cpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
+ info->vcpu_id, info->segments.cs.selector, (void *)(info->segments.cs.base),
info->segments.cs.limit, (void *)(info->rip));
- PrintDebug("VMX core %u: Launching VMX VM\n", info->cpu_id);
+ PrintDebug("VMX core %u: Launching VMX VM on logical core %u\n", info->vcpu_id, info->pcpu_id);
v3_start_time(info);
PrintDebug("VMXON pointer: 0x%p\n", (void *)host_vmcs_ptrs[cpu_id]);
if (vmx_on(host_vmcs_ptrs[cpu_id]) == VMX_SUCCESS) {
- PrintDebug("VMX Enabled\n");
+ V3_Print("VMX Enabled\n");
} else {
PrintError("VMX initialization failure\n");
return;
int pde_index = PDE64_INDEX(fault_addr);
int pte_index = PTE64_INDEX(fault_addr);
- struct v3_mem_region * region = v3_get_mem_region(core->vm_info, core->cpu_id, fault_addr);
+ struct v3_mem_region * region = v3_get_mem_region(core->vm_info, core->vcpu_id, fault_addr);
int page_size = PAGE_SIZE_4KB;