From: Patrick G. Bridges Date: Tue, 22 Feb 2011 22:21:23 +0000 (-0700) Subject: Fixups to let us run multiple guests on a single CPU again, and started fixing X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=commitdiff_plain;h=d54fc24e6cfb62cc2fedfb63d3ae68ff636e3a01;p=palacios.git Fixups to let us run multiple guests on a single CPU again, and started fixing some time virtualization issues as well. --- diff --git a/Kconfig.stdlibs b/Kconfig.stdlibs index 09e082a..4fa3315 100644 --- a/Kconfig.stdlibs +++ b/Kconfig.stdlibs @@ -109,7 +109,6 @@ config BUILT_IN_STRNCPY config BUILT_IN_STRDUP bool "strdup()" - default n depends on BUILT_IN_STDLIB help This enables Palacios' internal implementation of strdup diff --git a/palacios/include/palacios/vm_guest.h b/palacios/include/palacios/vm_guest.h index b435390..8e91634 100644 --- a/palacios/include/palacios/vm_guest.h +++ b/palacios/include/palacios/vm_guest.h @@ -125,6 +125,9 @@ struct guest_info { /* the logical cpu on which this core runs */ uint32_t cpu_id; + /* the physical cpu on which this core runs */ + uint32_t host_cpu_id; + }; diff --git a/palacios/src/palacios/svm.c b/palacios/src/palacios/svm.c index 6d5a7f8..384b355 100644 --- a/palacios/src/palacios/svm.c +++ b/palacios/src/palacios/svm.c @@ -456,8 +456,12 @@ int v3_svm_enter(struct guest_info * info) { // Conditionally yield the CPU if the timeslice has expired v3_yield_cond(info); + // Perform any additional yielding needed for time adjustment v3_adjust_time(info); + // Update timer devices prior to entering VM. 
+ v3_update_timers(info); + // disable global interrupts for vm state transition v3_clgi(); @@ -505,7 +509,6 @@ int v3_svm_enter(struct guest_info * info) { } #endif - v3_update_timers(info); #ifdef CONFIG_TIME_HIDE_VM_COST v3_restart_time(info); #endif @@ -513,7 +516,7 @@ int v3_svm_enter(struct guest_info * info) { //V3_Print("Calling v3_svm_launch\n"); - v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[info->cpu_id]); + v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[info->host_cpu_id]); //V3_Print("SVM Returned: Exit Code: %x, guest_rip=%lx\n", (uint32_t)(guest_ctrl->exit_code), (unsigned long)guest_state->rip); diff --git a/palacios/src/palacios/vmm.c b/palacios/src/palacios/vmm.c index dca1018..9acfbd0 100644 --- a/palacios/src/palacios/vmm.c +++ b/palacios/src/palacios/vmm.c @@ -204,12 +204,12 @@ static int start_core(void * p) struct guest_info * core = (struct guest_info *)p; - PrintDebug("core %u: in start_core (RIP=%p)\n", - core->cpu_id, (void *)(addr_t)core->rip); + PrintDebug("virtual core %u/physical core %u: in start_core (RIP=%p)\n", + core->cpu_id, core->host_cpu_id, (void *)(addr_t)core->rip); // JRL: Whoa WTF? cpu_types are tied to the vcoreID???? - switch (v3_cpu_types[core->cpu_id]) { + switch (v3_cpu_types[core->host_cpu_id]) { #ifdef CONFIG_SVM case V3_SVM_CPU: case V3_SVM_REV3_CPU: @@ -241,73 +241,54 @@ static int start_core(void * p) int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) { uint32_t i; -#ifdef CONFIG_MULTITHREAD_OS - int vcore_id = 0; -#endif uint8_t * core_mask = (uint8_t *)&cpu_mask; // This is to make future expansion easier uint32_t avail_cores = 0; - - /// CHECK IF WE ARE MULTICORE ENABLED.... 
V3_Print("V3 -- Starting VM (%u cores)\n", vm->num_cores); V3_Print("CORE 0 RIP=%p\n", (void *)(addr_t)(vm->cores[0].rip)); - // Check that enough cores are present in the mask to handle vcores - for (i = 0; i < MAX_CORES; i++) { - int major = i / 8; - int minor = i % 8; - - if (core_mask[major] & (0x1 << minor)) { - avail_cores++; - } - - } - - if (vm->num_cores > avail_cores) { + if (vm->num_cores > MAX_CORES ) { PrintError("Attempted to start a VM with too many cores (vm->num_cores = %d, avail_cores = %d, MAX=%d)\n", vm->num_cores, avail_cores, MAX_CORES); return -1; } + if (vm->cores[0].host_cpu_id != 0) { + PrintError("First virtual core must run on host core 0.\n"); + return -1; + } #ifdef CONFIG_MULTITHREAD_OS // spawn off new threads, for other cores - for (i = 0, vcore_id = 1; (i < MAX_CORES) && (vcore_id < vm->num_cores); i++) { - int major = i / 8; - int minor = i % 8; + for (i = 1; i < vm->num_cores; i++) { + struct guest_info *core = &(vm->cores[i]); + int major = core->host_cpu_id / 8; + int minor = core->host_cpu_id % 8; void * core_thread = NULL; - struct guest_info * core = &(vm->cores[vcore_id]); - - /* This assumes that the core 0 thread has been mapped to physical core 0 */ - if (i == V3_Get_CPU()) { - // We skip the local CPU, because it is reserved for vcore 0 - continue; - } - if ((core_mask[major] & (0x1 << minor)) == 0) { - // cpuid not set in cpu_mask + PrintError("Host CPU %d not available for virtual core %d; not started\n", + core->host_cpu_id, i); continue; } PrintDebug("Starting virtual core %u on logical core %u\n", - vcore_id, i); + i, core->host_cpu_id); - sprintf(core->exec_name, "%s-%u", vm->name, vcore_id); + sprintf(core->exec_name, "%s-%u", vm->name, i); PrintDebug("run: core=%u, func=0x%p, arg=0x%p, name=%s\n", - i, start_core, core, core->exec_name); + core->host_cpu_id, start_core, core, core->exec_name); // TODO: actually manage these threads instead of just launching them - core_thread = V3_CREATE_THREAD_ON_CPU(i, 
start_core, core, core->exec_name); + core_thread = V3_CREATE_THREAD_ON_CPU(core->host_cpu_id, start_core, + core, core->exec_name); if (core_thread == NULL) { PrintError("Thread launch failed\n"); return -1; } - - vcore_id++; } #endif @@ -479,7 +460,7 @@ void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) { int v3_vm_enter(struct guest_info * info) { - switch (v3_cpu_types[info->cpu_id]) { + switch (v3_cpu_types[info->host_cpu_id]) { #ifdef CONFIG_SVM case V3_SVM_CPU: case V3_SVM_REV3_CPU: diff --git a/palacios/src/palacios/vmm_config.c b/palacios/src/palacios/vmm_config.c index 50e3b1c..789f399 100644 --- a/palacios/src/palacios/vmm_config.c +++ b/palacios/src/palacios/vmm_config.c @@ -237,6 +237,7 @@ static int pre_config_vm(struct v3_vm_info * vm, v3_cfg_tree_t * vm_cfg) { vm->mem_size = (addr_t)atoi(memory_str) * 1024 * 1024; vm->mem_align = get_alignment(align_str); + PrintDebug("Alignment for %lu bytes of memory computed as 0x%x\n", vm->mem_size, vm->mem_align); if (strcasecmp(vm_class, "PC") == 0) { @@ -291,7 +292,7 @@ static int determine_paging_mode(struct guest_info * info, v3_cfg_tree_t * core_ if (pg_mode) { if ((strcasecmp(pg_mode, "nested") == 0)) { - if (v3_cpu_types[info->cpu_id] == V3_SVM_REV3_CPU) { + if (v3_cpu_types[info->host_cpu_id] == V3_SVM_REV3_CPU) { info->shdw_pg_mode = NESTED_PAGING; } else { PrintError("Nested paging not supported on this hardware. 
Defaulting to shadow paging\n"); @@ -337,10 +338,22 @@ static int determine_paging_mode(struct guest_info * info, v3_cfg_tree_t * core_ } static int pre_config_core(struct guest_info * info, v3_cfg_tree_t * core_cfg) { - + char *hcpu; if (determine_paging_mode(info, core_cfg)) return -1; + hcpu = v3_cfg_val(core_cfg, "hostcpu"); + if (hcpu) { + int req_id = atoi(hcpu); + if (req_id < 0) { + PrintError("Invalid host core %d requested by" + " virtual cpu %d - ignored.\n", req_id, info->cpu_id); + } else { + PrintDebug("Assigned host core %d to virtual core %d.\n", req_id, info->cpu_id); + info->host_cpu_id = req_id; + } + } + v3_init_core(info); if (info->vm_info->vm_class == V3_PC_VM) { @@ -457,7 +470,6 @@ struct v3_vm_info * v3_config_guest(void * cfg_blob, void * priv_data) { } num_cores = atoi(v3_cfg_val(cores_cfg, "count")); - if (num_cores == 0) { PrintError("No cores specified in configuration\n"); return NULL; @@ -483,7 +495,6 @@ struct v3_vm_info * v3_config_guest(void * cfg_blob, void * priv_data) { return NULL; } - V3_Print("Per core configuration\n"); per_core_cfg = v3_cfg_subtree(cores_cfg, "core"); @@ -494,12 +505,14 @@ struct v3_vm_info * v3_config_guest(void * cfg_blob, void * priv_data) { info->cpu_id = i; info->vm_info = vm; info->core_cfg_data = per_core_cfg; + info->host_cpu_id = i; // may be overridden by core config if (pre_config_core(info, per_core_cfg) == -1) { PrintError("Error in core %d preconfiguration\n", i); return NULL; } + per_core_cfg = v3_cfg_next_branch(per_core_cfg); } diff --git a/palacios/src/palacios/vmm_symbiotic.c b/palacios/src/palacios/vmm_symbiotic.c index 7d9ea4c..3b41305 100644 --- a/palacios/src/palacios/vmm_symbiotic.c +++ b/palacios/src/palacios/vmm_symbiotic.c @@ -32,11 +32,11 @@ static int cpuid_fn(struct guest_info * core, uint32_t cpuid, *eax = *(uint32_t *)"V3V"; - if ((v3_cpu_types[core->cpu_id] == V3_SVM_CPU) || - (v3_cpu_types[core->cpu_id] == V3_SVM_REV3_CPU)) { + if ((v3_cpu_types[core->host_cpu_id] == 
V3_SVM_CPU) || + (v3_cpu_types[core->host_cpu_id] == V3_SVM_REV3_CPU)) { *ebx = *(uint32_t *)"SVM"; - } else if ((v3_cpu_types[core->cpu_id] == V3_VMX_CPU) || - (v3_cpu_types[core->cpu_id] == V3_VMX_EPT_CPU)) { + } else if ((v3_cpu_types[core->host_cpu_id] == V3_VMX_CPU) || + (v3_cpu_types[core->host_cpu_id] == V3_VMX_EPT_CPU)) { *ebx = *(uint32_t *)"VMX"; } diff --git a/palacios/src/palacios/vmm_time.c b/palacios/src/palacios/vmm_time.c index 1cb6478..6b5fc13 100644 --- a/palacios/src/palacios/vmm_time.c +++ b/palacios/src/palacios/vmm_time.c @@ -103,8 +103,8 @@ int v3_adjust_time(struct guest_info * info) { struct vm_time * time_state = &(info->time_state); if (time_state->host_cpu_freq != time_state->guest_cpu_freq) { - uint64_t guest_time, guest_elapsed, desired_elapsed; - uint64_t host_time, target_host_time; + uint64_t guest_time, host_time, target_host_time; + sint64_t guest_elapsed, desired_elapsed; guest_time = v3_get_guest_time(time_state); @@ -116,14 +116,20 @@ int v3_adjust_time(struct guest_info * info) { /* Yield until that host time is reached */ host_time = v3_get_host_time(time_state); + if (host_time < target_host_time) { + PrintDebug("Yielding until host time (%llu) greater than target (%llu).\n", host_time, target_host_time); + } + while (host_time < target_host_time) { v3_yield(info); host_time = v3_get_host_time(time_state); } - // This overrides any pause/unpause times because the difference - // is going to be too big for any pause/unpause the notice. 
- time_state->guest_host_offset = (sint64_t)guest_time - (sint64_t)host_time; +#ifndef CONFIG_TIME_HIDE_VM_COST + // XXX This should turn into a target offset we want to move towards XXX + time_state->guest_host_offset = + (sint64_t)guest_time - (sint64_t)host_time; +#endif } return 0; @@ -219,14 +225,12 @@ void v3_update_timers(struct guest_info * info) { int v3_rdtsc(struct guest_info * info) { uint64_t tscval = v3_get_guest_tsc(&info->time_state); - PrintDebug("Returning %llu as TSC.\n", tscval); info->vm_regs.rdx = tscval >> 32; info->vm_regs.rax = tscval & 0xffffffffLL; return 0; } int v3_handle_rdtsc(struct guest_info * info) { - PrintDebug("Handling virtual RDTSC call.\n"); v3_rdtsc(info); info->vm_regs.rax &= 0x00000000ffffffffLL; @@ -371,9 +375,10 @@ void v3_init_time_core(struct guest_info * info) { time_state->guest_cpu_freq = atoi(khz); PrintDebug("Core %d CPU frequency requested at %d khz.\n", info->cpu_id, time_state->guest_cpu_freq); - } + } - if ((khz == NULL) || (time_state->guest_cpu_freq > time_state->host_cpu_freq)) { + if ((khz == NULL) || (time_state->guest_cpu_freq <= 0) + || (time_state->guest_cpu_freq > time_state->host_cpu_freq)) { time_state->guest_cpu_freq = time_state->host_cpu_freq; }