X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_cpuid.c;h=fc8918331ad7272ce44ed0061eebefb0b5b1567c;hb=774bac9fbb03ef8bf7c2ca2c79a8b87c9bc4c526;hp=d0c3a328dd50b2d0da12438c1c6d0f9e46c8987b;hpb=ed8feff1d5dd6bf028cd5ba0960ec125505d7597;p=palacios.git

diff --git a/palacios/src/palacios/vmm_cpuid.c b/palacios/src/palacios/vmm_cpuid.c
index d0c3a32..fc89183 100644
--- a/palacios/src/palacios/vmm_cpuid.c
+++ b/palacios/src/palacios/vmm_cpuid.c
@@ -7,11 +7,10 @@
  * and the University of New Mexico.  You can find out more at
  * http://www.v3vee.org
  *
- * Copyright (c) 2008, Jack Lange
- * Copyright (c) 2008, The V3VEE Project
+ * Copyright (c) 2011, Jack Lange
  * All rights reserved.
  *
- * Author: Jack Lange
+ * Author: Jack Lange
  *
  * This is free software.  You are permitted to use,
  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
@@ -20,16 +19,165 @@
 #include 
 #include 
 #include 
+#include 
+
+struct masked_cpuid {
+    uint32_t rax_mask;
+    uint32_t rbx_mask;
+    uint32_t rcx_mask;
+    uint32_t rdx_mask;
+
+    uint32_t rax;
+    uint32_t rbx;
+    uint32_t rcx;
+    uint32_t rdx;
+};
+
+
+void init_custom(struct v3_vm_info *vm)
+{
+    /*
+      CPUID 0
+      EAX = maxid supported (1)
+      EBX = first 4 bytes of string
+      EDX = second 4 bytes of string
+      ECX = third 4 bytes of string
+
+      GenuineIntel
+      AuthenticAMD
+      VirtualV3VEE
+      EBX  EDX  ECX
+    */
+    uint32_t ebx, ecx, edx;
+
+    memcpy(&ebx, "Virt", 4);
+    memcpy(&edx, "ualV", 4);
+    memcpy(&ecx, "3VEE", 4);
+
+
+    // In the Intel space, we are a VirtualV3VEE
+    // and our maximum cpuid is 0x1
+    v3_cpuid_add_fields(vm, 0x0,          // ID 0 (Vendor)
+                        0xffffffff, 1,    // Max CPUID is 0x1
+                        0xffffffff, ebx,
+                        0xffffffff, ecx,
+                        0xffffffff, edx);
+
+    // In the AMD space, we are a VirtualV3VEE
+    // and our maximum cpuid is 0x80000001.
+    // Other than the maximum cpuid, this is identical to Intel 0x0
+    //
+    v3_cpuid_add_fields(vm, 0x80000000,            // ID 8...0 (Vendor - AMD variant)
+                        0xffffffff, 0x80000001,    // Max CPUID is 0x80000001
+                        0xffffffff, ebx,
+                        0xffffffff, ecx,
+                        0xffffffff, edx);
+
+    /* CPUID 1, EAX - Family, Model, Stepping
+       We are Family 16, Model 1, Stepping 1 (family 16 puts us in x86-64)
+       31:28 = reserved
+       27:20 = extended family (extfam)
+       19:16 = extended model  (extmod)
+       15:12 = reserved
+       11:8  = base family     (basfam)
+       7:4   = base model      (basmod)
+       3:0   = stepping
+
+       family = extfam + basfam, model = extmod:basmod,
+       but we need to "top out" basfam first (0xf)
+
+       So we want: 0x00100f11
+
+       EBX is probably bogus here, since we need the apic ids
+       of the vcores, not the pcores
+    */
+
+    // In the Intel space, we are family 16, model 1, stepping 1,
+    // and our other features are passthrough
+    v3_cpuid_add_fields(vm, 0x1,
+                        0xffffffff, 0x00100f11,
+                        0x0, 0,
+                        0x0, 0,
+                        0x0, 0);
+
+    // In the AMD space, we are family 16, model 1, stepping 1,
+    // with other features passthrough.
+    // These other fields are *different* from Intel's 0x1, however;
+    // in particular, long mode is here, even if it's an Intel...
+    v3_cpuid_add_fields(vm, 0x80000001,    // AMD variant
+                        0xffffffff, 0x00100f11,
+                        0x0, 0,
+                        0x0, 0,
+                        0x0, 0);
+
+}
+
+void v3_init_cpuid_map(struct v3_vm_info * vm) {
+    vm->cpuid_map.map.rb_node = NULL;
+
+    // Setup default cpuid entries
+
+#ifdef V3_CONFIG_CUSTOM_CPUID
+    init_custom(vm);
+#endif
+
+    // Disable XSAVE (cpuid 0x01, ECX bit 26)
+    v3_cpuid_add_fields(vm, 0x01, 0, 0, 0, 0, (1 << 26), 0, 0, 0);
+
+    // Disable MONITOR/MWAIT (cpuid 0x01, ECX bit 3)
+    v3_cpuid_add_fields(vm, 0x01, 0, 0, 0, 0, (1 << 3), 0, 0, 0);
+
+
+    // disable MTRR
+    v3_cpuid_add_fields(vm, 0x00000001, 0, 0, 0, 0, 0, 0, (1 << 12), 0);
+    // disable PAT
+    v3_cpuid_add_fields(vm, 0x00000001, 0, 0, 0, 0, 0, 0, (1 << 16), 0);
+    // disable HTT
+    v3_cpuid_add_fields(vm, 0x00000001, 0, 0, 0, 0, 0, 0, (1 << 28), 0);
+
+    // disable X2APIC
+    v3_cpuid_add_fields(vm, 0x00000001, 0, 0, 0, 0, (1 << 21), 0, 0, 0);
+
+
+    // Demarcate machine as a VM
+    v3_cpuid_add_fields(vm, 0x00000001,
+                        0, 0,
+                        0, 0,
+                        0x80000000, 0x80000000,
+                        0, 0
+                        );
+
+
+    // disable ARAT
+    v3_cpuid_add_fields(vm, 0x00000006, (1 << 2), 0, 0, 0, 0, 0, 0, 0);
+}
+
+
+
+
+int v3_deinit_cpuid_map(struct v3_vm_info * vm) {
+    struct rb_node * node = v3_rb_first(&(vm->cpuid_map.map));
+    struct v3_cpuid_hook * hook = NULL;
+    struct rb_node * tmp_node = NULL;
+
+
+    while (node) {
+        hook = rb_entry(node, struct v3_cpuid_hook, tree_node);
+        tmp_node = node;
+        node = v3_rb_next(node);
+        v3_rb_erase(&(hook->tree_node), &(vm->cpuid_map.map));
+        V3_Free(hook);
+
+    }
 
-void v3_init_cpuid_map(struct guest_info * info) {
-    info->cpuid_map.map.rb_node = NULL;
+    return 0;
 }
 
 
-static inline struct v3_cpuid_hook * __insert_cpuid_hook(struct guest_info * info, struct v3_cpuid_hook * hook) {
-    struct rb_node ** p = &(info->cpuid_map.map.rb_node);
+static inline struct v3_cpuid_hook * __insert_cpuid_hook(struct v3_vm_info * vm, struct v3_cpuid_hook * hook) {
+    struct rb_node ** p = &(vm->cpuid_map.map.rb_node);
     struct rb_node * parent = NULL;
     struct v3_cpuid_hook * tmp_hook = NULL;
 
@@ -51,22 +199,22 @@ static inline struct v3_cpuid_hook * __insert_cpuid_hook(struct guest_info * inf
 }
 
 
-static inline struct v3_cpuid_hook * insert_cpuid_hook(struct guest_info * info, struct v3_cpuid_hook * hook) {
+static inline struct v3_cpuid_hook * insert_cpuid_hook(struct v3_vm_info * vm, struct v3_cpuid_hook * hook) {
     struct v3_cpuid_hook * ret;
 
-    if ((ret = __insert_cpuid_hook(info, hook))) {
+    if ((ret = __insert_cpuid_hook(vm, hook))) {
         return ret;
     }
 
-    v3_rb_insert_color(&(hook->tree_node), &(info->cpuid_map.map));
+    v3_rb_insert_color(&(hook->tree_node), &(vm->cpuid_map.map));
 
     return NULL;
 }
 
 
-static struct v3_cpuid_hook * get_cpuid_hook(struct guest_info * info, uint32_t cpuid) {
-    struct rb_node * n = info->cpuid_map.map.rb_node;
+static struct v3_cpuid_hook * get_cpuid_hook(struct v3_vm_info * vm, uint32_t cpuid) {
+    struct rb_node * n = vm->cpuid_map.map.rb_node;
     struct v3_cpuid_hook * hook = NULL;
 
     while (n) {
@@ -85,22 +233,137 @@ static struct v3_cpuid_hook * get_cpuid_hook(struct guest_info * info, uint32_t
 }
 
 
-int v3_unhook_cpuid(struct guest_info * info, uint32_t cpuid) {
-    struct v3_cpuid_hook * hook = get_cpuid_hook(info, cpuid);
+
+static int mask_hook(struct guest_info * core, uint32_t cpuid,
+                     uint32_t * eax, uint32_t * ebx,
+                     uint32_t * ecx, uint32_t * edx,
+                     void * priv_data) {
+    struct masked_cpuid * mask = (struct masked_cpuid *)priv_data;
+
+    v3_cpuid(cpuid, eax, ebx, ecx, edx);
+
+    *eax &= ~(mask->rax_mask);
+    *eax |= (mask->rax & mask->rax_mask);
+
+    *ebx &= ~(mask->rbx_mask);
+    *ebx |= (mask->rbx & mask->rbx_mask);
+
+    *ecx &= ~(mask->rcx_mask);
+    *ecx |= (mask->rcx & mask->rcx_mask);
+
+    *edx &= ~(mask->rdx_mask);
+    *edx |= (mask->rdx & mask->rdx_mask);
+
+    return 0;
+}
+
+
+
+/* This function allows you to reserve a set of bits in a given cpuid value.
+ * For each cpuid return register, you specify which bits you want to reserve in the mask;
+ * the value of those bits is set in the corresponding reg param.
+ * The values of the reserved bits are returned to the guest when it reads the cpuid.
+ */
+int v3_cpuid_add_fields(struct v3_vm_info * vm, uint32_t cpuid,
+                        uint32_t rax_mask, uint32_t rax,
+                        uint32_t rbx_mask, uint32_t rbx,
+                        uint32_t rcx_mask, uint32_t rcx,
+                        uint32_t rdx_mask, uint32_t rdx) {
+    struct v3_cpuid_hook * hook = get_cpuid_hook(vm, cpuid);
+
+
+    if ((~rax_mask & rax) || (~rbx_mask & rbx) ||
+        (~rcx_mask & rcx) || (~rdx_mask & rdx)) {
+        PrintError(vm, VCORE_NONE, "Invalid cpuid reg value (mask overrun)\n");
+        return -1;
+    }
+
     if (hook == NULL) {
-	PrintError("Could not find cpuid to unhook (0x%x)\n", cpuid);
+        struct masked_cpuid * mask = V3_Malloc(sizeof(struct masked_cpuid));
+
+        if (!mask) {
+            PrintError(vm, VCORE_NONE, "Unable to allocate space for cpu id mask\n");
+            return -1;
+        }
+
+        memset(mask, 0, sizeof(struct masked_cpuid));
+
+        mask->rax_mask = rax_mask;
+        mask->rax = rax;
+        mask->rbx_mask = rbx_mask;
+        mask->rbx = rbx;
+        mask->rcx_mask = rcx_mask;
+        mask->rcx = rcx;
+        mask->rdx_mask = rdx_mask;
+        mask->rdx = rdx;
+
+        if (v3_hook_cpuid(vm, cpuid, mask_hook, mask) == -1) {
+            PrintError(vm, VCORE_NONE, "Error hooking cpuid %d\n", cpuid);
+            V3_Free(mask);
+            return -1;
+        }
+    } else {
+        struct masked_cpuid * mask = NULL;
+        uint32_t tmp_val = 0;
+
+        if (hook->hook_fn != mask_hook) {
+            PrintError(vm, VCORE_NONE, "trying to add fields to a fully hooked cpuid (%d)\n", cpuid);
+            return -1;
+        }
+
+        mask = (struct masked_cpuid *)(hook->private_data);
+
+        if ((mask->rax_mask & rax_mask) ||
+            (mask->rbx_mask & rbx_mask) ||
+            (mask->rcx_mask & rcx_mask) ||
+            (mask->rdx_mask & rdx_mask)) {
+            PrintError(vm, VCORE_NONE, "Trying to add fields that have already been masked\n");
+            return -1;
+        }
+
+        mask->rax_mask |= rax_mask;
+        mask->rbx_mask |= rbx_mask;
+        mask->rcx_mask |= rcx_mask;
+        mask->rdx_mask |= rdx_mask;
+
+        mask->rax |= rax;
+        tmp_val = (~rax_mask | rax);
+        mask->rax &= tmp_val;
+
+        mask->rbx |= rbx;
+        tmp_val = (~rbx_mask | rbx);
+        mask->rbx &= tmp_val;
+
+        mask->rcx |= rcx;
+        tmp_val = (~rcx_mask | rcx);
+        mask->rcx &= tmp_val;
+
+        mask->rdx |= rdx;
+        tmp_val = (~rdx_mask | rdx);
+        mask->rdx &= tmp_val;
+
+    }
+
+    return 0;
+}
+
+int v3_unhook_cpuid(struct v3_vm_info * vm, uint32_t cpuid) {
+    struct v3_cpuid_hook * hook = get_cpuid_hook(vm, cpuid);
+
+    if (hook == NULL) {
+        PrintError(vm, VCORE_NONE, "Could not find cpuid to unhook (0x%x)\n", cpuid);
         return -1;
     }
 
-    v3_rb_erase(&(hook->tree_node), &(info->cpuid_map.map));
+    v3_rb_erase(&(hook->tree_node), &(vm->cpuid_map.map));
 
     V3_Free(hook);
 
     return 0;
 }
 
-int v3_hook_cpuid(struct guest_info * info, uint32_t cpuid,
+int v3_hook_cpuid(struct v3_vm_info * vm, uint32_t cpuid,
                   int (*hook_fn)(struct guest_info * info, uint32_t cpuid, \
                                  uint32_t * eax, uint32_t * ebx, \
                                  uint32_t * ecx, uint32_t * edx, \
                                  void * private_data),
@@ -109,17 +372,23 @@ int v3_hook_cpuid(struct guest_info * info, uint32_t cpuid,
     struct v3_cpuid_hook * hook = NULL;
 
     if (hook_fn == NULL) {
-	PrintError("CPUID hook requested with null handler\n");
+	PrintError(vm, VCORE_NONE, "CPUID hook requested with null handler\n");
         return -1;
     }
 
     hook = (struct v3_cpuid_hook *)V3_Malloc(sizeof(struct v3_cpuid_hook));
+
+    if (!hook) {
+        PrintError(vm, VCORE_NONE, "Cannot allocate memory to hook cpu id\n");
+        return -1;
+    }
+
     hook->cpuid = cpuid;
     hook->private_data = private_data;
     hook->hook_fn = hook_fn;
 
-    if (insert_cpuid_hook(info, hook)) {
-	PrintError("Could not hook cpuid 0x%x (already hooked)\n", cpuid);
+    if (insert_cpuid_hook(vm, hook)) {
+	PrintError(vm, VCORE_NONE, "Could not hook cpuid 0x%x (already hooked)\n", cpuid);
         V3_Free(hook);
         return -1;
     }
@@ -129,12 +398,12 @@ int v3_hook_cpuid(struct guest_info * info, uint32_t cpuid,
 
 int v3_handle_cpuid(struct guest_info * info) {
     uint32_t cpuid = info->vm_regs.rax;
-    struct v3_cpuid_hook * hook = get_cpuid_hook(info, cpuid);
+    struct v3_cpuid_hook * hook = get_cpuid_hook(info->vm_info, cpuid);
 
-    //PrintDebug("CPUID called for 0x%x\n", cpuid);
+    //PrintDebug(info->vm_info, info, "CPUID called for 0x%x\n", cpuid);
 
     if (hook == NULL) {
-	//PrintDebug("Calling passthrough handler\n");
+	//PrintDebug(info->vm_info, info, "Calling passthrough handler\n");
         // call the passthrough handler
         v3_cpuid(cpuid,
                  (uint32_t *)&(info->vm_regs.rax),
@@ -142,7 +411,7 @@ int v3_handle_cpuid(struct guest_info * info) {
                  (uint32_t *)&(info->vm_regs.rcx),
                  (uint32_t *)&(info->vm_regs.rdx));
     } else {
-	// PrintDebug("Calling hook function\n");
+	// PrintDebug(info->vm_info, info, "Calling hook function\n");
 
         if (hook->hook_fn(info, cpuid,
                           (uint32_t *)&(info->vm_regs.rax),
@@ -150,12 +419,12 @@ int v3_handle_cpuid(struct guest_info * info) {
                           (uint32_t *)&(info->vm_regs.rcx),
                           (uint32_t *)&(info->vm_regs.rdx),
                           hook->private_data) == -1) {
-	    PrintError("Error in cpuid handler for 0x%x\n", cpuid);
+	    PrintError(info->vm_info, info, "Error in cpuid handler for 0x%x\n", cpuid);
             return -1;
         }
     }
 
-    // PrintDebug("Cleaning up register contents\n");
+    // PrintDebug(info->vm_info, info, "Cleaning up register contents\n");
 
     info->vm_regs.rax &= 0x00000000ffffffffLL;
     info->vm_regs.rbx &= 0x00000000ffffffffLL;
@@ -166,3 +435,8 @@ int v3_handle_cpuid(struct guest_info * info) {
 
     return 0;
 }
+
+
+
+
+
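
Note on the vendor string in init_custom() above: CPUID leaf 0 returns the 12-byte vendor ID in EBX, EDX, ECX order (the same ordering that yields "GenuineIntel" and "AuthenticAMD"). The following standalone sketch (illustrative only, not part of the patch) reassembles the three registers set by the memcpy() calls in that order to confirm they spell "VirtualV3VEE":

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    uint32_t ebx, ecx, edx;
    char vendor[13];

    /* Same assignments as init_custom() in the patch above. */
    memcpy(&ebx, "Virt", 4);
    memcpy(&edx, "ualV", 4);
    memcpy(&ecx, "3VEE", 4);

    /* Guests read the vendor string back in EBX, EDX, ECX order. */
    memcpy(vendor + 0, &ebx, 4);
    memcpy(vendor + 4, &edx, 4);
    memcpy(vendor + 8, &ecx, 4);
    vendor[12] = '\0';

    printf("%s\n", vendor);   /* prints: VirtualV3VEE */
    return 0;
}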
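
Note on the CPUID leaf 1 EAX value: the comment in init_custom() derives 0x00100f11 from extended family 0x01, base family 0xf, base model 0x1, and stepping 0x1, with the base family "topped out" at 0xf before the extended field is added. A minimal decode sketch of that layout (illustrative only; decode_fms() is a hypothetical helper, not part of Palacios):

#include <stdint.h>
#include <stdio.h>

/* Decode CPUID leaf 1 EAX per the bit layout in the patch comment:
 * 27:20 extfam, 19:16 extmod, 11:8 basfam, 7:4 basmod, 3:0 stepping.
 * The extended family is added only when the base family is 0xf;
 * the model concatenates extmod:basmod. */
static void decode_fms(uint32_t eax) {
    uint32_t stepping = eax & 0xf;
    uint32_t basmod   = (eax >> 4)  & 0xf;
    uint32_t basfam   = (eax >> 8)  & 0xf;
    uint32_t extmod   = (eax >> 16) & 0xf;
    uint32_t extfam   = (eax >> 20) & 0xff;

    uint32_t family = (basfam == 0xf) ? (basfam + extfam) : basfam;
    uint32_t model  = (extmod << 4) | basmod;

    printf("family=%u model=%u stepping=%u\n", family, model, stepping);
}

int main(void) {
    decode_fms(0x00100f11);   /* prints: family=16 model=1 stepping=1 */
    return 0;
}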
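
Note on the masking scheme: v3_cpuid_add_fields() reserves bits per return register, and mask_hook() then builds the guest-visible value by taking the reserved bits from the stored value and everything else from the hardware CPUID result. A standalone sketch of that combining rule (illustrative only; apply_mask() is a hypothetical stand-in for mask_hook(), with a made-up passthrough value in place of a real CPUID instruction):

#include <stdint.h>
#include <stdio.h>

/* Combine a passthrough register value with a reserved (mask, value) pair
 * the way mask_hook() does: masked bits come from 'fixed', the rest from hw. */
static uint32_t apply_mask(uint32_t hw, uint32_t mask, uint32_t fixed) {
    return (hw & ~mask) | (fixed & mask);
}

int main(void) {
    /* Example: clear the X2APIC bit (ECX bit 21) and set the hypervisor
     * bit (ECX bit 31) of leaf 1, as v3_init_cpuid_map() does above. */
    uint32_t hw_ecx   = 0x77fafbffu;               /* made-up hardware ECX */
    uint32_t ecx_mask = (1u << 21) | (1u << 31);
    uint32_t ecx_val  = (0u << 21) | (1u << 31);

    printf("guest ecx = 0x%08x\n", apply_mask(hw_ecx, ecx_mask, ecx_val));
    return 0;
}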