/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Andy Gocke <agocke@gmail.com>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Andy Gocke <agocke@gmail.com>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
21 #include <palacios/vmx_ctrl_regs.h>
22 #include <palacios/vmm.h>
23 #include <palacios/vmx_lowlevel.h>
24 #include <palacios/vmx.h>
25 #include <palacios/vmx_assist.h>
26 #include <palacios/vm_guest_mem.h>
27 #include <palacios/vmm_direct_paging.h>
28 #include <palacios/vmm_ctrl_regs.h>
#ifndef V3_CONFIG_DEBUG_VMX
// When VMX debugging is compiled out, PrintDebug becomes a no-op.
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
35 static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual);
36 static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val, struct vmx_exit_info * exit_info);
37 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
38 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
40 int v3_vmx_handle_cr0_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual, struct vmx_exit_info * exit_info) {
42 if (cr_qual->access_type < 2) {
43 v3_reg_t * reg = get_reg_ptr(info, cr_qual);
45 if (cr_qual->access_type == 0) {
47 if (handle_mov_to_cr0(info, reg, exit_info) != 0) {
48 PrintError(info->vm_info, info, "Could not handle CR0 write\n");
53 PrintError(info->vm_info, info, "Mov From CR0 not handled\n");
60 PrintError(info->vm_info, info, "Invalid CR0 Access type?? (type=%d)\n", cr_qual->access_type);
64 int v3_vmx_handle_cr3_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
66 if (cr_qual->access_type < 2) {
67 v3_reg_t * reg = get_reg_ptr(info, cr_qual);
69 if (cr_qual->access_type == 0) {
70 return handle_mov_to_cr3(info, reg);
72 return handle_mov_from_cr3(info, reg);
76 PrintError(info->vm_info, info, "Invalid CR3 Access type?? (type=%d)\n", cr_qual->access_type);
80 int v3_vmx_handle_cr4_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
81 if (cr_qual->access_type < 2) {
83 if (cr_qual->access_type == 0) {
84 if (v3_handle_cr4_write(info) != 0) {
85 PrintError(info->vm_info, info, "Could not handle CR4 write\n");
88 info->ctrl_regs.cr4 |= 0x2000; // no VMX allowed in guest, so mask CR4.VMXE
90 if (v3_handle_cr4_read(info) != 0) {
91 PrintError(info->vm_info, info, "Could not handle CR4 read\n");
99 PrintError(info->vm_info, info, "Invalid CR4 Access type?? (type=%d)\n", cr_qual->access_type);
103 int v3_vmx_handle_cr8_access(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
104 if (cr_qual->access_type < 2) {
106 if (cr_qual->access_type == 0) {
107 if (v3_handle_cr8_write(info) != 0) {
108 PrintError(info->vm_info, info, "Could not handle CR8 write\n");
112 if (v3_handle_cr8_read(info) != 0) {
113 PrintError(info->vm_info, info, "Could not handle CR8 read\n");
121 PrintError(info->vm_info, info, "Invalid CR8 Access type?? (type=%d)\n", cr_qual->access_type);
125 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
127 if (info->shdw_pg_mode == SHADOW_PAGING) {
130 PrintDebug(info->vm_info, info, "Old Guest CR3=%p, Old Shadow CR3=%p\n",
131 (void *)info->ctrl_regs.cr3,
132 (void *)info->shdw_pg_state.guest_cr3);
135 if (info->cpu_mode == LONG) {
136 info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
138 info->shdw_pg_state.guest_cr3 = (uint32_t)*cr3_reg;
142 if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
143 if (v3_activate_shadow_pt(info) == -1) {
144 PrintError(info->vm_info, info, "Failed to activate 32 bit shadow page table\n");
149 PrintDebug(info->vm_info, info, "New guest CR3=%p, New shadow CR3=%p\n",
150 (void *)info->ctrl_regs.cr3,
151 (void *)info->shdw_pg_state.guest_cr3);
153 } else if (info->shdw_pg_mode == NESTED_PAGING) {
154 PrintError(info->vm_info, info, "Nested paging not available in VMX right now!\n");
163 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
166 if (info->shdw_pg_mode == SHADOW_PAGING) {
168 if ((v3_get_vm_cpu_mode(info) == LONG) ||
169 (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
171 *cr3_reg = (uint64_t)info->shdw_pg_state.guest_cr3;
173 *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
177 PrintError(info->vm_info, info, "Unhandled paging mode\n");
185 static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0, struct vmx_exit_info * exit_info) {
186 struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
187 struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
188 struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
189 struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
190 uint_t paging_transition = 0;
191 extern v3_cpu_arch_t v3_mach_type;
194 PrintDebug(info->vm_info, info, "Mov to CR0\n");
195 PrintDebug(info->vm_info, info, "Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
196 (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
198 if ((new_shdw_cr0->pe != shdw_cr0->pe) && (vmx_info->assist_state != VMXASSIST_DISABLED)) {
200 PrintDebug(info->vm_info, info, "Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
201 PrintDebug(info->vm_info, info, "Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
202 PrintDebug(info->vm_info, info, "New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
205 if (v3_vmxassist_ctx_switch(info) != 0) {
206 PrintError(info->vm_info, info, "Unable to execute VMXASSIST context switch!\n");
210 if (vmx_info->assist_state == VMXASSIST_ON) {
211 PrintDebug(info->vm_info, info, "Loading VMXASSIST at RIP: %p\n", (void *)(addr_t)info->rip);
213 PrintDebug(info->vm_info, info, "Leaving VMXASSIST and entering protected mode at RIP: %p\n",
214 (void *)(addr_t)info->rip);
217 // PE switches modify the RIP directly, so we clear the instr_len field to avoid catastrophe
218 exit_info->instr_len = 0;
220 // v3_vmx_restore_vmcs(info);
221 // v3_print_vmcs(info);
225 if (new_shdw_cr0->pg != shdw_cr0->pg) {
226 paging_transition = 1;
230 // Except PG, PE, and NE, which are always set
231 if ((info->shdw_pg_mode == SHADOW_PAGING) ||
232 (v3_mach_type != V3_VMX_EPT_UG_CPU)) {
234 // The shadow always reflects the new value
235 *shdw_cr0 = *new_shdw_cr0;
238 // We don't care about most of the flags, so lets go for it
239 // and set them to the guest values
240 *guest_cr0 = *shdw_cr0;
245 // Unrestricted guest
246 // *(uint32_t *)shdw_cr0 = (0x00000020 & *(uint32_t *)new_shdw_cr0);
248 *guest_cr0 = *new_shdw_cr0;
257 if (paging_transition) {
260 if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
261 struct efer_64 * vm_efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
262 struct efer_64 * hw_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
264 if (vmx_info->assist_state != VMXASSIST_DISABLED) {
266 PrintDebug(info->vm_info, info, "Enabling long mode\n");
271 vmx_info->entry_ctrls.guest_ia32e = 1;
275 PrintDebug(info->vm_info, info, "Enabling long mode\n");
279 vmx_info->entry_ctrls.guest_ia32e = 1;
283 // PrintDebug(info->vm_info, info, "Activating Shadow Page tables\n");
285 if (info->shdw_pg_mode == SHADOW_PAGING) {
286 if (v3_activate_shadow_pt(info) == -1) {
287 PrintError(info->vm_info, info, "Failed to activate shadow page tables\n");
294 if (info->shdw_pg_mode == SHADOW_PAGING) {
295 if (v3_activate_passthrough_pt(info) == -1) {
296 PrintError(info->vm_info, info, "Failed to activate passthrough page tables\n");
300 // This is hideous... Let's hope that the 1to1 page table has not been nuked...
301 info->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
310 static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual * cr_qual) {
311 v3_reg_t * reg = NULL;
313 switch (cr_qual->gpr) {
315 reg = &(info->vm_regs.rax);
318 reg = &(info->vm_regs.rcx);
321 reg = &(info->vm_regs.rdx);
324 reg = &(info->vm_regs.rbx);
327 reg = &(info->vm_regs.rsp);
330 reg = &(info->vm_regs.rbp);
333 reg = &(info->vm_regs.rsi);
336 reg = &(info->vm_regs.rdi);
339 reg = &(info->vm_regs.r8);
342 reg = &(info->vm_regs.r9);
345 reg = &(info->vm_regs.r10);
348 reg = &(info->vm_regs.r11);
351 reg = &(info->vm_regs.r11);
354 reg = &(info->vm_regs.r13);
357 reg = &(info->vm_regs.r14);
360 reg = &(info->vm_regs.r15);