/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Andy Gocke <agocke@gmail.com>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Andy Gocke <agocke@gmail.com>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
21 #include <palacios/vmx_ctrl_regs.h>
22 #include <palacios/vmm.h>
23 #include <palacios/vmx_lowlevel.h>
24 #include <palacios/vmx.h>
25 #include <palacios/vmx_assist.h>
26 #include <palacios/vm_guest_mem.h>
27 #include <palacios/vmm_direct_paging.h>
28 #include <palacios/vmx_handler.h>
30 static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual cr_qual);
31 static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_val);
32 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
33 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg);
35 int v3_vmx_handle_cr0_access(struct guest_info * info) {
36 struct vmx_exit_cr_qual cr_qual;
38 vmcs_read(VMCS_EXIT_QUAL, &(cr_qual.value));
40 if (cr_qual.access_type < 2) {
41 v3_reg_t * reg = get_reg_ptr(info, cr_qual);
43 if (cr_qual.access_type == 0) {
45 if (handle_mov_to_cr0(info, reg) != 0) {
46 PrintError("Could not handle CR0 write\n");
51 PrintError("Mov From CR0 not handled\n");
58 PrintError("Invalid CR0 Access type?? (type=%d)\n", cr_qual.access_type);
62 int v3_vmx_handle_cr3_access(struct guest_info * info) {
63 struct vmx_exit_cr_qual cr_qual;
65 vmcs_read(VMCS_EXIT_QUAL, &(cr_qual.value));
67 if (cr_qual.access_type < 2) {
68 v3_reg_t * reg = get_reg_ptr(info, cr_qual);
70 if (cr_qual.access_type == 0) {
71 return handle_mov_to_cr3(info, reg);
73 return handle_mov_from_cr3(info, reg);
77 PrintError("Invalid CR3 Access type?? (type=%d)\n", cr_qual.access_type);
81 static int handle_mov_to_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
84 if (info->shdw_pg_mode == SHADOW_PAGING) {
87 PrintDebug("Old Guest CR3=%p, Old Shadow CR3=%p\n",
88 (void *)info->ctrl_regs.cr3,
89 (void *)info->shdw_pg_state.guest_cr3);
91 if (info->cpu_mode == LONG) {
92 info->shdw_pg_state.guest_cr3 = (uint64_t)*cr3_reg;
94 info->shdw_pg_state.guest_cr3 = (uint32_t)*cr3_reg;
98 if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
99 if (v3_activate_shadow_pt(info) == -1) {
100 PrintError("Failed to activate 32 bit shadow page table\n");
105 PrintDebug("New guest CR3=%p, New shadow CR3=%p\n",
106 (void *)info->ctrl_regs.cr3,
107 (void *)info->shdw_pg_state.guest_cr3);
109 } else if (info->shdw_pg_mode == NESTED_PAGING) {
110 PrintError("Nested paging not available in VMX right now!\n");
115 vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
116 info->rip += instr_len;
121 static int handle_mov_from_cr3(struct guest_info * info, v3_reg_t * cr3_reg) {
124 if (info->shdw_pg_mode == SHADOW_PAGING) {
126 if ((v3_get_vm_cpu_mode(info) == LONG) ||
127 (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
129 *cr3_reg = (uint64_t)info->shdw_pg_state.guest_cr3;
131 *cr3_reg = (uint32_t)info->shdw_pg_state.guest_cr3;
135 PrintError("Unhandled paging mode\n");
140 vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
141 info->rip += instr_len;
146 static int handle_mov_to_cr0(struct guest_info * info, v3_reg_t * new_cr0) {
147 struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
148 struct cr0_32 * shdw_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
149 struct cr0_32 * new_shdw_cr0 = (struct cr0_32 *)new_cr0;
150 struct vmx_data * vmx_info = (struct vmx_data *)info->vmm_data;
151 uint_t paging_transition = 0;
155 PrintDebug("Old shadow CR0: 0x%x, New shadow CR0: 0x%x\n",
156 (uint32_t)info->shdw_pg_state.guest_cr0, (uint32_t)*new_cr0);
159 if (new_shdw_cr0->pe != shdw_cr0->pe) {
161 PrintDebug("Guest CR0: 0x%x\n", *(uint32_t *)guest_cr0);
162 PrintDebug("Old shadow CR0: 0x%x\n", *(uint32_t *)shdw_cr0);
163 PrintDebug("New shadow CR0: 0x%x\n", *(uint32_t *)new_shdw_cr0);
165 if (v3_vmxassist_ctx_switch(info) != 0) {
166 PrintError("Unable to execute VMXASSIST context switch!\n");
170 v3_load_vmcs_guest_state(info);
172 if (vmx_info->state == VMXASSIST_ENABLED) {
173 PrintDebug("Loading VMXASSIST at RIP: %p\n", (void *)info->rip);
175 PrintDebug("Leaving VMXASSIST and entering protected mode at RIP: %p\n",
179 // vmx assist sets the new cr values itself
183 if (new_shdw_cr0->pg != shdw_cr0->pg) {
184 paging_transition = 1;
187 // The shadow always reflects the new value
188 *shdw_cr0 = *new_shdw_cr0;
190 // We don't care about most of the flags, so lets go for it
191 // and set them to the guest values
192 *guest_cr0 = *shdw_cr0;
194 // Except PG, PE, and NE, which are always set
199 if (paging_transition) {
202 if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
203 struct efer_64 * guest_efer = (struct efer_64 *)&(info->ctrl_regs.efer);
205 if (guest_efer->lme == 1) {
206 // PrintDebug("Enabling long mode\n");
211 vmx_info->entry_ctrls.guest_ia32e = 1;
214 // PrintDebug("Activating Shadow Page tables\n");
216 if (v3_activate_shadow_pt(info) == -1) {
217 PrintError("Failed to activate shadow page tables\n");
221 } else if (v3_activate_passthrough_pt(info) == -1) {
222 PrintError("Failed to activate passthrough page tables\n");
227 // PE loads its own RIP, otherwise we need to skip ahead an instruction
229 vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
230 info->rip += instr_len;
235 static v3_reg_t * get_reg_ptr(struct guest_info * info, struct vmx_exit_cr_qual cr_qual) {
236 v3_reg_t * reg = NULL;
238 switch (cr_qual.gpr) {
240 reg = &(info->vm_regs.rax);
243 reg = &(info->vm_regs.rcx);
246 reg = &(info->vm_regs.rdx);
249 reg = &(info->vm_regs.rbx);
252 reg = &(info->vm_regs.rsp);
255 reg = &(info->vm_regs.rbp);
258 reg = &(info->vm_regs.rsi);
261 reg = &(info->vm_regs.rdi);
264 reg = &(info->vm_regs.r8);
267 reg = &(info->vm_regs.r9);
270 reg = &(info->vm_regs.r10);
273 reg = &(info->vm_regs.r11);
276 reg = &(info->vm_regs.r11);
279 reg = &(info->vm_regs.r13);
282 reg = &(info->vm_regs.r14);
285 reg = &(info->vm_regs.r15);