/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm_mem.h>
#include <palacios/vmm.h>
#include <palacios/vmcb.h>
#include <palacios/vmm_decoder.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/svm.h>

#ifndef CONFIG_DEBUG_CTRL_REGS
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

static int handle_lmsw(struct guest_info * info, struct x86_instr * dec_instr);
static int handle_clts(struct guest_info * info, struct x86_instr * dec_instr);
static int handle_mov_to_cr0(struct guest_info * info, struct x86_instr * dec_instr);

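/* Intercept handlers for guest control-register and related MSR accesses.
 * CR0/CR3/CR4 reads and writes are decoded from the faulting instruction and
 * emulated against the guest-visible copies kept in shdw_pg_state, while the
 * real (shadow) registers seen by the hardware are updated as needed.
 * EFER and VM_CR are handled through the MSR hook interface further below.
 */
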
// First Attempt = 494 lines
// current = 106 lines
int v3_handle_cr0_write(struct guest_info * info) {
    uchar_t instr[15];
    int ret;
    struct x86_instr dec_instr;

    if (info->mem_mode == PHYSICAL_MEM) {
        ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
    } else {
        ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
    }

    if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
        PrintError("Could not decode instruction\n");
        return -1;
    }

    if (dec_instr.op_type == V3_OP_LMSW) {
        if (handle_lmsw(info, &dec_instr) == -1) {
            return -1;
        }
    } else if (dec_instr.op_type == V3_OP_MOV2CR) {
        if (handle_mov_to_cr0(info, &dec_instr) == -1) {
            return -1;
        }
    } else if (dec_instr.op_type == V3_OP_CLTS) {
        if (handle_clts(info, &dec_instr) == -1) {
            return -1;
        }
    } else {
        PrintError("Unhandled opcode in handle_cr0_write\n");
        return -1;
    }

    info->rip += dec_instr.instr_length;

    return 0;
}

// The CR0 register only has flags in the low 32 bits.
// The hardware does a format check to make sure the high bits are zero,
// so we can ignore the high 32 bits here.
static int handle_mov_to_cr0(struct guest_info * info, struct x86_instr * dec_instr) {
    struct cr0_32 * shadow_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
    struct cr0_32 * new_cr0 = (struct cr0_32 *)(dec_instr->src_operand.operand);
    struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
    uint_t paging_transition = 0;

    PrintDebug("MOV2CR0 (MODE=%s)\n", v3_cpu_mode_to_str(info->cpu_mode));
    PrintDebug("OperandVal = %x, length=%d\n", *(uint_t *)new_cr0, dec_instr->src_operand.size);
    PrintDebug("Old CR0=%x\n", *(uint_t *)shadow_cr0);
    PrintDebug("Old Guest CR0=%x\n", *(uint_t *)guest_cr0);

    // Detect whether this write toggles paging (PG), which requires new page tables
    if (guest_cr0->pg != new_cr0->pg) {
        paging_transition = 1;
    }

    // The guest always sees the value it wrote
    *guest_cr0 = *new_cr0;

    // The ET (extension type) bit must always read as 1
    guest_cr0->et = 1;

    // Set the shadow register to catch non-virtualized flags
    *shadow_cr0 = *guest_cr0;

    // Paging is always enabled in the copy the hardware actually uses
    shadow_cr0->pg = 1;

    if (guest_cr0->pg == 0) {
        // If paging is not enabled by the guest, then we always enable write-protect to catch memory hooks
        shadow_cr0->wp = 1;
    }

    // Was there a paging transition?
    // If so, we need to change the page tables
    if (paging_transition) {
        if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
            struct efer_64 * guest_efer = (struct efer_64 *)&(info->shdw_pg_state.guest_efer);
            struct efer_64 * shadow_efer = (struct efer_64 *)&(info->ctrl_regs.efer);

            // If the guest has set EFER.LME, enabling paging activates long mode (LMA)
            if (guest_efer->lme == 1) {
                PrintDebug("Enabling Long Mode\n");
                guest_efer->lma = 1;

                shadow_efer->lma = 1;
                shadow_efer->lme = 1;

                PrintDebug("New EFER %p\n", (void *)*(addr_t *)(shadow_efer));
            }

            PrintDebug("Activating Shadow Page Tables\n");

            if (v3_activate_shadow_pt(info) == -1) {
                PrintError("Failed to activate shadow page tables\n");
                return -1;
            }
        } else {
            if (v3_activate_passthrough_pt(info) == -1) {
                PrintError("Failed to activate passthrough page tables\n");
                return -1;
            }
        }
    }

    PrintDebug("New Guest CR0=%x\n", *(uint_t *)guest_cr0);
    PrintDebug("New CR0=%x\n", *(uint_t *)shadow_cr0);

    return 0;
}

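// CLTS clears the Task Switched (TS) flag in CR0. With shadow paging the
// guest works against its own virtualized CR0 copy, so TS is cleared in
// both the real control register and the guest-visible one.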
static int handle_clts(struct guest_info * info, struct x86_instr * dec_instr) {
    struct cr0_32 * real_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
    real_cr0->ts = 0;

    if (info->shdw_pg_mode == SHADOW_PAGING) {
        struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
        guest_cr0->ts = 0;
    }

    return 0;
}

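// LMSW only modifies the low 4 bits of CR0 (PE, MP, EM, TS), so just the
// low nibble of the operand is copied through.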
static int handle_lmsw(struct guest_info * info, struct x86_instr * dec_instr) {
    struct cr0_real * real_cr0 = (struct cr0_real *)&(info->ctrl_regs.cr0);
    // XED is a mess, and basically reverses the operand order for an LMSW
    struct cr0_real * new_cr0 = (struct cr0_real *)(dec_instr->dst_operand.operand);
    uchar_t new_cr0_val;

    PrintDebug("LMSW\n");

    new_cr0_val = (*(char *)(new_cr0)) & 0x0f;

    PrintDebug("OperandVal = %x\n", new_cr0_val);

    // We can just copy the new value through;
    // we don't need to virtualize the lower 4 bits
    PrintDebug("Old CR0=%x\n", *(uint_t *)real_cr0);
    *(uchar_t *)real_cr0 &= 0xf0;
    *(uchar_t *)real_cr0 |= new_cr0_val;
    PrintDebug("New CR0=%x\n", *(uint_t *)real_cr0);

    // If shadow paging is enabled we push the changes to the virtualized copy of CR0
    if (info->shdw_pg_mode == SHADOW_PAGING) {
        struct cr0_real * guest_cr0 = (struct cr0_real *)&(info->shdw_pg_state.guest_cr0);

        PrintDebug("Old Guest CR0=%x\n", *(uint_t *)guest_cr0);
        *(uchar_t *)guest_cr0 &= 0xf0;
        *(uchar_t *)guest_cr0 |= new_cr0_val;
        PrintDebug("New Guest CR0=%x\n", *(uint_t *)guest_cr0);
    }

    return 0;
}

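// Reads of CR0 return the guest's virtualized copy when shadow paging is in
// use; with nested paging the real CR0 already holds the guest's own value.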
// First attempt = 253 lines
// current = 51 lines
int v3_handle_cr0_read(struct guest_info * info) {
    uchar_t instr[15];
    int ret;
    struct x86_instr dec_instr;

    if (info->mem_mode == PHYSICAL_MEM) {
        ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
    } else {
        ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
    }

    if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
        PrintError("Could not decode instruction\n");
        return -1;
    }

    if (dec_instr.op_type == V3_OP_MOVCR2) {
        PrintDebug("MOVCR2 (mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode));

        if ((v3_get_vm_cpu_mode(info) == LONG) ||
            (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
            struct cr0_64 * dst_reg = (struct cr0_64 *)(dec_instr.dst_operand.operand);

            if (info->shdw_pg_mode == SHADOW_PAGING) {
                struct cr0_64 * guest_cr0 = (struct cr0_64 *)&(info->shdw_pg_state.guest_cr0);
                *dst_reg = *guest_cr0;
            } else {
                struct cr0_64 * shadow_cr0 = (struct cr0_64 *)&(info->ctrl_regs.cr0);
                *dst_reg = *shadow_cr0;
            }

            PrintDebug("returned CR0: %p\n", (void *)*(addr_t *)dst_reg);
        } else {
            struct cr0_32 * dst_reg = (struct cr0_32 *)(dec_instr.dst_operand.operand);

            if (info->shdw_pg_mode == SHADOW_PAGING) {
                struct cr0_32 * guest_cr0 = (struct cr0_32 *)&(info->shdw_pg_state.guest_cr0);
                *dst_reg = *guest_cr0;
            } else {
                struct cr0_32 * shadow_cr0 = (struct cr0_32 *)&(info->ctrl_regs.cr0);
                *dst_reg = *shadow_cr0;
            }

            PrintDebug("returned CR0: %x\n", *(uint_t *)dst_reg);
        }

    } else if (dec_instr.op_type == V3_OP_SMSW) {
        struct cr0_real * shadow_cr0 = (struct cr0_real *)&(info->ctrl_regs.cr0);
        struct cr0_real * dst_reg = (struct cr0_real *)(dec_instr.dst_operand.operand);
        char cr0_val = *(char *)shadow_cr0 & 0x0f;

        PrintDebug("SMSW\n");

        // The lower 4 bits of the guest/shadow CR0 are mapped through;
        // we can treat nested and shadow paging the same here
        *(char *)dst_reg &= 0xf0;
        *(char *)dst_reg |= cr0_val;

    } else {
        PrintError("Unhandled opcode in handle_cr0_read\n");
        return -1;
    }

    info->rip += dec_instr.instr_length;

    return 0;
}

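// CR3 writes: with shadow paging the value the guest writes is recorded in
// shdw_pg_state and, if guest paging is on, a matching set of shadow page
// tables is activated. With nested paging the write passes straight through
// to the real CR3.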
// First Attempt = 256 lines
// current = 65 lines
int v3_handle_cr3_write(struct guest_info * info) {
    uchar_t instr[15];
    int ret;
    struct x86_instr dec_instr;

    if (info->mem_mode == PHYSICAL_MEM) {
        ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
    } else {
        ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
    }

    if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
        PrintError("Could not decode instruction\n");
        return -1;
    }

    if (dec_instr.op_type == V3_OP_MOV2CR) {
        PrintDebug("MOV2CR3 (cpu_mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode));

        if (info->shdw_pg_mode == SHADOW_PAGING) {
            PrintDebug("Old Shadow CR3=%p; Old Guest CR3=%p\n",
                       (void *)(addr_t)(info->ctrl_regs.cr3),
                       (void *)(addr_t)(info->shdw_pg_state.guest_cr3));

            // We update the guest CR3
            if (info->cpu_mode == LONG) {
                struct cr3_64 * new_cr3 = (struct cr3_64 *)(dec_instr.src_operand.operand);
                struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3);
                *guest_cr3 = *new_cr3;
            } else {
                struct cr3_32 * new_cr3 = (struct cr3_32 *)(dec_instr.src_operand.operand);
                struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
                *guest_cr3 = *new_cr3;
            }

            // If paging is enabled in the guest then we need to change the shadow page tables
            if (info->mem_mode == VIRTUAL_MEM) {
                if (v3_activate_shadow_pt(info) == -1) {
                    PrintError("Failed to activate 32 bit shadow page table\n");
                    return -1;
                }
            }

            PrintDebug("New Shadow CR3=%p; New Guest CR3=%p\n",
                       (void *)(addr_t)(info->ctrl_regs.cr3),
                       (void *)(addr_t)(info->shdw_pg_state.guest_cr3));

        } else if (info->shdw_pg_mode == NESTED_PAGING) {
            // This is just a passthrough operation which we probably don't need here
            if (info->cpu_mode == LONG) {
                struct cr3_64 * new_cr3 = (struct cr3_64 *)(dec_instr.src_operand.operand);
                struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
                *guest_cr3 = *new_cr3;
            } else {
                struct cr3_32 * new_cr3 = (struct cr3_32 *)(dec_instr.src_operand.operand);
                struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3);
                *guest_cr3 = *new_cr3;
            }
        }

    } else {
        PrintError("Unhandled opcode in handle_cr3_write\n");
        return -1;
    }

    info->rip += dec_instr.instr_length;

    return 0;
}

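// CR3 reads mirror the write path: the guest gets back the CR3 value it last
// wrote (its own page table base), never the shadow CR3 the hardware is using.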
// first attempt = 156 lines
// current = 36 lines
int v3_handle_cr3_read(struct guest_info * info) {
    uchar_t instr[15];
    int ret;
    struct x86_instr dec_instr;

    if (info->mem_mode == PHYSICAL_MEM) {
        ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
    } else {
        ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
    }

    if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
        PrintError("Could not decode instruction\n");
        return -1;
    }

    if (dec_instr.op_type == V3_OP_MOVCR2) {
        PrintDebug("MOVCR2 (mode=%s)\n", v3_cpu_mode_to_str(info->cpu_mode));

        if (info->shdw_pg_mode == SHADOW_PAGING) {

            if ((v3_get_vm_cpu_mode(info) == LONG) ||
                (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
                struct cr3_64 * dst_reg = (struct cr3_64 *)(dec_instr.dst_operand.operand);
                struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->shdw_pg_state.guest_cr3);
                *dst_reg = *guest_cr3;
            } else {
                struct cr3_32 * dst_reg = (struct cr3_32 *)(dec_instr.dst_operand.operand);
                struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->shdw_pg_state.guest_cr3);
                *dst_reg = *guest_cr3;
            }

        } else if (info->shdw_pg_mode == NESTED_PAGING) {
            // This is just a passthrough operation which we probably don't need here
            if ((v3_get_vm_cpu_mode(info) == LONG) ||
                (v3_get_vm_cpu_mode(info) == LONG_32_COMPAT)) {
                struct cr3_64 * dst_reg = (struct cr3_64 *)(dec_instr.dst_operand.operand);
                struct cr3_64 * guest_cr3 = (struct cr3_64 *)&(info->ctrl_regs.cr3);
                *dst_reg = *guest_cr3;
            } else {
                struct cr3_32 * dst_reg = (struct cr3_32 *)(dec_instr.dst_operand.operand);
                struct cr3_32 * guest_cr3 = (struct cr3_32 *)&(info->ctrl_regs.cr3);
                *dst_reg = *guest_cr3;
            }
        }

    } else {
        PrintError("Unhandled opcode in handle_cr3_read\n");
        return -1;
    }

    info->rip += dec_instr.instr_length;

    return 0;
}

// We don't need to virtualize CR4; all we need is to detect the activation of PAE
int v3_handle_cr4_read(struct guest_info * info) {
    // PrintError("CR4 Read not handled\n");
    // Do nothing
    return 0;
}

int v3_handle_cr4_write(struct guest_info * info) {
    uchar_t instr[15];
    int ret;
    int flush_tlb = 0;
    struct x86_instr dec_instr;
    v3_cpu_mode_t cpu_mode = v3_get_vm_cpu_mode(info);

    if (info->mem_mode == PHYSICAL_MEM) {
        ret = v3_read_gpa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
    } else {
        ret = v3_read_gva_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
    }

    if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
        PrintError("Could not decode instruction\n");
        return -1;
    }

    if (dec_instr.op_type != V3_OP_MOV2CR) {
        PrintError("Invalid opcode in write to CR4\n");
        return -1;
    }

    // Check to see if we need to flush the TLB
    if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
        struct cr4_32 * new_cr4 = (struct cr4_32 *)(dec_instr.src_operand.operand);
        struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);

        // If PSE, PGE, or PAE have changed while PG (in any mode) is on,
        // the side effect is a TLB flush, which means we need to
        // toss the current shadow page tables too.
        // TODO - the PAE flag needs to be special cased
        if ((cr4->pse != new_cr4->pse) ||
            (cr4->pge != new_cr4->pge) ||
            (cr4->pae != new_cr4->pae)) {
            PrintDebug("Handling PSE/PGE/PAE -> TLBFlush case, flag set\n");
            flush_tlb = 1;
        }
    }

    if ((cpu_mode == PROTECTED) || (cpu_mode == PROTECTED_PAE)) {
        struct cr4_32 * new_cr4 = (struct cr4_32 *)(dec_instr.src_operand.operand);
        struct cr4_32 * cr4 = (struct cr4_32 *)&(info->ctrl_regs.cr4);

        PrintDebug("OperandVal = %x, length = %d\n", *(uint_t *)new_cr4, dec_instr.src_operand.size);
        PrintDebug("Old CR4=%x\n", *(uint_t *)cr4);

        if (info->shdw_pg_mode == SHADOW_PAGING) {
            if (v3_get_vm_mem_mode(info) == PHYSICAL_MEM) {

                if ((cr4->pae == 0) && (new_cr4->pae == 1)) {
                    PrintDebug("Creating PAE passthrough tables\n");

                    // create 32 bit PAE direct map page tables
                    if (v3_reset_passthrough_pts(info) == -1) {
                        PrintError("Could not create 32 bit PAE passthrough page tables\n");
                        return -1;
                    }

                    // reset cr3 to the new page tables
                    info->ctrl_regs.cr3 = *(addr_t *)&(info->direct_map_pt);

                } else if ((cr4->pae == 1) && (new_cr4->pae == 0)) {
                    // Create passthrough standard 32 bit page tables
                    PrintError("Switching from PAE to protected mode not supported\n");
                    return -1;
                }
            }
        }

        *cr4 = *new_cr4;
        PrintDebug("New CR4=%x\n", *(uint_t *)cr4);

    } else if ((cpu_mode == LONG) || (cpu_mode == LONG_32_COMPAT)) {
        struct cr4_64 * new_cr4 = (struct cr4_64 *)(dec_instr.src_operand.operand);
        struct cr4_64 * cr4 = (struct cr4_64 *)&(info->ctrl_regs.cr4);

        PrintDebug("Old CR4=%p\n", (void *)*(addr_t *)cr4);
        PrintDebug("New CR4=%p\n", (void *)*(addr_t *)new_cr4);

        if (new_cr4->pae == 0) {
            // PAE cannot be turned off in long mode; the guest should get a GPF
            PrintError("Cannot disable PAE in long mode, should send GPF\n");
            return -1;
        }

        *cr4 = *new_cr4;

    } else {
        PrintError("CR4 write not supported in CPU_MODE: %s\n", v3_cpu_mode_to_str(cpu_mode));
        return -1;
    }

    if (flush_tlb) {
        PrintDebug("Handling PSE/PGE/PAE -> TLBFlush (doing flush now!)\n");
        if (v3_activate_shadow_pt(info) == -1) {
            PrintError("Failed to activate shadow page tables when emulating TLB flush in handling cr4 write\n");
            return -1;
        }
    }

    info->rip += dec_instr.instr_length;

    return 0;
}

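// EFER and VM_CR are MSRs rather than control registers, so they are handled
// through the MSR hook interface. The guest reads back its own virtualized
// EFER copy, which keeps the bits Palacios manages (SVME, LMA) hidden.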
int v3_handle_efer_read(struct guest_info * core, uint_t msr, struct v3_msr * dst, void * priv_data) {
    PrintDebug("EFER Read HI=%x LO=%x\n", core->shdw_pg_state.guest_efer.hi, core->shdw_pg_state.guest_efer.lo);

    dst->value = core->shdw_pg_state.guest_efer.value;

    return 0;
}

// TODO: this is a disaster, we need to clean this up...
int v3_handle_efer_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
    //struct efer_64 * new_efer = (struct efer_64 *)&(src.value);
    struct efer_64 * shadow_efer = (struct efer_64 *)&(core->ctrl_regs.efer);
    struct v3_msr * guest_efer = &(core->shdw_pg_state.guest_efer);

    PrintDebug("EFER Write\n");
    PrintDebug("EFER Write Values: HI=%x LO=%x\n", src.hi, src.lo);
    //PrintDebug("Old EFER=%p\n", (void *)*(addr_t *)(shadow_efer));

    // We virtualize the guest's EFER to hide the SVME and LMA bits
    guest_efer->value = src.value;

    // Enable/Disable Syscall extensions (SCE) in the real EFER
    shadow_efer->sce = src.value & 0x1;

    return 0;
}

int v3_handle_vm_cr_read(struct guest_info * core, uint_t msr, struct v3_msr * dst, void * priv_data) {
    /* Tell the guest that the BIOS disabled SVM, that way it doesn't get
     * confused by the fact that CPUID reports SVM as available but it still
     * cannot be used
     */
    dst->value = SVM_VM_CR_MSR_lock | SVM_VM_CR_MSR_svmdis;
    PrintDebug("VM_CR Read HI=%x LO=%x\n", dst->hi, dst->lo);
    return 0;
}

int v3_handle_vm_cr_write(struct guest_info * core, uint_t msr, struct v3_msr src, void * priv_data) {
    PrintDebug("VM_CR Write\n");
    PrintDebug("VM_CR Write Values: HI=%x LO=%x\n", src.hi, src.lo);

    /* Writes to LOCK and SVMDIS are silently ignored (according to the spec);
     * other writes indicate the guest wants to use some feature we haven't
     * implemented
     */
    if (src.value & ~(SVM_VM_CR_MSR_lock | SVM_VM_CR_MSR_svmdis)) {
        PrintDebug("VM_CR write sets unsupported bits: HI=%x LO=%x\n", src.hi, src.lo);
        return -1;
    }

    return 0;
}