Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


vmx_patch5
[palacios.git] / palacios / src / palacios / vmx.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Peter Dinda <pdinda@northwestern.edu> 
11  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
12  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Peter Dinda <pdinda@northwestern.edu>
16  *         Jack Lange <jarusl@cs.northwestern.edu>
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21
22
23 #include <palacios/vmx.h>
24 #include <palacios/vmcs.h>
25 #include <palacios/vmm.h>
26 #include <palacios/vmx_lowlevel.h>
27 #include <palacios/vmm_lowlevel.h>
28 #include <palacios/vmm_config.h>
29 #include <palacios/vmm_ctrl_regs.h>
30
31
32 // 
33 // 
34 // CRUFT
35 //
36 //
37
38 #if 0
39
40 #include <palacios/vmm_util.h>
41 #include <palacios/vmm_string.h>
42 #include <palacios/vmm_ctrl_regs.h>
43
44
45
46 extern int Launch_VM(ullong_t vmcsPtr, uint_t eip);
47
48 #define NUMPORTS 65536
49
50
51 #define VMXASSIST_INFO_PORT   0x0e9
52 #define ROMBIOS_PANIC_PORT    0x400
53 #define ROMBIOS_PANIC_PORT2   0x401
54 #define ROMBIOS_INFO_PORT     0x402
55 #define ROMBIOS_DEBUG_PORT    0x403
56
57
58
59 static uint_t GetLinearIP(struct VM * vm) {
60   if (vm->state == VM_VMXASSIST_V8086_BIOS || vm->state == VM_VMXASSIST_V8086) { 
61     return vm->vmcs.guestStateArea.cs.baseAddr + vm->vmcs.guestStateArea.rip;
62   } else {
63     return vm->vmcs.guestStateArea.rip;
64   }
65 }
66
67
68
69
70 #define MAX_CODE 512
71 #define INSTR_OFFSET_START 17
72 #define NOP_SEQ_LEN        10
73 #define INSTR_OFFSET_END   (INSTR_OFFSET_START + NOP_SEQ_LEN - 1)
74 #define TEMPLATE_CODE_LEN  35
75
76 uint_t oldesp = 0;
77 uint_t myregs = 0;
78
79
80
81
82
83 extern uint_t VMCS_LAUNCH();
84 extern uint_t Init_VMCS_HostState();
85 extern uint_t Init_VMCS_GuestState();
86
87
88
89
90 extern int Get_CR2();
91 extern int vmRunning;
92
93
94
95
96
97 void DecodeCurrentInstruction(struct VM *vm, struct Instruction *inst)
98 {
99   // this is a gruesome hack
100   uint_t address = GetLinearIP(vm);
101   uint_t length = vm->vmcs.exitInfoFields.instrLength;
102   unsigned char *t = (unsigned char *) address;
103
104
105   
106   PrintTrace("DecodeCurrentInstruction: instruction is\n");
107   PrintTraceMemDump(t,length);
108   
109   if (length==3 && t[0]==0x0f && t[1]==0x22 && t[2]==0xc0) { 
110     // mov from eax to cr0
111     // usually used to signal
112     inst->type=VM_MOV_TO_CR0;
113     inst->address=address;
114     inst->size=length;
115     inst->input1=vm->registers.eax;
116     inst->input2=vm->vmcs.guestStateArea.cr0;
117     inst->output=vm->registers.eax;
118     PrintTrace("MOV FROM EAX TO CR0\n");
119   } else {
120     inst->type=VM_UNKNOWN_INST;
121   }
122 }
123
124
125
126 static void ConfigureExits(struct VM *vm)
127 {
128   CopyOutVMCSExecCtrlFields(&(vm->vmcs.execCtrlFields));
129
130   vm->vmcs.execCtrlFields.pinCtrls |= 0 
131     // EXTERNAL_INTERRUPT_EXITING 
132     | NMI_EXITING;
133   vm->vmcs.execCtrlFields.procCtrls |= 0
134       // INTERRUPT_WINDOWS_EXIT 
135       | USE_TSC_OFFSETTING
136       | HLT_EXITING  
137       | INVLPG_EXITING           
138       | MWAIT_EXITING            
139       | RDPMC_EXITING           
140       | RDTSC_EXITING         
141       | MOVDR_EXITING         
142       | UNCONDITION_IO_EXITING
143       | MONITOR_EXITING       
144       | PAUSE_EXITING         ;
145
146   CopyInVMCSExecCtrlFields(&(vm->vmcs.execCtrlFields));
147   
148   CopyOutVMCSExitCtrlFields(&(vm->vmcs.exitCtrlFields));
149
150   vm->vmcs.exitCtrlFields.exitCtrls |= ACK_IRQ_ON_EXIT;
151   
152   CopyInVMCSExitCtrlFields(&(vm->vmcs.exitCtrlFields));
153
154
155 /*   VMCS_READ(VM_EXIT_CTRLS, &flags); */
156 /*   flags |= ACK_IRQ_ON_EXIT; */
157 /*   VMCS_WRITE(VM_EXIT_CTRLS, &flags); */
158 }
159
160
161 extern int RunVMM();
162 extern int SAFE_VM_LAUNCH();
163
164 int MyLaunch(struct VM *vm)
165 {
166   ullong_t vmcs = (ullong_t)((uint_t) (vm->vmcsregion));
167   uint_t entry_eip = vm->descriptor.entry_ip;
168   uint_t exit_eip = vm->descriptor.exit_eip;
169   uint_t guest_esp = vm->descriptor.guest_esp;
170   uint_t f = 0xffffffff;
171   uint_t tmpReg = 0;
172   int ret;
173   int vmm_ret = 0;
174
175   PrintTrace("Guest ESP: 0x%x (%u)\n", guest_esp, guest_esp);
176
177   exit_eip = (uint_t)RunVMM;
178
179   PrintTrace("Clear\n");
180   VMCS_CLEAR(vmcs);
181   PrintTrace("Load\n");
182   VMCS_LOAD(vmcs);
183
184
185   PrintTrace("VMCS_LINK_PTR\n");
186   VMCS_WRITE(VMCS_LINK_PTR, &f);
187   PrintTrace("VMCS_LINK_PTR_HIGH\n");
188   VMCS_WRITE(VMCS_LINK_PTR_HIGH, &f);
189
190  
191   SetCtrlBitsCorrectly(IA32_VMX_PINBASED_CTLS_MSR, PIN_VM_EXEC_CTRLS);
192   SetCtrlBitsCorrectly(IA32_VMX_PROCBASED_CTLS_MSR, PROC_VM_EXEC_CTRLS);
193   SetCtrlBitsCorrectly(IA32_VMX_EXIT_CTLS_MSR, VM_EXIT_CTRLS);
194   SetCtrlBitsCorrectly(IA32_VMX_ENTRY_CTLS_MSR, VM_ENTRY_CTRLS);
195
196   //
197   //
198   //SetCtrlBitsCorrectly(IA32_something,GUEST_IA32_DEBUGCTL);
199   //SetCtrlBitsCorrectly(IA32_something,GUEST_IA32_DEBUGCTL_HIGH);
200
201
202   /* Host state */
203   PrintTrace("Setting up host state\n");
204   SetCRBitsCorrectly(IA32_VMX_CR0_FIXED0_MSR, IA32_VMX_CR0_FIXED1_MSR, HOST_CR0);
205   SetCRBitsCorrectly(IA32_VMX_CR4_FIXED0_MSR, IA32_VMX_CR4_FIXED1_MSR, HOST_CR4);
206   ret = Init_VMCS_HostState();
207
208   if (ret != VMX_SUCCESS) {
209     if (ret == VMX_FAIL_VALID) {
210       PrintTrace("Init Host state: VMCS FAILED WITH ERROR\n");
211     } else {
212       PrintTrace("Init Host state: Invalid VMCS\n");
213     }
214     return ret;
215   }
216
217   //  PrintTrace("HOST_RIP: %x (%u)\n", exit_eip, exit_eip);
218   VMCS_WRITE(HOST_RIP, &exit_eip);
219
220   /* Guest state */
221   PrintTrace("Setting up guest state\n");
222   PrintTrace("GUEST_RIP: %x (%u)\n", entry_eip, entry_eip);
223   VMCS_WRITE(GUEST_RIP, &entry_eip);
224
225   SetCRBitsCorrectly(IA32_VMX_CR0_FIXED0_MSR, IA32_VMX_CR0_FIXED1_MSR, GUEST_CR0);
226   SetCRBitsCorrectly(IA32_VMX_CR4_FIXED0_MSR, IA32_VMX_CR4_FIXED1_MSR, GUEST_CR4);
227   ret = Init_VMCS_GuestState();
228
229   PrintTrace("InitGuestState returned\n");
230
231   if (ret != VMX_SUCCESS) {
232     if (ret == VMX_FAIL_VALID) {
233       PrintTrace("Init Guest state: VMCS FAILED WITH ERROR\n");
234     } else {
235       PrintTrace("Init Guest state: Invalid VMCS\n");
236     }
237     return ret;
238   }
239   PrintTrace("GUEST_RSP: %x (%u)\n", guest_esp, (uint_t)guest_esp);
240   VMCS_WRITE(GUEST_RSP, &guest_esp);
241
242   //  tmpReg = 0x4100;
243   tmpReg = 0xffffffff;
244   if (VMCS_WRITE(EXCEPTION_BITMAP, &tmpReg) != VMX_SUCCESS) {
245     PrintInfo("Bitmap error\n");
246   }
247
248   ConfigureExits(vm);
249
250   PrintTrace("VMCS_LAUNCH\n");
251
252   vm->state=VM_VMXASSIST_STARTUP;
253
254   vmm_ret = SAFE_VM_LAUNCH();
255
256   PrintTrace("VMM error %d\n", vmm_ret);
257
258   return vmm_ret;
259 }
260
261
262
263   
264 int VMLaunch(struct VMDescriptor *vm) 
265 {
266   VMCS * vmcs = CreateVMCS();
267   int rc;
268
269   ullong_t vmcs_ptr = (ullong_t)((uint_t)vmcs);
270   uint_t top = (vmcs_ptr >> 32) & 0xffffffff;
271   uint_t bottom = (vmcs_ptr) & 0xffffffff;
272
273   theVM.vmcsregion = vmcs;
274   theVM.descriptor = *vm;
275
276   PrintTrace("vmcs_ptr_top=%x vmcs_ptr_bottom=%x, eip=%x\n", top, bottom, vm->entry_ip);
277   rc = MyLaunch(&theVM); // vmcs_ptr, vm->entry_ip, vm->exit_eip, vm->guest_esp);
278   PrintTrace("Returned from MyLaunch();\n");
279   return rc;
280 }
281
282
283
284
285 //
286 //
287 //  END CRUFT
288 //
289 //
290
291 #endif
292
/*
 * Populate the host-state area of the currently loaded VMCS from the
 * running host context: control registers (CR0/CR3/CR4), GDTR/IDTR
 * bases, FS/GS base MSRs, segment selectors, TR selector, and the
 * SYSENTER MSRs.
 *
 * NOTE(review): HOST_RIP and HOST_RSP are deliberately not written here
 * (see the trailing "// RIP / // RSP" markers); they must be set
 * elsewhere before a VM entry.  HOST_TR_BASE is also unresolved (see
 * the commented-out STR block below).
 *
 * Always returns 0.
 */
static int update_vmcs_host_state(struct guest_info * info) {
    addr_t tmp;

    /* Receives the output of SGDT/SIDT: a 16-bit limit immediately
       followed by the table base address.  Packed so the layout matches
       what the hardware instructions store. */
    struct {
        uint16_t limit;
        addr_t base;
    } __attribute__((packed)) tmp_seg;


    struct v3_msr tmp_msr;

    /* Host CR0 */
    __asm__ __volatile__ ( "movq    %%cr0, %0; "                
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_CR0, tmp);


    /* Host CR3 (host page-table root) */
    __asm__ __volatile__ ( "movq %%cr3, %0; "           
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_CR3, tmp);


    /* Host CR4 */
    __asm__ __volatile__ ( "movq %%cr4, %0; "           
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_CR4, tmp);




    /* Host GDTR base, captured via SGDT into tmp_seg. */
    __asm__ __volatile__ ("sgdt (%0); "
                          : 
                          :"q"(&tmp_seg)
                          : "memory"
                          );
    vmcs_write(VMCS_HOST_GDTR_BASE, tmp_seg.base);


    /* Host IDTR base, captured via SIDT into tmp_seg. */
    __asm__ __volatile__ ("sidt (%0); "
                          : 
                          :"q"(&tmp_seg)
                          : "memory"
                  );
    vmcs_write(VMCS_HOST_IDTR_BASE, tmp_seg.base);

    /* How do we handle this...?
    __asm__ __volatile__ ("str (%0); "
                          : 
                          :"q"(&tmp_seg)
                          : "memory"
                          );
    vmcs_write(VMCS_HOST_TR_BASE, tmp_seg.base);
    */

#define FS_BASE_MSR 0xc0000100
#define GS_BASE_MSR 0xc0000101

    // FS.BASE MSR
    v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);    

    // GS.BASE MSR
    v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);    


    /* Host segment selectors.  Each MOV from a segment register loads
       the 16-bit selector (zero-extended) into tmp. */

    __asm__ __volatile__ ( "movq %%cs, %0; "            
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%ss, %0; "            
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%ds, %0; "            
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%es, %0; "            
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_ES_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%fs, %0; "            
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%gs, %0; "            
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);

    /* Task register selector via STR. */
    __asm__ __volatile__ ( "str %0; "           
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_TR_SELECTOR, tmp);


#define SYSENTER_CS_MSR 0x00000174
#define SYSENTER_ESP_MSR 0x00000175
#define SYSENTER_EIP_MSR 0x00000176

   // SYSENTER CS MSR
    v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_HOST_SYSENTER_CS, tmp_msr.value);

    // SYSENTER_ESP MSR
    v3_get_msr(SYSENTER_ESP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_HOST_SYSENTER_ESP, tmp_msr.value);

    // SYSENTER_EIP MSR
    v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);


    // RIP
    // RSP

    return 0;
}
429
430
431
432
433
434 static addr_t vmxon_ptr_phys;
435
436
437 #if 0
438 // For the 32 bit reserved bit fields 
439 // MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR
440 static uint32_t sanitize_bits1(uint32_t msr_num, uint32_t val) {
441     v3_msr_t mask_msr;
442
443     PrintDebug("sanitize_bits1 (MSR:%x)\n", msr_num);
444
445     v3_get_msr(msr_num, &mask_msr.hi, &mask_msr.lo);
446
447     PrintDebug("MSR %x = %x : %x \n", msr_num, mask_msr.hi, mask_msr.lo);
448
449     val &= mask_msr.lo;
450     val &= mask_msr.hi;
451   
452     return val;
453 }
454
455
456
457 static addr_t sanitize_bits2(uint32_t msr_num0, uint32_t msr_num1, addr_t val) {
458     v3_msr_t msr0, msr1;
459     addr_t msr0_val, msr1_val;
460
461     PrintDebug("sanitize_bits2 (MSR0=%x, MSR1=%x)\n", msr_num0, msr_num1);
462
463     v3_get_msr(msr_num0, &msr0.hi, &msr0.lo);
464     v3_get_msr(msr_num1, &msr1.hi, &msr1.lo);
465   
466     // This generates a mask that is the natural bit width of the CPU
467     msr0_val = msr0.value;
468     msr1_val = msr1.value;
469
470     PrintDebug("MSR %x = %p, %x = %p \n", msr_num0, (void*)msr0_val, msr_num1, (void*)msr1_val);
471
472     val &= msr0_val;
473     val &= msr1_val;
474
475     return val;
476 }
477
478 static int setup_base_host_state() {
479     
480
481
482     //   vmwrite(HOST_IDTR_BASE, 
483
484
485 }
486
487
488 #endif
489
490 static void setup_v8086_mode_for_boot(struct guest_info* vm_info)
491 {
492
493     ((struct vmx_data *)vm_info->vmm_data)->state = VMXASSIST_V8086_BIOS;
494     ((struct rflags *)&(vm_info->ctrl_regs.rflags))->vm = 1;
495     ((struct rflags *)&(vm_info->ctrl_regs.rflags))->iopl = 3;
496
497     vm_info->rip = 0xfff0;
498
499     vm_info->segments.cs.selector = 0xf000;
500     vm_info->segments.cs.base = 0xf000 << 4;
501     vm_info->segments.cs.limit = 0xffff;
502     vm_info->segments.cs.type = 3;
503     vm_info->segments.cs.system = 1;
504     vm_info->segments.cs.dpl = 3;
505     vm_info->segments.cs.present = 1;
506     vm_info->segments.cs.granularity = 0;
507
508     int i = 0;
509     struct v3_segment * seg_ptr = (struct v3_segment *)&(vm_info->segments);
510
511     /* Set values for selectors ds through ss */
512     for(i = 1; i < 6 ; i++) {
513         seg_ptr[i].selector = 0x0000;
514         seg_ptr[i].base = 0x00000;
515         seg_ptr[i].type = 3;
516         seg_ptr[i].system = 1;
517         seg_ptr[i].dpl = 3;
518         seg_ptr[i].present = 1;
519         seg_ptr[i].granularity = 0;
520     }
521 }
522
523
524 static addr_t allocate_vmcs() 
525 {
526     reg_ex_t msr;
527     PrintDebug("Allocating page\n");
528     struct vmcs_data * vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));
529
530
531     memset(vmcs_page, 0, 4096);
532
533     v3_get_msr(VMX_BASIC_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
534     
535     vmcs_page->revision = ((struct vmx_basic_msr*)&msr)->revision;
536     PrintDebug("VMX Revision: 0x%x\n",vmcs_page->revision);
537
538     return (addr_t)V3_PAddr((void*)vmcs_page);
539 }
540
541
542
/*
 * Initialize the guest's initial architectural state so it boots the
 * way real hardware does: v8086 mode at the BIOS reset vector
 * (delegated to setup_v8086_mode_for_boot()).
 */
static void init_vmcs_bios(struct guest_info * vm_info) 
{

    setup_v8086_mode_for_boot(vm_info);

    // TODO: Fix vmcs fields so they're 32-bit
   
}
551
552
553
554 static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
555     PrintDebug("Entering init_vmx_guest\n");
556     v3_pre_config_guest(info, config_ptr);
557
558     struct vmx_data* data;
559
560     data = (struct vmx_data*)V3_Malloc(sizeof(struct vmx_data));
561     PrintDebug("vmx_data pointer: %p\n",(void*)data);
562
563     PrintDebug("Allocating VMCS\n");
564     data->vmcs_ptr_phys = allocate_vmcs();
565     PrintDebug("VMCS pointer: %p\n",(void*)data->vmcs_ptr_phys);
566
567     info->vmm_data = (void *)data;
568
569     PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);
570     init_vmcs_bios(info);
571
572  //   v3_post_config_guest(info, config_ptr);
573
574     return 0;
575 }
576
577
578 static int inline check_vmcs_write(vmcs_field_t field, addr_t val)
579 {
580     int ret = 0;
581     ret = vmcs_write(field,val);
582
583     if (ret != VMX_SUCCESS) {
584         PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
585         return 1;
586     }
587
588     return 0;
589 }
590
591 static void inline translate_segment_access(struct v3_segment * v3_seg,  
592                                                      struct vmcs_segment_access * access)
593 {
594     access->type = v3_seg->type;
595     access->desc_type = v3_seg->system;
596     access->dpl = v3_seg->dpl;
597     access->present = v3_seg->present;
598     access->avail = v3_seg->avail;
599     access->long_mode = v3_seg->long_mode;
600     access->db = v3_seg->db;
601     access->granularity = v3_seg->granularity;
602 }
603
604 static int inline vmcs_write_guest_segments(struct guest_info* info)
605 {
606     int ret = 0;
607     struct vmcs_segment_access access;
608
609     /* CS Segment */
610     translate_segment_access(&(info->segments.cs), &access);
611
612     ret &= check_vmcs_write(VMCS_GUEST_CS_BASE, info->segments.cs.base);
613     ret &= check_vmcs_write(VMCS_GUEST_CS_SELECTOR, info->segments.cs.selector);
614     ret &= check_vmcs_write(VMCS_GUEST_CS_LIMIT, info->segments.cs.limit);
615     ret &= check_vmcs_write(VMCS_GUEST_CS_ACCESS, access.value);
616
617     /* SS Segment */
618     translate_segment_access(&(info->segments.ss), &access);
619
620     ret &= check_vmcs_write(VMCS_GUEST_SS_BASE, info->segments.ss.base);
621     ret &= check_vmcs_write(VMCS_GUEST_SS_SELECTOR, info->segments.ss.selector);
622     ret &= check_vmcs_write(VMCS_GUEST_SS_LIMIT, info->segments.ss.limit);
623     ret &= check_vmcs_write(VMCS_GUEST_SS_ACCESS, access.value);
624
625     /* DS Segment */
626     translate_segment_access(&(info->segments.ds), &access);
627
628     ret &= check_vmcs_write(VMCS_GUEST_DS_BASE, info->segments.ds.base);
629     ret &= check_vmcs_write(VMCS_GUEST_DS_SELECTOR, info->segments.ds.selector);
630     ret &= check_vmcs_write(VMCS_GUEST_DS_LIMIT, info->segments.ds.limit);
631     ret &= check_vmcs_write(VMCS_GUEST_DS_ACCESS, access.value);
632
633
634     /* ES Segment */
635     translate_segment_access(&(info->segments.es), &access);
636
637     ret &= check_vmcs_write(VMCS_GUEST_ES_BASE, info->segments.es.base);
638     ret &= check_vmcs_write(VMCS_GUEST_ES_SELECTOR, info->segments.es.selector);
639     ret &= check_vmcs_write(VMCS_GUEST_ES_LIMIT, info->segments.es.limit);
640     ret &= check_vmcs_write(VMCS_GUEST_ES_ACCESS, access.value);
641
642     /* FS Segment */
643     translate_segment_access(&(info->segments.fs), &access);
644
645     ret &= check_vmcs_write(VMCS_GUEST_FS_BASE, info->segments.fs.base);
646     ret &= check_vmcs_write(VMCS_GUEST_FS_SELECTOR, info->segments.fs.selector);
647     ret &= check_vmcs_write(VMCS_GUEST_FS_LIMIT, info->segments.fs.limit);
648     ret &= check_vmcs_write(VMCS_GUEST_FS_ACCESS, access.value);
649
650     /* GS Segment */
651     translate_segment_access(&(info->segments.gs), &access);
652
653     ret &= check_vmcs_write(VMCS_GUEST_GS_BASE, info->segments.gs.base);
654     ret &= check_vmcs_write(VMCS_GUEST_GS_SELECTOR, info->segments.gs.selector);
655     ret &= check_vmcs_write(VMCS_GUEST_GS_LIMIT, info->segments.gs.limit);
656     ret &= check_vmcs_write(VMCS_GUEST_GS_ACCESS, access.value);
657
658     return ret;
659 }
660
661 static int start_vmx_guest(struct guest_info* info) {
662     struct vmx_data * vmx_data = (struct vmx_data *)info->vmm_data;
663     int vmx_ret;
664
665     // Have to do a whole lot of flag setting here
666     PrintDebug("Clearing VMCS\n");
667     vmx_ret = vmcs_clear(vmx_data->vmcs_ptr_phys);
668
669     if (vmx_ret != VMX_SUCCESS) {
670         PrintError("VMCLEAR failed\n");
671         return -1;
672     }
673
674     PrintDebug("Loading VMCS\n");
675     vmx_ret = vmcs_load(vmx_data->vmcs_ptr_phys);
676
677     if (vmx_ret != VMX_SUCCESS) {
678         PrintError("VMPTRLD failed\n");
679         return -1;
680     }
681
682
683     update_vmcs_host_state(info);
684
685     // Setup guest state 
686     // TODO: This is not 32-bit safe!
687     vmx_ret &= check_vmcs_write(VMCS_GUEST_RIP, info->rip);
688
689     vmx_ret &= vmcs_write_guest_segments(info);
690
691     if (vmx_ret != 0) {
692         PrintError("Could not initialize VMCS segments\n");
693         return -1;
694     }
695
696     v3_print_vmcs_guest_state();
697
698     return -1;
699 }
700
701
702
703
704
705
706 int v3_is_vmx_capable() {
707     v3_msr_t feature_msr;
708     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
709
710     v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);
711
712     PrintDebug("ECX: %p\n", (void*)ecx);
713
714     if (ecx & CPUID_1_ECX_VTXFLAG) {
715         v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));
716         
717         PrintTrace("MSRREGlow: 0x%.8x\n", feature_msr.lo);
718
719         if ((feature_msr.lo & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
720             PrintDebug("VMX is locked -- enable in the BIOS\n");
721             return 0;
722         }
723
724     } else {
725         PrintDebug("VMX not supported on this cpu\n");
726         return 0;
727     }
728
729     return 1;
730 }
731
/* Nested paging (EPT) support is not implemented for VMX yet, so
 * always report it as unavailable. */
static int has_vmx_nested_paging() {
    return 0;
}
735
736
737
/*
 * One-time VMX initialization for this CPU:
 *  - sets CR4 bit 13 (0x2000, CR4.VMXE) to allow VMX operation,
 *  - sets CR0 bit 5 (0x20, CR0.NE), required for VMXON,
 *  - allocates the VMXON region and enters VMX operation via
 *    v3_enable_vmx(),
 *  - records the detected CPU type and installs the VMX-specific
 *    guest-control callbacks on vm_ops.
 *
 * On VMXON failure it logs an error and returns without installing
 * any callbacks.
 */
void v3_init_vmx(struct v3_ctrl_ops * vm_ops) {
    extern v3_cpu_arch_t v3_cpu_type;

    
    /* Set CR4.VMXE (bit 13). */
    __asm__ __volatile__ (
                          "movq %%cr4, %%rbx; "
                          "orq  $0x00002000,%%rbx; "
                          "movq %%rbx, %%cr4;"
              :
              :
              : "%rbx"
                          );



    // Should check and return Error here.... 
    /* Set CR0.NE (bit 5). */
    __asm__ __volatile__ (
                          "movq %%cr0, %%rbx; "
                          "orq  $0x00000020,%%rbx; "
                          "movq %%rbx, %%cr0;"
              :
              :
              : "%rbx"
                          );

    // Setup VMXON Region
    vmxon_ptr_phys = allocate_vmcs();
    PrintDebug("VMXON pointer: 0x%p\n", (void*)vmxon_ptr_phys);

    if (v3_enable_vmx(vmxon_ptr_phys) == VMX_SUCCESS) {
        PrintDebug("VMX Enabled\n");
    } else {
        PrintError("VMX initialization failure\n");
        return;
    }
        

    if (has_vmx_nested_paging() == 1) {
        v3_cpu_type = V3_VMX_EPT_CPU;
    } else {
        v3_cpu_type = V3_VMX_CPU;
    }

    // Setup the VMX specific vmm operations
    vm_ops->init_guest = &init_vmx_guest;
    vm_ops->start_guest = &start_vmx_guest;
    vm_ops->has_nested_paging = &has_vmx_nested_paging;

}
786 }