Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one
of the release branches instead. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The release branches can be checked out in the same way; see the example
below.
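
For example, to track a release branch (the branch name here is illustrative;
list the remote branches first to see which ones actually exist):

  git branch -r
  git checkout --track -b Release-1.0 origin/Release-1.0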


Commit: Completed preliminary host and guest state structure for VMLAUNCH. Added assembly...

File: palacios/src/palacios/vmx.c
/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Peter Dinda <pdinda@northwestern.edu> 
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Peter Dinda <pdinda@northwestern.edu>
 *         Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/vmx.h>
#include <palacios/vmcs.h>
#include <palacios/vmm.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_config.h>
#include <palacios/vmm_ctrl_regs.h>


//
//
// CRUFT
//
//

#if 0

#include <palacios/vmm_util.h>
#include <palacios/vmm_string.h>
#include <palacios/vmm_ctrl_regs.h>


extern int Launch_VM(ullong_t vmcsPtr, uint_t eip);

#define NUMPORTS 65536


#define VMXASSIST_INFO_PORT   0x0e9
#define ROMBIOS_PANIC_PORT    0x400
#define ROMBIOS_PANIC_PORT2   0x401
#define ROMBIOS_INFO_PORT     0x402
#define ROMBIOS_DEBUG_PORT    0x403


static uint_t GetLinearIP(struct VM * vm) {
  if (vm->state == VM_VMXASSIST_V8086_BIOS || vm->state == VM_VMXASSIST_V8086) { 
    return vm->vmcs.guestStateArea.cs.baseAddr + vm->vmcs.guestStateArea.rip;
  } else {
    return vm->vmcs.guestStateArea.rip;
  }
}


#define MAX_CODE 512
#define INSTR_OFFSET_START 17
#define NOP_SEQ_LEN        10
#define INSTR_OFFSET_END   (INSTR_OFFSET_START + NOP_SEQ_LEN - 1)
#define TEMPLATE_CODE_LEN  35

uint_t oldesp = 0;
uint_t myregs = 0;


extern uint_t VMCS_LAUNCH();
extern uint_t Init_VMCS_HostState();
extern uint_t Init_VMCS_GuestState();


extern int Get_CR2();
extern int vmRunning;


void DecodeCurrentInstruction(struct VM *vm, struct Instruction *inst)
{
  // this is a gruesome hack
  uint_t address = GetLinearIP(vm);
  uint_t length = vm->vmcs.exitInfoFields.instrLength;
  unsigned char *t = (unsigned char *) address;

  PrintTrace("DecodeCurrentInstruction: instruction is\n");
  PrintTraceMemDump(t, length);
  
  if (length == 3 && t[0] == 0x0f && t[1] == 0x22 && t[2] == 0xc0) { 
    // mov from eax to cr0
    // usually used to signal
    inst->type = VM_MOV_TO_CR0;
    inst->address = address;
    inst->size = length;
    inst->input1 = vm->registers.eax;
    inst->input2 = vm->vmcs.guestStateArea.cr0;
    inst->output = vm->registers.eax;
    PrintTrace("MOV FROM EAX TO CR0\n");
  } else {
    inst->type = VM_UNKNOWN_INST;
  }
}


static void ConfigureExits(struct VM *vm)
{
  CopyOutVMCSExecCtrlFields(&(vm->vmcs.execCtrlFields));

  vm->vmcs.execCtrlFields.pinCtrls |= 0
    // EXTERNAL_INTERRUPT_EXITING 
    | NMI_EXITING;
  vm->vmcs.execCtrlFields.procCtrls |= 0
      // INTERRUPT_WINDOWS_EXIT 
      | USE_TSC_OFFSETTING
      | HLT_EXITING
      | INVLPG_EXITING
      | MWAIT_EXITING
      | RDPMC_EXITING
      | RDTSC_EXITING
      | MOVDR_EXITING
      | UNCONDITION_IO_EXITING
      | MONITOR_EXITING
      | PAUSE_EXITING;

  CopyInVMCSExecCtrlFields(&(vm->vmcs.execCtrlFields));
  
  CopyOutVMCSExitCtrlFields(&(vm->vmcs.exitCtrlFields));

  vm->vmcs.exitCtrlFields.exitCtrls |= ACK_IRQ_ON_EXIT;
  
  CopyInVMCSExitCtrlFields(&(vm->vmcs.exitCtrlFields));


/*   VMCS_READ(VM_EXIT_CTRLS, &flags); */
/*   flags |= ACK_IRQ_ON_EXIT; */
/*   VMCS_WRITE(VM_EXIT_CTRLS, &flags); */
}


extern int RunVMM();
extern int SAFE_VM_LAUNCH();

int MyLaunch(struct VM *vm)
{
  ullong_t vmcs = (ullong_t)((uint_t) (vm->vmcsregion));
  uint_t entry_eip = vm->descriptor.entry_ip;
  uint_t exit_eip = vm->descriptor.exit_eip;
  uint_t guest_esp = vm->descriptor.guest_esp;
  uint_t f = 0xffffffff;
  uint_t tmpReg = 0;
  int ret;
  int vmm_ret = 0;

  PrintTrace("Guest ESP: 0x%x (%u)\n", guest_esp, guest_esp);

  exit_eip = (uint_t)RunVMM;

  PrintTrace("Clear\n");
  VMCS_CLEAR(vmcs);
  PrintTrace("Load\n");
  VMCS_LOAD(vmcs);


  PrintTrace("VMCS_LINK_PTR\n");
  VMCS_WRITE(VMCS_LINK_PTR, &f);
  PrintTrace("VMCS_LINK_PTR_HIGH\n");
  VMCS_WRITE(VMCS_LINK_PTR_HIGH, &f);


  SetCtrlBitsCorrectly(IA32_VMX_PINBASED_CTLS_MSR, PIN_VM_EXEC_CTRLS);
  SetCtrlBitsCorrectly(IA32_VMX_PROCBASED_CTLS_MSR, PROC_VM_EXEC_CTRLS);
  SetCtrlBitsCorrectly(IA32_VMX_EXIT_CTLS_MSR, VM_EXIT_CTRLS);
  SetCtrlBitsCorrectly(IA32_VMX_ENTRY_CTLS_MSR, VM_ENTRY_CTRLS);

  //
  //
  //SetCtrlBitsCorrectly(IA32_something, GUEST_IA32_DEBUGCTL);
  //SetCtrlBitsCorrectly(IA32_something, GUEST_IA32_DEBUGCTL_HIGH);


  /* Host state */
  PrintTrace("Setting up host state\n");
  SetCRBitsCorrectly(IA32_VMX_CR0_FIXED0_MSR, IA32_VMX_CR0_FIXED1_MSR, HOST_CR0);
  SetCRBitsCorrectly(IA32_VMX_CR4_FIXED0_MSR, IA32_VMX_CR4_FIXED1_MSR, HOST_CR4);
  ret = Init_VMCS_HostState();

  if (ret != VMX_SUCCESS) {
    if (ret == VMX_FAIL_VALID) {
      PrintTrace("Init Host state: VMCS FAILED WITH ERROR\n");
    } else {
      PrintTrace("Init Host state: Invalid VMCS\n");
    }
    return ret;
  }

  //  PrintTrace("HOST_RIP: %x (%u)\n", exit_eip, exit_eip);
  VMCS_WRITE(HOST_RIP, &exit_eip);

  /* Guest state */
  PrintTrace("Setting up guest state\n");
  PrintTrace("GUEST_RIP: %x (%u)\n", entry_eip, entry_eip);
  VMCS_WRITE(GUEST_RIP, &entry_eip);

  SetCRBitsCorrectly(IA32_VMX_CR0_FIXED0_MSR, IA32_VMX_CR0_FIXED1_MSR, GUEST_CR0);
  SetCRBitsCorrectly(IA32_VMX_CR4_FIXED0_MSR, IA32_VMX_CR4_FIXED1_MSR, GUEST_CR4);
  ret = Init_VMCS_GuestState();

  PrintTrace("InitGuestState returned\n");

  if (ret != VMX_SUCCESS) {
    if (ret == VMX_FAIL_VALID) {
      PrintTrace("Init Guest state: VMCS FAILED WITH ERROR\n");
    } else {
      PrintTrace("Init Guest state: Invalid VMCS\n");
    }
    return ret;
  }
  PrintTrace("GUEST_RSP: %x (%u)\n", guest_esp, (uint_t)guest_esp);
  VMCS_WRITE(GUEST_RSP, &guest_esp);

  //  tmpReg = 0x4100;
  tmpReg = 0xffffffff;
  if (VMCS_WRITE(EXCEPTION_BITMAP, &tmpReg) != VMX_SUCCESS) {
    PrintInfo("Bitmap error\n");
  }

  ConfigureExits(vm);

  PrintTrace("VMCS_LAUNCH\n");

  vm->state = VM_VMXASSIST_STARTUP;

  vmm_ret = SAFE_VM_LAUNCH();

  PrintTrace("VMM error %d\n", vmm_ret);

  return vmm_ret;
}


int VMLaunch(struct VMDescriptor *vm) 
{
  VMCS * vmcs = CreateVMCS();
  int rc;

  ullong_t vmcs_ptr = (ullong_t)((uint_t)vmcs);
  uint_t top = (vmcs_ptr >> 32) & 0xffffffff;
  uint_t bottom = (vmcs_ptr) & 0xffffffff;

  theVM.vmcsregion = vmcs;
  theVM.descriptor = *vm;

  PrintTrace("vmcs_ptr_top=%x vmcs_ptr_bottom=%x, eip=%x\n", top, bottom, vm->entry_ip);
  rc = MyLaunch(&theVM); // vmcs_ptr, vm->entry_ip, vm->exit_eip, vm->guest_esp);
  PrintTrace("Returned from MyLaunch();\n");
  return rc;
}


//
//
//  END CRUFT
//
//

#endif

static int update_vmcs_host_state(struct guest_info * info) {
    addr_t tmp;
    struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
    struct v3_msr tmp_msr;
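
    /* Mirror the host's current control registers, segment selectors, and
     * relevant MSRs into the VMCS host-state area; the CPU reloads these
     * values on every VM exit. */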
    __asm__ __volatile__ ( "movq %%cr0, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_CR0, tmp);


    __asm__ __volatile__ ( "movq %%cr3, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_CR3, tmp);


    __asm__ __volatile__ ( "movq %%cr4, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_CR4, tmp);


    vmcs_write(VMCS_HOST_GDTR_BASE, arch_data->host_state.gdtr.base);
    vmcs_write(VMCS_HOST_IDTR_BASE, arch_data->host_state.idtr.base);
    vmcs_write(VMCS_HOST_TR_BASE, arch_data->host_state.tr.base);

#define FS_BASE_MSR 0xc0000100
#define GS_BASE_MSR 0xc0000101

    // FS.BASE MSR
    v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);

    // GS.BASE MSR
    v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);


    __asm__ __volatile__ ( "movq %%cs, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%ss, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%ds, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%es, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_ES_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%fs, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%gs, %0; "
                           : "=q"(tmp)
                           :
    );
    vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);

    vmcs_write(VMCS_HOST_TR_SELECTOR, arch_data->host_state.tr.selector);


#define SYSENTER_CS_MSR 0x00000174
#define SYSENTER_ESP_MSR 0x00000175
#define SYSENTER_EIP_MSR 0x00000176

    // SYSENTER CS MSR
    v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_HOST_SYSENTER_CS, tmp_msr.value);

    // SYSENTER_ESP MSR
    v3_get_msr(SYSENTER_ESP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_HOST_SYSENTER_ESP, tmp_msr.value);

    // SYSENTER_EIP MSR
    v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);


    return 0;
}


static addr_t vmxon_ptr_phys;
extern int v3_vmx_exit_handler();
extern int v3_vmx_vmlaunch();


#if 0
// For the 32 bit reserved bit fields 
// MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR
static uint32_t sanitize_bits1(uint32_t msr_num, uint32_t val) {
    v3_msr_t mask_msr;

    PrintDebug("sanitize_bits1 (MSR:%x)\n", msr_num);

    v3_get_msr(msr_num, &mask_msr.hi, &mask_msr.lo);

    PrintDebug("MSR %x = %x : %x \n", msr_num, mask_msr.hi, mask_msr.lo);

    val &= mask_msr.lo;
    val &= mask_msr.hi;
  
    return val;
}


static addr_t sanitize_bits2(uint32_t msr_num0, uint32_t msr_num1, addr_t val) {
    v3_msr_t msr0, msr1;
    addr_t msr0_val, msr1_val;

    PrintDebug("sanitize_bits2 (MSR0=%x, MSR1=%x)\n", msr_num0, msr_num1);

    v3_get_msr(msr_num0, &msr0.hi, &msr0.lo);
    v3_get_msr(msr_num1, &msr1.hi, &msr1.lo);
  
    // This generates a mask that is the natural bit width of the CPU
    msr0_val = msr0.value;
    msr1_val = msr1.value;

    PrintDebug("MSR %x = %p, %x = %p \n", msr_num0, (void*)msr0_val, msr_num1, (void*)msr1_val);

    val &= msr0_val;
    val &= msr1_val;

    return val;
}

static int setup_base_host_state() {
    //   vmwrite(HOST_IDTR_BASE, 
}


#endif

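/* Wrap vmcs_write() so that a failed VMWRITE is logged and reported as a
 * single error bit that callers can OR together. */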
static int inline check_vmcs_write(vmcs_field_t field, addr_t val)
{
    int ret = 0;
    ret = vmcs_write(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
        return 1;
    }

    return 0;
}


static void inline translate_segment_access(struct v3_segment * v3_seg,
                                            struct vmcs_segment_access * access)
{
    access->type = v3_seg->type;
    access->desc_type = v3_seg->system;
    access->dpl = v3_seg->dpl;
    access->present = v3_seg->present;
    access->avail = v3_seg->avail;
    access->long_mode = v3_seg->long_mode;
    access->db = v3_seg->db;
    access->granularity = v3_seg->granularity;
}

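/* Copy each cached v3_segment (base, selector, limit, and access rights)
 * into the corresponding VMCS guest-state fields. */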
static int inline vmcs_write_guest_segments(struct guest_info * info)
{
    int ret = 0;  // check_vmcs_write() returns nonzero on failure, so OR the results together
    struct vmcs_segment_access access;

    memset(&access, 0, sizeof(access));

    /* CS Segment */
    translate_segment_access(&(info->segments.cs), &access);

    ret |= check_vmcs_write(VMCS_GUEST_CS_BASE, info->segments.cs.base);
    ret |= check_vmcs_write(VMCS_GUEST_CS_SELECTOR, info->segments.cs.selector);
    ret |= check_vmcs_write(VMCS_GUEST_CS_LIMIT, info->segments.cs.limit);
    ret |= check_vmcs_write(VMCS_GUEST_CS_ACCESS, access.value);

    /* SS Segment */
    translate_segment_access(&(info->segments.ss), &access);

    ret |= check_vmcs_write(VMCS_GUEST_SS_BASE, info->segments.ss.base);
    ret |= check_vmcs_write(VMCS_GUEST_SS_SELECTOR, info->segments.ss.selector);
    ret |= check_vmcs_write(VMCS_GUEST_SS_LIMIT, info->segments.ss.limit);
    ret |= check_vmcs_write(VMCS_GUEST_SS_ACCESS, access.value);

    /* DS Segment */
    translate_segment_access(&(info->segments.ds), &access);

    ret |= check_vmcs_write(VMCS_GUEST_DS_BASE, info->segments.ds.base);
    ret |= check_vmcs_write(VMCS_GUEST_DS_SELECTOR, info->segments.ds.selector);
    ret |= check_vmcs_write(VMCS_GUEST_DS_LIMIT, info->segments.ds.limit);
    ret |= check_vmcs_write(VMCS_GUEST_DS_ACCESS, access.value);


    /* ES Segment */
    translate_segment_access(&(info->segments.es), &access);

    ret |= check_vmcs_write(VMCS_GUEST_ES_BASE, info->segments.es.base);
    ret |= check_vmcs_write(VMCS_GUEST_ES_SELECTOR, info->segments.es.selector);
    ret |= check_vmcs_write(VMCS_GUEST_ES_LIMIT, info->segments.es.limit);
    ret |= check_vmcs_write(VMCS_GUEST_ES_ACCESS, access.value);

    /* FS Segment */
    translate_segment_access(&(info->segments.fs), &access);

    ret |= check_vmcs_write(VMCS_GUEST_FS_BASE, info->segments.fs.base);
    ret |= check_vmcs_write(VMCS_GUEST_FS_SELECTOR, info->segments.fs.selector);
    ret |= check_vmcs_write(VMCS_GUEST_FS_LIMIT, info->segments.fs.limit);
    ret |= check_vmcs_write(VMCS_GUEST_FS_ACCESS, access.value);

    /* GS Segment */
    translate_segment_access(&(info->segments.gs), &access);

    ret |= check_vmcs_write(VMCS_GUEST_GS_BASE, info->segments.gs.base);
    ret |= check_vmcs_write(VMCS_GUEST_GS_SELECTOR, info->segments.gs.selector);
    ret |= check_vmcs_write(VMCS_GUEST_GS_LIMIT, info->segments.gs.limit);
    ret |= check_vmcs_write(VMCS_GUEST_GS_ACCESS, access.value);

    /* LDTR segment */
    translate_segment_access(&(info->segments.ldtr), &access);

    ret |= check_vmcs_write(VMCS_GUEST_LDTR_BASE, info->segments.ldtr.base);
    ret |= check_vmcs_write(VMCS_GUEST_LDTR_SELECTOR, info->segments.ldtr.selector);
    ret |= check_vmcs_write(VMCS_GUEST_LDTR_LIMIT, info->segments.ldtr.limit);
    ret |= check_vmcs_write(VMCS_GUEST_LDTR_ACCESS, access.value);

    /* TR Segment */
    translate_segment_access(&(info->segments.tr), &access);

    ret |= check_vmcs_write(VMCS_GUEST_TR_BASE, info->segments.tr.base);
    ret |= check_vmcs_write(VMCS_GUEST_TR_SELECTOR, info->segments.tr.selector);
    ret |= check_vmcs_write(VMCS_GUEST_TR_LIMIT, info->segments.tr.limit);
    ret |= check_vmcs_write(VMCS_GUEST_TR_ACCESS, access.value);

    /* GDTR Segment */

    ret |= check_vmcs_write(VMCS_GUEST_GDTR_BASE, info->segments.gdtr.base);
    ret |= check_vmcs_write(VMCS_GUEST_GDTR_LIMIT, info->segments.gdtr.limit);

    /* IDTR Segment */
    ret |= check_vmcs_write(VMCS_GUEST_IDTR_BASE, info->segments.idtr.base);
    ret |= check_vmcs_write(VMCS_GUEST_IDTR_LIMIT, info->segments.idtr.limit);

    return ret;
}

static void setup_v8086_mode_for_boot(struct guest_info * vm_info)
{
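    /* Boot the guest in virtual-8086 mode (the VMXASSIST BIOS state),
     * starting at the real-mode reset vector f000:fff0. */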
    ((struct vmx_data *)vm_info->vmm_data)->state = VMXASSIST_V8086_BIOS;
    ((struct rflags *)&(vm_info->ctrl_regs.rflags))->vm = 1;
    ((struct rflags *)&(vm_info->ctrl_regs.rflags))->iopl = 3;

    vm_info->rip = 0xfff0;

    vm_info->segments.cs.selector = 0xf000;
    vm_info->segments.cs.base = 0xf000 << 4;
    vm_info->segments.cs.limit = 0xffff;
    vm_info->segments.cs.type = 3;
    vm_info->segments.cs.system = 1;
    vm_info->segments.cs.dpl = 3;
    vm_info->segments.cs.present = 1;
    vm_info->segments.cs.granularity = 0;

    int i = 0;
    struct v3_segment * seg_ptr = (struct v3_segment *)&(vm_info->segments);

    /* Set values for selectors ds through ss */
    for (i = 1; i < 6; i++) {
        seg_ptr[i].selector = 0x0000;
        seg_ptr[i].base = 0x00000;
        seg_ptr[i].type = 3;
        seg_ptr[i].system = 1;
        seg_ptr[i].dpl = 3;
        seg_ptr[i].present = 1;
        seg_ptr[i].granularity = 0;
    }

    for (i = 6; i < 10; i++) {
        seg_ptr[i].base = 0x0;
        seg_ptr[i].limit = 0xffff;
    }

    vm_info->segments.ldtr.selector = 0x0;
    vm_info->segments.ldtr.type = 2;
    vm_info->segments.ldtr.system = 0;
    vm_info->segments.ldtr.present = 1;
    vm_info->segments.ldtr.granularity = 0;

    vm_info->segments.tr.selector = 0x0;
    vm_info->segments.tr.type = 3;
    vm_info->segments.tr.system = 0;
    vm_info->segments.tr.present = 1;
    vm_info->segments.tr.granularity = 0;
}


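/* Allocate a zeroed page for a VMCS (or VMXON) region and stamp it with the
 * revision identifier from the VMX_BASIC MSR, as VMX requires. */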
static addr_t allocate_vmcs() 
{
    reg_ex_t msr;
    PrintDebug("Allocating page\n");
    struct vmcs_data * vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));

    memset(vmcs_page, 0, 4096);

    v3_get_msr(VMX_BASIC_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
    
    vmcs_page->revision = ((struct vmx_basic_msr *)&msr)->revision;
    PrintDebug("VMX Revision: 0x%x\n", vmcs_page->revision);

    return (addr_t)V3_PAddr((void *)vmcs_page);
}


static int init_vmcs_bios(struct guest_info * vm_info) 
{

    setup_v8086_mode_for_boot(vm_info);

    // TODO: Fix vmcs fields so they're 32-bit
    struct vmx_data * vmx_data = (struct vmx_data *)vm_info->vmm_data;
    int vmx_ret;

    // Have to do a whole lot of flag setting here
    PrintDebug("Clearing VMCS\n");
    vmx_ret = vmcs_clear(vmx_data->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMCLEAR failed\n");
        return -1;
    }

    PrintDebug("Loading VMCS\n");
    vmx_ret = vmcs_load(vmx_data->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMPTRLD failed\n");
        return -1;
    }

    struct v3_msr tmp_msr;

    /* Write VMX Control Fields */
    v3_get_msr(VMX_PINBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_PIN_CTRLS, tmp_msr.lo);

    v3_get_msr(VMX_PROCBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_PROC_CTRLS, tmp_msr.lo);

    v3_get_msr(VMX_EXIT_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_EXIT_CTRLS, tmp_msr.lo);

    v3_get_msr(VMX_ENTRY_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmcs_write(VMCS_ENTRY_CTRLS, tmp_msr.lo);

    /* Cache GDTR, IDTR, and TR in host struct */
    struct {
        uint16_t selector;
        addr_t   base;
    } __attribute__((packed)) tmp_seg;
    
    addr_t gdtr_base;

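    /* Note: SGDT and SIDT store a 16-bit limit followed by the base address,
     * so tmp_seg.selector actually receives the table limit here and only the
     * base is kept. STR, by contrast, stores the 16-bit TR selector. */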
    __asm__ __volatile__(
                         "sgdt (%0);"
                         :
                         : "q"(&tmp_seg)
                         : "memory"
                         );
    vmx_data->host_state.gdtr.base = gdtr_base = tmp_seg.base;

    __asm__ __volatile__(
                         "sidt (%0);"
                         :
                         : "q"(&tmp_seg)
                         : "memory"
                         );
    vmx_data->host_state.idtr.base = tmp_seg.base;

    __asm__ __volatile__(
                         "str (%0);"
                         :
                         : "q"(&tmp_seg)
                         : "memory"
                         );
    vmx_data->host_state.tr.selector = tmp_seg.selector;

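    /* The TR selector's low three bits are the TI flag and RPL; masking them
     * off leaves the byte offset of the TSS descriptor within the GDT. */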
    struct tss_descriptor desc = *(struct tss_descriptor *)(gdtr_base + (tmp_seg.selector & ~0x7));
    
    tmp_seg.base = (
                    (desc.base1) |
                    (desc.base2 << 16) |
                    (desc.base3 << 24) |
#ifdef __V3_64BIT__
                    ((uint64_t)desc.base4 << 32)
#else 
                    (0)
#endif
                );

    vmx_data->host_state.tr.base = tmp_seg.base;

    update_vmcs_host_state(vm_info);
    vmcs_write(VMCS_HOST_RIP, (addr_t)&v3_vmx_exit_handler);

    // Setup guest state 
    // TODO: This is not 32-bit safe!
    vmx_ret = 0;  // accumulate guest-state write failures
    vmx_ret |= check_vmcs_write(VMCS_GUEST_RIP, vm_info->rip);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR0, 0x60000010);

    vmx_ret |= vmcs_write_guest_segments(vm_info);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_RFLAGS, vm_info->ctrl_regs.rflags);
    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, 0xffffffffffffffffULL);

    if (vmx_ret != 0) {
        PrintError("Could not initialize VMCS segments\n");
        return -1;
    }

    v3_print_vmcs_guest_state();
    return 0;
}

int v3_vmx_handle_exit()
{
    PrintDebug("Exit taken!\n");
    return 0;
}

static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
    PrintDebug("Entering init_vmx_guest\n");
    v3_pre_config_guest(info, config_ptr);

    struct vmx_data * data = NULL;

    data = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));

    PrintDebug("vmx_data pointer: %p\n", (void *)data);

    PrintDebug("Allocating VMCS\n");
    data->vmcs_ptr_phys = allocate_vmcs();

    PrintDebug("VMCS pointer: %p\n", (void *)(data->vmcs_ptr_phys));

    info->vmm_data = data;

    PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);

    if (init_vmcs_bios(info) != 0) {
        PrintError("Could not initialize VMCS BIOS\n");
        return -1;
    }

    //v3_post_config_guest(info, config_ptr);

    return 0;
}


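/* A successful VMLAUNCH transfers control into the guest and does not return
 * here; reaching the code below the launch call means the launch failed. */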
static int start_vmx_guest(struct guest_info * info) {
    uint32_t error = 0;
    int ret = 0;

    PrintDebug("Attempting VMLAUNCH\n");

    ret = v3_vmx_vmlaunch();

    PrintDebug("Returned from VMLAUNCH\n");

    vmcs_read(VMCS_INSTR_ERR, &error, 4);

    if (ret != VMX_SUCCESS) {
        PrintError("VMLAUNCH failed: %d\n", error);
    }

    return -1;
}



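/* VMX support is advertised in CPUID.1:ECX; the IA32_FEATURE_CONTROL MSR
 * must also permit VMXON (it is normally configured and locked by the BIOS). */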
int v3_is_vmx_capable() {
    v3_msr_t feature_msr;
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);

    PrintDebug("ECX: %p\n", (void *)ecx);

    if (ecx & CPUID_1_ECX_VTXFLAG) {
        v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));
        
        PrintTrace("MSRREGlow: 0x%.8x\n", feature_msr.lo);

        if ((feature_msr.lo & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
            PrintDebug("VMX is locked -- enable in the BIOS\n");
            return 0;
        }

    } else {
        PrintDebug("VMX not supported on this cpu\n");
        return 0;
    }

    return 1;
}

static int has_vmx_nested_paging() {
    return 0;
}


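/* Enable VMX on this CPU: set CR4.VMXE (bit 13), verify CR4 satisfies the
 * VMX_CR4_FIXED0 requirements, set CR0.NE (bit 5), then execute VMXON on a
 * dedicated, revision-stamped region. */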
void v3_init_vmx(struct v3_ctrl_ops * vm_ops) {
    extern v3_cpu_arch_t v3_cpu_type;

    struct v3_msr tmp_msr;
    uint64_t ret = 0;

    v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    
    __asm__ __volatile__ (
                          "movq %%cr4, %%rbx;"
                          "orq  $0x00002000, %%rbx;"
                          "movq %%rbx, %0;"
                          : "=m"(ret) 
                          :
                          : "%rbx"
                          );

    if ((~ret & tmp_msr.value) == 0) {
        __asm__ __volatile__ (
                              "movq %0, %%cr4;"
                              :
                              : "q"(ret)
                              );
    } else {
        PrintError("Invalid CR4 Settings!\n");
        return;
    }

    __asm__ __volatile__ (
                          "movq %%cr0, %%rbx; "
                          "orq  $0x00000020, %%rbx; "
                          "movq %%rbx, %%cr0;"
                          :
                          :
                          : "%rbx"
                          );
    //
    // Should check and return Error here.... 

    // Setup VMXON Region
    vmxon_ptr_phys = allocate_vmcs();
    PrintDebug("VMXON pointer: 0x%p\n", (void *)vmxon_ptr_phys);

    if (v3_enable_vmx(vmxon_ptr_phys) == VMX_SUCCESS) {
        PrintDebug("VMX Enabled\n");
    } else {
        PrintError("VMX initialization failure\n");
        return;
    }


    if (has_vmx_nested_paging() == 1) {
        v3_cpu_type = V3_VMX_EPT_CPU;
    } else {
        v3_cpu_type = V3_VMX_CPU;
    }

    // Setup the VMX specific vmm operations
    vm_ops->init_guest = &init_vmx_guest;
    vm_ops->start_guest = &start_vmx_guest;
    vm_ops->has_nested_paging = &has_vmx_nested_paging;

}