Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; see the example below.
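For example, to list the available remote branches and then track one of them (the branch name release-1.0 here is only illustrative):

  git branch -r
  git checkout --track -b release-1.0 origin/release-1.0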


Latest VMX work; it still has a bug in guest state that causes a #GP after launch.
palacios.git: palacios/src/palacios/vmx.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Peter Dinda <pdinda@northwestern.edu>
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Peter Dinda <pdinda@northwestern.edu>
 *         Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmx.h>
#include <palacios/vmcs.h>
#include <palacios/vmm.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_config.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vm_guest_mem.h>

static addr_t vmxon_ptr_phys;
extern int v3_vmx_exit_handler();
extern int v3_vmx_vmlaunch(struct v3_gprs * vm_regs);

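/* Wrapper around vmcs_write() that logs failures with the symbolic field
 * name and collapses the result to 0 (success) / 1 (failure), so callers
 * can OR many writes together and check the result once at the end. */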
static inline int check_vmcs_write(vmcs_field_t field, addr_t val)
{
    int ret = 0;
    ret = vmcs_write(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMWRITE error on %s: %d\n", v3_vmcs_field_to_str(field), ret);
        return 1;
    }

    return 0;
}

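/* Copy the access-rights fields of a Palacios segment descriptor into the
 * packed VMCS segment-access layout.  Selector, base, and limit are written
 * to the VMCS separately. */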
static inline void translate_segment_access(struct v3_segment * v3_seg,
                                            struct vmcs_segment_access * access)
{
    access->type = v3_seg->type;
    access->desc_type = v3_seg->system;
    access->dpl = v3_seg->dpl;
    access->present = v3_seg->present;
    access->avail = v3_seg->avail;
    access->long_mode = v3_seg->long_mode;
    access->db = v3_seg->db;
    access->granularity = v3_seg->granularity;
}

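/* Populate the host-state area of the current VMCS.  Control registers and
 * segment selectors are read live from the running host; GDTR/IDTR/TR values
 * come from the cached copies in vmx_data, and the FS/GS bases and SYSENTER
 * state are read from their MSRs. */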
static int update_vmcs_host_state(struct guest_info * info) {
    int vmx_ret = 0;
    addr_t tmp;
    struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
    struct v3_msr tmp_msr;

    __asm__ __volatile__ ( "movq %%cr0, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_CR0, tmp);


    __asm__ __volatile__ ( "movq %%cr3, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_CR3, tmp);


    __asm__ __volatile__ ( "movq %%cr4, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_CR4, tmp);


    vmx_ret |= check_vmcs_write(VMCS_HOST_GDTR_BASE, arch_data->host_state.gdtr.base);
    vmx_ret |= check_vmcs_write(VMCS_HOST_IDTR_BASE, arch_data->host_state.idtr.base);
    vmx_ret |= check_vmcs_write(VMCS_HOST_TR_BASE, arch_data->host_state.tr.base);

#define FS_BASE_MSR 0xc0000100
#define GS_BASE_MSR 0xc0000101

    // FS.BASE MSR
    v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);

    // GS.BASE MSR
    v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);


    __asm__ __volatile__ ( "movq %%cs, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%ss, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%ds, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%es, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_ES_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%fs, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%gs, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);

    vmx_ret |= check_vmcs_write(VMCS_HOST_TR_SELECTOR, arch_data->host_state.tr.selector);


#define SYSENTER_CS_MSR 0x00000174
#define SYSENTER_ESP_MSR 0x00000175
#define SYSENTER_EIP_MSR 0x00000176

    // SYSENTER CS MSR
    v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_CS, tmp_msr.lo);

    // SYSENTER_ESP MSR
    v3_get_msr(SYSENTER_ESP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_ESP, tmp_msr.value);

    // SYSENTER_EIP MSR
    v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);

    return vmx_ret;
}

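/* Populate the guest-state area of the current VMCS from the guest_info
 * struct: RIP/RSP, control registers, RFLAGS, debug state, the VMCS link
 * pointer, and all segment registers. */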
static inline int update_vmcs_guest_state(struct guest_info * info)
{
    struct v3_msr tmp_msr;
    int vmx_ret = 0;

    vmx_ret |= check_vmcs_write(VMCS_GUEST_RIP, info->rip);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_RSP, info->vm_regs.rsp);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR0, info->ctrl_regs.cr0);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR4, info->ctrl_regs.cr4);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_RFLAGS, info->ctrl_regs.rflags);

#define DEBUGCTL_MSR 0x1d9

    v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_DR7, 0x400);

    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, 0xffffffffffffffff);

    /*** Write VMCS Segments ***/
    struct vmcs_segment_access access;

    memset(&access, 0, sizeof(access));

    /* CS Segment */
    translate_segment_access(&(info->segments.cs), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_BASE, info->segments.cs.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_SELECTOR, info->segments.cs.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_LIMIT, info->segments.cs.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_ACCESS, access.value);

    /* SS Segment */
    translate_segment_access(&(info->segments.ss), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_BASE, info->segments.ss.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_SELECTOR, info->segments.ss.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_LIMIT, info->segments.ss.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_ACCESS, access.value);

    /* DS Segment */
    translate_segment_access(&(info->segments.ds), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_BASE, info->segments.ds.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_SELECTOR, info->segments.ds.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_LIMIT, info->segments.ds.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_ACCESS, access.value);

    /* ES Segment */
    translate_segment_access(&(info->segments.es), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_BASE, info->segments.es.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_SELECTOR, info->segments.es.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_LIMIT, info->segments.es.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_ACCESS, access.value);

    /* FS Segment */
    translate_segment_access(&(info->segments.fs), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_BASE, info->segments.fs.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_SELECTOR, info->segments.fs.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_LIMIT, info->segments.fs.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_ACCESS, access.value);

    /* GS Segment */
    translate_segment_access(&(info->segments.gs), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_BASE, info->segments.gs.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_SELECTOR, info->segments.gs.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_LIMIT, info->segments.gs.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_ACCESS, access.value);

    /* LDTR segment */
    translate_segment_access(&(info->segments.ldtr), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_BASE, info->segments.ldtr.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_SELECTOR, info->segments.ldtr.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_LIMIT, info->segments.ldtr.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_ACCESS, access.value);

    /* TR Segment */
    translate_segment_access(&(info->segments.tr), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_BASE, info->segments.tr.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_SELECTOR, info->segments.tr.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_LIMIT, info->segments.tr.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_ACCESS, access.value);

    /* GDTR Segment */
    vmx_ret |= check_vmcs_write(VMCS_GUEST_GDTR_BASE, info->segments.gdtr.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_GDTR_LIMIT, info->segments.gdtr.limit);

    /* IDTR Segment */
    vmx_ret |= check_vmcs_write(VMCS_GUEST_IDTR_BASE, info->segments.idtr.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_IDTR_LIMIT, info->segments.idtr.limit);

    return vmx_ret;
}


#if 0
// For the 32-bit reserved bit fields:
// MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR.
static uint32_t sanitize_bits1(uint32_t msr_num, uint32_t val) {
    v3_msr_t mask_msr;

    PrintDebug("sanitize_bits1 (MSR:%x)\n", msr_num);

    v3_get_msr(msr_num, &mask_msr.hi, &mask_msr.lo);

    PrintDebug("MSR %x = %x : %x \n", msr_num, mask_msr.hi, mask_msr.lo);

    val |= mask_msr.lo;
    val |= mask_msr.hi;

    return val;
}


static addr_t sanitize_bits2(uint32_t msr_num0, uint32_t msr_num1, addr_t val) {
    v3_msr_t msr0, msr1;
    addr_t msr0_val, msr1_val;

    PrintDebug("sanitize_bits2 (MSR0=%x, MSR1=%x)\n", msr_num0, msr_num1);

    v3_get_msr(msr_num0, &msr0.hi, &msr0.lo);
    v3_get_msr(msr_num1, &msr1.hi, &msr1.lo);

    // This generates a mask that is the natural bit width of the CPU
    msr0_val = msr0.value;
    msr1_val = msr1.value;

    PrintDebug("MSR %x = %p, %x = %p \n", msr_num0, (void*)msr0_val, msr_num1, (void*)msr1_val);

    val |= msr0_val;
    val |= msr1_val;

    return val;
}

static int setup_base_host_state() {
    //   vmwrite(HOST_IDTR_BASE, 
}

#endif

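/* Allocate and zero one page for a VMCS (or VMXON) region, stamp it with the
 * revision identifier from the IA32_VMX_BASIC MSR, and return its physical
 * address. */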
static addr_t allocate_vmcs()
{
    reg_ex_t msr;
    PrintDebug("Allocating page\n");
    struct vmcs_data * vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));

    memset(vmcs_page, 0, 4096);

    v3_get_msr(VMX_BASIC_MSR, &(msr.e_reg.high), &(msr.e_reg.low));

    vmcs_page->revision = ((struct vmx_basic_msr*)&msr)->revision;
    PrintDebug("VMX Revision: 0x%x\n", vmcs_page->revision);

    return (addr_t)V3_PAddr((void *)vmcs_page);
}

#if 0
static void setup_v8086_mode_for_boot(struct guest_info * vm_info)
{
    ((struct vmx_data *)vm_info->vmm_data)->state = VMXASSIST_V8086_BIOS;
    struct rflags * flags = (struct rflags *)&(vm_info->ctrl_regs.rflags);
    flags->rsvd1 = 1;
    flags->vm = 1;
    flags->iopl = 3;

#define GUEST_CR0_MASK 0x80000021
#define GUEST_CR4_MASK 0x00002000
    vm_info->ctrl_regs.cr0 = GUEST_CR0_MASK;
    vm_info->ctrl_regs.cr4 = GUEST_CR4_MASK;

    vm_info->rip = 0xd0000;
    vm_info->vm_regs.rsp = 0x80000;

    vm_info->segments.cs.selector = 0xf000;
    vm_info->segments.cs.base = 0xf000 << 4;
    vm_info->segments.cs.limit = 0xffff;
    vm_info->segments.cs.type = 3;
    vm_info->segments.cs.system = 1;
    vm_info->segments.cs.dpl = 3;
    vm_info->segments.cs.present = 1;
    vm_info->segments.cs.granularity = 0;

    int i = 0;
    struct v3_segment * seg_ptr = (struct v3_segment *)&(vm_info->segments);

    /* Set values for selectors ds through ss */
    for(i = 1; i < 6; i++) {
        seg_ptr[i].selector = 0x0000;
        seg_ptr[i].base = 0x00000;
        seg_ptr[i].limit = 0xffff;
    }

    for(i = 6; i < 10; i++) {
        seg_ptr[i].base = 0x0;
        seg_ptr[i].limit = 0xffff;
    }

    vm_info->segments.ldtr.selector = 0x0;
    vm_info->segments.ldtr.type = 2;
    vm_info->segments.ldtr.system = 0;
    vm_info->segments.ldtr.present = 1;
    vm_info->segments.ldtr.granularity = 0;

    vm_info->segments.tr.selector = 0x0;
    vm_info->segments.tr.type = 3;
    vm_info->segments.tr.system = 0;
    vm_info->segments.tr.present = 1;
    vm_info->segments.tr.granularity = 0;
}
#endif

#if 0
static int init_vmcs_bios(struct guest_info * vm_info)
{
#if 0

    setup_v8086_mode_for_boot(vm_info);

    // Setup guest state
    // TODO: This is not 32-bit safe!
    vmx_ret |= check_vmcs_write(VMCS_GUEST_RIP, vm_info->rip);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_RSP, vm_info->vm_regs.rsp);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR0, vm_info->ctrl_regs.cr0);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR4, vm_info->ctrl_regs.cr4);

    vmx_ret |= vmcs_write_guest_segments(vm_info);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_RFLAGS, vm_info->ctrl_regs.rflags);
#define DEBUGCTL_MSR 0x1d9

    v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_DR7, 0x400);

    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, 0xffffffffffffffff);

    if (vmx_ret != 0) {
        PrintError("Could not initialize VMCS segments\n");
        return -1;
    }

#endif
    return 0;
}
#endif

static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
    v3_pre_config_guest(info, config_ptr);

    struct vmx_data * vmx_data = NULL;

    vmx_data = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));

    PrintDebug("vmx_data pointer: %p\n", (void *)vmx_data);

    PrintDebug("Allocating VMCS\n");
    vmx_data->vmcs_ptr_phys = allocate_vmcs();

    PrintDebug("VMCS pointer: %p\n", (void *)(vmx_data->vmcs_ptr_phys));

    info->vmm_data = vmx_data;

    PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);

    // TODO: Fix vmcs fields so they're 32-bit
    int vmx_ret = 0;

    PrintDebug("Clearing VMCS: %p\n", (void*)vmx_data->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_data->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMCLEAR failed\n");
        return -1;
    }

    PrintDebug("Loading VMCS\n");
    vmx_ret = vmcs_load(vmx_data->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMPTRLD failed\n");
        return -1;
    }


    /********** Setup and write VMX Control Fields ***********/
    struct v3_msr tmp_msr;

    v3_get_msr(VMX_PINBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    /* Add NMI exiting */
    tmp_msr.lo |= NMI_EXIT;
    check_vmcs_write(VMCS_PIN_CTRLS, tmp_msr.lo);

    v3_get_msr(VMX_PROCBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    /* Add unconditional I/O */
    tmp_msr.lo |= UNCOND_IO_EXIT;
    check_vmcs_write(VMCS_PROC_CTRLS, tmp_msr.lo);

    v3_get_msr(VMX_EXIT_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    tmp_msr.lo |= HOST_ADDR_SPACE_SIZE;
    check_vmcs_write(VMCS_EXIT_CTRLS, tmp_msr.lo);

    v3_get_msr(VMX_ENTRY_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    check_vmcs_write(VMCS_ENTRY_CTRLS, tmp_msr.lo);

    check_vmcs_write(VMCS_EXCP_BITMAP, 0xffffffff);


    /******* Setup Host State **********/

    /* Cache GDTR, IDTR, and TR in host struct */
    addr_t gdtr_base;
    struct {
        uint16_t selector;
        addr_t   base;
    } __attribute__((packed)) tmp_seg;

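    /* Note: SGDT and SIDT store a 16-bit limit followed by the base address,
     * so the limit lands in tmp_seg.selector (ignored here) and the base in
     * tmp_seg.base.  STR stores only the 16-bit task register selector. */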
    __asm__ __volatile__(
                         "sgdt (%0);"
                         :
                         : "q"(&tmp_seg)
                         : "memory"
                         );
    gdtr_base = tmp_seg.base;
    vmx_data->host_state.gdtr.base = gdtr_base;

    __asm__ __volatile__(
                         "sidt (%0);"
                         :
                         : "q"(&tmp_seg)
                         : "memory"
                         );
    vmx_data->host_state.idtr.base = tmp_seg.base;

    __asm__ __volatile__(
                         "str (%0);"
                         :
                         : "q"(&tmp_seg)
                         : "memory"
                         );
    vmx_data->host_state.tr.selector = tmp_seg.selector;

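    /* STR does not provide TR's base; reconstruct it from the TSS descriptor
     * that the selector indexes in the host GDT. */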
    /* The GDTR *index* is bits 3-15 of the selector. */
    struct tss_descriptor * desc = (struct tss_descriptor *)
                        (gdtr_base + 8 * (tmp_seg.selector >> 3));

    tmp_seg.base = (
                    (desc->base1) |
                    (desc->base2 << 16) |
                    (desc->base3 << 24) |
#ifdef __V3_64BIT__
                    ((uint64_t)desc->base4 << 32)
#else
                    (0)
#endif
                );

    vmx_data->host_state.tr.base = tmp_seg.base;

    if(update_vmcs_host_state(info)) {
        PrintError("Could not write host state\n");
        return -1;
    }


    /******* Setup VMXAssist guest state ***********/
    info->rip = 0xd0000;
    info->vm_regs.rsp = 0x80000;

    struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
    flags->rsvd1 = 1;

#define GUEST_CR0 0x80000031
#define GUEST_CR4 0x00002000
    info->ctrl_regs.cr0 = GUEST_CR0;
    info->ctrl_regs.cr4 = GUEST_CR4;

    addr_t guest_cr3 = (addr_t)V3_AllocPages(1);

    memset(V3_VAddr((void*)guest_cr3), 0, 4096);
    vmcs_write(VMCS_GUEST_CR3, guest_cr3);

    v3_get_msr(VMX_CR0_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    PrintDebug("CR0 MSR: %p\n", (void*)tmp_msr.value);

    v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    PrintDebug("CR4 MSR: %p\n", (void*)tmp_msr.value);

    struct v3_segment * seg_reg = (struct v3_segment *)&(info->segments);

    int i;
    for(i = 0; i < 10; i++) {
        seg_reg[i].selector = 3 << 3;
        seg_reg[i].limit = 0xffff;
        seg_reg[i].base = 0x0;
    }
    info->segments.cs.selector = 2 << 3;

    /* Set only the segment registers */
    for(i = 0; i < 6; i++) {
        seg_reg[i].limit = 0xfffff;
        seg_reg[i].granularity = 1;
        seg_reg[i].type = 3;
        seg_reg[i].system = 1;
        seg_reg[i].dpl = 0;
        seg_reg[i].present = 1;
        seg_reg[i].db = 1;
    }
    info->segments.cs.type = 0xb;

    info->segments.ldtr.selector = 0x20;
    info->segments.ldtr.type = 2;
    info->segments.ldtr.system = 0;
    info->segments.ldtr.present = 1;
    info->segments.ldtr.granularity = 0;

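    /* A minimal protected-mode GDT that is copied into the guest for
     * VMXASSIST: null, 32-bit TSS, 32-bit code, 32-bit data, and LDT
     * descriptors. */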
    uint64_t gdt[] __attribute__ ((aligned(32))) = {
        0x0000000000000000ULL,          /* 0x00: reserved */
        0x0000830000000000ULL,          /* 0x08: 32-bit TSS */
        //      0x0000890000000000ULL,          /* 0x08: 32-bit TSS */
        0x00CF9b000000FFFFULL,          /* 0x10: CS 32-bit */
        0x00CF93000000FFFFULL,          /* 0x18: DS 32-bit */
        0x000082000000FFFFULL,          /* 0x20: LDTR 32-bit */
    };

#define VMXASSIST_GDT   0x10000
    addr_t vmxassist_gdt = 0;
    if(guest_pa_to_host_va(info, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
        PrintError("Could not find VMXASSIST GDT destination\n");
        return -1;
    }
    memcpy((void*)vmxassist_gdt, gdt, sizeof(uint64_t) * 5);

    info->segments.gdtr.base = VMXASSIST_GDT;

#define VMXASSIST_TSS   0x40000
    addr_t vmxassist_tss = VMXASSIST_TSS;
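    /* Fold the TSS base address into the descriptor's split base fields
     * (bits 63-56, 39-32, and 31-16) and place the limit in bits 15-0. */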
    gdt[0x08 / sizeof(gdt[0])] |=
        ((vmxassist_tss & 0xFF000000) << (56 - 24)) |
        ((vmxassist_tss & 0x00FF0000) << (32 - 16)) |
        ((vmxassist_tss & 0x0000FFFF) << (16)) |
        (8392 - 1);

    info->segments.tr.selector = 0x08;
    info->segments.tr.base = vmxassist_tss;

    // info->segments.tr.type = 0x9;
    info->segments.tr.type = 0x3;
    info->segments.tr.system = 0;
    info->segments.tr.present = 1;
    info->segments.tr.granularity = 0;


#define VMXASSIST_START 0x000d0000
    extern uint8_t vmxassist_start[];
    extern uint8_t vmxassist_end[];

    addr_t vmxassist_dst = 0;
    if(guest_pa_to_host_va(info, VMXASSIST_START, &vmxassist_dst) == -1) {
        PrintError("Could not find VMXASSIST destination\n");
        return -1;
    }
    memcpy((void*)vmxassist_dst, vmxassist_start, vmxassist_end - vmxassist_start);

    if(update_vmcs_guest_state(info) != VMX_SUCCESS) {
        PrintError("Writing guest state failed!\n");
        return -1;
    }

    v3_print_vmcs();

    //v3_post_config_guest(info, config_ptr);

    return 0;
}


static int start_vmx_guest(struct guest_info * info) {
    uint32_t error = 0;
    int ret = 0;

    PrintDebug("Attempting VMLAUNCH\n");

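    /* v3_vmx_vmlaunch() attempts to enter the guest.  If the launch fails,
     * the VM-instruction error field of the VMCS records the reason. */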
    ret = v3_vmx_vmlaunch(&(info->vm_regs));
    if (ret != VMX_SUCCESS) {
        vmcs_read(VMCS_INSTR_ERR, &error);
        PrintError("VMLAUNCH failed: %d\n", error);

        v3_print_vmcs();
    }
    PrintDebug("Returned from VMLAUNCH ret=%d(0x%x)\n", ret, ret);

    return -1;
}

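/* VMX support is indicated by CPUID.1:ECX; in addition, the
 * IA32_FEATURE_CONTROL MSR must be configured (typically by the BIOS) to
 * permit VMX operation. */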
int v3_is_vmx_capable() {
    v3_msr_t feature_msr;
    addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);

    PrintDebug("ECX: %p\n", (void*)ecx);

    if (ecx & CPUID_1_ECX_VTXFLAG) {
        v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));

        PrintTrace("MSRREGlow: 0x%.8x\n", feature_msr.lo);

        if ((feature_msr.lo & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
            PrintDebug("VMX is locked -- enable in the BIOS\n");
            return 0;
        }

    } else {
        PrintDebug("VMX not supported on this CPU\n");
        return 0;
    }

    return 1;
}

static int has_vmx_nested_paging() {
    return 0;
}


void v3_init_vmx(struct v3_ctrl_ops * vm_ops) {
    extern v3_cpu_arch_t v3_cpu_type;

    struct v3_msr tmp_msr;
    uint64_t ret = 0;

    v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));

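    /* Set CR4.VMXE (bit 13) and verify that every bit required to be 1 by
     * the IA32_VMX_CR4_FIXED0 MSR is set before writing the new CR4 value. */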
    __asm__ __volatile__ (
                          "movq %%cr4, %%rbx;"
                          "orq  $0x00002000, %%rbx;"
                          "movq %%rbx, %0;"
                          : "=m"(ret)
                          :
                          : "%rbx"
                          );

    if((~ret & tmp_msr.value) == 0) {
        __asm__ __volatile__ (
                              "movq %0, %%cr4;"
                              :
                              : "q"(ret)
                              );
    } else {
        PrintError("Invalid CR4 Settings!\n");
        return;
    }
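    /* CR0.NE (bit 5) is required to be 1 for VMX operation, so set it
     * unconditionally. */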
    __asm__ __volatile__ (
                          "movq %%cr0, %%rbx; "
                          "orq  $0x00000020, %%rbx; "
                          "movq %%rbx, %%cr0;"
                          :
                          :
                          : "%rbx"
                          );
    // TODO: Should check and return an error here....


    // Setup VMXON Region
    vmxon_ptr_phys = allocate_vmcs();
    PrintDebug("VMXON pointer: 0x%p\n", (void*)vmxon_ptr_phys);

    if (v3_enable_vmx(vmxon_ptr_phys) == VMX_SUCCESS) {
        PrintDebug("VMX Enabled\n");
    } else {
        PrintError("VMX initialization failure\n");
        return;
    }


    if (has_vmx_nested_paging() == 1) {
        v3_cpu_type = V3_VMX_EPT_CPU;
    } else {
        v3_cpu_type = V3_VMX_CPU;
    }

    // Setup the VMX specific vmm operations
    vm_ops->init_guest = &init_vmx_guest;
    vm_ops->start_guest = &start_vmx_guest;
    vm_ops->has_nested_paging = &has_vmx_nested_paging;
}