Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way.
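For example, to track a release branch (the branch name below is only illustrative; run "git branch -r" after cloning to list the remote branches that actually exist):

  git checkout --track -b Release-1.0 origin/Release-1.0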


Successful transition to vmxassist, then to the BIOS, where it dies in keyboard init.
palacios/src/palacios/vmx.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Peter Dinda <pdinda@northwestern.edu>
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Peter Dinda <pdinda@northwestern.edu>
 *         Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */


#include <palacios/vmx.h>
#include <palacios/vmcs.h>
#include <palacios/vmm.h>
#include <palacios/vmx_lowlevel.h>
#include <palacios/vmm_lowlevel.h>
#include <palacios/vmm_ctrl_regs.h>
#include <palacios/vmm_config.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>

static addr_t vmxon_ptr_phys;
extern int v3_vmx_exit_handler();
extern int v3_vmx_vmlaunch(struct v3_gprs * vm_regs, struct guest_info * info);

static inline int check_vmcs_write(vmcs_field_t field, addr_t val)
{
    int ret = 0;
    ret = vmcs_write(field, val);

    if (ret != VMX_SUCCESS) {
        PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
        return 1;
    }

    return 0;
}

static inline void translate_segment_access(struct v3_segment * v3_seg,
                                            struct vmcs_segment_access * access)
{
    access->type = v3_seg->type;
    access->desc_type = v3_seg->system;
    access->dpl = v3_seg->dpl;
    access->present = v3_seg->present;
    access->avail = v3_seg->avail;
    access->long_mode = v3_seg->long_mode;
    access->db = v3_seg->db;
    access->granularity = v3_seg->granularity;
}

int v3_update_vmcs_ctrl_fields(struct guest_info * info) {
    int vmx_ret = 0;
    struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);

    vmx_ret |= check_vmcs_write(VMCS_PIN_CTRLS, arch_data->pinbased_ctrls);
    vmx_ret |= check_vmcs_write(VMCS_PROC_CTRLS, arch_data->pri_procbased_ctrls);

    if (arch_data->pri_procbased_ctrls & ACTIVE_SEC_CTRLS) {
        vmx_ret |= check_vmcs_write(VMCS_SEC_PROC_CTRLS, arch_data->sec_procbased_ctrls);
    }

    vmx_ret |= check_vmcs_write(VMCS_EXIT_CTRLS, arch_data->exit_ctrls);
    vmx_ret |= check_vmcs_write(VMCS_ENTRY_CTRLS, arch_data->entry_ctrls);

    return vmx_ret;
}

int v3_update_vmcs_host_state(struct guest_info * info) {
    int vmx_ret = 0;
    addr_t tmp;
    struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
    struct v3_msr tmp_msr;

    __asm__ __volatile__ ( "movq %%cr0, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_CR0, tmp);


    __asm__ __volatile__ ( "movq %%cr3, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_CR3, tmp);


    __asm__ __volatile__ ( "movq %%cr4, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_CR4, tmp);



    vmx_ret |= check_vmcs_write(VMCS_HOST_GDTR_BASE, arch_data->host_state.gdtr.base);
    vmx_ret |= check_vmcs_write(VMCS_HOST_IDTR_BASE, arch_data->host_state.idtr.base);
    vmx_ret |= check_vmcs_write(VMCS_HOST_TR_BASE, arch_data->host_state.tr.base);

#define FS_BASE_MSR 0xc0000100
#define GS_BASE_MSR 0xc0000101

    // FS.BASE MSR
    v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);

    // GS.BASE MSR
    v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);



    __asm__ __volatile__ ( "movq %%cs, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%ss, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%ds, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%es, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_ES_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%fs, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);

    __asm__ __volatile__ ( "movq %%gs, %0; "
                           : "=q"(tmp)
                           :
    );
    vmx_ret |= check_vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);

    vmx_ret |= check_vmcs_write(VMCS_HOST_TR_SELECTOR, arch_data->host_state.tr.selector);


#define SYSENTER_CS_MSR 0x00000174
#define SYSENTER_ESP_MSR 0x00000175
#define SYSENTER_EIP_MSR 0x00000176

    // SYSENTER CS MSR
    v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_CS, tmp_msr.lo);

    // SYSENTER_ESP MSR
    v3_get_msr(SYSENTER_ESP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_ESP, tmp_msr.value);

    // SYSENTER_EIP MSR
    v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);

    return vmx_ret;
}


int v3_update_vmcs_guest_state(struct guest_info * info)
{
    int vmx_ret = 0;

    vmx_ret |= check_vmcs_write(VMCS_GUEST_RIP, info->rip);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_RSP, info->vm_regs.rsp);


    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR0, info->ctrl_regs.cr0);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR3, info->ctrl_regs.cr3);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR4, info->ctrl_regs.cr4);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_RFLAGS, info->ctrl_regs.rflags);



    /*** Write VMCS Segments ***/
    struct vmcs_segment_access access;

    memset(&access, 0, sizeof(access));

    /* CS Segment */
    translate_segment_access(&(info->segments.cs), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_BASE, info->segments.cs.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_SELECTOR, info->segments.cs.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_LIMIT, info->segments.cs.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_ACCESS, access.value);

    /* SS Segment */
    memset(&access, 0, sizeof(access));
    translate_segment_access(&(info->segments.ss), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_BASE, info->segments.ss.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_SELECTOR, info->segments.ss.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_LIMIT, info->segments.ss.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_ACCESS, access.value);

    /* DS Segment */
    memset(&access, 0, sizeof(access));
    translate_segment_access(&(info->segments.ds), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_BASE, info->segments.ds.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_SELECTOR, info->segments.ds.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_LIMIT, info->segments.ds.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_ACCESS, access.value);


    /* ES Segment */
    memset(&access, 0, sizeof(access));
    translate_segment_access(&(info->segments.es), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_BASE, info->segments.es.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_SELECTOR, info->segments.es.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_LIMIT, info->segments.es.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_ACCESS, access.value);

    /* FS Segment */
    memset(&access, 0, sizeof(access));
    translate_segment_access(&(info->segments.fs), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_BASE, info->segments.fs.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_SELECTOR, info->segments.fs.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_LIMIT, info->segments.fs.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_ACCESS, access.value);

    /* GS Segment */
    memset(&access, 0, sizeof(access));
    translate_segment_access(&(info->segments.gs), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_BASE, info->segments.gs.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_SELECTOR, info->segments.gs.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_LIMIT, info->segments.gs.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_ACCESS, access.value);

    /* LDTR segment */
    memset(&access, 0, sizeof(access));
    translate_segment_access(&(info->segments.ldtr), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_BASE, info->segments.ldtr.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_SELECTOR, info->segments.ldtr.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_LIMIT, info->segments.ldtr.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_ACCESS, access.value);

    /* TR Segment */
    memset(&access, 0, sizeof(access));
    translate_segment_access(&(info->segments.tr), &access);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_BASE, info->segments.tr.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_SELECTOR, info->segments.tr.selector);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_LIMIT, info->segments.tr.limit);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_ACCESS, access.value);

    /* GDTR Segment */
    vmx_ret |= check_vmcs_write(VMCS_GUEST_GDTR_BASE, info->segments.gdtr.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_GDTR_LIMIT, info->segments.gdtr.limit);

    /* IDTR Segment */
    vmx_ret |= check_vmcs_write(VMCS_GUEST_IDTR_BASE, info->segments.idtr.base);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_IDTR_LIMIT, info->segments.idtr.limit);

    return vmx_ret;
}



#if 0
// For the 32 bit reserved bit fields
// MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR
static uint32_t sanitize_bits1(uint32_t msr_num, uint32_t val) {
    v3_msr_t mask_msr;

    PrintDebug("sanitize_bits1 (MSR:%x)\n", msr_num);

    v3_get_msr(msr_num, &mask_msr.hi, &mask_msr.lo);

    PrintDebug("MSR %x = %x : %x \n", msr_num, mask_msr.hi, mask_msr.lo);

    val |= mask_msr.lo;
    val |= mask_msr.hi;

    return val;
}



static addr_t sanitize_bits2(uint32_t msr_num0, uint32_t msr_num1, addr_t val) {
    v3_msr_t msr0, msr1;
    addr_t msr0_val, msr1_val;

    PrintDebug("sanitize_bits2 (MSR0=%x, MSR1=%x)\n", msr_num0, msr_num1);

    v3_get_msr(msr_num0, &msr0.hi, &msr0.lo);
    v3_get_msr(msr_num1, &msr1.hi, &msr1.lo);

    // This generates a mask that is the natural bit width of the CPU
    msr0_val = msr0.value;
    msr1_val = msr1.value;

    PrintDebug("MSR %x = %p, %x = %p \n", msr_num0, (void*)msr0_val, msr_num1, (void*)msr1_val);

    val |= msr0_val;
    val |= msr1_val;

    return val;
}



#endif


static addr_t allocate_vmcs()
{
    reg_ex_t msr;
    PrintDebug("Allocating page\n");
    struct vmcs_data * vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));


    memset(vmcs_page, 0, 4096);

    v3_get_msr(VMX_BASIC_MSR, &(msr.e_reg.high), &(msr.e_reg.low));

    vmcs_page->revision = ((struct vmx_basic_msr*)&msr)->revision;
    PrintDebug("VMX Revision: 0x%x\n", vmcs_page->revision);

    return (addr_t)V3_PAddr((void *)vmcs_page);
}

#if 0

#endif

#if 0
static int init_vmcs_bios(struct guest_info * vm_info)
{
#if 0

    setup_v8086_mode_for_boot(vm_info);


    // Setup guest state
    // TODO: This is not 32-bit safe!
    vmx_ret |= check_vmcs_write(VMCS_GUEST_RIP, vm_info->rip);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_RSP, vm_info->vm_regs.rsp);


    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR0, vm_info->ctrl_regs.cr0);
    vmx_ret |= check_vmcs_write(VMCS_GUEST_CR4, vm_info->ctrl_regs.cr4);

    vmx_ret |= vmcs_write_guest_segments(vm_info);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_RFLAGS, vm_info->ctrl_regs.rflags);
#define DEBUGCTL_MSR 0x1d9

    v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_DR7, 0x400);

    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, 0xffffffffffffffff);

    if (vmx_ret != 0) {
        PrintError("Could not initialize VMCS segments\n");
        return -1;
    }

#endif
    return 0;
}
#endif

static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
    v3_pre_config_guest(info, config_ptr);

    struct vmx_data * vmx_data = NULL;

    vmx_data = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));

    PrintDebug("vmx_data pointer: %p\n", (void *)vmx_data);

    PrintDebug("Allocating VMCS\n");
    vmx_data->vmcs_ptr_phys = allocate_vmcs();

    PrintDebug("VMCS pointer: %p\n", (void *)(vmx_data->vmcs_ptr_phys));

    info->vmm_data = vmx_data;

    PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);

    // TODO: Fix vmcs fields so they're 32-bit
    int vmx_ret = 0;

    PrintDebug("Clearing VMCS: %p\n", (void*)vmx_data->vmcs_ptr_phys);
    vmx_ret = vmcs_clear(vmx_data->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMCLEAR failed\n");
        return -1;
    }

    PrintDebug("Loading VMCS\n");
    vmx_ret = vmcs_load(vmx_data->vmcs_ptr_phys);

    if (vmx_ret != VMX_SUCCESS) {
        PrintError("VMPTRLD failed\n");
        return -1;
    }



    /******* Setup Host State **********/

    /* Cache GDTR, IDTR, and TR in host struct */
    addr_t gdtr_base;
    struct {
        uint16_t selector;
        addr_t   base;
    } __attribute__((packed)) tmp_seg;


    __asm__ __volatile__(
                         "sgdt (%0);"
                         :
                         : "q"(&tmp_seg)
                         : "memory"
                         );
    gdtr_base = tmp_seg.base;
    vmx_data->host_state.gdtr.base = gdtr_base;

    __asm__ __volatile__(
                         "sidt (%0);"
                         :
                         : "q"(&tmp_seg)
                         : "memory"
                         );
    vmx_data->host_state.idtr.base = tmp_seg.base;

    __asm__ __volatile__(
                         "str (%0);"
                         :
                         : "q"(&tmp_seg)
                         : "memory"
                         );
    vmx_data->host_state.tr.selector = tmp_seg.selector;

    /* The GDT index is bits 3-15 of the TR selector. */
    struct tss_descriptor * desc = (struct tss_descriptor *)
                        (gdtr_base + 8 * (tmp_seg.selector >> 3));

    tmp_seg.base = (
                    (desc->base1) |
                    (desc->base2 << 16) |
                    (desc->base3 << 24) |
#ifdef __V3_64BIT__
                    ((uint64_t)desc->base4 << 32)
#else
                    (0)
#endif
                );

    vmx_data->host_state.tr.base = tmp_seg.base;



    /********** Setup VMX Control Fields from MSRs ***********/
    struct v3_msr tmp_msr;

    v3_get_msr(VMX_PINBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    /* Add NMI exiting */
    vmx_data->pinbased_ctrls = tmp_msr.lo | NMI_EXIT;

    v3_get_msr(VMX_PROCBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_data->pri_procbased_ctrls = tmp_msr.lo;

    v3_get_msr(VMX_EXIT_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_data->exit_ctrls = tmp_msr.lo;

    v3_get_msr(VMX_ENTRY_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_data->entry_ctrls = tmp_msr.lo;

    struct vmx_exception_bitmap excp_bmap;
    excp_bmap.value = 0xffffffff;
    vmx_ret |= check_vmcs_write(VMCS_EXCP_BITMAP, excp_bmap.value);


    /******* Setup VMXAssist guest state ***********/

    info->rip = 0xd0000;
    info->vm_regs.rsp = 0x80000;

    struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
    flags->rsvd1 = 1;

    /* Print Control MSRs */
    v3_get_msr(VMX_CR0_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    PrintDebug("CR0 MSR: %p\n", (void*)tmp_msr.value);
    v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    PrintDebug("CR4 MSR: %p\n", (void*)tmp_msr.value);


#define GUEST_CR0 0x80000031
#define GUEST_CR4 0x00002000
    info->ctrl_regs.cr0 = GUEST_CR0;
    info->ctrl_regs.cr4 = GUEST_CR4;

    /* Setup paging */
    if (info->shdw_pg_mode == SHADOW_PAGING) {
        PrintDebug("Creating initial shadow page table\n");

        if (v3_init_passthrough_pts(info) == -1) {
            PrintError("Could not initialize passthrough page tables\n");
            return -1;
        }

        info->shdw_pg_state.guest_cr0 = CR0_PE;
        PrintDebug("Created\n");

        vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG));
        vmx_ret |= check_vmcs_write(VMCS_CR0_READ_SHDW, info->shdw_pg_state.guest_cr0);
        vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);

        info->ctrl_regs.cr3 = info->direct_map_pt;

        // vmx_data->pinbased_ctrls |= NMI_EXIT;

        /* Add unconditional I/O and CR exits */
        vmx_data->pri_procbased_ctrls |= UNCOND_IO_EXIT
                                        | CR3_LOAD_EXIT
                                        | CR3_STORE_EXIT;

        vmx_data->exit_ctrls |= HOST_ADDR_SPACE_SIZE;
    }

    struct v3_segment * seg_reg = (struct v3_segment *)&(info->segments);

    int i;
    for (i = 0; i < 10; i++) {
        seg_reg[i].selector = 3 << 3;
        seg_reg[i].limit = 0xffff;
        seg_reg[i].base = 0x0;
    }
    info->segments.cs.selector = 2 << 3;

    /* Set only the segment registers */
    for (i = 0; i < 6; i++) {
        seg_reg[i].limit = 0xfffff;
        seg_reg[i].granularity = 1;
        seg_reg[i].type = 3;
        seg_reg[i].system = 1;
        seg_reg[i].dpl = 0;
        seg_reg[i].present = 1;
        seg_reg[i].db = 1;
    }
    info->segments.cs.type = 0xb;

    info->segments.ldtr.selector = 0x20;
    info->segments.ldtr.type = 2;
    info->segments.ldtr.system = 0;
    info->segments.ldtr.present = 1;
    info->segments.ldtr.granularity = 0;

    /* Setup IO map */
    (void) v3_init_vmx_io_map(info);
    (void) v3_init_vmx_msr_map(info);

    /************* Map in GDT and vmxassist *************/

    uint64_t gdt[] __attribute__ ((aligned(32))) = {
        0x0000000000000000ULL,          /* 0x00: reserved */
        0x0000830000000000ULL,          /* 0x08: 32-bit TSS */
        //0x0000890000000000ULL,        /* 0x08: 32-bit TSS */
        0x00CF9b000000FFFFULL,          /* 0x10: CS 32-bit */
        0x00CF93000000FFFFULL,          /* 0x18: DS 32-bit */
        0x000082000000FFFFULL,          /* 0x20: LDTR 32-bit */
    };

#define VMXASSIST_GDT   0x10000
    addr_t vmxassist_gdt = 0;
    if (guest_pa_to_host_va(info, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
        PrintError("Could not find VMXASSIST GDT destination\n");
        return -1;
    }
    memcpy((void*)vmxassist_gdt, gdt, sizeof(uint64_t) * 5);

    info->segments.gdtr.base = VMXASSIST_GDT;

#define VMXASSIST_TSS   0x40000
    addr_t vmxassist_tss = VMXASSIST_TSS;
    gdt[0x08 / sizeof(gdt[0])] |=
        ((vmxassist_tss & 0xFF000000) << (56 - 24)) |
        ((vmxassist_tss & 0x00FF0000) << (32 - 16)) |
        ((vmxassist_tss & 0x0000FFFF) << (16)) |
        (8392 - 1);

    info->segments.tr.selector = 0x08;
    info->segments.tr.base = vmxassist_tss;

    //info->segments.tr.type = 0x9;
    info->segments.tr.type = 0x3;
    info->segments.tr.system = 0;
    info->segments.tr.present = 1;
    info->segments.tr.granularity = 0;


#define VMXASSIST_START 0x000d0000
    extern uint8_t v3_vmxassist_start[];
    extern uint8_t v3_vmxassist_end[];

    addr_t vmxassist_dst = 0;
    if (guest_pa_to_host_va(info, VMXASSIST_START, &vmxassist_dst) == -1) {
        PrintError("Could not find VMXASSIST destination\n");
        return -1;
    }
    memcpy((void*)vmxassist_dst, v3_vmxassist_start, v3_vmxassist_end - v3_vmxassist_start);

    /*** Write all the info to the VMCS ***/

#define DEBUGCTL_MSR 0x1d9
    v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
    vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);

    vmx_ret |= check_vmcs_write(VMCS_GUEST_DR7, 0x400);

    vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, 0xffffffffffffffff);

    if (v3_update_vmcs_ctrl_fields(info)) {
        PrintError("Could not write control fields!\n");
        return -1;
    }

    if (v3_update_vmcs_host_state(info)) {
        PrintError("Could not write host state\n");
        return -1;
    }


    if (v3_update_vmcs_guest_state(info) != VMX_SUCCESS) {
        PrintError("Writing guest state failed!\n");
        return -1;
    }

    v3_print_vmcs();

    vmx_data->state = VMXASSIST_STARTUP;

    v3_post_config_guest(info, config_ptr);

    return 0;
}


static int start_vmx_guest(struct guest_info * info) {
    uint32_t error = 0;
    int ret = 0;

    PrintDebug("Attempting VMLAUNCH\n");

    ret = v3_vmx_vmlaunch(&(info->vm_regs), info);
    if (ret != VMX_SUCCESS) {
        vmcs_read(VMCS_INSTR_ERR, &error);
        PrintError("VMLAUNCH failed: %d\n", error);

        v3_print_vmcs();
    }
    PrintDebug("Returned from VMLAUNCH ret=%d(0x%x)\n", ret, ret);

    return -1;
}

701
702 int v3_is_vmx_capable() {
703     v3_msr_t feature_msr;
704     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
705
706     v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);
707
708     PrintDebug("ECX: %p\n", (void*)ecx);
709
710     if (ecx & CPUID_1_ECX_VTXFLAG) {
711         v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));
712         
713         PrintTrace("MSRREGlow: 0x%.8x\n", feature_msr.lo);
714
715         if ((feature_msr.lo & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
716             PrintDebug("VMX is locked -- enable in the BIOS\n");
717             return 0;
718         }
719
720     } else {
721         PrintDebug("VMX not supported on this cpu\n");
722         return 0;
723     }
724
725     return 1;
726 }
727
728 static int has_vmx_nested_paging() {
729     return 0;
730 }
731
732
733
void v3_init_vmx(struct v3_ctrl_ops * vm_ops) {
    extern v3_cpu_arch_t v3_cpu_type;

    struct v3_msr tmp_msr;
    uint64_t ret = 0;

    v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));

    __asm__ __volatile__ (
                          "movq %%cr4, %%rbx;"
                          "orq  $0x00002000, %%rbx;"
                          "movq %%rbx, %0;"
                          : "=m"(ret)
                          :
                          : "%rbx"
                          );

    if ((~ret & tmp_msr.value) == 0) {
        __asm__ __volatile__ (
                              "movq %0, %%cr4;"
                              :
                              : "q"(ret)
                              );
    } else {
        PrintError("Invalid CR4 Settings!\n");
        return;
    }

    __asm__ __volatile__ (
                          "movq %%cr0, %%rbx; "
                          "orq  $0x00000020, %%rbx; "
                          "movq %%rbx, %%cr0;"
                          :
                          :
                          : "%rbx"
                          );

    // Should check for errors here and return them....


    // Setup VMXON Region
    vmxon_ptr_phys = allocate_vmcs();
    PrintDebug("VMXON pointer: 0x%p\n", (void*)vmxon_ptr_phys);

    if (v3_enable_vmx(vmxon_ptr_phys) == VMX_SUCCESS) {
        PrintDebug("VMX Enabled\n");
    } else {
        PrintError("VMX initialization failure\n");
        return;
    }


    if (has_vmx_nested_paging() == 1) {
        v3_cpu_type = V3_VMX_EPT_CPU;
    } else {
        v3_cpu_type = V3_VMX_CPU;
    }

    // Setup the VMX specific vmm operations
    vm_ops->init_guest = &init_vmx_guest;
    vm_ops->start_guest = &start_vmx_guest;
    vm_ops->has_nested_paging = &has_vmx_nested_paging;

}

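
For orientation, the entry points above fit together roughly as follows. The wrapper below is a hypothetical sketch, not part of vmx.c: its name and error handling are illustrative, it assumes the embedding host supplies the guest_info and v3_vm_config instances, and in Palacios the actual wiring is done by the VMM core rather than by caller code like this.

/* Hypothetical caller-side sketch (assumptions noted above). */
#include <palacios/vmx.h>
#include <palacios/vmm.h>

static int launch_vmx_guest(struct guest_info * info, struct v3_vm_config * config) {
    struct v3_ctrl_ops ops;

    memset(&ops, 0, sizeof(ops));

    if (v3_is_vmx_capable() == 0) {
        PrintError("VMX not available on this CPU\n");
        return -1;
    }

    /* Enables VMX operation and fills in init_guest/start_guest/has_nested_paging. */
    v3_init_vmx(&ops);

    /* Allocates the VMCS and populates guest, host, and control state. */
    if (ops.init_guest(info, config) == -1) {
        return -1;
    }

    /* Issues VMLAUNCH; in this version of the code it only returns on failure. */
    return ops.start_guest(info);
}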