Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way; see the example below.
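For instance, to track a release branch (the name Release-1.2 is only illustrative; run "git branch -r" inside the clone to list the branches that actually exist):

  cd palacios
  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2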


Modified boot and vmxassist to handle real/protected transition.
[palacios.git] palacios/src/palacios/vmx.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Peter Dinda <pdinda@northwestern.edu> 
11  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
12  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Peter Dinda <pdinda@northwestern.edu>
16  *         Jack Lange <jarusl@cs.northwestern.edu>
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21
22
23 #include <palacios/vmx.h>
24 #include <palacios/vmm.h>
25 #include <palacios/vmx_lowlevel.h>
26 #include <palacios/vmm_lowlevel.h>
27 #include <palacios/vmm_ctrl_regs.h>
28 #include <palacios/vmm_config.h>
29 #include <palacios/vm_guest_mem.h>
30 #include <palacios/vmm_direct_paging.h>
31 #include <palacios/vmx_io.h>
32 #include <palacios/vmx_msr.h>
33
34 static addr_t vmxon_ptr_phys;
35 extern int v3_vmx_exit_handler();
36 extern int v3_vmx_vmlaunch(struct v3_gprs * vm_regs, struct guest_info * info);
37
38 static int inline check_vmcs_write(vmcs_field_t field, addr_t val)
39 {
40     int ret = 0;
41     ret = vmcs_write(field,val);
42
43     if (ret != VMX_SUCCESS) {
44         PrintError("VMWRITE error on %s!: %d\n", v3_vmcs_field_to_str(field), ret);
45         return 1;
46     }
47
48     return 0;
49 }
50
51 static void inline translate_segment_access(struct v3_segment * v3_seg,  
52                                             struct vmcs_segment_access * access)
53 {
54     access->type = v3_seg->type;
55     access->desc_type = v3_seg->system;
56     access->dpl = v3_seg->dpl;
57     access->present = v3_seg->present;
58     access->avail = v3_seg->avail;
59     access->long_mode = v3_seg->long_mode;
60     access->db = v3_seg->db;
61     access->granularity = v3_seg->granularity;
62 }
63
64 int v3_update_vmcs_ctrl_fields(struct guest_info * info) {
65     int vmx_ret = 0;
66     struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
67
68     vmx_ret |= check_vmcs_write(VMCS_PIN_CTRLS, arch_data->pinbased_ctrls);
69     vmx_ret |= check_vmcs_write(VMCS_PROC_CTRLS, arch_data->pri_procbased_ctrls);
70
71     if(arch_data->pri_procbased_ctrls & ACTIVE_SEC_CTRLS) {
72         vmx_ret |= check_vmcs_write(VMCS_SEC_PROC_CTRLS, arch_data->sec_procbased_ctrls);
73     }
74
75     vmx_ret |= check_vmcs_write(VMCS_EXIT_CTRLS, arch_data->exit_ctrls);
76     vmx_ret |= check_vmcs_write(VMCS_ENTRY_CTRLS, arch_data->entry_ctrls);
77
78     return vmx_ret;
79 }
80
81 int v3_update_vmcs_host_state(struct guest_info * info) {
82     int vmx_ret = 0;
83     addr_t tmp;
84     struct vmx_data * arch_data = (struct vmx_data *)(info->vmm_data);
85     struct v3_msr tmp_msr;
86
87     __asm__ __volatile__ ( "movq    %%cr0, %0; "                
88                            : "=q"(tmp)
89                            :
90     );
91     vmx_ret |= check_vmcs_write(VMCS_HOST_CR0, tmp);
92
93
94     __asm__ __volatile__ ( "movq %%cr3, %0; "           
95                            : "=q"(tmp)
96                            :
97     );
98     vmx_ret |= check_vmcs_write(VMCS_HOST_CR3, tmp);
99
100
101     __asm__ __volatile__ ( "movq %%cr4, %0; "           
102                            : "=q"(tmp)
103                            :
104     );
105     vmx_ret |= check_vmcs_write(VMCS_HOST_CR4, tmp);
106
107
108
109     vmx_ret |= check_vmcs_write(VMCS_HOST_GDTR_BASE, arch_data->host_state.gdtr.base);
110     vmx_ret |= check_vmcs_write(VMCS_HOST_IDTR_BASE, arch_data->host_state.idtr.base);
111     vmx_ret |= check_vmcs_write(VMCS_HOST_TR_BASE, arch_data->host_state.tr.base);
112
113 #define FS_BASE_MSR 0xc0000100
114 #define GS_BASE_MSR 0xc0000101
115
116     // FS.BASE MSR
117     v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
118     vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);    
119
120     // GS.BASE MSR
121     v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
122     vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);    
123
124
125
126     __asm__ __volatile__ ( "movq %%cs, %0; "            
127                            : "=q"(tmp)
128                            :
129     );
130     vmx_ret |= check_vmcs_write(VMCS_HOST_CS_SELECTOR, tmp);
131
132     __asm__ __volatile__ ( "movq %%ss, %0; "            
133                            : "=q"(tmp)
134                            :
135     );
136     vmx_ret |= check_vmcs_write(VMCS_HOST_SS_SELECTOR, tmp);
137
138     __asm__ __volatile__ ( "movq %%ds, %0; "            
139                            : "=q"(tmp)
140                            :
141     );
142     vmx_ret |= check_vmcs_write(VMCS_HOST_DS_SELECTOR, tmp);
143
144     __asm__ __volatile__ ( "movq %%es, %0; "            
145                            : "=q"(tmp)
146                            :
147     );
148     vmx_ret |= check_vmcs_write(VMCS_HOST_ES_SELECTOR, tmp);
149
150     __asm__ __volatile__ ( "movq %%fs, %0; "            
151                            : "=q"(tmp)
152                            :
153     );
154     vmx_ret |= check_vmcs_write(VMCS_HOST_FS_SELECTOR, tmp);
155
156     __asm__ __volatile__ ( "movq %%gs, %0; "            
157                            : "=q"(tmp)
158                            :
159     );
160     vmx_ret |= check_vmcs_write(VMCS_HOST_GS_SELECTOR, tmp);
161
162     vmx_ret |= check_vmcs_write(VMCS_HOST_TR_SELECTOR, arch_data->host_state.tr.selector);
163
164
165 #define SYSENTER_CS_MSR 0x00000174
166 #define SYSENTER_ESP_MSR 0x00000175
167 #define SYSENTER_EIP_MSR 0x00000176
168
169     // SYSENTER CS MSR
170     v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
171     vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_CS, tmp_msr.lo);
172
173     // SYSENTER_ESP MSR
174     v3_get_msr(SYSENTER_ESP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
175     vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_ESP, tmp_msr.value);
176
177     // SYSENTER_EIP MSR
178     v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
179     vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);
180
181     return vmx_ret;
182 }
183
184
185 int v3_update_vmcs_guest_state(struct guest_info * info)
186 {
187     int vmx_ret = 0;
188
189     vmx_ret |= check_vmcs_write(VMCS_GUEST_RIP, info->rip);
190     vmx_ret |= check_vmcs_write(VMCS_GUEST_RSP, info->vm_regs.rsp);
191     
192
193     vmx_ret |= check_vmcs_write(VMCS_GUEST_CR0, info->ctrl_regs.cr0);
194     vmx_ret |= check_vmcs_write(VMCS_GUEST_CR3, info->ctrl_regs.cr3);
195     vmx_ret |= check_vmcs_write(VMCS_GUEST_CR4, info->ctrl_regs.cr4);
196
197     vmx_ret |= check_vmcs_write(VMCS_GUEST_RFLAGS, info->ctrl_regs.rflags);
198
199
200
201     /*** Write VMCS Segments ***/
202     struct vmcs_segment_access access;
203
204     memset(&access, 0, sizeof(access));
205
206     /* CS Segment */
207     translate_segment_access(&(info->segments.cs), &access);
208
209     vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_BASE, info->segments.cs.base);
210     vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_SELECTOR, info->segments.cs.selector);
211     vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_LIMIT, info->segments.cs.limit);
212     vmx_ret |= check_vmcs_write(VMCS_GUEST_CS_ACCESS, access.value);
213
214     /* SS Segment */
215     memset(&access, 0, sizeof(access));
216     translate_segment_access(&(info->segments.ss), &access);
217
218     vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_BASE, info->segments.ss.base);
219     vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_SELECTOR, info->segments.ss.selector);
220     vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_LIMIT, info->segments.ss.limit);
221     vmx_ret |= check_vmcs_write(VMCS_GUEST_SS_ACCESS, access.value);
222
223     /* DS Segment */
224     memset(&access, 0, sizeof(access));
225     translate_segment_access(&(info->segments.ds), &access);
226
227     vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_BASE, info->segments.ds.base);
228     vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_SELECTOR, info->segments.ds.selector);
229     vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_LIMIT, info->segments.ds.limit);
230     vmx_ret |= check_vmcs_write(VMCS_GUEST_DS_ACCESS, access.value);
231
232
233     /* ES Segment */
234     memset(&access, 0, sizeof(access));
235     translate_segment_access(&(info->segments.es), &access);
236
237     vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_BASE, info->segments.es.base);
238     vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_SELECTOR, info->segments.es.selector);
239     vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_LIMIT, info->segments.es.limit);
240     vmx_ret |= check_vmcs_write(VMCS_GUEST_ES_ACCESS, access.value);
241
242     /* FS Segment */
243     memset(&access, 0, sizeof(access));
244     translate_segment_access(&(info->segments.fs), &access);
245
246     vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_BASE, info->segments.fs.base);
247     vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_SELECTOR, info->segments.fs.selector);
248     vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_LIMIT, info->segments.fs.limit);
249     vmx_ret |= check_vmcs_write(VMCS_GUEST_FS_ACCESS, access.value);
250
251     /* GS Segment */
252     memset(&access, 0, sizeof(access));
253     translate_segment_access(&(info->segments.gs), &access);
254
255     vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_BASE, info->segments.gs.base);
256     vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_SELECTOR, info->segments.gs.selector);
257     vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_LIMIT, info->segments.gs.limit);
258     vmx_ret |= check_vmcs_write(VMCS_GUEST_GS_ACCESS, access.value);
259
260     /* LDTR segment */
261     memset(&access, 0, sizeof(access));
262     translate_segment_access(&(info->segments.ldtr), &access);
263
264     vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_BASE, info->segments.ldtr.base);
265     vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_SELECTOR, info->segments.ldtr.selector);
266     vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_LIMIT, info->segments.ldtr.limit);
267     vmx_ret |= check_vmcs_write(VMCS_GUEST_LDTR_ACCESS, access.value);
268
269     /* TR Segment */
270     memset(&access, 0, sizeof(access));
271     translate_segment_access(&(info->segments.tr), &access);
272
273     vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_BASE, info->segments.tr.base);
274     vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_SELECTOR, info->segments.tr.selector);
275     vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_LIMIT, info->segments.tr.limit);
276     vmx_ret |= check_vmcs_write(VMCS_GUEST_TR_ACCESS, access.value);
277
278     /* GDTR Segment */
279
280     vmx_ret |= check_vmcs_write(VMCS_GUEST_GDTR_BASE, info->segments.gdtr.base);
281     vmx_ret |= check_vmcs_write(VMCS_GUEST_GDTR_LIMIT, info->segments.gdtr.limit);
282
283     /* IDTR Segment*/
284     vmx_ret |= check_vmcs_write(VMCS_GUEST_IDTR_BASE, info->segments.idtr.base);
285     vmx_ret |= check_vmcs_write(VMCS_GUEST_IDTR_LIMIT, info->segments.idtr.limit);
286
287     return vmx_ret;
288
289 }
290
291
292
293
294 #if 0
295 // For the 32 bit reserved bit fields 
296 // MB1s are in the low 32 bits, MBZs are in the high 32 bits of the MSR
297 static uint32_t sanitize_bits1(uint32_t msr_num, uint32_t val) {
298     v3_msr_t mask_msr;
299
300     PrintDebug("sanitize_bits1 (MSR:%x)\n", msr_num);
301
302     v3_get_msr(msr_num, &mask_msr.hi, &mask_msr.lo);
303
304     PrintDebug("MSR %x = %x : %x \n", msr_num, mask_msr.hi, mask_msr.lo);
305
306     val |= mask_msr.lo;
307     val |= mask_msr.hi;
308   
309     return val;
310 }
311
312
313
314 static addr_t sanitize_bits2(uint32_t msr_num0, uint32_t msr_num1, addr_t val) {
315     v3_msr_t msr0, msr1;
316     addr_t msr0_val, msr1_val;
317
318     PrintDebug("sanitize_bits2 (MSR0=%x, MSR1=%x)\n", msr_num0, msr_num1);
319
320     v3_get_msr(msr_num0, &msr0.hi, &msr0.lo);
321     v3_get_msr(msr_num1, &msr1.hi, &msr1.lo);
322   
323     // This generates a mask that is the natural bit width of the CPU
324     msr0_val = msr0.value;
325     msr1_val = msr1.value;
326
327     PrintDebug("MSR %x = %p, %x = %p \n", msr_num0, (void*)msr0_val, msr_num1, (void*)msr1_val);
328
329     val |= msr0_val;
330     val |= msr1_val;
331
332     return val;
333 }
334
335
336
337 #endif
338
339
340 static addr_t allocate_vmcs() 
341 {
342     reg_ex_t msr;
343     PrintDebug("Allocating page\n");
344     struct vmcs_data * vmcs_page = (struct vmcs_data *)V3_VAddr(V3_AllocPages(1));
345
346
347     memset(vmcs_page, 0, 4096);
348
349     v3_get_msr(VMX_BASIC_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
350     
351     vmcs_page->revision = ((struct vmx_basic_msr*)&msr)->revision;
352     PrintDebug("VMX Revision: 0x%x\n",vmcs_page->revision);
353
354     return (addr_t)V3_PAddr((void *)vmcs_page);
355 }
356
357
358 static int init_vmx_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
359     v3_pre_config_guest(info, config_ptr);
360
361     struct vmx_data * vmx_data = NULL;
362
363     vmx_data = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));
364
365     PrintDebug("vmx_data pointer: %p\n", (void *)vmx_data);
366
367     PrintDebug("Allocating VMCS\n");
368     vmx_data->vmcs_ptr_phys = allocate_vmcs();
369
370     PrintDebug("VMCS pointer: %p\n", (void *)(vmx_data->vmcs_ptr_phys));
371
372     info->vmm_data = vmx_data;
373
374     PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);
375     
376     // TODO: Fix vmcs fields so they're 32-bit
377     int vmx_ret = 0;
378
379     PrintDebug("Clearing VMCS: %p\n",(void*)vmx_data->vmcs_ptr_phys);
380     vmx_ret = vmcs_clear(vmx_data->vmcs_ptr_phys);
381
382     if (vmx_ret != VMX_SUCCESS) {
383         PrintError("VMCLEAR failed\n");
384         return -1;
385     }
386
387     PrintDebug("Loading VMCS\n");
388     vmx_ret = vmcs_load(vmx_data->vmcs_ptr_phys);
389
390     if (vmx_ret != VMX_SUCCESS) {
391         PrintError("VMPTRLD failed\n");
392         return -1;
393     }
394
395
396
397     /******* Setup Host State **********/
398
399     /* Cache GDTR, IDTR, and TR in host struct */
400     addr_t gdtr_base;
401     struct {
402         uint16_t selector;
403         addr_t   base;
404     } __attribute__((packed)) tmp_seg;
405     
406
407     __asm__ __volatile__(
408                          "sgdt (%0);"
409                          :
410                          : "q"(&tmp_seg)
411                          : "memory"
412                          );
413     gdtr_base = tmp_seg.base;
414     vmx_data->host_state.gdtr.base = gdtr_base;
415
416     __asm__ __volatile__(
417                          "sidt (%0);"
418                          :
419                          : "q"(&tmp_seg)
420                          : "memory"
421                          );
422     vmx_data->host_state.idtr.base = tmp_seg.base;
423
424     __asm__ __volatile__(
425                          "str (%0);"
426                          :
427                          : "q"(&tmp_seg)
428                          : "memory"
429                          );
430     vmx_data->host_state.tr.selector = tmp_seg.selector;
431
432     /* The GDT index is bits 3-15 of the selector. */
433     struct tss_descriptor * desc = (struct tss_descriptor *)
434                         (gdtr_base + 8*(tmp_seg.selector>>3));
435
436     tmp_seg.base = (
437                     (desc->base1) |
438                     (desc->base2 << 16) |
439                     (desc->base3 << 24) |
440 #ifdef __V3_64BIT__
441                     ((uint64_t)desc->base4 << 32)
442 #else 
443                     (0)
444 #endif
445                 );
446
447     vmx_data->host_state.tr.base = tmp_seg.base;
448
449   
450
451     /********** Setup and VMX Control Fields from MSR ***********/
452     /* Setup IO map */
453     (void) v3_init_vmx_io_map(info);
454     (void) v3_init_vmx_msr_map(info);
455
456     struct v3_msr tmp_msr;
457
458     v3_get_msr(VMX_PINBASED_CTLS_MSR,&(tmp_msr.hi),&(tmp_msr.lo));
459     /* Add NMI exiting */
460     vmx_data->pinbased_ctrls =  tmp_msr.lo | NMI_EXIT;
461
462     v3_get_msr(VMX_PROCBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
463
464     PrintDebug("MSR High: 0x%x\n", tmp_msr.hi);
465     vmx_data->pri_procbased_ctrls = tmp_msr.lo | USE_IO_BITMAPS ;
466
467     vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(info->io_map.arch_data));
468     vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_B_ADDR, 
469             (addr_t)V3_PAddr(info->io_map.arch_data) + PAGE_SIZE_4KB); 
470
471     vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(info->msr_map.arch_data));
472
473     v3_get_msr(VMX_EXIT_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
474     vmx_data->exit_ctrls = tmp_msr.lo ;
475
476     v3_get_msr(VMX_ENTRY_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
477     vmx_data->entry_ctrls = tmp_msr.lo;
478
479     struct vmx_exception_bitmap excp_bmap;
480     excp_bmap.value = 0xffffffff;
481     excp_bmap.gp = 0;
482     vmx_ret |= check_vmcs_write(VMCS_EXCP_BITMAP, excp_bmap.value);
483
484
485     /******* Setup VMXAssist guest state ***********/
486
487     info->rip = 0xd0000;
488     info->vm_regs.rsp = 0x80000;
489
490     struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
491     flags->rsvd1 = 1;
492
493     /* Print Control MSRs */
494     v3_get_msr(VMX_CR0_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
495     PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)tmp_msr.value);
496     v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
497     PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)tmp_msr.value);
498
499
500 #define GUEST_CR0 0x80000031
501 #define GUEST_CR4 0x00002000
502     info->ctrl_regs.cr0 = GUEST_CR0;
503     info->ctrl_regs.cr4 = GUEST_CR4;
504    
505     /* Setup paging */
506     if(info->shdw_pg_mode == SHADOW_PAGING) {
507         PrintDebug("Creating initial shadow page table\n");
508
509         if(v3_init_passthrough_pts(info) == -1) {
510             PrintError("Could not initialize passthrough page tables\n");
511             return -1;
512         }
513
514         info->shdw_pg_state.guest_cr0 = CR0_PE;
515         PrintDebug("Created\n");
516
517         vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG) );
518         vmx_ret |= check_vmcs_write(VMCS_CR0_READ_SHDW, info->shdw_pg_state.guest_cr0);
519         vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);
520
521         info->ctrl_regs.cr3 = info->direct_map_pt;
522
523         // vmx_data->pinbased_ctrls |= NMI_EXIT;
524
525         /* Add CR exits */
526         vmx_data->pri_procbased_ctrls |= CR3_LOAD_EXIT  
527                                       | CR3_STORE_EXIT;
528  
529         vmx_data->exit_ctrls |= HOST_ADDR_SPACE_SIZE;
530     }
531
532     struct v3_segment * seg_reg = (struct v3_segment *)&(info->segments);
533
534     int i;
535     for(i=0; i < 10; i++)
536     {
537         seg_reg[i].selector = 3<<3;
538         seg_reg[i].limit = 0xffff;
539         seg_reg[i].base = 0x0;
540     }
541     info->segments.cs.selector = 2<<3;
542
543     /* Set only the segment registers */
544     for(i=0; i < 6; i++) {
545         seg_reg[i].limit = 0xfffff;
546         seg_reg[i].granularity = 1;
547         seg_reg[i].type = 3;
548         seg_reg[i].system = 1;
549         seg_reg[i].dpl = 0;
550         seg_reg[i].present = 1;
551         seg_reg[i].db = 1;
552     }
553     info->segments.cs.type = 0xb;
554
555     info->segments.ldtr.selector = 0x20;
556     info->segments.ldtr.type = 2;
557     info->segments.ldtr.system = 0;
558     info->segments.ldtr.present = 1;
559     info->segments.ldtr.granularity = 0;
560     
561     
562     /************* Map in GDT and vmxassist *************/
563
564     uint64_t  gdt[] __attribute__ ((aligned(32))) = {
565         0x0000000000000000ULL,          /* 0x00: reserved */
566         0x0000830000000000ULL,          /* 0x08: 32-bit TSS */
567         //0x0000890000000000ULL,                /* 0x08: 32-bit TSS */
568         0x00CF9b000000FFFFULL,          /* 0x10: CS 32-bit */
569         0x00CF93000000FFFFULL,          /* 0x18: DS 32-bit */
570         0x000082000000FFFFULL,          /* 0x20: LDTR 32-bit */
571     };
572
573 #define VMXASSIST_GDT   0x10000
574     addr_t vmxassist_gdt = 0;
575     if(guest_pa_to_host_va(info, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
576         PrintError("Could not find VMXASSIST GDT destination\n");
577         return -1;
578     }
579     memcpy((void*)vmxassist_gdt, gdt, sizeof(uint64_t) * 5);
580         
581     info->segments.gdtr.base = VMXASSIST_GDT;
582
583 #define VMXASSIST_TSS   0x40000
584     uint64_t vmxassist_tss = VMXASSIST_TSS;
585     gdt[0x08 / sizeof(gdt[0])] |=
586         ((vmxassist_tss & 0xFF000000) << (56 - 24)) |
587         ((vmxassist_tss & 0x00FF0000) << (32 - 16)) |
588         ((vmxassist_tss & 0x0000FFFF) << (16)) |
589         (8392 - 1);
590
591     info->segments.tr.selector = 0x08;
592     info->segments.tr.base = vmxassist_tss;
593
594     //info->segments.tr.type = 0x9; 
595     info->segments.tr.type = 0x3;
596     info->segments.tr.system = 0;
597     info->segments.tr.present = 1;
598     info->segments.tr.granularity = 0;
599
600  
601 #define VMXASSIST_START 0x000d0000
602     extern uint8_t v3_vmxassist_start[];
603     extern uint8_t v3_vmxassist_end[];
604
605     addr_t vmxassist_dst = 0;
606     if(guest_pa_to_host_va(info, VMXASSIST_START, &vmxassist_dst) == -1) {
607         PrintError("Could not find VMXASSIST destination\n");
608         return -1;
609     }
610     memcpy((void*)vmxassist_dst, v3_vmxassist_start, v3_vmxassist_end - v3_vmxassist_start);
611     
612     /*** Write all the info to the VMCS ***/
613
614 #define DEBUGCTL_MSR 0x1d9
615     v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
616     vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
617
618     vmx_ret |= check_vmcs_write(VMCS_GUEST_DR7, 0x400);
619
620     vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffffffffffULL);
621     
622     if(v3_update_vmcs_ctrl_fields(info)) {
623         PrintError("Could not write control fields!\n");
624         return -1;
625     }
626     
627     if(v3_update_vmcs_host_state(info)) {
628         PrintError("Could not write host state\n");
629         return -1;
630     }
631
632
633     if(v3_update_vmcs_guest_state(info)) {
634         PrintError("Writing guest state failed!\n");
635         return -1;
636     }
637
638     v3_print_vmcs();
639
640     vmx_data->state = VMXASSIST_DISABLED;
641
642     v3_post_config_guest(info, config_ptr);
643
644     return 0;
645 }
646
647
648 static int start_vmx_guest(struct guest_info* info) {
649     uint32_t error = 0;
650     int ret = 0;
651
652     PrintDebug("Attempting VMLAUNCH\n");
653
654     ret = v3_vmx_vmlaunch(&(info->vm_regs), info);
655     if (ret != VMX_SUCCESS) {
656         vmcs_read(VMCS_INSTR_ERR, &error);
657         PrintError("VMLAUNCH failed: %d\n", error);
658
659         v3_print_vmcs();
660
661     }
662     PrintDebug("Returned from VMLAUNCH ret=%d(0x%x)\n", ret, ret);
663
664     return -1;
665 }
666
667
668 int v3_is_vmx_capable() {
669     v3_msr_t feature_msr;
670     addr_t eax = 0, ebx = 0, ecx = 0, edx = 0;
671
672     v3_cpuid(0x1, &eax, &ebx, &ecx, &edx);
673
674     PrintDebug("ECX: %p\n", (void*)ecx);
675
676     if (ecx & CPUID_1_ECX_VTXFLAG) {
677         v3_get_msr(VMX_FEATURE_CONTROL_MSR, &(feature_msr.hi), &(feature_msr.lo));
678         
679         PrintDebug("MSRREGlow: 0x%.8x\n", feature_msr.lo);
680
681         if ((feature_msr.lo & FEATURE_CONTROL_VALID) != FEATURE_CONTROL_VALID) {
682             PrintDebug("VMX is locked -- enable in the BIOS\n");
683             return 0;
684         }
685
686     } else {
687         PrintDebug("VMX not supported on this cpu\n");
688         return 0;
689     }
690
691     return 1;
692 }
693
694 static int has_vmx_nested_paging() {
695     return 0;
696 }
697
698
699
700 void v3_init_vmx(struct v3_ctrl_ops * vm_ops) {
701     extern v3_cpu_arch_t v3_cpu_type;
702
703     struct v3_msr tmp_msr;
704     uint64_t ret=0;
705
706     v3_get_msr(VMX_CR4_FIXED0_MSR,&(tmp_msr.hi),&(tmp_msr.lo));
707     
708     __asm__ __volatile__ (
709                           "movq %%cr4, %%rbx;"
710                           "orq  $0x00002000, %%rbx;"
711                           "movq %%rbx, %0;"
712                           : "=m"(ret) 
713                           :
714                           : "%rbx"
715                           );
716
717     if((~ret & tmp_msr.value) == 0) {    /* every CR4 bit required to be 1 (CR4_FIXED0) is set */
718         __asm__ __volatile__ (
719                               "movq %0, %%cr4;"
720                               :
721                               : "q"(ret)
722                               );
723     } else {
724         PrintError("Invalid CR4 Settings!\n");
725         return;
726     }
727       __asm__ __volatile__ (
728                             "movq %%cr0, %%rbx; "
729                             "orq  $0x00000020,%%rbx; "
730                             "movq %%rbx, %%cr0;"
731                             :
732                             :
733                             : "%rbx"
734                             );
735       //
736     // Should check and return Error here.... 
737
738
739     // Setup VMXON Region
740     vmxon_ptr_phys = allocate_vmcs();
741     PrintDebug("VMXON pointer: 0x%p\n", (void*)vmxon_ptr_phys);
742
743     if (v3_enable_vmx(vmxon_ptr_phys) == VMX_SUCCESS) {
744         PrintDebug("VMX Enabled\n");
745     } else {
746         PrintError("VMX initialization failure\n");
747         return;
748     }
749         
750
751     if (has_vmx_nested_paging() == 1) {
752         v3_cpu_type = V3_VMX_EPT_CPU;
753     } else {
754         v3_cpu_type = V3_VMX_CPU;
755     }
756
757     // Setup the VMX specific vmm operations
758     vm_ops->init_guest = &init_vmx_guest;
759     vm_ops->start_guest = &start_vmx_guest;
760     vm_ops->has_nested_paging = &has_vmx_nested_paging;
761
762 }
763
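
The v3_ctrl_ops table populated at the end of v3_init_vmx() is the only interface the architecture-independent VMM core uses to reach this code. The sketch below shows one way a host glue layer might drive that table; the wrapper launch_one_vmx_guest() and its error handling are illustrative assumptions, not part of Palacios, and the guest_info and v3_vm_config structures are assumed to be prepared elsewhere.

  /* Illustrative only: a hypothetical host-side wrapper around the ops
   * table that v3_init_vmx() fills in.  Not part of the Palacios tree. */
  static int launch_one_vmx_guest(struct v3_ctrl_ops * ops,
                                  struct guest_info * info,
                                  struct v3_vm_config * cfg) {
      if (v3_is_vmx_capable() == 0) {
          PrintError("CPU does not support VMX or it is disabled in the BIOS\n");
          return -1;
      }

      /* v3_init_vmx() sets CR4.VMXE and CR0.NE, executes VMXON, and fills
       * the ops table.  It returns void, so a real caller needs some other
       * way to confirm that VMX was actually enabled before continuing. */
      v3_init_vmx(ops);

      if (ops->init_guest(info, cfg) == -1) {     /* init_vmx_guest() above */
          PrintError("VMX guest initialization failed\n");
          return -1;
      }

      /* start_vmx_guest() issues VMLAUNCH and, in this version of the code,
       * only returns on failure. */
      return ops->start_guest(info);
  }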