Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


lots of fixes
[palacios.git] / palacios / src / palacios / svm.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
11  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
12  * All rights reserved.
13  *
14  * Author: Jack Lange <jarusl@cs.northwestern.edu>
15  *
16  * This is free software.  You are permitted to use,
17  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
18  */
19
20
21 #include <palacios/svm.h>
22 #include <palacios/vmm.h>
23
24 #include <palacios/vmcb.h>
25 #include <palacios/vmm_mem.h>
26 #include <palacios/vmm_paging.h>
27 #include <palacios/svm_handler.h>
28
29 #include <palacios/vmm_debug.h>
30 #include <palacios/vm_guest_mem.h>
31
32 #include <palacios/vmm_decoder.h>
33 #include <palacios/vmm_string.h>
34 #include <palacios/vmm_lowlevel.h>
35 #include <palacios/svm_msr.h>
36
37 #include <palacios/vmm_rbtree.h>
38
39 #include <palacios/vmm_direct_paging.h>
40
41 #include <palacios/vmm_ctrl_regs.h>
42 #include <palacios/svm_io.h>
43
44 #include <palacios/vmm_sprintf.h>
45
46
47 uint32_t v3_last_exit;
48
49 // This is a global pointer to the host's VMCB
50 static addr_t host_vmcbs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
51
52
53
54 extern void v3_stgi();
55 extern void v3_clgi();
56 //extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, uint64_t * fs, uint64_t * gs);
57 extern int v3_svm_launch(vmcb_t * vmcb, struct v3_gprs * vm_regs, vmcb_t * host_vmcb);
58
59
60 static vmcb_t * Allocate_VMCB() {
61     vmcb_t * vmcb_page = (vmcb_t *)V3_VAddr(V3_AllocPages(1));
62
63     memset(vmcb_page, 0, 4096);
64
65     return vmcb_page;
66 }
67
68
69
70 static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * vm_info) {
71     vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
72     vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
73     uint_t i;
74
75
76     //
77
78
79     ctrl_area->svm_instrs.VMRUN = 1;
80     ctrl_area->svm_instrs.VMMCALL = 1;
81     ctrl_area->svm_instrs.VMLOAD = 1;
82     ctrl_area->svm_instrs.VMSAVE = 1;
83     ctrl_area->svm_instrs.STGI = 1;
84     ctrl_area->svm_instrs.CLGI = 1;
85     ctrl_area->svm_instrs.SKINIT = 1;
86     ctrl_area->svm_instrs.RDTSCP = 1;
87     ctrl_area->svm_instrs.ICEBP = 1;
88     ctrl_area->svm_instrs.WBINVD = 1;
89     ctrl_area->svm_instrs.MONITOR = 1;
90     ctrl_area->svm_instrs.MWAIT_always = 1;
91     ctrl_area->svm_instrs.MWAIT_if_armed = 1;
92     ctrl_area->instrs.INVLPGA = 1;
93     ctrl_area->instrs.CPUID = 1;
94
95     ctrl_area->instrs.HLT = 1;
96     // guest_state->cr0 = 0x00000001;    // PE 
97   
98     /*
99       ctrl_area->exceptions.de = 1;
100       ctrl_area->exceptions.df = 1;
101       
102       ctrl_area->exceptions.ts = 1;
103       ctrl_area->exceptions.ss = 1;
104       ctrl_area->exceptions.ac = 1;
105       ctrl_area->exceptions.mc = 1;
106       ctrl_area->exceptions.gp = 1;
107       ctrl_area->exceptions.ud = 1;
108       ctrl_area->exceptions.np = 1;
109       ctrl_area->exceptions.of = 1;
110       
111       ctrl_area->exceptions.nmi = 1;
112     */
113     
114
115     ctrl_area->instrs.NMI = 1;
116     ctrl_area->instrs.SMI = 1;
117     ctrl_area->instrs.INIT = 1;
118     ctrl_area->instrs.PAUSE = 1;
119     ctrl_area->instrs.shutdown_evts = 1;
120
121
122     /* DEBUG FOR RETURN CODE */
123     ctrl_area->exit_code = 1;
124
125
126     /* Setup Guest Machine state */
127
128     vm_info->vm_regs.rsp = 0x00;
129     vm_info->rip = 0xfff0;
130
131     vm_info->vm_regs.rdx = 0x00000f00;
132
133
134     vm_info->cpl = 0;
135
136     vm_info->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
137     vm_info->ctrl_regs.cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode
138     vm_info->ctrl_regs.efer |= EFER_MSR_svm_enable;
139
140
141
142
143
144     vm_info->segments.cs.selector = 0xf000;
145     vm_info->segments.cs.limit = 0xffff;
146     vm_info->segments.cs.base = 0x0000000f0000LL;
147
148     // (raw attributes = 0xf3)
149     vm_info->segments.cs.type = 0x3;
150     vm_info->segments.cs.system = 0x1;
151     vm_info->segments.cs.dpl = 0x3;
152     vm_info->segments.cs.present = 1;
153
154
155
156     struct v3_segment * segregs [] = {&(vm_info->segments.ss), &(vm_info->segments.ds), 
157                                       &(vm_info->segments.es), &(vm_info->segments.fs), 
158                                       &(vm_info->segments.gs), NULL};
159
160     for ( i = 0; segregs[i] != NULL; i++) {
161         struct v3_segment * seg = segregs[i];
162         
163         seg->selector = 0x0000;
164         //    seg->base = seg->selector << 4;
165         seg->base = 0x00000000;
166         seg->limit = ~0u;
167
168         // (raw attributes = 0xf3)
169         seg->type = 0x3;
170         seg->system = 0x1;
171         seg->dpl = 0x3;
172         seg->present = 1;
173     }
174
175     vm_info->segments.gdtr.limit = 0x0000ffff;
176     vm_info->segments.gdtr.base = 0x0000000000000000LL;
177     vm_info->segments.idtr.limit = 0x0000ffff;
178     vm_info->segments.idtr.base = 0x0000000000000000LL;
179
180     vm_info->segments.ldtr.selector = 0x0000;
181     vm_info->segments.ldtr.limit = 0x0000ffff;
182     vm_info->segments.ldtr.base = 0x0000000000000000LL;
183     vm_info->segments.tr.selector = 0x0000;
184     vm_info->segments.tr.limit = 0x0000ffff;
185     vm_info->segments.tr.base = 0x0000000000000000LL;
186
187
188     vm_info->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
189     vm_info->dbg_regs.dr7 = 0x0000000000000400LL;
190
191
192     v3_init_svm_io_map(vm_info);
193     ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
194     ctrl_area->instrs.IOIO_PROT = 1;
195
196
197     v3_init_svm_msr_map(vm_info);
198     ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
199     ctrl_area->instrs.MSR_PROT = 1;
200
201
202     PrintDebug("Exiting on interrupts\n");
203     ctrl_area->guest_ctrl.V_INTR_MASKING = 1;
204     ctrl_area->instrs.INTR = 1;
205
206
207     if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
208         PrintDebug("Creating initial shadow page table\n");
209         
210         /* JRL: This is a performance killer, and a simplistic solution */
211         /* We need to fix this */
212         ctrl_area->TLB_CONTROL = 1;
213         ctrl_area->guest_ASID = 1;
214         
215         
216         if (v3_init_passthrough_pts(vm_info) == -1) {
217             PrintError("Could not initialize passthrough page tables\n");
218             return ;
219         }
220
221
222         vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
223         PrintDebug("Created\n");
224         
225         vm_info->ctrl_regs.cr0 |= 0x80000000;
226         vm_info->ctrl_regs.cr3 = vm_info->direct_map_pt;
227
228         ctrl_area->cr_reads.cr0 = 1;
229         ctrl_area->cr_writes.cr0 = 1;
230         //ctrl_area->cr_reads.cr4 = 1;
231         ctrl_area->cr_writes.cr4 = 1;
232         ctrl_area->cr_reads.cr3 = 1;
233         ctrl_area->cr_writes.cr3 = 1;
234
235         v3_hook_msr(vm_info, EFER_MSR, 
236                     &v3_handle_efer_read,
237                     &v3_handle_efer_write, 
238                     vm_info);
239
240         ctrl_area->instrs.INVLPG = 1;
241
242         ctrl_area->exceptions.pf = 1;
243
244         guest_state->g_pat = 0x7040600070406ULL;
245
246
247
248     } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
249         // Flush the TLB on entries/exits
250         ctrl_area->TLB_CONTROL = 1;
251         ctrl_area->guest_ASID = 1;
252
253         // Enable Nested Paging
254         ctrl_area->NP_ENABLE = 1;
255
256         PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));
257
258         // Set the Nested Page Table pointer
259         if (v3_init_passthrough_pts(vm_info) == -1) {
260             PrintError("Could not initialize Nested page tables\n");
261             return ;
262         }
263
264         ctrl_area->N_CR3 = vm_info->direct_map_pt;
265
266         guest_state->g_pat = 0x7040600070406ULL;
267     }
268 }
269
270
271 int v3_init_svm_vmcb(struct guest_info * info, v3_vm_class_t vm_class) {
272
273     PrintDebug("Allocating VMCB\n");
274     info->vmm_data = (void*)Allocate_VMCB();
275     
276     if (vm_class == V3_PC_VM) {
277         PrintDebug("Initializing VMCB (addr=%p)\n", (void *)info->vmm_data);
278         Init_VMCB_BIOS((vmcb_t*)(info->vmm_data), info);
279     } else {
280         PrintError("Invalid VM class\n");
281         return -1;
282     }
283
284     return 0;
285 }
286
287
288
289 static int update_irq_exit_state(struct guest_info * info) {
290     vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
291
292     if ((info->intr_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) {
293         
294 #ifdef CONFIG_DEBUG_INTERRUPTS
295         PrintDebug("INTAK cycle completed for irq %d\n", info->intr_state.irq_vector);
296 #endif
297
298         info->intr_state.irq_started = 1;
299         info->intr_state.irq_pending = 0;
300
301         v3_injecting_intr(info, info->intr_state.irq_vector, V3_EXTERNAL_IRQ);
302     }
303
304     if ((info->intr_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 0)) {
305 #ifdef CONFIG_DEBUG_INTERRUPTS
306         PrintDebug("Interrupt %d taken by guest\n", info->intr_state.irq_vector);
307 #endif
308
309         // Interrupt was taken fully vectored
310         info->intr_state.irq_started = 0;
311
312     } else {
313 #ifdef CONFIG_DEBUG_INTERRUPTS
314         PrintDebug("EXIT INT INFO is set (vec=%d)\n", guest_ctrl->exit_int_info.vector);
315 #endif
316     }
317
318     return 0;
319 }
320
321
322 static int update_irq_entry_state(struct guest_info * info) {
323     vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
324
325     if (v3_excp_pending(info)) {
326         uint_t excp = v3_get_excp_number(info);
327         
328         guest_ctrl->EVENTINJ.type = SVM_INJECTION_EXCEPTION;
329         
330         if (info->excp_state.excp_error_code_valid) {
331             guest_ctrl->EVENTINJ.error_code = info->excp_state.excp_error_code;
332             guest_ctrl->EVENTINJ.ev = 1;
333 #ifdef CONFIG_DEBUG_INTERRUPTS
334             PrintDebug("Injecting exception %d with error code %x\n", excp, guest_ctrl->EVENTINJ.error_code);
335 #endif
336         }
337         
338         guest_ctrl->EVENTINJ.vector = excp;
339         
340         guest_ctrl->EVENTINJ.valid = 1;
341
342 #ifdef CONFIG_DEBUG_INTERRUPTS
343         PrintDebug("<%d> Injecting Exception %d (CR2=%p) (EIP=%p)\n", 
344                    (int)info->num_exits, 
345                    guest_ctrl->EVENTINJ.vector, 
346                    (void *)(addr_t)info->ctrl_regs.cr2,
347                    (void *)(addr_t)info->rip);
348 #endif
349
350         v3_injecting_excp(info, excp);
351     } else if (info->intr_state.irq_started == 1) {
352 #ifdef CONFIG_DEBUG_INTERRUPTS
353         PrintDebug("IRQ pending from previous injection\n");
354 #endif
355         guest_ctrl->guest_ctrl.V_IRQ = 1;
356         guest_ctrl->guest_ctrl.V_INTR_VECTOR = info->intr_state.irq_vector;
357         guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
358         guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
359
360     } else {
361         switch (v3_intr_pending(info)) {
362             case V3_EXTERNAL_IRQ: {
363                 uint32_t irq = v3_get_intr(info);
364
365                 guest_ctrl->guest_ctrl.V_IRQ = 1;
366                 guest_ctrl->guest_ctrl.V_INTR_VECTOR = irq;
367                 guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
368                 guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
369
370 #ifdef CONFIG_DEBUG_INTERRUPTS
371                 PrintDebug("Injecting Interrupt %d (EIP=%p)\n", 
372                            guest_ctrl->guest_ctrl.V_INTR_VECTOR, 
373                            (void *)(addr_t)info->rip);
374 #endif
375
376                 info->intr_state.irq_pending = 1;
377                 info->intr_state.irq_vector = irq;
378                 
379                 break;
380             }
381             case V3_NMI:
382                 guest_ctrl->EVENTINJ.type = SVM_INJECTION_NMI;
383                 break;
384             case V3_SOFTWARE_INTR:
385                 guest_ctrl->EVENTINJ.type = SVM_INJECTION_SOFT_INTR;
386                 break;
387             case V3_VIRTUAL_IRQ:
388                 guest_ctrl->EVENTINJ.type = SVM_INJECTION_IRQ;
389                 break;
390
391             case V3_INVALID_INTR:
392             default:
393                 break;
394         }
395         
396     }
397
398     return 0;
399 }
400
401
402 /* 
403  * CAUTION and DANGER!!! 
404  * 
405  * The VMCB CANNOT(!!) be accessed outside of the clgi/stgi calls inside this function
 * When executing a symbiotic call, the VMCB WILL be overwritten, so any dependencies 
407  * on its contents will cause things to break. The contents at the time of the exit WILL 
408  * change before the exit handler is executed.
409  */
410 int v3_svm_enter(struct guest_info * info) {
411     vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
412     vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data)); 
413     ullong_t tmp_tsc;
414     addr_t exit_code = 0, exit_info1 = 0, exit_info2 = 0;
415
416     // Conditionally yield the CPU if the timeslice has expired
417     v3_yield_cond(info);
418
419     // disable global interrupts for vm state transition
420     v3_clgi();
421
422     // Synchronize the guest state to the VMCB
423     guest_state->cr0 = info->ctrl_regs.cr0;
424     guest_state->cr2 = info->ctrl_regs.cr2;
425     guest_state->cr3 = info->ctrl_regs.cr3;
426     guest_state->cr4 = info->ctrl_regs.cr4;
427     guest_state->dr6 = info->dbg_regs.dr6;
428     guest_state->dr7 = info->dbg_regs.dr7;
429     guest_ctrl->guest_ctrl.V_TPR = info->ctrl_regs.cr8 & 0xff;
430     guest_state->rflags = info->ctrl_regs.rflags;
431     guest_state->efer = info->ctrl_regs.efer;
432     
433     guest_state->cpl = info->cpl;
434
435     v3_set_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
436
437     guest_state->rax = info->vm_regs.rax;
438     guest_state->rip = info->rip;
439     guest_state->rsp = info->vm_regs.rsp;
440
441 #ifdef CONFIG_SYMBIOTIC
442     if (info->sym_state.sym_call_active == 0) {
443         update_irq_entry_state(info);
444     }
445 #else 
446     update_irq_entry_state(info);
447 #endif
448
449
450     /* ** */
451
452     /*
453       PrintDebug("SVM Entry to CS=%p  rip=%p...\n", 
454       (void *)(addr_t)info->segments.cs.base, 
455       (void *)(addr_t)info->rip);
456     */
457
458 #ifdef CONFIG_SYMBIOTIC
459     if (info->sym_state.sym_call_active == 1) {
460         if (guest_ctrl->guest_ctrl.V_IRQ == 1) {
461             V3_Print("!!! Injecting Interrupt during Sym call !!!\n");
462         }
463     }
464 #endif
465
466
467     rdtscll(info->time_state.cached_host_tsc);
468     guest_ctrl->TSC_OFFSET = info->time_state.guest_tsc - info->time_state.cached_host_tsc;
469         
470     v3_svm_launch((vmcb_t *)V3_PAddr(info->vmm_data), &(info->vm_regs), (vmcb_t *)host_vmcbs[info->cpu_id]);
471     
472
473     v3_last_exit = (uint32_t)(guest_ctrl->exit_code);
474
475     //  v3_print_cond("SVM Returned: Exit Code: %x\n", (uint32_t)(guest_ctrl->exit_code));
476
477     rdtscll(tmp_tsc);
478
479     //PrintDebug("SVM Returned\n");
480     
481     info->num_exits++;
482
483     v3_update_time(info, tmp_tsc - info->time_state.cached_host_tsc);
484
485
486     // Save Guest state from VMCB
487     info->rip = guest_state->rip;
488     info->vm_regs.rsp = guest_state->rsp;
489     info->vm_regs.rax = guest_state->rax;
490
491     info->cpl = guest_state->cpl;
492
493     info->ctrl_regs.cr0 = guest_state->cr0;
494     info->ctrl_regs.cr2 = guest_state->cr2;
495     info->ctrl_regs.cr3 = guest_state->cr3;
496     info->ctrl_regs.cr4 = guest_state->cr4;
497     info->dbg_regs.dr6 = guest_state->dr6;
498     info->dbg_regs.dr7 = guest_state->dr7;
499     info->ctrl_regs.cr8 = guest_ctrl->guest_ctrl.V_TPR;
500     info->ctrl_regs.rflags = guest_state->rflags;
501     info->ctrl_regs.efer = guest_state->efer;
502     
503     v3_get_vmcb_segments((vmcb_t*)(info->vmm_data), &(info->segments));
504     info->cpu_mode = v3_get_vm_cpu_mode(info);
505     info->mem_mode = v3_get_vm_mem_mode(info);
506     /* ** */
507
508
509     // save exit info here
510     exit_code = guest_ctrl->exit_code;
511     exit_info1 = guest_ctrl->exit_info1;
512     exit_info2 = guest_ctrl->exit_info2;
513
514
515 #ifdef CONFIG_SYMBIOTIC
516     if (info->sym_state.sym_call_active == 0) {
517         update_irq_exit_state(info);
518     }
519 #else
520     update_irq_exit_state(info);
521 #endif
522
523
524     // reenable global interrupts after vm exit
525     v3_stgi();
526
527  
528     // Conditionally yield the CPU if the timeslice has expired
529     v3_yield_cond(info);
530
531
532     if (v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2) != 0) {
533         PrintError("Error in SVM exit handler\n");
534         return -1;
535     }
536
537
538     return 0;
539 }
540
541
542 int v3_start_svm_guest(struct guest_info *info) {
543     //    vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
544     //  vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
545
546
547
548     PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
549     //PrintDebugVMCB((vmcb_t*)(info->vmm_data));
550     
551     info->run_state = VM_RUNNING;
552     rdtscll(info->yield_start_cycle);
553
554
555     while (1) {
556         if (v3_svm_enter(info) == -1) {
557             vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
558             addr_t host_addr;
559             addr_t linear_addr = 0;
560             
561             info->run_state = VM_ERROR;
562             
563             V3_Print("SVM ERROR!!\n"); 
564             
565             v3_print_guest_state(info);
566             
567             V3_Print("SVM Exit Code: %p\n", (void *)(addr_t)guest_ctrl->exit_code); 
568             
569             V3_Print("exit_info1 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info1));
570             V3_Print("exit_info1 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info1)) + 4));
571             
572             V3_Print("exit_info2 low = 0x%.8x\n", *(uint_t*)&(guest_ctrl->exit_info2));
573             V3_Print("exit_info2 high = 0x%.8x\n", *(uint_t *)(((uchar_t *)&(guest_ctrl->exit_info2)) + 4));
574             
575             linear_addr = get_addr_linear(info, info->rip, &(info->segments.cs));
576             
577             if (info->mem_mode == PHYSICAL_MEM) {
578                 guest_pa_to_host_va(info, linear_addr, &host_addr);
579             } else if (info->mem_mode == VIRTUAL_MEM) {
580                 guest_va_to_host_va(info, linear_addr, &host_addr);
581             }
582             
583             V3_Print("Host Address of rip = 0x%p\n", (void *)host_addr);
584             
585             V3_Print("Instr (15 bytes) at %p:\n", (void *)host_addr);
586             v3_dump_mem((uint8_t *)host_addr, 15);
587             
588             v3_print_stack(info);
589
590             break;
591         }
592         
593 /*
594         if ((info->num_exits % 5000) == 0) {
595             V3_Print("SVM Exit number %d\n", (uint32_t)info->num_exits);
596         }
597 */
598         
599     }
600     return 0;
601 }
602
603
604
605
606
607 /* Checks machine SVM capability */
608 /* Implemented from: AMD Arch Manual 3, sect 15.4 */ 
609 int v3_is_svm_capable() {
610     uint_t vm_cr_low = 0, vm_cr_high = 0;
611     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
612
613     v3_cpuid(CPUID_EXT_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
614   
615     PrintDebug("CPUID_EXT_FEATURE_IDS_ecx=0x%x\n", ecx);
616
617     if ((ecx & CPUID_EXT_FEATURE_IDS_ecx_svm_avail) == 0) {
618       V3_Print("SVM Not Available\n");
619       return 0;
620     }  else {
621         v3_get_msr(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);
622         
623         PrintDebug("SVM_VM_CR_MSR = 0x%x 0x%x\n", vm_cr_high, vm_cr_low);
624         
625         if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 1) {
626             V3_Print("SVM is available but is disabled.\n");
627             
628             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
629             
630             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);
631             
632             if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
633                 V3_Print("SVM BIOS Disabled, not unlockable\n");
634             } else {
635                 V3_Print("SVM is locked with a key\n");
636             }
637             return 0;
638
639         } else {
640             V3_Print("SVM is available and  enabled.\n");
641
642             v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
643             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_eax=0x%x\n", eax);
644             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ebx=0x%x\n", ebx);
645             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_ecx=0x%x\n", ecx);
646             PrintDebug("CPUID_SVM_REV_AND_FEATURE_IDS_edx=0x%x\n", edx);
647
648             return 1;
649         }
650     }
651 }
652
653 static int has_svm_nested_paging() {
654     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
655
656     v3_cpuid(CPUID_SVM_REV_AND_FEATURE_IDS, &eax, &ebx, &ecx, &edx);
657
658     //PrintDebug("CPUID_EXT_FEATURE_IDS_edx=0x%x\n", edx);
659
660     if ((edx & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
661         V3_Print("SVM Nested Paging not supported\n");
662         return 0;
663     } else {
664         V3_Print("SVM Nested Paging supported\n");
665         return 1;
666     }
667 }
668
669
670 void v3_init_svm_cpu(int cpu_id) {
671     reg_ex_t msr;
672     extern v3_cpu_arch_t v3_cpu_types[];
673
674     // Enable SVM on the CPU
675     v3_get_msr(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
676     msr.e_reg.low |= EFER_MSR_svm_enable;
677     v3_set_msr(EFER_MSR, 0, msr.e_reg.low);
678
679     V3_Print("SVM Enabled\n");
680
681     // Setup the host state save area
682     host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4);
683
684     /* 64-BIT-ISSUE */
685     //  msr.e_reg.high = 0;
686     //msr.e_reg.low = (uint_t)host_vmcb;
687     msr.r_reg = host_vmcbs[cpu_id];
688
689     PrintDebug("Host State being saved at %p\n", (void *)host_vmcbs[cpu_id]);
690     v3_set_msr(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);
691
692
693     if (has_svm_nested_paging() == 1) {
694         v3_cpu_types[cpu_id] = V3_SVM_REV3_CPU;
695     } else {
696         v3_cpu_types[cpu_id] = V3_SVM_CPU;
697     }
698 }
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753 #if 0
754 /* 
755  * Test VMSAVE/VMLOAD Latency 
756  */
757 #define vmsave ".byte 0x0F,0x01,0xDB ; "
758 #define vmload ".byte 0x0F,0x01,0xDA ; "
759 {
760     uint32_t start_lo, start_hi;
761     uint32_t end_lo, end_hi;
762     uint64_t start, end;
763     
764     __asm__ __volatile__ (
765                           "rdtsc ; "
766                           "movl %%eax, %%esi ; "
767                           "movl %%edx, %%edi ; "
768                           "movq  %%rcx, %%rax ; "
769                           vmsave
770                           "rdtsc ; "
771                           : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
772                           : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
773                           );
774     
775     start = start_hi;
776     start <<= 32;
777     start += start_lo;
778     
779     end = end_hi;
780     end <<= 32;
781     end += end_lo;
782     
783     PrintDebug("VMSave Cycle Latency: %d\n", (uint32_t)(end - start));
784     
785     __asm__ __volatile__ (
786                           "rdtsc ; "
787                           "movl %%eax, %%esi ; "
788                           "movl %%edx, %%edi ; "
789                           "movq  %%rcx, %%rax ; "
790                           vmload
791                           "rdtsc ; "
792                           : "=D"(start_hi), "=S"(start_lo), "=a"(end_lo),"=d"(end_hi)
793                               : "c"(host_vmcb[cpu_id]), "0"(0), "1"(0), "2"(0), "3"(0)
794                               );
795         
796         start = start_hi;
797         start <<= 32;
798         start += start_lo;
799
800         end = end_hi;
801         end <<= 32;
802         end += end_lo;
803
804
805         PrintDebug("VMLoad Cycle Latency: %d\n", (uint32_t)(end - start));
806     }
807     /* End Latency Test */
808
809 #endif
810
811
812
813
814
815
816
817 #if 0
818 void Init_VMCB_pe(vmcb_t *vmcb, struct guest_info vm_info) {
819   vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
820   vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
821   uint_t i = 0;
822
823
824   guest_state->rsp = vm_info.vm_regs.rsp;
825   guest_state->rip = vm_info.rip;
826
827
828   /* I pretty much just gutted this from TVMM */
829   /* Note: That means its probably wrong */
830
831   // set the segment registers to mirror ours
832   guest_state->cs.selector = 1<<3;
833   guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
834   guest_state->cs.attrib.fields.S = 1;
835   guest_state->cs.attrib.fields.P = 1;
836   guest_state->cs.attrib.fields.db = 1;
837   guest_state->cs.attrib.fields.G = 1;
838   guest_state->cs.limit = 0xfffff;
839   guest_state->cs.base = 0;
840   
841   struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
842   for ( i = 0; segregs[i] != NULL; i++) {
843     struct vmcb_selector * seg = segregs[i];
844     
845     seg->selector = 2<<3;
846     seg->attrib.fields.type = 0x2; // Data Segment+read/write
847     seg->attrib.fields.S = 1;
848     seg->attrib.fields.P = 1;
849     seg->attrib.fields.db = 1;
850     seg->attrib.fields.G = 1;
851     seg->limit = 0xfffff;
852     seg->base = 0;
853   }
854
855
856   {
857     /* JRL THIS HAS TO GO */
858     
859     //    guest_state->tr.selector = GetTR_Selector();
860     guest_state->tr.attrib.fields.type = 0x9; 
861     guest_state->tr.attrib.fields.P = 1;
862     // guest_state->tr.limit = GetTR_Limit();
863     //guest_state->tr.base = GetTR_Base();// - 0x2000;
864     /* ** */
865   }
866
867
868   /* ** */
869
870
871   guest_state->efer |= EFER_MSR_svm_enable;
872   guest_state->rflags = 0x00000002; // The reserved bit is always 1
873   ctrl_area->svm_instrs.VMRUN = 1;
874   guest_state->cr0 = 0x00000001;    // PE 
875   ctrl_area->guest_ASID = 1;
876
877
878   //  guest_state->cpl = 0;
879
880
881
882   // Setup exits
883
884   ctrl_area->cr_writes.cr4 = 1;
885   
886   ctrl_area->exceptions.de = 1;
887   ctrl_area->exceptions.df = 1;
888   ctrl_area->exceptions.pf = 1;
889   ctrl_area->exceptions.ts = 1;
890   ctrl_area->exceptions.ss = 1;
891   ctrl_area->exceptions.ac = 1;
892   ctrl_area->exceptions.mc = 1;
893   ctrl_area->exceptions.gp = 1;
894   ctrl_area->exceptions.ud = 1;
895   ctrl_area->exceptions.np = 1;
896   ctrl_area->exceptions.of = 1;
897   ctrl_area->exceptions.nmi = 1;
898
899   
900
901   ctrl_area->instrs.IOIO_PROT = 1;
902   ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
903   
904   {
905     reg_ex_t tmp_reg;
906     tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
907     memset((void*)(tmp_reg.e_reg.low), 0xffffffff, PAGE_SIZE * 2);
908   }
909
910   ctrl_area->instrs.INTR = 1;
911
912   
913   {
914     char gdt_buf[6];
915     char idt_buf[6];
916
917     memset(gdt_buf, 0, 6);
918     memset(idt_buf, 0, 6);
919
920
921     uint_t gdt_base, idt_base;
922     ushort_t gdt_limit, idt_limit;
923     
924     GetGDTR(gdt_buf);
925     gdt_base = *(ulong_t*)((uchar_t*)gdt_buf + 2) & 0xffffffff;
926     gdt_limit = *(ushort_t*)(gdt_buf) & 0xffff;
927     PrintDebug("GDT: base: %x, limit: %x\n", gdt_base, gdt_limit);
928
929     GetIDTR(idt_buf);
930     idt_base = *(ulong_t*)(idt_buf + 2) & 0xffffffff;
931     idt_limit = *(ushort_t*)(idt_buf) & 0xffff;
932     PrintDebug("IDT: base: %x, limit: %x\n",idt_base, idt_limit);
933
934
935     // gdt_base -= 0x2000;
936     //idt_base -= 0x2000;
937
938     guest_state->gdtr.base = gdt_base;
939     guest_state->gdtr.limit = gdt_limit;
940     guest_state->idtr.base = idt_base;
941     guest_state->idtr.limit = idt_limit;
942
943
944   }
945   
946   
947   // also determine if CPU supports nested paging
948   /*
949   if (vm_info.page_tables) {
950     //   if (0) {
951     // Flush the TLB on entries/exits
952     ctrl_area->TLB_CONTROL = 1;
953
954     // Enable Nested Paging
955     ctrl_area->NP_ENABLE = 1;
956
957     PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));
958
959         // Set the Nested Page Table pointer
960     ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);
961
962
963     //   ctrl_area->N_CR3 = Get_CR3();
964     // guest_state->cr3 |= (Get_CR3() & 0xfffff000);
965
966     guest_state->g_pat = 0x7040600070406ULL;
967
968     PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*((unsigned char *)&(ctrl_area->N_CR3) + 4));
969     PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*((unsigned char *)&(guest_state->cr3) + 4));
970     // Enable Paging
971     //    guest_state->cr0 |= 0x80000000;
972   }
973   */
974
975 }
976
977
978
979
980
981 #endif
982
983