Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches can be checked out the same way.
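For example, to track a release branch (the branch name below is illustrative; list the actual remote branches with "git branch -r"):

  git checkout --track -b Release-1.2 origin/Release-1.2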


palacios/src/geekos/svm.c:
#include <geekos/svm.h>
#include <geekos/vmm.h>

#include <geekos/vmcb.h>
#include <geekos/vmm_mem.h>
#include <geekos/vmm_paging.h>


extern struct vmm_os_hooks * os_hooks;

extern uint_t cpuid_ecx(uint_t op);
extern uint_t cpuid_edx(uint_t op);
extern void Get_MSR(uint_t MSR, uint_t * high_byte, uint_t * low_byte);
extern void Set_MSR(uint_t MSR, uint_t high_byte, uint_t low_byte);
extern uint_t launch_svm(vmcb_t * vmcb_addr);
extern uint_t Get_CR3();

extern void GetGDTR(void * gdt);
extern void GetIDTR(void * idt);


/* Checks machine SVM capability */
/* Implemented from: AMD Arch Manual 3, sect 15.4 */
int is_svm_capable() {
  uint_t ret = cpuid_ecx(CPUID_FEATURE_IDS);
  uint_t vm_cr_low = 0, vm_cr_high = 0;


  if ((ret & CPUID_FEATURE_IDS_ecx_svm_avail) == 0) {
    PrintDebug("SVM Not Available\n");
    return 0;
  }

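  // If VM_CR.SVMDIS is clear, SVM can be enabled by setting EFER.SVME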
  Get_MSR(SVM_VM_CR_MSR, &vm_cr_high, &vm_cr_low);

  if ((vm_cr_low & SVM_VM_CR_MSR_svmdis) == 0) {
    return 1;
  }

  ret = cpuid_edx(CPUID_SVM_REV_AND_FEATURE_IDS);


  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_np) == 0) {
    PrintDebug("Nested Paging not supported\n");
  }

  if ((ret & CPUID_SVM_REV_AND_FEATURE_IDS_edx_svml) == 0) {
    PrintDebug("SVM BIOS Disabled, not unlockable\n");
  } else {
    PrintDebug("SVM is locked with a key\n");
  }

  return 0;
}


void Init_SVM(struct vmm_ctrl_ops * vmm_ops) {
  reg_ex_t msr;
  void * host_state;


  // Enable SVM on the CPU
  Get_MSR(EFER_MSR, &(msr.e_reg.high), &(msr.e_reg.low));
  msr.e_reg.low |= EFER_MSR_svm_enable;
  Set_MSR(EFER_MSR, 0, msr.e_reg.low);

  PrintDebug("SVM Enabled\n");


  // Setup the host state save area
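  // (SVM_VM_HSAVE_PA_MSR is programmed with the physical address of a
  //  4KB-aligned page that the CPU uses to save host state across VMRUN)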
  host_state = os_hooks->allocate_pages(1);

  msr.e_reg.high = 0;
  msr.e_reg.low = (uint_t)host_state;


  PrintDebug("Host State being saved at %x\n", (uint_t)host_state);
  Set_MSR(SVM_VM_HSAVE_PA_MSR, msr.e_reg.high, msr.e_reg.low);



  // Setup the SVM specific vmm operations
  vmm_ops->init_guest = &init_svm_guest;
  vmm_ops->start_guest = &start_svm_guest;


  return;
}


int init_svm_guest(struct guest_info *info) {

  PrintDebug("Allocating VMCB\n");
  info->vmm_data = (void*)Allocate_VMCB();


  PrintDebug("Generating Guest nested page tables\n");
  print_mem_list(&(info->mem_list));
  print_mem_layout(&(info->mem_layout));
  info->page_tables = generate_guest_page_tables_64(&(info->mem_layout), &(info->mem_list));
  //PrintDebugPageTables(info->page_tables);


  PrintDebug("Initializing VMCB (addr=%x)\n", info->vmm_data);
  Init_VMCB((vmcb_t*)(info->vmm_data), *info);


  return 0;
}


// can we start a kernel thread here...
int start_svm_guest(struct guest_info *info) {
  vmcb_ctrl_t * guest_ctrl = 0;

  ulong_t exit_code = 0;

  PrintDebug("Launching SVM VM (vmcb=%x)\n", info->vmm_data);
  // PrintDebugVMCB((vmcb_t*)(info->vmm_data));

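  // launch_svm executes VMRUN on this VMCB; it returns after the guest #VMEXITs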
  launch_svm((vmcb_t*)(info->vmm_data));

  guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));


  PrintDebug("SVM Returned: (Exit Code=%x) (VMCB=%x)\n", guest_ctrl->exit_code, info->vmm_data);

  exit_code = guest_ctrl->exit_code;

  PrintDebug("SVM Returned: Exit Code: %x\n", exit_code);

  return 0;
}


/** 
 *  We handle the svm exits here
 *  This function should probably be moved to another file to keep things manageable...
 */
int handle_svm_exit(struct VMM_GPRs guest_gprs) {

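  // Currently a stub: exit decoding and dispatch are not implemented yet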
  return 0;
}


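/* The VMCB must be a contiguous, 4KB-aligned region of physical memory */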
vmcb_t * Allocate_VMCB() {
  vmcb_t * vmcb_page = (vmcb_t*)os_hooks->allocate_pages(1);

  memset(vmcb_page, 0, 4096);

  return vmcb_page;
}



void Init_VMCB(vmcb_t *vmcb, guest_info_t vm_info) {
  vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
  vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
  uint_t i = 0;


  guest_state->rsp = vm_info.rsp;
  guest_state->rip = vm_info.rip;


  /* I pretty much just gutted this from TVMM */
  /* Note: That means it's probably wrong */

  // set the segment registers to mirror ours
  guest_state->cs.selector = 0;
  guest_state->cs.attrib.fields.type = 0xa; // Code segment+read
  guest_state->cs.attrib.fields.S = 1;
  guest_state->cs.attrib.fields.P = 1;
  guest_state->cs.attrib.fields.db = 1;
  guest_state->cs.limit = 0xffffffff;
  guest_state->cs.base = 0;

  struct vmcb_selector *segregs [] = {&(guest_state->ss), &(guest_state->ds), &(guest_state->es), &(guest_state->fs), &(guest_state->gs), NULL};
  for ( i = 0; segregs[i] != NULL; i++) {
    struct vmcb_selector * seg = segregs[i];

    seg->selector = 0;
    seg->attrib.fields.type = 0x2; // Data Segment+read/write
    seg->attrib.fields.S = 1;
    seg->attrib.fields.P = 1;
    seg->attrib.fields.db = 1;
    seg->limit = 0xffffffff;
    seg->base = 0;
  }

  /* ** */


  guest_state->efer |= EFER_MSR_svm_enable;
  guest_state->rflags = 0x00000002; // The reserved bit is always 1
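  // The VMRUN intercept is architecturally required; VMRUN fails its
  // consistency checks if this bit is clear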
  ctrl_area->svm_instrs.instrs.VMRUN = 1;
  guest_state->cr0 = 0x00000001;    // PE
  ctrl_area->guest_ASID = 1;


  //  guest_state->cpl = 3;



  // Setup exits

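  // Each intercepted exception below causes a #VMEXIT instead of being
  // delivered directly to the guest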
  ctrl_area->exceptions.ex_names.de = 1;
  ctrl_area->exceptions.ex_names.df = 1;
  ctrl_area->exceptions.ex_names.pf = 1;
  ctrl_area->exceptions.ex_names.ts = 1;
  ctrl_area->exceptions.ex_names.ss = 1;
  ctrl_area->exceptions.ex_names.ac = 1;
  ctrl_area->exceptions.ex_names.mc = 1;
  ctrl_area->exceptions.ex_names.gp = 1;
  ctrl_area->exceptions.ex_names.ud = 1;
  ctrl_area->exceptions.ex_names.np = 1;
  ctrl_area->exceptions.ex_names.of = 1;
  ctrl_area->exceptions.ex_names.nmi = 1;


  // ctrl_area->instrs.instrs.IOIO_PROT = 1;
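  // The IOPM is a 12KB bitmap with one bit per I/O port; filling it with 1s
  // would intercept all guest port I/O once IOIO_PROT is enabled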
  ctrl_area->IOPM_BASE_PA = (uint_t)os_hooks->allocate_pages(3);

  {
    reg_ex_t tmp_reg;
    tmp_reg.r_reg = ctrl_area->IOPM_BASE_PA;
    memset((void*)(tmp_reg.e_reg.low), 0xff, PAGE_SIZE * 3);
  }

  ctrl_area->instrs.instrs.INTR = 1;

  /*
  {
    reg_ex_t gdt;
    reg_ex_t idt;

    GetGDTR(&(gdt.r_reg));
    PrintDebug("GDT: hi: %x, lo: %x\n", gdt.e_reg.high, gdt.e_reg.low);

    GetIDTR(&(idt.r_reg));

  }
  */

  // also determine if CPU supports nested paging
  if (vm_info.page_tables) {
  //  if (0) {
    // Flush the TLB on entries/exits
    ctrl_area->TLB_CONTROL = 1;

    // Enable Nested Paging
    ctrl_area->NP_ENABLE = 1;

    PrintDebug("NP_Enable at 0x%x\n", &(ctrl_area->NP_ENABLE));

    // Set the Nested Page Table pointer
    ctrl_area->N_CR3 |= ((addr_t)vm_info.page_tables & 0xfffff000);


    //   ctrl_area->N_CR3 = Get_CR3();
    // guest_state->cr3 |= (Get_CR3() & 0xfffff000);

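    // Guest PAT set to the architectural power-on default memory types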
    guest_state->g_pat = 0x7040600070406ULL;

    PrintDebug("Set Nested CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(ctrl_area->N_CR3)), (uint_t)*(((uint_t *)&(ctrl_area->N_CR3)) + 1));
    PrintDebug("Set Guest CR3: lo: 0x%x  hi: 0x%x\n", (uint_t)*(&(guest_state->cr3)), (uint_t)*(((uint_t *)&(guest_state->cr3)) + 1));
    // Enable Paging
    //    guest_state->cr0 |= 0x80000000;
  }


}
