#include <palacios/vmm_shadow_paging.h>
#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_decoder.h>
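
/* Initialize the shadow paging state for a guest: both the guest and shadow
 * paging modes start out as 32-bit (PDE32) and no shadow CR3 is set yet. */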
int init_shadow_page_state(struct shadow_page_state * state) {
  state->guest_mode = PDE32;
  state->shadow_mode = PDE32;

  state->shadow_cr3 = 0;
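
/* Top-level shadow page fault handler: dispatch on the guest's memory and CPU
 * mode. Faults taken before the guest enables paging go to the special-case
 * handler; faults under 32-bit paging go to handle_shadow_pagefault32(). */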
int handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {

  if (info->mem_mode == PHYSICAL_MEM) {
    // If paging is not turned on we need to handle the special cases
    return handle_special_page_fault(info, fault_addr, error_code);
  } else if (info->mem_mode == VIRTUAL_MEM) {

    switch (info->cpu_mode) {
      return handle_shadow_pagefault32(info, fault_addr, error_code);
      // currently not handled

    PrintDebug("Invalid Memory mode\n");
int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
  pde32_t * guest_pde = NULL;
  pde32_t * shadow_pde = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
  addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);
  pt_access_status_t guest_pde_access;
  pt_access_status_t shadow_pde_access;
  pde32_t * guest_pde_entry = NULL;
  pde32_t * shadow_pde_entry = (pde32_t *)&(shadow_pde[PDE32_INDEX(fault_addr)]);

  if (guest_pa_to_host_va(info, guest_cr3, (addr_t*)&guest_pde) == -1) {
    PrintDebug("Invalid Guest PDE Address: 0x%x\n", guest_cr3);

  guest_pde_entry = (pde32_t *)&(guest_pde[PDE32_INDEX(fault_addr)]);

  // Check the guest page permissions
  guest_pde_access = can_access_pde32(guest_pde, fault_addr, error_code);

  if (guest_pde_access != PT_ACCESS_OK) {
    // inject page fault to the guest (Guest PDE fault)

    info->ctrl_regs.cr2 = fault_addr;
    raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);

    PrintDebug("Injecting PDE pf to guest\n");

  // Check that the Guest PDE entry points to valid memory
  // else Machine Check the guest

  shadow_pde_access = can_access_pde32(shadow_pde, fault_addr, error_code);
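
  /* The guest would have allowed this access, so the fault belongs to the
   * shadow tables. A not-present shadow PDE means no shadow page table has
   * been built for this region yet: allocate one and mirror the guest PDE's
   * flags into the new shadow entry. */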
  if (shadow_pde_access == PT_ENTRY_NOT_PRESENT) {
    pte32_t * shadow_pte = NULL;

    V3_AllocPages(shadow_pte, 1);
    memset(shadow_pte, 0, PAGE_SIZE);

    shadow_pde_entry->pt_base_addr = PD32_BASE_ADDR(shadow_pte);

    shadow_pde_entry->present = 1;
    shadow_pde_entry->user_page = guest_pde_entry->user_page;

    // VMM Specific options
    shadow_pde_entry->write_through = 0;
    shadow_pde_entry->cache_disable = 0;
    shadow_pde_entry->global_page = 0;

    guest_pde_entry->accessed = 1;

    if (guest_pde_entry->large_page == 0) {
      shadow_pde_entry->writable = guest_pde_entry->writable;

    /*
     * Check the Intel manual because we are ignoring Large Page issues here
     * Also be wary of hooked pages
     */

      PrintDebug("Large Page!!!\n");
  } else if (shadow_pde_access == PT_WRITE_ERROR) {

    // Page Directory Entry marked read-only

    PrintDebug("Shadow Paging Write Error\n");

  } else if (shadow_pde_access == PT_USER_ERROR) {

    // Page Directory Entry marked non-user

    PrintDebug("Shadow Paging User access error\n");

  } else if (shadow_pde_access == PT_ACCESS_OK) {
    pte32_t * shadow_pte = (pte32_t *)PDE32_T_ADDR((*shadow_pde_entry));
    pte32_t * guest_pte = NULL;

    // Page Table Entry fault

    if (guest_pa_to_host_va(info, PDE32_T_ADDR((*guest_pde_entry)), (addr_t*)&guest_pte) == -1) {
      PrintDebug("Invalid Guest PTE Address: 0x%x\n", PDE32_T_ADDR((*guest_pde_entry)));
      // Machine check the guest

      raise_exception(info, MC_EXCEPTION);

    if (handle_shadow_pte32_fault(info, fault_addr, error_code, shadow_pte, guest_pte) == -1) {
      PrintDebug("Error handling Page fault caused by PTE\n");

    // Unknown error raise page fault in guest
    info->ctrl_regs.cr2 = fault_addr;
    raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);

    // For debugging we will return an error here for the time being,
    // this probably shouldn't ever happen
    PrintDebug("Unknown Error occurred\n");
    PrintDebug("Manual Says to inject page fault into guest\n");

  //PrintDebugPageTables(shadow_pde);
  PrintDebug("Returning end of PDE function\n");
/*
 * We assume that the guest pte pointer has already been translated to a host virtual address
 */
int handle_shadow_pte32_fault(struct guest_info * info,
                              addr_t fault_addr,
                              pf_error_t error_code,
                              pte32_t * shadow_pte,
                              pte32_t * guest_pte) {
  pt_access_status_t guest_pte_access;
  pt_access_status_t shadow_pte_access;
  pte32_t * guest_pte_entry = (pte32_t *)&(guest_pte[PTE32_INDEX(fault_addr)]);
  pte32_t * shadow_pte_entry = (pte32_t *)&(shadow_pte[PTE32_INDEX(fault_addr)]);

  // Check the guest page permissions
  guest_pte_access = can_access_pte32(guest_pte, fault_addr, error_code);

  if (guest_pte_access != PT_ACCESS_OK) {
    // Inject page fault into the guest

    info->ctrl_regs.cr2 = fault_addr;
    raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);

    PrintDebug("Access error injecting pf to guest\n");

  shadow_pte_access = can_access_pte32(shadow_pte, fault_addr, error_code);

  if (shadow_pte_access == PT_ACCESS_OK) {
    // Inconsistent state...
    // Guest Re-Entry will flush page tables and everything should now work
    PrintDebug("Inconsistent state... Guest re-entry should flush tlb\n");

  } else if (shadow_pte_access == PT_ENTRY_NOT_PRESENT) {

    addr_t guest_pa = PTE32_T_ADDR((*guest_pte_entry));

    // Page Table Entry Not Present

    host_region_type_t host_page_type = get_shadow_addr_type(info, guest_pa);

    if (host_page_type == HOST_REGION_INVALID) {
      // Inject a machine check in the guest

      raise_exception(info, MC_EXCEPTION);

      PrintDebug("Invalid Guest Address in page table (0x%x)\n", guest_pa);

    } else if (host_page_type == HOST_REGION_PHYSICAL_MEMORY) {
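      /* Ordinary guest RAM: translate the guest physical address through the
       * shadow map to the host physical page backing it, and copy the guest's
       * protection bits into the shadow PTE. */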
      shadow_pa = get_shadow_addr(info, guest_pa);

      shadow_pte_entry->page_base_addr = PT32_BASE_ADDR(shadow_pa);

      shadow_pte_entry->present = guest_pte_entry->present;
      shadow_pte_entry->user_page = guest_pte_entry->user_page;

      // set according to VMM policy
      shadow_pte_entry->write_through = 0;
      shadow_pte_entry->cache_disable = 0;
      shadow_pte_entry->global_page = 0;

      guest_pte_entry->accessed = 1;
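
      /* Dirty-bit emulation: the shadow PTE is made writable only once the
       * guest PTE is marked dirty. A write fault on a clean page sets the
       * guest dirty bit here; a read fault on a clean page gets a read-only
       * shadow mapping so the next write faults back into this handler. */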
      if (guest_pte_entry->dirty == 1) {
        shadow_pte_entry->writable = guest_pte_entry->writable;
      } else if ((guest_pte_entry->dirty == 0) && (error_code.write == 1)) {
        shadow_pte_entry->writable = guest_pte_entry->writable;
        guest_pte_entry->dirty = 1;
      } else if ((guest_pte_entry->dirty == 0) && (error_code.write == 0)) {
        shadow_pte_entry->writable = 0;

      // Page fault handled by hook functions
      if (handle_special_page_fault(info, fault_addr, error_code) == -1) {
        PrintDebug("Special Page fault handler returned error for address: %x\n", fault_addr);

  } else if ((shadow_pte_access == PT_WRITE_ERROR) &&
             (guest_pte_entry->dirty == 0)) {
    guest_pte_entry->dirty = 1;
    shadow_pte_entry->writable = guest_pte_entry->writable;

    PrintDebug("Shadow PTE Write Error\n");

    // Inject page fault into the guest

    info->ctrl_regs.cr2 = fault_addr;
    raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);

    PrintDebug("PTE Page fault fell through... Not sure if this should ever happen\n");
    PrintDebug("Manual Says to inject page fault into guest\n");

  PrintDebug("Returning end of function\n");
addr_t create_new_shadow_pt32(struct guest_info * info) {
  pde32_t * host_pde = NULL;

  V3_AllocPages(host_pde, 1);
  memset(host_pde, 0, PAGE_SIZE);

  return (addr_t)host_pde;
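
/* Emulate a guest INVLPG: decode the instruction at the guest RIP to find the
 * target linear address, then clear the matching shadow PDE (for large pages)
 * or the matching shadow PTE so the mapping is rebuilt on the next fault. */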
/* Currently does not work with Segmentation!!! */
int handle_shadow_invlpg(struct guest_info * info) {
  if (info->mem_mode != VIRTUAL_MEM) {
    // Paging must be turned on...
    // should handle with some sort of fault I think
    PrintDebug("ERROR: INVLPG called in non-paged mode\n");

  if (info->cpu_mode == PROTECTED) {

    ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);

    PrintDebug("Could not read instruction 0x%x (ret=%d)\n", info->rip, ret);

    /* Can INVLPG work with Segments?? */
    while (is_prefix_byte(instr[index])) {

    if ((instr[index] == (uchar_t)0x0f) &&
        (instr[index + 1] == (uchar_t)0x01)) {

      addr_t first_operand;
      addr_t second_operand;
      operand_type_t addr_type;

      addr_type = decode_operands32(&(info->vm_regs), instr + index, &index, &first_operand, &second_operand, REG32);

      if (addr_type == MEM_OPERAND) {
        pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
        pde32_t * shadow_pde_entry = (pde32_t *)&shadow_pd[PDE32_INDEX(first_operand)];

        //PrintDebug("PDE Index=%d\n", PDE32_INDEX(first_operand));
        //PrintDebug("FirstOperand = %x\n", first_operand);
        if (shadow_pde_entry->large_page == 1) {
          shadow_pde_entry->present = 0;

        if (shadow_pde_entry->present == 1) {
          pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde_entry));
          pte32_t * shadow_pte_entry = (pte32_t *)&shadow_pt[PTE32_INDEX(first_operand)];

          shadow_pte_entry->present = 0;

        PrintDebug("Invalid Operand type\n");

    PrintDebug("Invalid Instruction Opcode\n");
    PrintTraceMemDump(instr, 15);
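
/* Eagerly build a complete shadow page directory for the guest page directory
 * referenced by virt_cr3: every present guest PDE gets a shadow page table
 * constructed by setup_shadow_pte32(). */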
addr_t setup_shadow_pt32(struct guest_info * info, addr_t virt_cr3) {
  addr_t cr3_guest_addr = CR3_TO_PDE32(virt_cr3);

  pde32_t * host_pde = NULL;

  // Set up guest_pde to point to the PageDir in host addr
  if (guest_pa_to_host_va(info, cr3_guest_addr, (addr_t*)&guest_pde) == -1) {

  V3_AllocPages(host_pde, 1);
  memset(host_pde, 0, PAGE_SIZE);

  for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
    if (guest_pde[i].present == 1) {

      if (guest_pa_to_host_va(info, PDE32_T_ADDR(guest_pde[i]), &pt_host_addr) == -1) {

      if ((host_pte = setup_shadow_pte32(info, pt_host_addr)) == 0) {

      host_pde[i].present = 1;
      host_pde[i].pt_base_addr = PD32_BASE_ADDR(host_pte);

      // Set Page DIR flags

  PrintDebugPageTables(host_pde);

  return (addr_t)host_pde;
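
/* Build a single shadow page table from the guest page table mapped at
 * pt_host_addr: each present guest PTE that targets ordinary physical memory
 * is translated through the shadow map into the new host page table. */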
addr_t setup_shadow_pte32(struct guest_info * info, addr_t pt_host_addr) {
  pte32_t * guest_pte = (pte32_t *)pt_host_addr;
  pte32_t * host_pte = NULL;

  V3_AllocPages(host_pte, 1);
  memset(host_pte, 0, PAGE_SIZE);

  for (i = 0; i < MAX_PTE32_ENTRIES; i++) {
    if (guest_pte[i].present == 1) {
      addr_t guest_pa = PTE32_T_ADDR(guest_pte[i]);
      host_region_type_t page_type;

      page_type = get_shadow_addr_type(info, guest_pa);

      if (page_type == HOST_REGION_PHYSICAL_MEMORY) {
        host_pa = get_shadow_addr(info, guest_pa);

      // Setup various memory types

      host_pte[i].page_base_addr = PT32_BASE_ADDR(host_pa);
      host_pte[i].present = 1;

  return (addr_t)host_pte;