/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm_shadow_paging.h>

#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_decoder.h>
#include <palacios/vmm_ctrl_regs.h>

#include <palacios/vmm_hashtable.h>

#include <palacios/vmm_direct_paging.h>

#ifndef DEBUG_SHADOW_PAGING
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

struct shadow_page_data {
    v3_reg_t cr3;         // CR3 value in effect when this page was handed out
    addr_t page_pa;       // host physical address of the backing 4KB page

    struct list_head page_list_node;
};

DEFINE_HASHTABLE_INSERT(add_pte_map, addr_t, addr_t);
DEFINE_HASHTABLE_SEARCH(find_pte_map, addr_t, addr_t);
//DEFINE_HASHTABLE_REMOVE(del_pte_map, addr_t, addr_t, 0);

static uint_t pte_hash_fn(addr_t key) {
    return hash_long(key, 32);
}

static int pte_equals(addr_t key1, addr_t key2) {
    return (key1 == key2);
}
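
/*
 * A rough usage sketch (not part of the original flow): the generated
 * add_pte_map()/find_pte_map() helpers, together with pte_hash_fn() and
 * pte_equals(), back the state->cached_ptes hashtable that is cleared in
 * v3_init_shadow_page_state() below.  Assuming the table maps a guest page
 * table address to a cached translation, and that the hashtable constructor
 * is named v3_create_htable() (an assumption), the cache would be used
 * roughly like this from the per-mode handlers:
 *
 *     state->cached_ptes = v3_create_htable(0, &pte_hash_fn, &pte_equals);
 *
 *     if (find_pte_map(state->cached_ptes, gpa) == 0) {
 *         add_pte_map(state->cached_ptes, gpa, translation);
 *     }
 */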

static struct shadow_page_data * create_new_shadow_pt(struct guest_info * info);
static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shadow_access);

#include "vmm_shadow_paging_32.h"
#include "vmm_shadow_paging_32pae.h"
#include "vmm_shadow_paging_64.h"
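
/*
 * The headers above supply the mode-specific implementations
 * (activate_shadow_pt_32/_32pae/_64, handle_shadow_pagefault_32/_32pae/_64,
 * handle_shadow_invlpg_32/_32pae/_64) that the wrappers in this file
 * dispatch to based on the guest's current CPU mode.
 */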

int v3_init_shadow_page_state(struct guest_info * info) {
    struct shadow_page_state * state = &(info->shdw_pg_state);

    INIT_LIST_HEAD(&(state->page_list));

    state->cached_ptes = NULL;
    state->cached_cr3 = 0;

    return 0;
}

// Reads the guest CR3 register,
// creates new shadow page tables,
// and updates the shadow CR3 register to point to the new page tables.
int v3_activate_shadow_pt(struct guest_info * info) {
    switch (v3_get_cpu_mode(info)) {
	case PROTECTED:
	    return activate_shadow_pt_32(info);
	case PROTECTED_PAE:
	    return activate_shadow_pt_32pae(info);
	case LONG: case LONG_32_COMPAT: case LONG_16_COMPAT:
	    return activate_shadow_pt_64(info);
	default:
	    PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));
	    return -1;
    }
}

int v3_activate_passthrough_pt(struct guest_info * info) {
    // For now... but we need to change this:
    // as soon as shadow paging becomes active the passthrough tables are hosed,
    // so this will cause chaos if it is called at that time.

    info->ctrl_regs.cr3 = *(addr_t*)&(info->direct_map_pt);
    //PrintError("Activate Passthrough Page tables not implemented\n");
    return 0;
}

int v3_handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {

    if (v3_get_mem_mode(info) == PHYSICAL_MEM) {
	// If paging is not turned on we need to handle the special cases
	return v3_handle_passthrough_pagefault(info, fault_addr, error_code);
    } else if (v3_get_mem_mode(info) == VIRTUAL_MEM) {

	switch (v3_get_cpu_mode(info)) {
	    case PROTECTED:
		return handle_shadow_pagefault_32(info, fault_addr, error_code);
	    case PROTECTED_PAE:
		return handle_shadow_pagefault_32pae(info, fault_addr, error_code);
	    case LONG: case LONG_32_COMPAT: case LONG_16_COMPAT:
		return handle_shadow_pagefault_64(info, fault_addr, error_code);
	    default:
		PrintError("Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));
		return -1;
	}
    } else {
	PrintError("Invalid Memory mode\n");
	return -1;
    }
}
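
/*
 * The mode-specific handlers dispatched above are where the real work is
 * expected to happen: walk the guest page tables for fault_addr, compare the
 * guest-visible access rights against the shadow entries, and then either
 * inject the fault into the guest (see is_guest_pf() / inject_guest_pf()
 * below) or patch the shadow page tables and resume the guest.
 */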

int v3_handle_shadow_invlpg(struct guest_info * info) {
    uchar_t instr[15];
    struct x86_instr dec_instr;
    int ret = 0;
    addr_t vaddr = 0;

    if (v3_get_mem_mode(info) != VIRTUAL_MEM) {
	// Paging must be turned on...
	// should probably be handled by injecting some sort of fault
	PrintError("ERROR: INVLPG called in non paged mode\n");
	return -1;
    }

    if (v3_get_mem_mode(info) == PHYSICAL_MEM) {
	ret = read_guest_pa_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
    } else {
	ret = read_guest_va_memory(info, get_addr_linear(info, info->rip, &(info->segments.cs)), 15, instr);
    }

    if (ret == -1) {
	PrintError("Could not read instruction into buffer\n");
	return -1;
    }

    if (v3_decode(info, (addr_t)instr, &dec_instr) == -1) {
	PrintError("Decoding Error\n");
	return -1;
    }

    if ((dec_instr.op_type != V3_OP_INVLPG) ||
	(dec_instr.num_operands != 1) ||
	(dec_instr.dst_operand.type != MEM_OPERAND)) {
	PrintError("Decoder Error: Not a valid INVLPG instruction...\n");
	return -1;
    }

    vaddr = dec_instr.dst_operand.operand;

    info->rip += dec_instr.instr_length;

    switch (v3_get_cpu_mode(info)) {
	case PROTECTED:
	    return handle_shadow_invlpg_32(info, vaddr);
	case PROTECTED_PAE:
	    return handle_shadow_invlpg_32pae(info, vaddr);
	case LONG: case LONG_32_COMPAT: case LONG_16_COMPAT:
	    return handle_shadow_invlpg_64(info, vaddr);
	default:
	    PrintError("Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_cpu_mode(info)));
	    return -1;
    }
}

static struct shadow_page_data * create_new_shadow_pt(struct guest_info * info) {
    struct shadow_page_state * state = &(info->shdw_pg_state);
    v3_reg_t cur_cr3 = info->ctrl_regs.cr3;
    struct shadow_page_data * page_tail = NULL;
    addr_t shdw_page = 0;

    if (!list_empty(&(state->page_list))) {
	page_tail = list_tail_entry(&(state->page_list), struct shadow_page_data, page_list_node);

	if (page_tail->cr3 != cur_cr3) {
	    PrintDebug("Reusing old shadow Page: %p (cur_CR3=%p)(page_cr3=%p) \n",
		       (void *)page_tail->page_pa, (void *)cur_cr3, (void *)(page_tail->cr3));

	    list_move(&(page_tail->page_list_node), &(state->page_list));

	    memset(V3_VAddr((void *)(page_tail->page_pa)), 0, PAGE_SIZE_4KB);

	    return page_tail;
	}
    }

    // The tail page belongs to the current CR3 context (or the list was empty),
    // so allocate a fresh shadow page table page instead.
    page_tail = (struct shadow_page_data *)V3_Malloc(sizeof(struct shadow_page_data));
    page_tail->page_pa = (addr_t)V3_AllocPages(1);

    PrintDebug("Allocating new shadow Page: %p (cur_cr3=%p)\n", (void *)page_tail->page_pa, (void *)cur_cr3);

    page_tail->cr3 = cur_cr3;
    list_add(&(page_tail->page_list_node), &(state->page_list));

    shdw_page = (addr_t)V3_VAddr((void *)(page_tail->page_pa));
    memset((void *)shdw_page, 0, PAGE_SIZE_4KB);

    return page_tail;
}
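
/*
 * A rough usage sketch (assumption; the real call sites are in the per-mode
 * headers included above): a mode-specific fault handler obtains a freshly
 * zeroed shadow page table page and wires it into a shadow directory entry.
 * The pte32_t type and PAGE_BASE_ADDR() macro are illustrative names here.
 *
 *     struct shadow_page_data * shdw_page = create_new_shadow_pt(info);
 *     pte32_t * shadow_pt = (pte32_t *)V3_VAddr((void *)(shdw_page->page_pa));
 *     shadow_pde->pt_base_addr = PAGE_BASE_ADDR(shdw_page->page_pa);
 */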

static void inject_guest_pf(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
    if (info->enable_profiler) {
	info->profiler.guest_pf_cnt++;
    }

    info->ctrl_regs.cr2 = fault_addr;
    v3_raise_exception_with_error(info, PF_EXCEPTION, *(uint_t *)&error_code);
}
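
/*
 * For reference: pf_error_t is expected to mirror the architectural x86 page
 * fault error code, which is why it can be punned to a uint_t above.
 * Bit 0 = present, bit 1 = write, bit 2 = user, bit 3 = reserved-bit
 * violation, bit 4 = instruction fetch.
 */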

static int is_guest_pf(pt_access_status_t guest_access, pt_access_status_t shadow_access) {
    /* Basically the reasoning is that there can be multiple reasons for a page fault:
       if there is a permissions failure for a page present in the guest _BUT_
       the reason for the fault was that the page is not present in the shadow,
       _THEN_ we have to map the shadow page in and reexecute; this will generate
       a permissions fault which is _THEN_ valid to send to the guest,
       _UNLESS_ both the guest and shadow have marked the page as not present.
    */
    if (guest_access != PT_ACCESS_OK) {
	// Guest Access Error

	if ((shadow_access != PT_ACCESS_NOT_PRESENT) &&
	    (guest_access != PT_ACCESS_NOT_PRESENT)) {
	    // aka (guest permission error)
	    return 1;
	}

	if ((shadow_access == PT_ACCESS_NOT_PRESENT) &&
	    (guest_access == PT_ACCESS_NOT_PRESENT)) {
	    // Page tables completely blank, handle guest first
	    return 1;
	}

	// Otherwise we'll handle the guest fault later...?
    }

    return 0;
}
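
/*
 * Summary of the cases above (1 = deliver the #PF to the guest now,
 * 0 = fix up the shadow tables first and let the access replay):
 *
 *   guest_access            shadow_access              is_guest_pf()
 *   ------------            -------------              -------------
 *   PT_ACCESS_OK            (any)                      0
 *   permission error        != PT_ACCESS_NOT_PRESENT   1
 *   permission error        PT_ACCESS_NOT_PRESENT      0  (map shadow page, replay)
 *   PT_ACCESS_NOT_PRESENT   PT_ACCESS_NOT_PRESENT      1
 *   PT_ACCESS_NOT_PRESENT   != PT_ACCESS_NOT_PRESENT   0
 */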