/* 
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National 
 * Science Foundation and the Department of Energy.  
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at 
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
 * All rights reserved.
 *
 * Author: Jack Lange <jarusl@cs.northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
20 #include <palacios/vmm_shadow_paging.h>
21 #include <palacios/vmm_ctrl_regs.h>
23 #include <palacios/vm_guest.h>
24 #include <palacios/vm_guest_mem.h>
/* When VTLB debugging is compiled out, replace PrintDebug with a no-op.
 * The conditional was left unterminated (missing #endif), which breaks
 * compilation; #undef avoids a macro-redefinition warning. */
#ifndef V3_CONFIG_DEBUG_SHDW_PG_VTLB
#undef PrintDebug
#define PrintDebug(fmt, ...)
#endif
33 struct shadow_page_data {
37 struct list_head page_list_node;
41 struct vtlb_local_state {
43 struct list_head page_list;
48 static struct shadow_page_data * create_new_shadow_pt(struct guest_info * core);
51 #include "vmm_shdw_pg_tlb_32.h"
52 #include "vmm_shdw_pg_tlb_32pae.h"
53 #include "vmm_shdw_pg_tlb_64.h"
56 static struct shadow_page_data * create_new_shadow_pt(struct guest_info * core) {
57 struct v3_shdw_pg_state * state = &(core->shdw_pg_state);
58 struct vtlb_local_state * impl_state = (struct vtlb_local_state *)(state->local_impl_data);
59 v3_reg_t cur_cr3 = core->ctrl_regs.cr3;
60 struct shadow_page_data * page_tail = NULL;
63 if (!list_empty(&(impl_state->page_list))) {
64 page_tail = list_tail_entry(&(impl_state->page_list), struct shadow_page_data, page_list_node);
67 if (page_tail->cr3 != cur_cr3) {
68 PrintDebug(core->vm_info, core, "Reusing old shadow Page: %p (cur_CR3=%p)(page_cr3=%p) \n",
69 (void *)(addr_t)page_tail->page_pa,
70 (void *)(addr_t)cur_cr3,
71 (void *)(addr_t)(page_tail->cr3));
73 list_move(&(page_tail->page_list_node), &(impl_state->page_list));
75 memset(V3_VAddr((void *)(page_tail->page_pa)), 0, PAGE_SIZE_4KB);
84 page_tail = (struct shadow_page_data *)V3_Malloc(sizeof(struct shadow_page_data));
87 PrintError(core->vm_info, core, "Cannot allocate\n");
91 page_tail->page_pa = (addr_t)V3_AllocPages(1);
93 if (!page_tail->page_pa) {
94 PrintError(core->vm_info, core, "Cannot allocate page\n");
98 PrintDebug(core->vm_info, core, "Allocating new shadow Page: %p (cur_cr3=%p)\n",
99 (void *)(addr_t)page_tail->page_pa,
100 (void *)(addr_t)cur_cr3);
102 page_tail->cr3 = cur_cr3;
103 list_add(&(page_tail->page_list_node), &(impl_state->page_list));
105 shdw_page = (addr_t)V3_VAddr((void *)(page_tail->page_pa));
106 memset((void *)shdw_page, 0, PAGE_SIZE_4KB);
113 static int vtlb_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
115 V3_Print(vm, VCORE_NONE, "VTLB initialization\n");
119 static int vtlb_deinit(struct v3_vm_info * vm) {
123 static int vtlb_local_init(struct guest_info * core) {
124 struct v3_shdw_pg_state * state = &(core->shdw_pg_state);
125 struct vtlb_local_state * vtlb_state = NULL;
127 V3_Print(core->vm_info, core, "VTLB local initialization\n");
129 vtlb_state = (struct vtlb_local_state *)V3_Malloc(sizeof(struct vtlb_local_state));
132 PrintError(core->vm_info, core, "Cannot allocate\n");
136 INIT_LIST_HEAD(&(vtlb_state->page_list));
138 state->local_impl_data = vtlb_state;
144 static int vtlb_local_deinit(struct guest_info * core) {
145 struct v3_shdw_pg_state * state = &(core->shdw_pg_state);
146 struct vtlb_local_state * vtlb_state = state->local_impl_data;
148 struct shadow_page_data * shdw_pg = NULL;
149 struct shadow_page_data * tmp = NULL;
152 list_for_each_entry_safe(shdw_pg, tmp, &(vtlb_state->page_list), page_list_node) {
153 list_del(&(shdw_pg->page_list_node));
154 V3_FreePages((void *)shdw_pg->page_pa, 1);
165 static int vtlb_activate_shdw_pt(struct guest_info * core) {
166 switch (v3_get_vm_cpu_mode(core)) {
169 return activate_shadow_pt_32(core);
171 return activate_shadow_pt_32pae(core);
175 return activate_shadow_pt_64(core);
177 PrintError(core->vm_info, core, "Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
/* Invalidation is implemented by simply re-activating: a new root is
 * installed and stale pages get recycled lazily by create_new_shadow_pt.
 * Restored the missing closing brace. */
static int vtlb_invalidate_shdw_pt(struct guest_info * core) {
    return vtlb_activate_shdw_pt(core);
}
189 static int vtlb_handle_pf(struct guest_info * core, addr_t fault_addr, pf_error_t error_code) {
191 switch (v3_get_vm_cpu_mode(core)) {
193 return handle_shadow_pagefault_32(core, fault_addr, error_code);
196 return handle_shadow_pagefault_32pae(core, fault_addr, error_code);
200 return handle_shadow_pagefault_64(core, fault_addr, error_code);
203 PrintError(core->vm_info, core, "Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
209 static int vtlb_handle_invlpg(struct guest_info * core, addr_t vaddr) {
211 switch (v3_get_vm_cpu_mode(core)) {
213 return handle_shadow_invlpg_32(core, vaddr);
215 return handle_shadow_invlpg_32pae(core, vaddr);
219 return handle_shadow_invlpg_64(core, vaddr);
221 PrintError(core->vm_info, core, "Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
226 static struct v3_shdw_pg_impl vtlb_impl = {
229 .deinit = vtlb_deinit,
230 .local_init = vtlb_local_init,
231 .local_deinit = vtlb_local_deinit,
232 .handle_pagefault = vtlb_handle_pf,
233 .handle_invlpg = vtlb_handle_invlpg,
234 .activate_shdw_pt = vtlb_activate_shdw_pt,
235 .invalidate_shdw_pt = vtlb_invalidate_shdw_pt
242 register_shdw_pg_impl(&vtlb_impl);