2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
15 * Author: Chunxiao Diao <chunxiaodiao2012@u.northwestern.edu>
18 * This is free software. You are permitted to use,
19 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
22 #include <palacios/vmm_shadow_paging.h>
23 #include <palacios/vmm_ctrl_regs.h>
25 #include <palacios/vm_guest.h>
26 #include <palacios/vm_guest_mem.h>
29 #ifndef V3_CONFIG_DEBUG_SHDW_PG_VTLB
31 #define PrintDebug(fmt, ...)
/* Bookkeeping record for one shadow page-table page.
 * NOTE(review): this excerpt elides several members referenced by the code
 * below (at least `page_pa` — the page's physical address — and `cr3`, the
 * guest CR3 the page was built for) as well as the closing brace — confirm
 * against the full file. */
35 struct shadow_page_data {
/* Linkage into the per-core page list kept in struct vtlb_local_state. */
39 struct list_head page_list_node;
/* Per-core private state for the VTLB shadow-paging implementation.
 * NOTE(review): closing brace elided in this excerpt. */
43 struct vtlb_local_state {
/* List of shadow page-table pages owned by this core; create_new_shadow_pt
 * recycles from its tail and adds fresh pages at its head. */
45 struct list_head page_list;
/* Forward declaration for the allocator shared by the 32/32pae/64-bit
 * headers included just below; defined later in this file. */
50 static struct shadow_page_data * create_new_shadow_pt(struct guest_info * core);
53 #include "vmm_shdw_pg_tlb_32.h"
54 #include "vmm_shdw_pg_tlb_32pae.h"
55 #include "vmm_shdw_pg_tlb_64.h"
/* Obtain a zeroed 4KB page to use as a shadow page table for `core`.
 * Strategy: if the oldest page on the per-core list (list tail) was built
 * for a different guest CR3 than the current one, recycle it in place;
 * otherwise allocate a brand-new page.  Either way the page is zeroed,
 * tagged with the current CR3, and moved/added to the head of the list.
 * NOTE(review): this excerpt elides several lines (the NULL checks'
 * `return NULL` paths, the `else` structure, and the final
 * `return page_tail`) — comments below are hedged accordingly. */
59 static struct shadow_page_data * create_new_shadow_pt(struct guest_info * core) {
60 struct v3_shdw_pg_state * state = &(core->shdw_pg_state);
/* Per-core VTLB state installed by vtlb_local_init(). */
61 struct vtlb_local_state * impl_state = (struct vtlb_local_state *)(state->local_impl_data);
62 v3_reg_t cur_cr3 = core->ctrl_regs.cr3;
63 struct shadow_page_data * page_tail = NULL;
/* Fast path: try to recycle the least-recently-added page. */
66 if (!list_empty(&(impl_state->page_list))) {
67 page_tail = list_tail_entry(&(impl_state->page_list), struct shadow_page_data, page_list_node);
/* Only safe to reuse if the page belongs to a stale CR3 context;
 * pages tagged with the current CR3 may still be live. */
70 if (page_tail->cr3 != cur_cr3) {
71 PrintDebug(core->vm_info, core, "Reusing old shadow Page: %p (cur_CR3=%p)(page_cr3=%p) \n",
72 (void *)(addr_t)page_tail->page_pa,
73 (void *)(addr_t)cur_cr3,
74 (void *)(addr_t)(page_tail->cr3));
/* Move the recycled page to the list head so the tail stays the
 * best recycling candidate. */
76 list_move(&(page_tail->page_list_node), &(impl_state->page_list));
/* Wipe stale translations before handing the page back. */
78 memset(V3_VAddr((void *)(page_tail->page_pa)), 0, PAGE_SIZE_4KB);
/* Slow path: nothing recyclable — allocate tracking struct + page.
 * NOTE(review): the allocation-failure branch bodies are elided here;
 * presumably they return NULL after the PrintError calls — confirm. */
87 page_tail = (struct shadow_page_data *)V3_Malloc(sizeof(struct shadow_page_data));
90 PrintError(core->vm_info, core, "Cannot allocate\n");
94 page_tail->page_pa = (addr_t)V3_AllocPagesExtended(1,PAGE_SIZE_4KB,-1,0,0);
96 if (!page_tail->page_pa) {
97 PrintError(core->vm_info, core, "Cannot allocate page\n");
101 PrintDebug(core->vm_info, core, "Allocating new shadow Page: %p (cur_cr3=%p)\n",
102 (void *)(addr_t)page_tail->page_pa,
103 (void *)(addr_t)cur_cr3);
/* Tag the page with the CR3 it serves and publish it at the list head. */
105 page_tail->cr3 = cur_cr3;
106 list_add(&(page_tail->page_list_node), &(impl_state->page_list));
/* Zero the freshly allocated page via its host-virtual mapping.
 * NOTE(review): the declaration of `shdw_page` is elided in this excerpt. */
108 shdw_page = (addr_t)V3_VAddr((void *)(page_tail->page_pa));
109 memset((void *)shdw_page, 0, PAGE_SIZE_4KB);
/* VM-wide init hook for the VTLB implementation.  Visible body only logs;
 * NOTE(review): the tail of the function (presumably `return 0;`) is
 * elided in this excerpt. */
116 static int vtlb_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
118 V3_Print(vm, VCORE_NONE, "VTLB initialization\n");
122 static int vtlb_deinit(struct v3_vm_info * vm) {
/* Per-core init: allocate the vtlb_local_state, initialize its (empty)
 * shadow-page list, and hang it off the core's shadow-paging state.
 * NOTE(review): the NULL-check condition before the PrintError and the
 * final `return 0;` are elided in this excerpt. */
126 static int vtlb_local_init(struct guest_info * core) {
127 struct v3_shdw_pg_state * state = &(core->shdw_pg_state);
128 struct vtlb_local_state * vtlb_state = NULL;
130 V3_Print(core->vm_info, core, "VTLB local initialization\n");
132 vtlb_state = (struct vtlb_local_state *)V3_Malloc(sizeof(struct vtlb_local_state));
/* Allocation-failure path (guard condition elided in this excerpt). */
135 PrintError(core->vm_info, core, "Cannot allocate\n");
139 INIT_LIST_HEAD(&(vtlb_state->page_list));
/* Publish the per-core state where create_new_shadow_pt() finds it. */
141 state->local_impl_data = vtlb_state;
/* Per-core teardown: walk the shadow-page list, unlink each entry and free
 * its backing page.  Uses the *_safe iterator because entries are deleted
 * during traversal.
 * NOTE(review): the excerpt elides the tail of the function — presumably
 * V3_Free of each `shdw_pg` struct and of `vtlb_state` itself, plus the
 * return — confirm against the full file. */
147 static int vtlb_local_deinit(struct guest_info * core) {
148 struct v3_shdw_pg_state * state = &(core->shdw_pg_state);
149 struct vtlb_local_state * vtlb_state = state->local_impl_data;
151 struct shadow_page_data * shdw_pg = NULL;
152 struct shadow_page_data * tmp = NULL;
155 list_for_each_entry_safe(shdw_pg, tmp, &(vtlb_state->page_list), page_list_node) {
156 list_del(&(shdw_pg->page_list_node));
157 V3_FreePages((void *)shdw_pg->page_pa, 1);
/* Dispatch shadow-page-table activation to the mode-specific handler
 * (32-bit, 32-bit PAE, or 64-bit) pulled in via the headers above.
 * NOTE(review): the `case` labels and `return -1;` after the error path
 * are elided in this excerpt. */
168 static int vtlb_activate_shdw_pt(struct guest_info * core) {
169 switch (v3_get_vm_cpu_mode(core)) {
172 return activate_shadow_pt_32(core);
175 return activate_shadow_pt_32pae(core);
180 return activate_shadow_pt_64(core);
/* Unknown/unsupported CPU mode (default branch; label elided here). */
183 PrintError(core->vm_info, core, "Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
/* Invalidation is implemented as a full re-activation: rebuilding the
 * shadow root effectively flushes all cached translations for this core. */
191 static int vtlb_invalidate_shdw_pt(struct guest_info * core) {
192 return vtlb_activate_shdw_pt(core);
/* Dispatch a guest page fault at `fault_addr` to the mode-specific shadow
 * page-fault handler.
 * NOTE(review): the `case` labels and the post-error return are elided in
 * this excerpt. */
196 static int vtlb_handle_pf(struct guest_info * core, addr_t fault_addr, pf_error_t error_code) {
198 switch (v3_get_vm_cpu_mode(core)) {
200 return handle_shadow_pagefault_32(core, fault_addr, error_code);
203 return handle_shadow_pagefault_32pae(core, fault_addr, error_code);
207 return handle_shadow_pagefault_64(core, fault_addr, error_code);
/* Unknown/unsupported CPU mode (default branch; label elided here). */
210 PrintError(core->vm_info, core, "Unhandled CPU Mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
/* Dispatch a guest INVLPG of `vaddr` to the mode-specific handler so the
 * corresponding shadow entry is flushed.
 * NOTE(review): the `case` labels and the post-error return are elided in
 * this excerpt. */
216 static int vtlb_handle_invlpg(struct guest_info * core, addr_t vaddr) {
218 switch (v3_get_vm_cpu_mode(core)) {
220 return handle_shadow_invlpg_32(core, vaddr);
223 return handle_shadow_invlpg_32pae(core, vaddr);
228 return handle_shadow_invlpg_64(core, vaddr);
/* Unknown/unsupported CPU mode (default branch; label elided here). */
231 PrintError(core->vm_info, core, "Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
/* Ops table binding this VTLB implementation into the generic shadow-paging
 * framework (registered below).
 * NOTE(review): the excerpt elides the leading members — presumably
 * `.name` and `.init = vtlb_init` — confirm against the full file. */
237 static struct v3_shdw_pg_impl vtlb_impl = {
240 .deinit = vtlb_deinit,
241 .local_init = vtlb_local_init,
242 .local_deinit = vtlb_local_deinit,
243 .handle_pagefault = vtlb_handle_pf,
244 .handle_invlpg = vtlb_handle_invlpg,
245 .activate_shdw_pt = vtlb_activate_shdw_pt,
246 .invalidate_shdw_pt = vtlb_invalidate_shdw_pt
253 register_shdw_pg_impl(&vtlb_impl);