/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#ifndef __VMM_DIRECT_PAGING_64_H__
#define __VMM_DIRECT_PAGING_64_H__

#include <palacios/vmm_mem.h>
#include <palacios/vmm_paging.h>
#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vm_guest.h>

// Reference: AMD Software Developer Manual Vol. 2, Ch. 5, "Page Translation and Protection"
static inline uint32_t get_page_size(struct guest_info * core, addr_t fault_addr) {
    addr_t pg_start = 0UL, pg_end = 0UL;       // 2MiB page containing the faulting address
    struct v3_mem_region * pg_next_reg = NULL; // first mem region at or after the page start addr
    uint32_t page_size = PAGE_SIZE_4KB;
    /* If the guest has been configured for 2MiB pages, then we must check for hooked regions of
     * memory which may overlap with the 2MiB page containing the faulting address (due to
     * potentially differing access policies in place for e.g. I/O devices and the APIC). A 2MiB
     * page can be used if a) no region overlaps the page [or b) a region does overlap but fully
     * contains the page]. The [bracketed] text pertains to the #if 0'd code below, state D. TODO:
     * modify this note if someone decides to enable this optimization. It can be tested with the
     * SeaStar mapping.
     *
     * Examples: (CAPS regions are returned by v3_get_next_mem_region; state A returns the base reg)
     *
     * |region| |region|                                 2MiB mapped (state A)
     * |reg|        |REG|                                2MiB mapped (state B)
     * |region| |reg| |REG| |region| |reg|               4KiB mapped (state C)
     * |reg|  |reg|     |--REGION---|                    [2MiB mapped (state D)]
     * |--------------------------------------------|    RAM
     * |----|----|----|----|----|page|----|----|----|    2MiB pages
     *                     >>>>>>>>>>>>>>>>>>>>          search space
     */
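    /* A worked example of the bounds computed below (illustrative address, not taken from the
     * source): for fault_addr = 0x40123456, PAGE_ADDR_2MB() masks off the low 21 bits, giving
     * pg_start = 0x40000000 and pg_end = pg_start + PAGE_SIZE_2MB = 0x40200000, i.e. the
     * half-open 2MiB page [0x40000000, 0x40200000) that the region search below inspects.
     */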
    // guest page maps to a host page + offset (so when we shift, it aligns with a host page)
    pg_start = PAGE_ADDR_2MB(fault_addr);
    pg_end = (pg_start + PAGE_SIZE_2MB);

    PrintDebug("%s: page [%p,%p) contains address\n", __FUNCTION__, (void *)pg_start, (void *)pg_end);
    pg_next_reg = v3_get_next_mem_region(core->vm_info, core->cpu_id, pg_start);

    if (pg_next_reg == NULL) {
        PrintError("%s: Error: address not in base region, %p\n", __FUNCTION__, (void *)fault_addr);
        return PAGE_SIZE_4KB; // defensive fallback; the caller validates the region first
    }
    if (pg_next_reg->flags.base == 1) {
        page_size = PAGE_SIZE_2MB; // State A
    } else {
#if 0   // State B/C and D optimization
        if ((pg_next_reg->guest_end >= pg_end) &&
            ((pg_next_reg->guest_start >= pg_end) || (pg_next_reg->guest_start <= pg_start))) {
            page_size = PAGE_SIZE_2MB;
        }

        PrintDebug("%s: region [%p,%p) %s partially overlap with page\n", __FUNCTION__,
                   (void *)pg_next_reg->guest_start, (void *)pg_next_reg->guest_end,
                   (page_size == PAGE_SIZE_2MB) ? "does not" : "does");
#else   // State B/C
        if (pg_next_reg->guest_start >= pg_end) {
            page_size = PAGE_SIZE_2MB;
        }

        PrintDebug("%s: region [%p,%p) %s overlap with page\n", __FUNCTION__,
                   (void *)pg_next_reg->guest_start, (void *)pg_next_reg->guest_end,
                   (page_size == PAGE_SIZE_2MB) ? "does not" : "does");
#endif
    }

    return page_size;
}
static inline int handle_passthrough_pagefault_64(struct guest_info * core, addr_t fault_addr, pf_error_t error_code) {
    pml4e64_t * pml      = NULL;
    pdpe64_t * pdpe      = NULL;
    pde64_t * pde        = NULL;
    pde64_2MB_t * pde2mb = NULL;
    pte64_t * pte        = NULL;
    addr_t host_addr     = 0;

    int pml_index  = PML4E64_INDEX(fault_addr);
    int pdpe_index = PDPE64_INDEX(fault_addr);
    int pde_index  = PDE64_INDEX(fault_addr);
    int pte_index  = PTE64_INDEX(fault_addr);
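    /* Each *_INDEX() macro extracts one 9-bit slice of the linear address: in long mode,
     * bits 47:39 index the PML4, 38:30 the PDPT, 29:21 the PD, and 20:12 the PT
     * (AMD APM Vol. 2, Ch. 5). For the illustrative fault_addr 0x40123456 used above,
     * this yields pml_index = 0, pdpe_index = 1, pde_index = 0, pte_index = 0x123.
     */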
    struct v3_mem_region * region = v3_get_mem_region(core->vm_info, core->cpu_id, fault_addr);
    int page_size = PAGE_SIZE_4KB;

    if (region == NULL) {
        PrintError("%s: invalid region, addr=%p\n", __FUNCTION__, (void *)fault_addr);
        return -1;
    }

    /* Check if:
     *  1. the guest is configured to use large pages and
     *  2. the memory regions can be referenced by a large page
     */
    if (core->use_large_pages == 1) {
        page_size = get_page_size(core, fault_addr);
    }

    PrintDebug("Using page size of %dKB\n", page_size / 1024);
    // Lookup the correct PML address based on the PAGING MODE
    if (core->shdw_pg_mode == SHADOW_PAGING) {
        pml = CR3_TO_PML4E64_VA(core->ctrl_regs.cr3);
    } else {
        pml = CR3_TO_PML4E64_VA(core->direct_map_pt);
    }
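    /* With SHADOW_PAGING the passthrough tables are whatever the (shadow) CR3 in
     * ctrl_regs currently points to; otherwise (nested paging) they are rooted at the
     * per-core direct map. Either way, the walk below edits the same 4-level
     * long-mode structure.
     */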
    // Fix up the PML entry
    if (pml[pml_index].present == 0) {
        pdpe = (pdpe64_t *)create_generic_pt_page();

        // Set default PML flags...
        pml[pml_index].present = 1;
        pml[pml_index].writable = 1;
        pml[pml_index].user_page = 1;

        pml[pml_index].pdp_base_addr = PAGE_BASE_ADDR_4KB((addr_t)V3_PAddr(pdpe));
    } else {
        pdpe = V3_VAddr((void *)BASE_TO_PAGE_ADDR_4KB(pml[pml_index].pdp_base_addr));
    }
    // Fix up the PDPE entry
    if (pdpe[pdpe_index].present == 0) {
        pde = (pde64_t *)create_generic_pt_page();

        // Set default PDPE flags...
        pdpe[pdpe_index].present = 1;
        pdpe[pdpe_index].writable = 1;
        pdpe[pdpe_index].user_page = 1;

        pdpe[pdpe_index].pd_base_addr = PAGE_BASE_ADDR_4KB((addr_t)V3_PAddr(pde));
    } else {
        pde = V3_VAddr((void *)BASE_TO_PAGE_ADDR_4KB(pdpe[pdpe_index].pd_base_addr));
    }
    // Fix up the 2MiB PDE and exit here
    if (page_size == PAGE_SIZE_2MB) {
        pde2mb = (pde64_2MB_t *)pde; // all but these two lines are the same as for the PTE
        pde2mb[pde_index].large_page = 1;
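        /* Setting large_page corresponds to the PS bit (bit 7) of the PDE; with PS set,
         * the entry maps a 2MiB frame directly rather than pointing at a page table
         * (AMD APM Vol. 2, Ch. 5).
         */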
        if (pde2mb[pde_index].present == 0) {
            pde2mb[pde_index].user_page = 1;

            if ((region->flags.alloced == 1) &&
                (region->flags.read == 1)) {

                pde2mb[pde_index].present = 1;

                if (region->flags.write == 1) {
                    pde2mb[pde_index].writable = 1;
                } else {
                    pde2mb[pde_index].writable = 0;
                }

                if (v3_gpa_to_hpa(core, fault_addr, &host_addr) == -1) {
                    PrintError("Error: Could not translate fault addr (%p)\n", (void *)fault_addr);
                    return -1;
                }

                pde2mb[pde_index].page_base_addr = PAGE_BASE_ADDR_2MB(host_addr);
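                /* PAGE_BASE_ADDR_2MB() drops the low 21 bits of host_addr, so this mapping
                 * is exact only if the backing host memory is itself 2MiB-aligned; this is
                 * assumed to hold for base regions (cf. the "maps to a host page + offset"
                 * note in get_page_size()).
                 */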
            } else {
                return region->unhandled(core, fault_addr, fault_addr, region, error_code);
            }
        } else {
            // We fix all permissions on the first pass,
            // so we only get here if it's an unhandled exception
            return region->unhandled(core, fault_addr, fault_addr, region, error_code);
        }

        return 0;
    }
    // Continue with the 4KiB page hierarchy

    // Fix up the PDE entry
    if (pde[pde_index].present == 0) {
        pte = (pte64_t *)create_generic_pt_page();

        pde[pde_index].present = 1;
        pde[pde_index].writable = 1;
        pde[pde_index].user_page = 1;

        pde[pde_index].pt_base_addr = PAGE_BASE_ADDR_4KB((addr_t)V3_PAddr(pte));
    } else {
        pte = V3_VAddr((void *)BASE_TO_PAGE_ADDR_4KB(pde[pde_index].pt_base_addr));
    }
    // Fix up the PTE entry
    if (pte[pte_index].present == 0) {
        pte[pte_index].user_page = 1;

        if ((region->flags.alloced == 1) &&
            (region->flags.read == 1)) {

            pte[pte_index].present = 1;

            if (region->flags.write == 1) {
                pte[pte_index].writable = 1;
            } else {
                pte[pte_index].writable = 0;
            }

            if (v3_gpa_to_hpa(core, fault_addr, &host_addr) == -1) {
                PrintError("Error: Could not translate fault addr (%p)\n", (void *)fault_addr);
                return -1;
            }

            pte[pte_index].page_base_addr = PAGE_BASE_ADDR_4KB(host_addr);
        } else {
            return region->unhandled(core, fault_addr, fault_addr, region, error_code);
        }
    } else {
        // We fix all permissions on the first pass,
        // so we only get here if it's an unhandled exception
        return region->unhandled(core, fault_addr, fault_addr, region, error_code);
    }

    return 0;
}
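/* This handler is reached via the mode-dispatching passthrough fault wrapper in
 * vmm_direct_paging.c, which selects the 32-bit, PAE, or 64-bit variant to match
 * the core's current paging mode.
 */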
static inline int invalidate_addr_64(struct guest_info * core, addr_t inv_addr) {
    pml4e64_t * pml = NULL;
    pdpe64_t * pdpe = NULL;
    pde64_t * pde = NULL;
    pte64_t * pte = NULL;

    // clear the page table entry
    int pml_index = PML4E64_INDEX(inv_addr);
    int pdpe_index = PDPE64_INDEX(inv_addr);
    int pde_index = PDE64_INDEX(inv_addr);
    int pte_index = PTE64_INDEX(inv_addr);
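    /* The walk below mirrors the hardware translation of inv_addr and stops at the
     * first non-present level; when it reaches a large-page entry (1GiB or 2MiB) it
     * clears that entry instead of descending further.
     */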
    // Lookup the correct PML address based on the PAGING MODE
    if (core->shdw_pg_mode == SHADOW_PAGING) {
        pml = CR3_TO_PML4E64_VA(core->ctrl_regs.cr3);
    } else {
        pml = CR3_TO_PML4E64_VA(core->direct_map_pt);
    }

    if (pml[pml_index].present == 0) {
        return 0;
    }

    pdpe = V3_VAddr((void *)BASE_TO_PAGE_ADDR(pml[pml_index].pdp_base_addr));
    if (pdpe[pdpe_index].present == 0) {
        return 0;
    } else if (pdpe[pdpe_index].large_page == 1) { // 1GiB
        pdpe[pdpe_index].present = 0;
        return 0;
    }

    pde = V3_VAddr((void *)BASE_TO_PAGE_ADDR(pdpe[pdpe_index].pd_base_addr));
    if (pde[pde_index].present == 0) {
        return 0;
    } else if (pde[pde_index].large_page == 1) { // 2MiB
        pde[pde_index].present = 0;
        return 0;
    }

    pte = V3_VAddr((void *)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));

    pte[pte_index].present = 0; // 4KiB

    return 0;
}

#endif