1 /* (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> */
2 /* (c) 2008, The V3VEE Project <http://www.v3vee.org> */
8 #include <palacios/vmm_types.h>
9 #include <palacios/vmm_util.h>
13 In the following, when we say "page table", we mean the whole 2 or 4 layer
14 page table (PDEs, PTEs), etc.
17 guest-visible paging state
18 This is the state that the guest thinks the machine is using
20 - guest physical memory
21 The physical memory addresses the guest is allowed to use
22 (see shadow page maps, below)
24 (we care about when the current one changes)
25 - guest paging registers (these are never written to hardware)
31 This the state that the machine will actually use when the guest
32 is running. It consists of:
33 - current shadow page table
This is the page table actually used when the guest is running.
35 It is changed/regenerated when the guest page table changes
36 It mostly reflects the guest page table, except that it restricts
37 physical addresses to those the VMM allocates to the guest.
39 This is a mapping from guest physical memory addresses to
40 the current location of the guest physical memory content.
41 It maps from regions of physical memory addresses to regions
42 located in physical memory or elsewhere.
43 (8192,16384) -> MEM(8912,...)
44 (0,8191) -> DISK(65536,..)
45 - guest paging registers (these are written to guest state)
50 This is the state we expect to be operative when the VMM is running.
51 Typically, this is set up by the host os into which we have embedded
52 the VMM, but we include the description here for clarity.
54 This is the page table we use when we are executing in
55 the VMM (or the host os)
61 The reason why the shadow paging state and the host paging state are
62 distinct is to permit the guest to use any virtual address it wants,
63 irrespective of the addresses the VMM or the host os use. These guest
64 virtual addresses are reflected in the shadow paging state. When we
65 exit from the guest, we switch to the host paging state so that any
66 virtual addresses that overlap between the guest and VMM/host now map
to the physical addresses expected by the VMM/host. On AMD SVM, this
68 switch is done by the hardware. On Intel VT, the switch is done
69 by the hardware as well, but we are responsible for manually updating
70 the host state in the vmcs before entering the guest.
/* Entry counts per level of a 32-bit (non-PAE) page table:
 * the page directory and each page table hold 1024 4-byte entries. */
#define MAX_PTE32_ENTRIES 1024
#define MAX_PDE32_ENTRIES 1024

/* Entry counts per level of a 4-level long-mode (64-bit) page table:
 * each level holds 512 8-byte entries. */
#define MAX_PTE64_ENTRIES 512
#define MAX_PDE64_ENTRIES 512
#define MAX_PDPE64_ENTRIES 512
#define MAX_PML4E64_ENTRIES 512
/* Converts an address into a page table index.
 * Macro parameters are fully parenthesized so that expression arguments
 * (e.g. `base + off`) are cast as a whole, not just their first operand. */
#define PDE32_INDEX(x) ((((uint_t)(x)) >> 22) & 0x3ff) /* bits 31..22: page directory index */
#define PTE32_INDEX(x) ((((uint_t)(x)) >> 12) & 0x3ff) /* bits 21..12: page table index */

/* Gets the base address (page frame number) needed for a Page Table entry */
#define PD32_BASE_ADDR(x) (((uint_t)(x)) >> 12)
#define PT32_BASE_ADDR(x) (((uint_t)(x)) >> 12)
#define PD32_4MB_BASE_ADDR(x) (((uint_t)(x)) >> 22)

/* Split an address into its 4KB-aligned page address and in-page offset */
#define PT32_PAGE_ADDR(x) (((uint_t)(x)) & 0xfffff000)
#define PT32_PAGE_OFFSET(x) (((uint_t)(x)) & 0xfff)
#define PT32_PAGE_POWER 12

/* Same split for 4MB large pages */
#define PD32_4MB_PAGE_ADDR(x) (((uint_t)(x)) & 0xffc00000)
#define PD32_4MB_PAGE_OFFSET(x) (((uint_t)(x)) & 0x003fffff)
#define PAGE_SIZE_4MB (4096 * 1024)
/* The following should be phased out */
/* Generic 4KB-page helpers (duplicates of the PT32_* versions above).
 * PAGE_OFFSET's parameter is now parenthesized like the others so that
 * expression arguments are cast as a whole. */
#define PAGE_OFFSET(x) ((((uint_t)(x)) & 0xfff))        /* offset within a 4KB page */
#define PAGE_ALIGNED_ADDR(x) (((uint_t) (x)) >> 12)     /* page frame number */
#define PAGE_ADDR(x) (PAGE_ALIGNED_ADDR(x) << 12)       /* address rounded down to a page boundary */
#define PAGE_POWER 12
#define PAGE_SIZE 4096
/* Extract the top-level page-table base physical address from a CR3 value.
 * Parameters are parenthesized so expression arguments are cast as a whole. */
#define CR3_TO_PDE32(cr3) (((ulong_t)(cr3)) & 0xfffff000)                /* non-PAE: 4KB-aligned page directory */
#define CR3_TO_PDPTRE(cr3) (((ulong_t)(cr3)) & 0xffffffe0)               /* PAE: 32-byte-aligned PDPT */
#define CR3_TO_PML4E64(cr3) (((ullong_t)(cr3)) & 0x000ffffffffff000LL)   /* long mode: 4KB-aligned PML4 */
/* Accessor functions for the page table structures */
/* Reconstruct the physical address held in an entry's base-address
 * bitfield (the field stores a frame number, so shift it back up). */
#define PDE32_T_ADDR(x) (((x).pt_base_addr) << 12)       /* 4KB-aligned page table address */
#define PTE32_T_ADDR(x) (((x).page_base_addr) << 12)     /* 4KB-aligned page address */
#define PDE32_4MB_T_ADDR(x) (((x).page_base_addr) << 22) /* 4MB-aligned large-page address */

/* Page Table Flag Values */
/* VMM-private flags stashed in software-available entry bits.
 * NOTE(review): semantics inferred from the names — confirm against users. */
#define PT32_HOOK 0x1      /* presumably: entry covers a hooked/intercepted region */
#define PT32_GUEST_PT 0x2  /* presumably: entry shadows a guest page table page */
/* PDE 32 bit PAGE STRUCTURES */
/* Classification of a 32-bit PDE: absent, points to a page table, or maps a 4MB large page. */
typedef enum {PDE32_ENTRY_NOT_PRESENT, PDE32_ENTRY_PTE32, PDE32_ENTRY_LARGE_PAGE} pde32_entry_type_t;
/* Result of a page-table permission check (see can_access_pde32/can_access_pte32). */
typedef enum {PT_ACCESS_OK, PT_ENTRY_NOT_PRESENT, PT_WRITE_ERROR, PT_USER_ERROR} pt_access_status_t;
/* 32-bit (non-PAE) page directory entry pointing to a 4KB-aligned page table.
 * Field widths follow the IA-32 PDE layout. */
typedef struct pde32 {
  uint_t user_page : 1;     /* 1 = user-mode (CPL 3) access allowed */
  uint_t write_through : 1; /* 1 = write-through caching for the page table */
  uint_t cache_disable : 1; /* 1 = caching disabled for the page table */
  uint_t large_page : 1;    /* 1 = entry maps a 4MB page (see pde32_4MB) */
  uint_t global_page : 1;   /* ignored by hardware in a PDE that points to a page table */
  uint_t pt_base_addr : 20; /* physical address of the page table >> 12 */
/* 32-bit page directory entry that directly maps a 4MB large page */
typedef struct pde32_4MB {
  uint_t user_page : 1;       /* 1 = user-mode (CPL 3) access allowed */
  uint_t write_through : 1;   /* 1 = write-through caching */
  uint_t cache_disable : 1;   /* 1 = caching disabled */
  uint_t global_page : 1;     /* 1 = translation survives CR3 reload (requires CR4.PGE) */
  uint_t page_base_addr : 10; /* physical address of the 4MB page >> 22 */
/* 32-bit page table entry mapping a single 4KB page */
typedef struct pte32 {
  uint_t user_page : 1;       /* 1 = user-mode (CPL 3) access allowed */
  uint_t write_through : 1;   /* 1 = write-through caching */
  uint_t cache_disable : 1;   /* 1 = caching disabled */
  uint_t global_page : 1;     /* 1 = translation survives CR3 reload (requires CR4.PGE) */
  uint_t page_base_addr : 20; /* physical address of the 4KB page >> 12 */
182 /* 32 bit PAE PAGE STRUCTURES */
191 /* LONG MODE 64 bit PAGE STRUCTURES */
/* Long-mode (64-bit) page-map level 4 entry */
typedef struct pml4e64 {
  uint_t pdp_base_addr_lo : 20; /* bits 12..31 of the PDPT physical address */
  uint_t pdp_base_addr_hi : 20; /* bits 32..51 of the PDPT physical address */
  uint_t available : 11;        /* ignored by hardware; free for VMM use */
  uint_t no_execute : 1;        /* NX (bit 63): 1 = instruction fetch disallowed */
/* Long-mode page directory pointer table entry */
typedef struct pdpe64 {
  uint_t large_pages : 1;      /* PS bit: 1 = entry maps a 1GB page */
  uint_t pd_base_addr_lo : 20; /* bits 12..31 of the page directory physical address */
  uint_t pd_base_addr_hi : 20; /* bits 32..51 of the page directory physical address */
  uint_t available : 11;       /* ignored by hardware; free for VMM use */
  uint_t no_execute : 1;       /* NX (bit 63): 1 = instruction fetch disallowed */
/* Long-mode page directory entry */
typedef struct pde64 {
  uint_t large_pages : 1;      /* PS bit: 1 = entry maps a 2MB page */
  uint_t reserved2 : 1;        /* must be zero */
  uint_t pt_base_addr_lo : 20; /* bits 12..31 of the page table physical address */
  uint_t pt_base_addr_hi : 20; /* bits 32..51 of the page table physical address */
  uint_t available : 11;       /* ignored by hardware; free for VMM use */
  uint_t no_execute : 1;       /* NX (bit 63): 1 = instruction fetch disallowed */
/* Long-mode page table entry mapping a single 4KB page */
typedef struct pte64 {
  uint_t global_page : 1;        /* 1 = translation survives CR3 reload (requires CR4.PGE) */
  uint_t page_base_addr_lo : 20; /* bits 12..31 of the page physical address */
  uint_t page_base_addr_hi : 20; /* bits 32..51 of the page physical address */
  uint_t available : 11;         /* ignored by hardware; free for VMM use */
  uint_t no_execute : 1;         /* NX (bit 63): 1 = instruction fetch disallowed */
257 /* *************** */
/* Page-fault error code, in the layout the CPU pushes on a #PF exception */
typedef struct pf_error_code {
  uint_t present : 1;     // if 0, fault due to page not present
  uint_t write : 1;       // if 1, faulting access was a write
  uint_t user : 1;        // if 1, faulting access was in user mode
  uint_t rsvd_access : 1; // if 1, fault caused by a reserved bit set to 1 in a paging structure
  uint_t ifetch : 1;      // if 1, faulting access was an instr fetch (only with NX)
/* Paging modes the VMM implements (currently only 32-bit non-PAE) */
typedef enum { PDE32 } paging_mode_t;
/* Tears down a 32-bit page table hierarchy rooted at pde.
 * NOTE(review): presumably frees each reachable page table plus the
 * directory itself — confirm in the implementation. */
void delete_page_tables_pde32(pde32_t * pde);

/* Single-level lookups: classify/walk the one entry covering addr and
 * return the result through *entry. */
pde32_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry);
int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry);

// This assumes that the page table resides in the host address space
// IE. IT DOES NO VM ADDR TRANSLATION
/* Full two-level walk: translate virtual vaddr to physical *paddr.
 * NOTE(review): return-value convention (0 = success?) not visible here — confirm. */
int pt32_lookup(pde32_t * pd, addr_t vaddr, addr_t * paddr);

/* Check whether the access described by access_type (a #PF-style error
 * code) is permitted by the given entry for addr. */
pt_access_status_t can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type);
pt_access_status_t can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type);

/* Builds a passthrough 32-bit page table for the guest.
 * NOTE(review): presumably identity-maps the guest's physical memory map — confirm. */
pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info);

/* Debugging helpers: dump page table contents / individual entries */
void PrintDebugPageTables(pde32_t * pde);
void PrintPT32(addr_t starting_address, pte32_t * pte);
void PrintPD32(pde32_t * pde);
void PrintPTE32(addr_t virtual_address, pte32_t * pte);
void PrintPDE32(addr_t virtual_address, pde32_t * pde);