5 #include <palacios/vmm_types.h>
9 #include <palacios/vmm_mem.h>
10 #include <palacios/vmm_util.h>
14 In the following, when we say "page table", we mean the whole 2- or 4-level
15 page table structure (PDEs, PTEs, etc.).
18 guest-visible paging state
19 This is the state that the guest thinks the machine is using
21 - guest physical memory
22 The physical memory addresses the guest is allowed to use
23 (see shadow page maps, below)
25 (we care about when the current one changes)
26 - guest paging registers (these are never written to hardware)
32 This the state that the machine will actually use when the guest
33 is running. It consists of:
34 - current shadow page table
35 This is the page table actually used when the guest is running.
36 It is changed/regenerated when the guest page table changes
37 It mostly reflects the guest page table, except that it restricts
38 physical addresses to those the VMM allocates to the guest.
40 This is a mapping from guest physical memory addresses to
41 the current location of the guest physical memory content.
42 It maps from regions of physical memory addresses to regions
43 located in physical memory or elsewhere.
44 (8192,16384) -> MEM(8912,...)
45 (0,8191) -> DISK(65536,..)
46 - guest paging registers (these are written to guest state)
51 This is the state we expect to be operative when the VMM is running.
52 Typically, this is set up by the host os into which we have embedded
53 the VMM, but we include the description here for clarity.
55 This is the page table we use when we are executing in
56 the VMM (or the host os)
62 The reason why the shadow paging state and the host paging state are
63 distinct is to permit the guest to use any virtual address it wants,
64 irrespective of the addresses the VMM or the host os use. These guest
65 virtual addresses are reflected in the shadow paging state. When we
66 exit from the guest, we switch to the host paging state so that any
67 virtual addresses that overlap between the guest and VMM/host now map
68 to the physical addresses expected by the VMM/host. On AMD SVM, this
69 switch is done by the hardware. On Intel VT, the switch is done
70 by the hardware as well, but we are responsible for manually updating
71 the host state in the vmcs before entering the guest.
/* Number of entries per level of the paging structures.
 * 32-bit non-PAE tables hold 1024 4-byte entries in a 4KB page;
 * long-mode (64-bit) tables hold 512 8-byte entries. */
#define MAX_PTE32_ENTRIES 1024
#define MAX_PDE32_ENTRIES 1024
#define MAX_PTE64_ENTRIES 512
#define MAX_PDE64_ENTRIES 512
#define MAX_PDPE64_ENTRIES 512
#define MAX_PML4E64_ENTRIES 512
/* Converts a virtual address into a page table index.
 * Every macro argument is fully parenthesized: with the old `(uint_t)x`
 * form, an argument like `base + off` was cast one operand at a time
 * ((uint_t)base + off), which silently miscomputes for pointer or
 * mixed-type expressions. */
#define PDE32_INDEX(x) ((((uint_t)(x)) >> 22) & 0x3ff)  /* bits 31:22 select the PDE slot */
#define PTE32_INDEX(x) ((((uint_t)(x)) >> 12) & 0x3ff)  /* bits 21:12 select the PTE slot */

/* Gets the base address needed for a Page Table entry
 * (physical address shifted down by the 4KB page size). */
#define PD32_BASE_ADDR(x) (((uint_t)(x)) >> 12)
#define PT32_BASE_ADDR(x) (((uint_t)(x)) >> 12)

#define PT32_PAGE_ADDR(x)   (((uint_t)(x)) & 0xfffff000) /* 4KB-aligned frame address */
#define PT32_PAGE_OFFSET(x) (((uint_t)(x)) & 0xfff)      /* byte offset within a 4KB page */
#define PT32_PAGE_POWER 12
/* The following should be phased out */
/* Arguments are parenthesized so compound expressions are cast as a
 * whole (the old PAGE_OFFSET cast only the first operand). */
#define PAGE_OFFSET(x)       (((uint_t)(x)) & 0xfff)      /* byte offset within a 4KB page */
#define PAGE_ALIGNED_ADDR(x) (((uint_t)(x)) >> 12)        /* page frame number */
#define PAGE_ADDR(x)         (PAGE_ALIGNED_ADDR(x) << 12) /* round down to a page boundary */
#define PAGE_POWER 12
#define PAGE_SIZE 4096
/* Strip the control/flag bits from a CR3 value, leaving the physical
 * base address of the top-level paging structure for each mode.
 * The argument is parenthesized so compound expressions mask correctly. */
#define CR3_TO_PDE32(cr3)   (((ulong_t)(cr3)) & 0xfffff000)            /* 32-bit: 4KB-aligned page directory */
#define CR3_TO_PDPTRE(cr3)  (((ulong_t)(cr3)) & 0xffffffe0)            /* PAE: 32-byte-aligned PDPT */
#define CR3_TO_PML4E64(cr3) (((ullong_t)(cr3)) & 0x000ffffffffff000LL) /* long mode: 4KB-aligned PML4, 52-bit max */
/* Accessor functions for the page table structures.
 * The argument is an entry struct (by value, not a pointer). It is
 * parenthesized so any entry-valued expression works — the old
 * `x.pt_base_addr` form only compiled when `x` was a plain identifier. */
#define PDE32_T_ADDR(x) (((x).pt_base_addr) << 12)   /* physical address of the page table */
#define PTE32_T_ADDR(x) (((x).page_base_addr) << 12) /* physical address of the mapped page */
127 /* PDE 32 bit PAGE STRUCTURES */
128 typedef enum {NOT_PRESENT, PTE32, LARGE_PAGE} pde32_entry_type_t;
/* 32-bit (non-PAE) page directory entry.
 * NOTE(review): this definition is truncated in this chunk — the
 * low-order flag bits (present, writable, etc.) and the closing
 * brace/typedef are not visible here. */
typedef struct pde32 {
  uint_t large_pages : 1;   /* PS: set => this entry maps a large page directly */
  uint_t global_page : 1;   /* G: translation survives CR3 reloads */
  uint_t pt_base_addr : 20; /* physical frame number of the page table */
/* 32-bit (non-PAE) page table entry.
 * NOTE(review): truncated in this chunk — low-order flag bits and the
 * closing brace/typedef are not visible here. */
typedef struct pte32 {
  uint_t global_page : 1;     /* G: translation survives CR3 reloads */
  uint_t page_base_addr : 20; /* physical frame number of the mapped 4KB page */
153 /* 32 bit PAE PAGE STRUCTURES */
162 /* LONG MODE 64 bit PAGE STRUCTURES */
/* Long-mode PML4 entry.
 * NOTE(review): low-order flag bits and the closing brace are not
 * visible in this chunk. The 40-bit physical base (bits 51:12) is
 * split across two 20-bit fields rather than one 64-bit bitfield. */
typedef struct pml4e64 {
  uint_t pdp_base_addr_lo : 20; /* PDPT base, address bits 31:12 */
  uint_t pdp_base_addr_hi : 20; /* PDPT base, address bits 51:32 */
  uint_t available : 11;        /* ignored by hardware; free for VMM use */
  uint_t no_execute : 1;        /* NX: forbid instruction fetch from this region */
/* Long-mode page-directory-pointer (PDPT) entry.
 * NOTE(review): low-order flag bits and the closing brace are not
 * visible in this chunk. */
typedef struct pdpe64 {
  uint_t large_pages : 1;      /* PS: set => maps a large (1GB, if supported) page */
  uint_t pd_base_addr_lo : 20; /* page directory base, address bits 31:12 */
  uint_t pd_base_addr_hi : 20; /* page directory base, address bits 51:32 */
  uint_t available : 11;       /* ignored by hardware; free for VMM use */
  uint_t no_execute : 1;       /* NX: forbid instruction fetch from this region */
/* Long-mode page directory entry.
 * NOTE(review): low-order flag bits and the closing brace are not
 * visible in this chunk. */
typedef struct pde64 {
  uint_t large_pages : 1;      /* PS: set => maps a large (2MB) page directly */
  uint_t reserved2 : 1;        /* must be zero */
  uint_t pt_base_addr_lo : 20; /* page table base, address bits 31:12 */
  uint_t pt_base_addr_hi : 20; /* page table base, address bits 51:32 */
  uint_t available : 11;       /* ignored by hardware; free for VMM use */
  uint_t no_execute : 1;       /* NX: forbid instruction fetch from this region */
/* Long-mode page table entry (4KB page).
 * NOTE(review): low-order flag bits and the closing brace are not
 * visible in this chunk. */
typedef struct pte64 {
  uint_t global_page : 1;        /* G: translation survives CR3 reloads */
  uint_t page_base_addr_lo : 20; /* mapped page base, address bits 31:12 */
  uint_t page_base_addr_hi : 20; /* mapped page base, address bits 51:32 */
  uint_t available : 11;         /* ignored by hardware; free for VMM use */
  uint_t no_execute : 1;         /* NX: forbid instruction fetch from this page */
228 /* *************** */
/* Page fault error code, as pushed by the hardware on a #PF exception.
 * NOTE(review): the remaining reserved bits and the closing
 * brace/typedef are not visible in this chunk. */
typedef struct pf_error_code {
  uint_t present : 1;     // if 0, fault due to page not present
  uint_t write : 1;       // if 1, faulting access was a write
  uint_t user : 1;        // if 1, faulting access was in user mode
  uint_t rsvd_access : 1; // if 1, fault from reading a 1 from a reserved field (?)
  uint_t ifetch : 1;      // if 1, faulting access was an instr fetch (only with NX)
239 typedef enum { PDE32 } paging_mode_t;
/* Frees a 32-bit page directory and — presumably — the page tables it
 * points to; TODO(review): confirm ownership semantics at the definition. */
void delete_page_tables_pde32(pde32_t * pde);

/* Walk one level of the 32-bit tables for `addr`, returning the matched
 * entry through `entry`. pde32_lookup classifies the PDE (not present,
 * page-table pointer, or large page); pte32_lookup returns an int
 * status — NOTE(review): success/failure convention not visible here. */
pde32_entry_type_t pde32_lookup(pde32_t * pde, addr_t addr, addr_t * entry);
int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry);

/* Build a "passthrough" 32-bit page directory for the guest —
 * presumably mapping guest physical addresses through the shadow map
 * described above; confirm at the definition. */
pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info);

/* Debugging helpers: dump paging structures to the debug log. */
void PrintDebugPageTables(pde32_t * pde);
void PrintPT32(addr_t starting_address, pte32_t * pte);
void PrintPD32(pde32_t * pde);
void PrintPTE32(addr_t virtual_address, pte32_t * pte);
void PrintPDE32(addr_t virtual_address, pde32_t * pde);