5 #include <palacios/vmm_types.h>
6 #include <palacios/vmm_util.h>
10 In the following, when we say "page table", we mean the whole 2 or 4 layer
11 page table (PDEs, PTEs), etc.
14 guest-visible paging state
15 This is the state that the guest thinks the machine is using
17 - guest physical memory
18 The physical memory addresses the guest is allowed to use
19 (see shadow page maps, below)
21 (we care about when the current one changes)
22 - guest paging registers (these are never written to hardware)
28     This is the state that the machine will actually use when the guest
29 is running. It consists of:
30 - current shadow page table
31       This is the page table actually used when the guest is running.
32 It is changed/regenerated when the guest page table changes
33 It mostly reflects the guest page table, except that it restricts
34 physical addresses to those the VMM allocates to the guest.
36 This is a mapping from guest physical memory addresses to
37 the current location of the guest physical memory content.
38 It maps from regions of physical memory addresses to regions
39 located in physical memory or elsewhere.
40         (8192,16384) -> MEM(8192,...)
41 (0,8191) -> DISK(65536,..)
42 - guest paging registers (these are written to guest state)
47 This is the state we expect to be operative when the VMM is running.
48 Typically, this is set up by the host os into which we have embedded
49 the VMM, but we include the description here for clarity.
51 This is the page table we use when we are executing in
52 the VMM (or the host os)
58 The reason why the shadow paging state and the host paging state are
59 distinct is to permit the guest to use any virtual address it wants,
60 irrespective of the addresses the VMM or the host os use. These guest
61 virtual addresses are reflected in the shadow paging state. When we
62 exit from the guest, we switch to the host paging state so that any
63 virtual addresses that overlap between the guest and VMM/host now map
64 to the physical addresses expected by the VMM/host. On AMD SVM, this
65 switch is done by the hardware. On Intel VT, the switch is done
66 by the hardware as well, but we are responsible for manually updating
67 the host state in the vmcs before entering the guest.
/* Number of entries per table at each paging level:
   32-bit (non-PAE) paging uses 1024-entry tables (10-bit indices);
   long-mode (64-bit) paging uses 512-entry tables (9-bit indices). */
#define MAX_PTE32_ENTRIES 1024
#define MAX_PDE32_ENTRIES 1024

#define MAX_PTE64_ENTRIES 512
#define MAX_PDE64_ENTRIES 512
#define MAX_PDPE64_ENTRIES 512
#define MAX_PML4E64_ENTRIES 512
/* Converts an address into a page table index.
   32-bit paging: bits 31-22 select the PDE, bits 21-12 select the PTE.
   The argument is parenthesized before the cast so that expression
   arguments (e.g. pointer arithmetic: (uint_t)(p + 4), not
   (uint_t)p + 4) are evaluated first. */
#define PDE32_INDEX(x) ((((uint_t)(x)) >> 22) & 0x3ff)
#define PTE32_INDEX(x) ((((uint_t)(x)) >> 12) & 0x3ff)
/* Gets the base address needed for a Page Table entry.
   All arguments are fully parenthesized before the cast so that
   expression arguments are evaluated first (macro hygiene). */
#define PD32_BASE_ADDR(x) (((uint_t)(x)) >> 12)
#define PT32_BASE_ADDR(x) (((uint_t)(x)) >> 12)

/* Split an address into its 4KB page frame and 12-bit page offset. */
#define PT32_PAGE_ADDR(x) (((uint_t)(x)) & 0xfffff000)
#define PT32_PAGE_OFFSET(x) (((uint_t)(x)) & 0xfff)
#define PT32_PAGE_POWER 12

/* Same split for 4MB large pages (10-bit frame, 22-bit offset). */
#define PD32_4MB_PAGE_ADDR(x) (((uint_t)(x)) & 0xffc00000)
#define PD32_4MB_PAGE_OFFSET(x) (((uint_t)(x)) & 0x003fffff)
/* The following should be phased out */
/* Legacy 4KB-page helpers; PAGE_OFFSET now parenthesizes its argument
   like PAGE_ALIGNED_ADDR does, so expression arguments are safe. */
#define PAGE_OFFSET(x) ((((uint_t)(x)) & 0xfff))
#define PAGE_ALIGNED_ADDR(x) (((uint_t) (x)) >> 12)
#define PAGE_ADDR(x) (PAGE_ALIGNED_ADDR(x) << 12)
#define PAGE_POWER 12
#define PAGE_SIZE 4096
/* Extract the top-level table base address from a CR3 value for each
   paging mode: 4KB-aligned for 32-bit, 32-byte-aligned for the PAE
   PDPT, 4KB-aligned (52-bit physical) for long mode.  The argument is
   parenthesized before the cast so expression arguments are safe. */
#define CR3_TO_PDE32(cr3) (((ulong_t)(cr3)) & 0xfffff000)
#define CR3_TO_PDPTRE(cr3) (((ulong_t)(cr3)) & 0xffffffe0)
#define CR3_TO_PML4E64(cr3) (((ullong_t)(cr3)) & 0x000ffffffffff000LL)
/* Accessor functions for the page table structures */
/* Reconstruct the physical address held in an entry's base-address
   bitfield: page-table pages and 4KB frames are 4KB-aligned (<< 12);
   a 4MB large-page frame is 4MB-aligned (<< 22).
   'x' is a struct (pde32/pte32/pde32_4MB), not a pointer. */
#define PDE32_T_ADDR(x) (((x).pt_base_addr) << 12)
#define PTE32_T_ADDR(x) (((x).page_base_addr) << 12)
#define PDE32_4MB_T_ADDR(x) (((x).page_base_addr) << 22)
/* Page Table Flag Values */
/* NOTE(review): appears to be a VMM-private flag stored in an entry's
   available bits to mark it as hooked — confirm against the users of
   PT32_HOOK in the .c files. */
#define PT32_HOOK 0x1
124 /* PDE 32 bit PAGE STRUCTURES */
125 typedef enum {PDE32_ENTRY_NOT_PRESENT, PDE32_ENTRY_PTE32, PDE32_ENTRY_LARGE_PAGE} pde32_entry_type_t;
126 typedef enum {PT_ACCESS_OK, PT_ENTRY_NOT_PRESENT, PT_WRITE_ERROR, PT_USER_ERROR} pt_access_status_t;
128 typedef struct pde32 {
131 uint_t user_page : 1;
132 uint_t write_through : 1;
133 uint_t cache_disable : 1;
136 uint_t large_page : 1;
137 uint_t global_page : 1;
139 uint_t pt_base_addr : 20;
142 typedef struct pde32_4MB {
145 uint_t user_page : 1;
146 uint_t write_through : 1;
147 uint_t cache_disable : 1;
151 uint_t global_page : 1;
155 uint_t page_base_addr : 10;
159 typedef struct pte32 {
162 uint_t user_page : 1;
163 uint_t write_through : 1;
164 uint_t cache_disable : 1;
168 uint_t global_page : 1;
170 uint_t page_base_addr : 20;
174 /* 32 bit PAE PAGE STRUCTURES */
183 /* LONG MODE 64 bit PAGE STRUCTURES */
184 typedef struct pml4e64 {
194 uint_t pdp_base_addr_lo : 20;
195 uint_t pdp_base_addr_hi : 20;
196 uint_t available : 11;
197 uint_t no_execute : 1;
201 typedef struct pdpe64 {
209 uint_t large_pages : 1;
212 uint_t pd_base_addr_lo : 20;
213 uint_t pd_base_addr_hi : 20;
214 uint_t available : 11;
215 uint_t no_execute : 1;
221 typedef struct pde64 {
226 uint_t large_pages : 1;
227 uint_t reserved2 : 1;
229 uint_t pt_base_addr_lo : 20;
230 uint_t pt_base_addr_hi : 20;
231 uint_t available : 11;
232 uint_t no_execute : 1;
235 typedef struct pte64 {
241 uint_t global_page : 1;
243 uint_t page_base_addr_lo : 20;
244 uint_t page_base_addr_hi : 20;
245 uint_t available : 11;
246 uint_t no_execute : 1;
249 /* *************** */
251 typedef struct pf_error_code {
252 uint_t present : 1; // if 0, fault due to page not present
253 uint_t write : 1; // if 1, faulting access was a write
254 uint_t user : 1; // if 1, faulting access was in user mode
255 uint_t rsvd_access : 1; // if 1, fault from reading a 1 from a reserved field (?)
256 uint_t ifetch : 1; // if 1, faulting access was an instr fetch (only with NX)
/* Paging modes supported by this code; only 32-bit non-PAE (PDE32)
   is listed so far. */
typedef enum { PDE32 } paging_mode_t;
/* Tears down a 32-bit page directory and the page tables it references
   — presumably freeing them; confirm ownership semantics in the .c file. */
void delete_page_tables_pde32(pde32_t * pde);

/* Look up 'addr' in a page directory / page table.  pde32_lookup
   classifies the matching PDE (not present / PTE pointer / 4MB page);
   *entry receives the result — NOTE(review): whether it is an address
   or entry value is not visible here, verify in the implementation. */
pde32_entry_type_t pde32_lookup(pde32_t * pd, addr_t addr, addr_t * entry);
int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry);

// This assumes that the page table resides in the host address space
// IE. IT DOES NO VM ADDR TRANSLATION
int pt32_lookup(pde32_t * pd, addr_t vaddr, addr_t * paddr);

/* Check whether the access described by the page-fault error code
   'access_type' is permitted by the given entry; returns a
   pt_access_status_t value (PT_ACCESS_OK on success). */
pt_access_status_t can_access_pde32(pde32_t * pde, addr_t addr, pf_error_t access_type);
pt_access_status_t can_access_pte32(pte32_t * pte, addr_t addr, pf_error_t access_type);

/* Build page tables that pass guest addresses straight through for the
   given guest — NOTE(review): exact mapping policy lives in the .c
   file; confirm there. */
pde32_t * create_passthrough_pde32_pts(struct guest_info * guest_info);

/* Debug dump of an entire 32-bit page-table hierarchy. */
void PrintDebugPageTables(pde32_t * pde);

/* Debug dumps of individual tables and entries. */
void PrintPT32(addr_t starting_address, pte32_t * pte);
void PrintPD32(pde32_t * pde);
void PrintPTE32(addr_t virtual_address, pte32_t * pte);
void PrintPDE32(addr_t virtual_address, pde32_t * pde);