1 #include <geekos/vmm_shadow_paging.h>
3 #include <geekos/vmm.h>
4 #include <geekos/vm_guest_mem.h>
6 extern struct vmm_os_hooks * os_hooks;
// Initialize the per-guest shadow paging state: both the guest's paging
// mode and our shadow's paging mode start out as 32-bit non-PAE page
// directories (PDE32), and both CR3 images are zeroed.
// NOTE(review): the return statement and closing brace fall on lines
// elided from this excerpt; presumably returns 0 -- confirm in full source.
9 int init_shadow_page_state(struct shadow_page_state * state) {
// Only 32-bit (non-PAE) paging is modeled at this point.
10 state->guest_mode = PDE32;
11 state->shadow_mode = PDE32;
// Clear the raw register images of both CR3 copies.
13 state->guest_cr3.r_reg = 0;
14 state->shadow_cr3.r_reg = 0;
// Throw away the current shadow page tables and rebuild them wholesale
// from the guest's page tables.  Only 32-bit non-PAE paging (PDE32) is
// handled.  Walks every guest PDE: large pages are remapped directly to
// their host-physical location; small-page PDEs get a freshly allocated
// shadow second-level table whose PTEs are cloned from the guest's and
// rewritten from guest-physical to host-physical addresses via the
// guest's shadow memory map (get_shadow_region_by_addr / guest_pa_to_host_pa).
//
// NOTE(review): this listing is an excerpt -- local variable
// declarations (shadow_pde, guest_pde, guest_pte, host_addr, i, j),
// error returns, closing braces, and (presumably) the switch-case
// `break`s sit on elided lines; confirm control flow against the full
// source before acting on it.
20 int wholesale_update_shadow_page_state(struct guest_info * guest_info) {
25 struct shadow_page_state * state = &(guest_info->shdw_pg_state);
28 // For now, we'll only work with PDE32
29 if (state->guest_mode != PDE32) {
// (error-handling body elided from this excerpt)
// Locate our current shadow page directory from the shadow CR3 image.
33 shadow_pde = (pde32_t *)(CR3_TO_PDE32(state->shadow_cr3.e_reg.low));
// Map the guest's page directory into a host-virtual pointer we can
// read.  NOTE(review): this treats the guest CR3 image as a
// host-physical address -- presumably it was translated when stored;
// confirm against the code that writes state->guest_cr3.
35 if (host_pa_to_host_va(CR3_TO_PDE32(state->guest_cr3.e_reg.low), (addr_t*)&guest_pde) != 0) {
39 // Delete the current page table
40 delete_page_tables_pde32(shadow_pde);
// Start over with a single fresh page for the shadow page directory.
42 shadow_pde = os_hooks->allocate_pages(1);
// NOTE(review): the pointer returned by allocate_pages() is stored
// straight into the shadow CR3 image; that is only correct if
// allocate_pages() returns a page-aligned physical address or the host
// is identity-mapped -- confirm.
44 state->shadow_cr3.e_reg.low = (addr_t)shadow_pde;
46 state->shadow_mode = PDE32;
// First-level walk: clone each guest PDE into the shadow PD, then fix
// up the ones that need host translation.
48 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
49 shadow_pde[i] = guest_pde[i];
51 // The shadow can be identical to the guest if it's not present
52 if (!shadow_pde[i].present) {
56 if (shadow_pde[i].large_pages) {
57 // large page - just map it through shadow map to generate its physical location
58 addr_t guest_addr = PAGE_ADDR(shadow_pde[i].pt_base_addr);
60 shadow_region_t * ent;
62 ent = get_shadow_region_by_addr(&(guest_info->mem_map), guest_addr);
65 // FIXME Panic here - guest is trying to map to physical memory
66 // it does not own in any way!
70 // FIXME Bounds check here to see if it's trying to trick us
// Dispatch on where this guest-physical region actually lives on the host.
72 switch (ent->host_type) {
73 case HOST_REGION_PHYSICAL_MEMORY:
74 // points into currently allocated physical memory, so we just
75 // set up the shadow to point to the mapped location
76 if (guest_pa_to_host_pa(guest_info, guest_addr, &host_addr)) {
81 shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(host_addr);
82 // FIXME set vmm_info bits here
// (presumably a `break;` on an elided line here -- confirm; same for
// the following cases)
84 case HOST_REGION_UNALLOCATED:
85 // points to physical memory that is *allowed* but that we
86 // have not yet allocated. We mark as not present and set a
87 // bit to remind us to allocate it later
88 shadow_pde[i].present = 0;
89 // FIXME Set vminfo bits here so that we know that we will be
90 // allocating it later
92 case HOST_REGION_NOTHING:
93 // points to physical memory that is NOT ALLOWED.
94 // We will mark it as not present and set a bit to remind
95 // us that it's bad later and insert a GPF then
96 shadow_pde[i].present = 0;
98 case HOST_REGION_MEMORY_MAPPED_DEVICE:
99 case HOST_REGION_REMOTE:
100 case HOST_REGION_SWAPPED:
102 // Panic. Currently unhandled
// Small-page PDE: build and populate a shadow second-level page table.
108 pte32_t * shadow_pte;
110 addr_t guest_pte_host_addr;
111 shadow_region_t * ent;
113 // small page - set PDE and follow down to the child table
114 shadow_pde[i] = guest_pde[i];
116 guest_addr = PAGE_ADDR(guest_pde[i].pt_base_addr);
118 // Allocate a new second level page table for the shadow
119 shadow_pte = os_hooks->allocate_pages(1);
121 // make our first level page table in the shadow point to it
122 shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(shadow_pte);
124 ent = get_shadow_region_by_addr(&(guest_info->mem_map), guest_addr);
127 /* JRL: This is bad.... */
128 // For now the guest Page Table must always be mapped to host physical memory
129 /* If we swap out a page table or if it isn't present for some reason, this turns real ugly */
131 if ((!ent) || (ent->host_type != HOST_REGION_PHYSICAL_MEMORY)) {
132 // FIXME Panic here - guest is trying to map to physical memory
133 // it does not own in any way!
137 // Address of the relevant second level page table in the guest
138 if (guest_pa_to_host_pa(guest_info, guest_addr, &guest_pte_host_addr)) {
144 // guest_pte_host_addr now contains the host physical address for the guest's 2nd level page table
145 // Now we transform it to relevant virtual address
146 guest_pte = os_hooks->paddr_to_vaddr((void *)guest_pte_host_addr);
148 // Now we walk through the second level guest page table
149 // and clone it into the shadow
150 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
151 shadow_pte[j] = guest_pte[j];
153 addr_t guest_addr = PAGE_ADDR(shadow_pte[j].page_base_addr);
155 shadow_region_t * ent;
157 ent = get_shadow_region_by_addr(&(guest_info->mem_map), guest_addr);
160 // FIXME Panic here - guest is trying to map to physical memory
161 // it does not own in any way!
// Same host-region dispatch as the PDE case above, but at PTE granularity.
165 switch (ent->host_type) {
166 case HOST_REGION_PHYSICAL_MEMORY:
170 // points into currently allocated physical memory, so we just
171 // set up the shadow to point to the mapped location
172 if (guest_pa_to_host_pa(guest_info, guest_addr, &host_addr)) {
177 shadow_pte[j].page_base_addr = PAGE_ALIGNED_ADDR(host_addr);
178 // FIXME set vmm_info bits here
181 case HOST_REGION_UNALLOCATED:
182 // points to physical memory that is *allowed* but that we
183 // have not yet allocated. We mark as not present and set a
184 // bit to remind us to allocate it later
185 shadow_pte[j].present = 0;
186 // FIXME Set vminfo bits here so that we know that we will be
187 // allocating it later
189 case HOST_REGION_NOTHING:
190 // points to physical memory that is NOT ALLOWED.
191 // We will mark it as not present and set a bit to remind
192 // us that it's bad later and insert a GPF then
193 shadow_pte[j].present = 0;
195 case HOST_REGION_MEMORY_MAPPED_DEVICE:
196 case HOST_REGION_REMOTE:
197 case HOST_REGION_SWAPPED:
199 // Panic. Currently unhandled