1 #include <geekos/vmm_shadow_paging.h>
3 #include <geekos/vmm.h>
4 #include <geekos/vm_guest_mem.h>
6 extern struct vmm_os_hooks * os_hooks;
/*
 * Initialize a shadow_page_state to a known baseline: both the guest's
 * paging mode and the shadow's paging mode are set to 32-bit page
 * directories (PDE32), and both CR3 values are zeroed.
 *
 * NOTE(review): the remainder of this function (including its return
 * statement) is not visible in this view of the file.
 */
9 int init_shadow_page_state(struct shadow_page_state * state) {
  // Assume 32-bit (non-PAE) paging for both guest and shadow for now.
10 state->guest_mode = PDE32;
11 state->shadow_mode = PDE32;
  // Clear both CR3 registers; real values are installed later,
  // presumably when page tables are allocated — not visible here.
13 state->guest_cr3.r_reg = 0;
14 state->shadow_cr3.r_reg = 0;
/*
 * Throw away the current shadow page tables and rebuild them from
 * scratch by walking the guest's 32-bit page directory (PDE32) and
 * cloning every entry into a freshly allocated shadow directory.
 * Guest-physical frame numbers are rewritten to host-physical frames
 * using the guest's shadow memory map (get_shadow_region_by_addr /
 * guest_pa_to_host_pa); entries the guest maps to unallocated or
 * forbidden regions are marked not-present in the shadow.
 *
 * Only the PDE32 guest paging mode is handled.
 *
 * NOTE(review): this view of the file is missing many lines (local
 * declarations, `break`s, returns, closing braces), so comments below
 * describe only what the visible statements establish.
 */
20 int wholesale_update_shadow_page_state(struct guest_info * guest_info) {
25 struct shadow_page_state * state = &(guest_info->shdw_pg_state);
28 // For now, we'll only work with PDE32
29 if (state->guest_mode != PDE32) {
  // Shadow CR3 holds a host address directly; guest CR3 holds a
  // guest-physical address and must be translated to a host VA first.
33 shadow_pde = (pde32_t *)(CR3_TO_PDE32(state->shadow_cr3.e_reg.low));
34 guest_pde = (pde32_t *)(host_pa_to_host_va((void*)CR3_TO_PDE32(state->guest_cr3.e_reg.low)));
36 // Delete the current page table
37 delete_page_tables_pde32(shadow_pde);
  // Allocate a fresh, empty top-level shadow page directory (one page)
  // and install it as the new shadow CR3.
39 shadow_pde = os_hooks->allocate_pages(1);
41 state->shadow_cr3.e_reg.low = (addr_t)shadow_pde;
43 state->shadow_mode = PDE32;
  // Walk every guest page-directory entry and clone it into the shadow.
45 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
  // Start from a verbatim copy; frame addresses are fixed up below.
46 shadow_pde[i] = guest_pde[i];
48 // The shadow can be identical to the guest if it's not present
49 if (!shadow_pde[i].present) {
53 if (shadow_pde[i].large_pages) {
54 // large page - just map it through shadow map to generate its physical location
55 addr_t guest_addr = PAGE_ADDR(shadow_pde[i].pt_base_addr);
57 shadow_region_t * ent;
59 ent = get_shadow_region_by_addr(&(guest_info->mem_map), guest_addr);
62 // FIXME Panic here - guest is trying to map to physical memory
63 // it does not own in any way!
67 // FIXME Bounds check here to see if it's trying to trick us
  // Dispatch on how the host backs this guest-physical region.
69 switch (ent->host_type) {
70 case HOST_REGION_PHYSICAL_MEMORY:
71 // points into currently allocated physical memory, so we just
72 // set up the shadow to point to the mapped location
  // NOTE(review): the error-handling body of this if is elided here.
73 if (guest_pa_to_host_pa(guest_info, guest_addr, &host_addr)) {
  // Rewrite the large-page frame to the translated host-physical frame.
78 shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(host_addr);
79 // FIXME set vmm_info bits here
81 case HOST_REGION_UNALLOCATED:
82 // points to physical memory that is *allowed* but that we
83 // have not yet allocated. We mark as not present and set a
84 // bit to remind us to allocate it later
85 shadow_pde[i].present = 0;
86 // FIXME Set vminfo bits here so that we know that we will be
87 // allocating it later
89 case HOST_REGION_NOTHING:
90 // points to physical memory that is NOT ALLOWED.
91 // We will mark it as not present and set a bit to remind
92 // us that it's bad later and insert a GPF then
93 shadow_pde[i].present = 0;
95 case HOST_REGION_MEMORY_MAPPED_DEVICE:
96 case HOST_REGION_REMOTE:
97 case HOST_REGION_SWAPPED:
99 // Panic. Currently unhandled
  // --- 4KB-page path: the PDE points to a second-level page table. ---
105 pte32_t * shadow_pte;
107 addr_t guest_pte_host_addr;
108 shadow_region_t * ent;
110 // small page - set PDE and follow down to the child table
111 shadow_pde[i] = guest_pde[i];
  // Guest-physical address of the guest's second-level page table.
113 guest_addr = PAGE_ADDR(guest_pde[i].pt_base_addr);
115 // Allocate a new second level page table for the shadow
116 shadow_pte = os_hooks->allocate_pages(1);
118 // make our first level page table in the shadow point to it
119 shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(shadow_pte);
121 ent = get_shadow_region_by_addr(&(guest_info->mem_map), guest_addr);
124 /* JRL: This is bad.... */
125 // For now the guest Page Table must always be mapped to host physical memory
126 /* If we swap out a page table or if it isn't present for some reason, this turns real ugly */
128 if ((!ent) || (ent->host_type != HOST_REGION_PHYSICAL_MEMORY)) {
129 // FIXME Panic here - guest is trying to map to physical memory
130 // it does not own in any way!
134 // Address of the relevant second level page table in the guest
  // NOTE(review): the error-handling body of this if is elided here.
135 if (guest_pa_to_host_pa(guest_info, guest_addr, &guest_pte_host_addr)) {
141 // host_addr now contains the host physical address for the guest's 2nd level page table
142 // Now we transform it to relevant virtual address
143 guest_pte = os_hooks->paddr_to_vaddr((void *)guest_pte_host_addr);
145 // Now we walk through the second level guest page table
146 // and clone it into the shadow
147 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
148 shadow_pte[j] = guest_pte[j];
  // Guest-physical address of the 4KB frame this PTE maps.
150 addr_t guest_addr = PAGE_ADDR(shadow_pte[j].page_base_addr);
152 shadow_region_t * ent;
154 ent = get_shadow_region_by_addr(&(guest_info->mem_map), guest_addr);
157 // FIXME Panic here - guest is trying to map to physical memory
158 // it does not own in any way!
  // Same region dispatch as the large-page case above, per PTE.
162 switch (ent->host_type) {
163 case HOST_REGION_PHYSICAL_MEMORY:
167 // points into currently allocated physical memory, so we just
168 // set up the shadow to point to the mapped location
  // NOTE(review): the error-handling body of this if is elided here.
169 if (guest_pa_to_host_pa(guest_info, guest_addr, &host_addr)) {
174 shadow_pte[j].page_base_addr = PAGE_ALIGNED_ADDR(host_addr);
175 // FIXME set vmm_info bits here
178 case HOST_REGION_UNALLOCATED:
179 // points to physical memory that is *allowed* but that we
180 // have not yet allocated. We mark as not present and set a
181 // bit to remind us to allocate it later
182 shadow_pte[j].present = 0;
183 // FIXME Set vminfo bits here so that we know that we will be
184 // allocating it later
186 case HOST_REGION_NOTHING:
187 // points to physical memory that is NOT ALLOWED.
188 // We will mark it as not present and set a bit to remind
189 // us that it's bad later and insert a GPF then
190 shadow_pte[j].present = 0;
192 case HOST_REGION_MEMORY_MAPPED_DEVICE:
193 case HOST_REGION_REMOTE:
194 case HOST_REGION_SWAPPED:
196 // Panic. Currently unhandled