1 #include <geekos/vmm_shadow_paging.h>
3 #include <geekos/vmm.h>
5 extern struct vmm_os_hooks * os_hooks;
8 int init_shadow_page_state(shadow_page_state_t * state) {
9 state->guest_mode = PDE32;
10 state->shadow_mode = PDE32;
12 state->guest_cr3.r_reg = 0;
13 state->shadow_cr3.r_reg = 0;
/*
 * Rebuild the entire shadow page table hierarchy from the guest's
 * current 32-bit (PDE32) page directory.
 *
 * The existing shadow page directory is deleted, a fresh one is
 * allocated, and every guest PDE (and, for small pages, every guest
 * PTE) is cloned into the shadow with guest-physical frame numbers
 * rewritten to host-physical frames via the shadow memory map
 * (mem_map).
 *
 * NOTE(review): this excerpt is elided in places -- the declarations
 * of i/j/guest_addr/host_addr, the error-path bodies, the switch
 * `break`s, and the function tail are not visible here -- so the
 * return-value convention is not documented; presumably 0 on success
 * and negative on error. Confirm against the full source.
 */
19 int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t * mem_map) {
21 vmm_pde_t * guest_pde;
22 vmm_pde_t * shadow_pde;
25 // For now, we'll only work with PDE32
26 if (state->guest_mode != PDE32) {
// The shadow CR3 image is directly usable as a host pointer (see the
// store of (addr_t)shadow_pde back into it below), while the guest CR3
// holds a guest-physical address that must go through paddr_to_vaddr.
32 shadow_pde = (vmm_pde_t *)(CR3_TO_PDE(state->shadow_cr3.e_reg.low));
33 guest_pde = (vmm_pde_t *)(os_hooks->paddr_to_vaddr((void*)CR3_TO_PDE(state->guest_cr3.e_reg.low)));
35 // Delete the current page table
36 delete_page_tables_pde32(shadow_pde);
// Start over with a freshly allocated shadow page directory and make
// the shadow CR3 image point at it.
38 shadow_pde = os_hooks->allocate_pages(1);
41 state->shadow_cr3.e_reg.low = (addr_t)shadow_pde;
43 state->shadow_mode = PDE32;
// Walk every guest page directory entry, cloning it into the shadow.
46 for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
47 shadow_pde[i] = guest_pde[i];
49 // The shadow can be identical to the guest if it's not present
50 if (!shadow_pde[i].present) {
54 if (shadow_pde[i].large_pages) {
55 // large page - just map it through shadow map to generate its physical location
56 addr_t guest_addr = PAGE_ADDR(shadow_pde[i].pt_base_addr);
58 shadow_region_t * ent;
60 ent = get_shadow_region_by_addr(mem_map, guest_addr);
63 // FIXME Panic here - guest is trying to map to physical memory
64 // it does not own in any way!
68 // FIXME Bounds check here to see if it's trying to trick us
// Rewrite (or invalidate) the shadow PDE according to how the host
// backs this guest-physical region.
70 switch (ent->host_type) {
71 case HOST_REGION_PHYSICAL_MEMORY:
72 // points into currently allocated physical memory, so we just
73 // set up the shadow to point to the mapped location
// NOTE(review): host_addr's declaration and this branch's failure
// handling fall in an elided region of this excerpt.
74 if (guest_paddr_to_host_paddr(ent, guest_addr, &host_addr)) {
79 shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(host_addr);
80 // FIXME set vmm_info bits here
82 case HOST_REGION_UNALLOCATED:
83 // points to physical memory that is *allowed* but that we
84 // have not yet allocated. We mark as not present and set a
85 // bit to remind us to allocate it later
86 shadow_pde[i].present = 0;
87 // FIXME Set vminfo bits here so that we know that we will be
88 // allocating it later
90 case HOST_REGION_NOTHING:
91 // points to physical memory that is NOT ALLOWED.
92 // We will mark it as not present and set a bit to remind
93 // us that it's bad later and insert a GPF then
94 shadow_pde[i].present = 0;
96 case HOST_REGION_MEMORY_MAPPED_DEVICE:
97 case HOST_REGION_REMOTE:
98 case HOST_REGION_SWAPPED:
100 // Panic. Currently unhandled
// Small-page PDE: clone the guest's second-level page table into a
// newly allocated shadow page table, entry by entry.
105 vmm_pte_t * guest_pte;
106 vmm_pte_t * shadow_pte;
108 addr_t guest_pte_host_addr;
109 shadow_region_t * ent;
111 // small page - set PDE and follow down to the child table
112 shadow_pde[i] = guest_pde[i];
114 guest_addr = PAGE_ADDR(guest_pde[i].pt_base_addr);
116 // Allocate a new second level page table for the shadow
117 shadow_pte = os_hooks->allocate_pages(1);
119 // make our first level page table in the shadow point to it
120 shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(shadow_pte);
122 ent = get_shadow_region_by_addr(mem_map, guest_addr);
125 /* JRL: This is bad.... */
126 // For now the guest Page Table must always be mapped to host physical memory
127 /* If we swap out a page table or if it isn't present for some reason, this turns real ugly */
129 if ((!ent) || (ent->host_type != HOST_REGION_PHYSICAL_MEMORY)) {
130 // FIXME Panic here - guest is trying to map to physical memory
131 // it does not own in any way!
135 // Address of the relevant second level page table in the guest
136 if (guest_paddr_to_host_paddr(ent, guest_addr, &guest_pte_host_addr)) {
142 // host_addr now contains the host physical address for the guest's 2nd level page table
143 // Now we transform it to relevant virtual address
144 guest_pte = os_hooks->paddr_to_vaddr((void *)guest_pte_host_addr);
146 // Now we walk through the second level guest page table
147 // and clone it into the shadow
148 for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
149 shadow_pte[j] = guest_pte[j];
// Same region lookup and host-type dispatch as the large-page case
// above, but operating on individual PTEs instead of the PDE.
151 addr_t guest_addr = PAGE_ADDR(shadow_pte[j].page_base_addr);
153 shadow_region_t * ent;
155 ent = get_shadow_region_by_addr(mem_map, guest_addr);
158 // FIXME Panic here - guest is trying to map to physical memory
159 // it does not own in any way!
163 switch (ent->host_type) {
164 case HOST_REGION_PHYSICAL_MEMORY:
168 // points into currently allocated physical memory, so we just
169 // set up the shadow to point to the mapped location
170 if (guest_paddr_to_host_paddr(ent, guest_addr, &host_addr)) {
175 shadow_pte[j].page_base_addr = PAGE_ALIGNED_ADDR(host_addr);
176 // FIXME set vmm_info bits here
179 case HOST_REGION_UNALLOCATED:
180 // points to physical memory that is *allowed* but that we
181 // have not yet allocated. We mark as not present and set a
182 // bit to remind us to allocate it later
183 shadow_pte[j].present = 0;
184 // FIXME Set vminfo bits here so that we know that we will be
185 // allocating it later
187 case HOST_REGION_NOTHING:
188 // points to physical memory that is NOT ALLOWED.
189 // We will mark it as not present and set a bit to remind
190 // us that it's bad later and insert a GPF then
191 shadow_pte[j].present = 0;
193 case HOST_REGION_MEMORY_MAPPED_DEVICE:
194 case HOST_REGION_REMOTE:
195 case HOST_REGION_SWAPPED:
197 // Panic. Currently unhandled