1 #include <geekos/vm_guest_mem.h>
2 #include <geekos/vmm.h>
3 #include <geekos/vmm_paging.h>
5 extern struct vmm_os_hooks * os_hooks;
/**********************************/
/* Host-only address conversions  */
/**********************************/
12 int host_va_to_host_pa(addr_t host_va, addr_t * host_pa) {
13 if ((os_hooks) && (os_hooks)->vaddr_to_paddr) {
15 *host_pa = (addr_t)(os_hooks)->vaddr_to_paddr((void *)host_va);
27 int host_pa_to_host_va(addr_t host_pa, addr_t * host_va) {
28 if ((os_hooks) && (os_hooks)->paddr_to_vaddr) {
30 *host_va = (addr_t)(os_hooks)->paddr_to_vaddr((void *)host_pa);
43 int guest_pa_to_host_pa(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_pa) {
44 // we use the shadow map here...
45 if (lookup_shadow_map_addr(&(guest_info->mem_map), guest_pa, host_pa) != HOST_REGION_PHYSICAL_MEMORY) {
53 /* !! Currently not implemented !! */
54 // This is a scan of the shadow map
55 // For now we ignore it
57 int host_pa_to_guest_pa(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_pa) {
/**********************************/
/* Single-hop guest/host mixed    */
/* address conversions            */
/**********************************/
70 /* !! Currently not implemented !! */
71 // This will return negative until we implement host_pa_to_guest_pa()
72 int host_va_to_guest_pa(struct guest_info * guest_info, addr_t host_va, addr_t * guest_pa) {
76 if (host_va_to_host_pa(host_va, &host_pa) != 0) {
80 if (host_pa_to_guest_pa(guest_info, host_pa, guest_pa) != 0) {
90 int guest_pa_to_host_va(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_va) {
95 if (guest_pa_to_host_pa(guest_info, guest_pa, &host_pa) != 0) {
99 if (host_pa_to_host_va(host_pa, host_va) != 0) {
// Walks the guest's own page tables to turn a guest virtual address into a
// guest physical address. NOTE(review): this listing is elided — gaps in the
// embedded line numbers (e.g. 109->114, 142->152) show that case labels,
// local declarations (pde/pte/tmp_pa) and return statements are missing here.
107 int guest_va_to_guest_pa(struct guest_info * guest_info, addr_t guest_va, addr_t * guest_pa) {
108 if (guest_info->page_mode == SHADOW_PAGING) {
109 switch (guest_info->cpu_mode) {
    // Non-paged modes (presumably REAL/PROTECTED — the labels are elided):
114 // guest virtual address is the same as the physical
115 *guest_pa = guest_va;
    // 32-bit paged mode: locate the guest's page directory from its CR3.
121 addr_t guest_pde = CR3_TO_PDE32(guest_info->shdw_pg_state.guest_cr3.r_reg);
    // The PDE is at a guest physical address; map it into the host first.
123 if (guest_pa_to_host_va(guest_info, guest_pde, (addr_t *)&pde) == -1) {
    // tmp_pa receives either a large-page frame or the PTE table address,
    // depending on the (elided) pde32_lookup result cases.
127 switch (pde32_lookup(pde, guest_va, &tmp_pa)) {
138 if (guest_pa_to_host_va(guest_info, tmp_pa, (addr_t*)&pte) == -1) {
    // Final level: PTE lookup fills *guest_pa on success.
142 if (pte32_lookup(pte, guest_va, guest_pa) != 0) {
    // PAE and long-mode walks are not handled in this listing.
152 case PROTECTED_PAE_PG:
    // Nested paging branch — body elided here.
163 } else if (guest_info->page_mode == NESTED_PAGING) {
177 /* !! Currently not implemented !! */
178 /* This will be a real pain.... its your standard page table walker in guest memory
180 * For now we ignore it...
182 int guest_pa_to_guest_va(struct guest_info * guest_info, addr_t guest_pa, addr_t * guest_va) {
/**********************************/
/* Multi-hop conversions composed */
/* from the groups above          */
/**********************************/
193 int guest_va_to_host_pa(struct guest_info * guest_info, addr_t guest_va, addr_t * host_pa) {
198 if (guest_va_to_guest_pa(guest_info, guest_va, &guest_pa) != 0) {
202 if (guest_pa_to_host_pa(guest_info, guest_pa, host_pa) != 0) {
209 /* !! Currently not implemented !! */
210 int host_pa_to_guest_va(struct guest_info * guest_info, addr_t host_pa, addr_t * guest_va) {
215 if (host_pa_to_guest_pa(guest_info, host_pa, &guest_pa) != 0) {
219 if (guest_pa_to_guest_va(guest_info, guest_pa, guest_va) != 0) {
229 int guest_va_to_host_va(struct guest_info * guest_info, addr_t guest_va, addr_t * host_va) {
235 if (guest_va_to_guest_pa(guest_info, guest_va, &guest_pa) != 0) {
239 if (guest_pa_to_host_pa(guest_info, guest_pa, &host_pa) != 0) {
243 if (host_pa_to_host_va(host_pa, host_va) != 0) {
251 /* !! Currently not implemented !! */
252 int host_va_to_guest_va(struct guest_info * guest_info, addr_t host_va, addr_t * guest_va) {
258 if (host_va_to_host_pa(host_va, &host_pa) != 0) {
262 if (host_pa_to_guest_pa(guest_info, host_pa, &guest_pa) != 0) {
266 if (guest_pa_to_guest_va(guest_info, guest_pa, guest_va) != 0) {
278 /* This is a straight address conversion + copy,
279 * except for the tiny little issue of crossing page boundries.....
281 int read_guest_va_memory(struct guest_info * guest_info, addr_t guest_va, int count, char * dest) {
282 addr_t cursor = guest_va;
286 int dist_to_pg_edge = (PAGE_OFFSET(cursor) + PAGE_SIZE) - cursor;
287 int bytes_to_copy = (dist_to_pg_edge > count) ? count : dist_to_pg_edge;
290 if (guest_va_to_host_va(guest_info, cursor, &host_addr) != 0) {
294 memcpy(dest + bytes_read, (void*)host_addr, bytes_to_copy);
296 bytes_read += bytes_to_copy;
297 count -= bytes_to_copy;
298 cursor += bytes_to_copy;
309 /* This is a straight address conversion + copy,
310 * except for the tiny little issue of crossing page boundries.....
312 int read_guest_pa_memory(struct guest_info * guest_info, addr_t guest_pa, int count, char * dest) {
313 addr_t cursor = guest_pa;
317 int dist_to_pg_edge = (PAGE_OFFSET(cursor) + PAGE_SIZE) - cursor;
318 int bytes_to_copy = (dist_to_pg_edge > count) ? count : dist_to_pg_edge;
321 if (guest_pa_to_host_va(guest_info, cursor, &host_addr) != 0) {
325 memcpy(dest + bytes_read, (void*)host_addr, bytes_to_copy);
327 bytes_read += bytes_to_copy;
328 count -= bytes_to_copy;
329 cursor += bytes_to_copy;
// Writes `count` bytes from `src` into guest-physical memory, chunked so no
// single memcpy crosses a page boundary. NOTE(review): this listing is elided
// and the function continues past the visible end — the loop header, the
// host_addr declaration, the error branch body and the final return are not
// shown here (gaps in the embedded line numbers, e.g. 343->346, 350->354).
338 /* This is a straight address conversion + copy,
339  * except for the tiny little issue of crossing page boundries.....
341 int write_guest_pa_memory(struct guest_info * guest_info, addr_t guest_pa, int count, char * src) {
342 addr_t cursor = guest_pa;
343 int bytes_written = 0;
    // Per-chunk: copy at most up to the end of the current page.
    // NOTE(review): assumes PAGE_OFFSET() yields the page-aligned base of
    // cursor — confirm against vmm_paging.h.
346 int dist_to_pg_edge = (PAGE_OFFSET(cursor) + PAGE_SIZE) - cursor;
347 int bytes_to_copy = (dist_to_pg_edge > count) ? count : dist_to_pg_edge;
    // Translate the guest-physical cursor to a host va before copying.
350 if (guest_pa_to_host_va(guest_info, cursor, &host_addr) != 0) {
354 memcpy((void*)host_addr, src + bytes_written, bytes_to_copy);
356 bytes_written += bytes_to_copy;
357 count -= bytes_to_copy;
358 cursor += bytes_to_copy;