--- /dev/null
+#ifndef __SVM_CTRL_REGS_H
+#define __SVM_CTRL_REGS_H
+
+#include <geekos/vm_guest.h>
+#include <geekos/vmm_util.h>
+
+
+// First opcode byte
+static const uchar_t cr_access_byte = 0x0f;
+
+// Second opcode byte
+static const uchar_t lmsw_byte = 0x01;
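+// (LMSW and SMSW share the 0x0f 0x01 opcode; they are distinguished by the
+//  reg field of the ModRM byte: /6 selects LMSW, /4 selects SMSW)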
+static const uchar_t smsw_byte = 0x01;
+static const uchar_t clts_byte = 0x06;
+static const uchar_t mov_to_cr_byte = 0x22;
+static const uchar_t mov_from_cr_byte = 0x20;
+
+
+
+int handle_cr0_write(guest_info_t * info, ullong_t * new_cr0);
+
+
+
+
+#endif
#include <geekos/vmm_mem.h>
#include <geekos/ktypes.h>
#include <geekos/vmm_io.h>
-#include <geekos/vmm_paging.h>
+//#include <geekos/vmm_paging.h>
+#include <geekos/vmm_shadow_paging.h>
struct guest_gprs {
-
-
#endif
--- /dev/null
+#ifndef __VM_GUEST_MEM_H
+#define __VM_GUEST_MEM_H
+
+#include <geekos/vm_guest.h>
+#include <geekos/vmm_mem.h>
+
+
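+/* These functions translate between the four address spaces involved in
+ * virtualization: guest virtual (guest_va), guest physical (guest_pa),
+ * host virtual (host_va), and host physical (host_pa). The compound
+ * translations chain the simple ones, e.g. guest_va -> guest_pa ->
+ * host_pa -> host_va. All return 0 on success and -1 on a failed translation.
+ */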
+int guest_va_to_guest_pa(guest_info_t * guest_info, addr_t guest_va, addr_t * guest_pa);
+int guest_pa_to_guest_va(guest_info_t * guest_info, addr_t guest_pa, addr_t * guest_va);
+int guest_va_to_host_va(guest_info_t * guest_info, addr_t guest_va, addr_t * host_va);
+int guest_pa_to_host_pa(guest_info_t * guest_info, addr_t guest_pa, addr_t * host_pa);
+int guest_pa_to_host_va(guest_info_t * guest_info, addr_t guest_pa, addr_t * host_va);
+
+int host_va_to_guest_pa(guest_info_t * guest_info, addr_t host_va, addr_t * guest_pa);
+int host_pa_to_guest_va(guest_info_t * guest_info, addr_t host_pa, addr_t * guest_va);
+
+int host_va_to_host_pa(addr_t host_va, addr_t * host_pa);
+int host_pa_to_host_va(addr_t host_pa, addr_t * host_va);
+
+
+
+int read_guest_va_memory(guest_info_t * guest_info, addr_t guest_va, int count, char * dest);
+int read_guest_pa_memory(guest_info_t * guest_info, addr_t guest_pa, int count, char * dest);
+
+
+
+#endif
--- /dev/null
+#ifndef __VMM_EMULATE_H
+#define __VMM_EMULATE_H
+
+
+/* JRL: Most of this was taken from the Xen sources...
+ *
+ */
+
+
+#define MAKE_INSTR(nm, ...) static const uchar_t OPCODE_##nm[] = { __VA_ARGS__ }
+
+/*
+ * Here's how it works:
+ * First byte: Length.
+ * Following bytes: Opcode bytes.
+ * Special case: Last byte, if zero, doesn't need to match.
+ */
+MAKE_INSTR(INVD, 2, 0x0f, 0x08);
+MAKE_INSTR(CPUID, 2, 0x0f, 0xa2);
+MAKE_INSTR(RDMSR, 2, 0x0f, 0x32);
+MAKE_INSTR(WRMSR, 2, 0x0f, 0x30);
+MAKE_INSTR(RDTSC, 2, 0x0f, 0x31);
+MAKE_INSTR(RDTSCP, 3, 0x0f, 0x01, 0xf9);
+MAKE_INSTR(CLI, 1, 0xfa);
+MAKE_INSTR(STI, 1, 0xfb);
+MAKE_INSTR(RDPMC, 2, 0x0f, 0x33);
+MAKE_INSTR(CLGI, 3, 0x0f, 0x01, 0xdd);
+MAKE_INSTR(STGI, 3, 0x0f, 0x01, 0xdc);
+MAKE_INSTR(VMRUN, 3, 0x0f, 0x01, 0xd8);
+MAKE_INSTR(VMLOAD, 3, 0x0f, 0x01, 0xda);
+MAKE_INSTR(VMSAVE, 3, 0x0f, 0x01, 0xdb);
+MAKE_INSTR(VMCALL, 3, 0x0f, 0x01, 0xd9);
+MAKE_INSTR(PAUSE, 2, 0xf3, 0x90);
+MAKE_INSTR(SKINIT, 3, 0x0f, 0x01, 0xde);
+MAKE_INSTR(MOV2CR, 3, 0x0f, 0x22, 0x00);
+MAKE_INSTR(MOVCR2, 3, 0x0f, 0x20, 0x00);
+MAKE_INSTR(MOV2DR, 3, 0x0f, 0x23, 0x00);
+MAKE_INSTR(MOVDR2, 3, 0x0f, 0x21, 0x00);
+MAKE_INSTR(PUSHF, 1, 0x9c);
+MAKE_INSTR(POPF, 1, 0x9d);
+MAKE_INSTR(RSM, 2, 0x0f, 0xaa);
+MAKE_INSTR(INVLPG, 3, 0x0f, 0x01, 0x00);
+MAKE_INSTR(INVLPGA, 3, 0x0f, 0x01, 0xdf);
+MAKE_INSTR(HLT, 1, 0xf4);
+MAKE_INSTR(CLTS, 2, 0x0f, 0x06);
+MAKE_INSTR(LMSW, 3, 0x0f, 0x01, 0x00);
+MAKE_INSTR(SMSW, 3, 0x0f, 0x01, 0x00);
+
+
+
+static inline int is_prefix_byte(uchar_t byte) {
+ switch (byte) {
+ case 0xF0: // lock
+ case 0xF2: // REPNE/REPNZ
+ case 0xF3: // REP or REPE/REPZ
+ case 0x2E: // CS override or Branch hint not taken (with Jcc instrs)
+ case 0x36: // SS override
+ case 0x3E: // DS override or Branch hint taken (with Jcc instrs)
+ case 0x26: // ES override
+ case 0x64: // FS override
+ case 0x65: // GS override
+ case 0x66: // operand size override
+ case 0x67: // address size override
+ return 1;
+ break;
+ default:
+ return 0;
+ break;
+ }
+}
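+
+/* A minimal sketch (not taken from the Xen sources above) of how the
+ * length-prefixed opcode tables are meant to be consumed: after any prefix
+ * bytes have been skipped with is_prefix_byte(), compare the instruction
+ * stream against OPCODE_xxx[1..len], treating a 0x00 in the final position
+ * as a wildcard (e.g. the ModRM byte of MOV2CR).
+ */
+static inline int is_instr(const uchar_t * instr, const uchar_t * opcode) {
+  int len = opcode[0];
+  int i;
+
+  for (i = 0; i < len; i++) {
+    // special case: a zero in the last opcode byte matches any byte
+    if ((i == (len - 1)) && (opcode[i + 1] == 0x00)) {
+      continue;
+    }
+
+    if (instr[i] != opcode[i + 1]) {
+      return 0;
+    }
+  }
+
+  return 1;
+}
+
+// Usage (hypothetical): if (is_instr(instr_ptr, OPCODE_LMSW)) { ... }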
+
+
+
+
+
+
+#endif
// These are the types of physical memory address regions
// from the perspective of the guest
typedef enum guest_region_type {
- GUEST_REGION_PHYSICAL_MEMORY,
GUEST_REGION_NOTHING,
+ GUEST_REGION_PHYSICAL_MEMORY,
GUEST_REGION_MEMORY_MAPPED_DEVICE} guest_region_type_t;
// These are the types of physical memory address regions
// from the perspective of the HOST
typedef enum host_region_type {
- HOST_REGION_PHYSICAL_MEMORY,
- HOST_REGION_UNALLOCATED,
- HOST_REGION_NOTHING,
- HOST_REGION_MEMORY_MAPPED_DEVICE,
- HOST_REGION_REMOTE,
- HOST_REGION_SWAPPED,
+ HOST_REGION_INVALID, // This region is INVALID (this is a return type, to denote errors)
+ HOST_REGION_NOTHING, // This region is mapped as not present (always generate page faults)
+ HOST_REGION_PHYSICAL_MEMORY, // Region is a section of host memory
+ HOST_REGION_MEMORY_MAPPED_DEVICE, // Region is allocated for DMA
+ HOST_REGION_UNALLOCATED, // Region is mapped on demand
+ HOST_REGION_REMOTE, // Region is located on a remote machine
+ HOST_REGION_SWAPPED, // Region is swapped
} host_region_type_t;
shadow_region_t * get_shadow_region_by_index(shadow_map_t * map, uint_t index);
+/*
int guest_paddr_to_host_paddr(shadow_region_t * entry,
addr_t guest_addr,
addr_t * host_addr);
+*/
+
+host_region_type_t lookup_shadow_map_addr(shadow_map_t * map, addr_t guest_addr, addr_t * host_addr);
// Semantics:
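+// lookup_shadow_map_addr returns the host_region_type_t of the region
+// containing guest_addr (HOST_REGION_INVALID if no region matches), and
+// fills in *host_addr only for HOST_REGION_PHYSICAL_MEMORY regions.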
-
#endif
#include <geekos/ktypes.h>
-
+#include <geekos/vm_guest.h>
#include <geekos/vmm_mem.h>
#include <geekos/vmm_util.h>
-#define MAX_PAGE_TABLE_ENTRIES 1024
-#define MAX_PAGE_DIR_ENTRIES 1024
+#define MAX_PTE32_ENTRIES 1024
+#define MAX_PDE32_ENTRIES 1024
+
+#define MAX_PTE64_ENTRIES 512
+#define MAX_PDE64_ENTRIES 512
+#define MAX_PDPE64_ENTRIES 512
+#define MAX_PML4E64_ENTRIES 512
-#define MAX_PAGE_TABLE_ENTRIES_64 512
-#define MAX_PAGE_DIR_ENTRIES_64 512
-#define MAX_PAGE_DIR_PTR_ENTRIES_64 512
-#define MAX_PAGE_MAP_ENTRIES_64 512
+#define PDE32_INDEX(x) ((((uint_t)x) >> 22) & 0x3ff)
+#define PTE32_INDEX(x) ((((uint_t)x) >> 12) & 0x3ff)
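+// A 32-bit virtual address decomposes as
+//   [31:22] PDE index | [21:12] PTE index | [11:0] page offset
+// e.g. for x = 0x00403025: PDE32_INDEX(x) = 1, PTE32_INDEX(x) = 3,
+// and PAGE_OFFSET(x) = 0x025.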
-#define PAGE_DIRECTORY_INDEX(x) ((((uint_t)x) >> 22) & 0x3ff)
-#define PAGE_TABLE_INDEX(x) ((((uint_t)x) >> 12) & 0x3ff)
-#define PAGE_OFFSET(x) ((((uint_t)x) & 0xfff))
#define PAGE_ALIGNED_ADDR(x) (((uint_t) (x)) >> 12)
#ifndef PAGE_ADDR
#define PAGE_ADDR(x) (PAGE_ALIGNED_ADDR(x) << 12)
#endif
+#define PAGE_OFFSET(x) ((((uint_t)x) & 0xfff))
#define PAGE_POWER 12
-#define CR3_TO_PDE(cr3) (((ulong_t)cr3) & 0xfffff000)
+#define CR3_TO_PDE32(cr3) (((ulong_t)cr3) & 0xfffff000)
#define CR3_TO_PDPTRE(cr3) (((ulong_t)cr3) & 0xffffffe0)
-#define CR3_TO_PML4E(cr3) (((ullong_t)cr3) & 0x000ffffffffff000)
+#define CR3_TO_PML4E64(cr3) (((ullong_t)cr3) & 0x000ffffffffff000)
#define VM_WRITE 1
#define VM_USER 2
#define VM_EXEC 0
-typedef struct pde {
+/* PDE 32 bit PAGE STRUCTURES */
+typedef enum {NOT_PRESENT, PTE32, LARGE_PAGE} pde32_entry_type_t;
+
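+// (In both pde32_t and pte32_t below, the 4-bit flags field packs the
+//  R/W, U/S, PWT, and PCD bits, i.e. bits 1-4 of the hardware entry.)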
+typedef struct pde32 {
uint_t present : 1;
uint_t flags : 4;
uint_t accessed : 1;
uint_t global_page : 1;
uint_t vmm_info : 3;
uint_t pt_base_addr : 20;
-} vmm_pde_t;
+} pde32_t;
-typedef struct pte {
+typedef struct pte32 {
uint_t present : 1;
uint_t flags : 4;
uint_t accessed : 1;
uint_t global_page : 1;
uint_t vmm_info : 3;
uint_t page_base_addr : 20;
-} vmm_pte_t;
+} pte32_t;
+/* ***** */
+/* 32 bit PAE PAGE STRUCTURES */
+//
+// Fill in
+//
-typedef struct pte64 {
- uint_t present : 1;
- uint_t flags : 4;
- uint_t accessed : 1;
- uint_t dirty : 1;
- uint_t pte_attr : 1;
- uint_t global_page : 1;
- uint_t vmm_info : 3;
- uint_t page_base_addr_lo : 20;
- uint_t page_base_addr_hi : 20;
- uint_t available : 11;
- uint_t no_execute : 1;
-} pte64_t;
+/* ********** */
-typedef struct pde64 {
- uint_t present : 1;
- uint_t flags : 4;
- uint_t accessed : 1;
- uint_t reserved : 1;
- uint_t large_pages : 1;
- uint_t reserved2 : 1;
- uint_t vmm_info : 3;
- uint_t pt_base_addr_lo : 20;
- uint_t pt_base_addr_hi : 20;
- uint_t available : 11;
- uint_t no_execute : 1;
-} pde64_t;
-typedef struct pdpe64 {
+/* LONG MODE 64 bit PAGE STRUCTURES */
+typedef struct pml4e64 {
uint_t present : 1;
uint_t writable : 1;
uint_t user : 1;
uint_t pcd : 1;
uint_t accessed : 1;
uint_t reserved : 1;
- uint_t large_pages : 1;
- uint_t zero : 1;
+ uint_t zero : 2;
uint_t vmm_info : 3;
- uint_t pd_base_addr_lo : 20;
- uint_t pd_base_addr_hi : 20;
+ uint_t pdp_base_addr_lo : 20;
+ uint_t pdp_base_addr_hi : 20;
uint_t available : 11;
uint_t no_execute : 1;
-} pdpe64_t;
+} pml4e64_t;
-typedef struct pml4e {
+typedef struct pdpe64 {
uint_t present : 1;
uint_t writable : 1;
uint_t user : 1;
uint_t pcd : 1;
uint_t accessed : 1;
uint_t reserved : 1;
- uint_t zero : 2;
+ uint_t large_pages : 1;
+ uint_t zero : 1;
uint_t vmm_info : 3;
- uint_t pdp_base_addr_lo : 20;
- uint_t pdp_base_addr_hi : 20;
+ uint_t pd_base_addr_lo : 20;
+ uint_t pd_base_addr_hi : 20;
uint_t available : 11;
uint_t no_execute : 1;
-} pml4e64_t;
+} pdpe64_t;
+
+
+
+
+typedef struct pde64 {
+ uint_t present : 1;
+ uint_t flags : 4;
+ uint_t accessed : 1;
+ uint_t reserved : 1;
+ uint_t large_pages : 1;
+ uint_t reserved2 : 1;
+ uint_t vmm_info : 3;
+ uint_t pt_base_addr_lo : 20;
+ uint_t pt_base_addr_hi : 20;
+ uint_t available : 11;
+ uint_t no_execute : 1;
+} pde64_t;
+typedef struct pte64 {
+ uint_t present : 1;
+ uint_t flags : 4;
+ uint_t accessed : 1;
+ uint_t dirty : 1;
+ uint_t pte_attr : 1;
+ uint_t global_page : 1;
+ uint_t vmm_info : 3;
+ uint_t page_base_addr_lo : 20;
+ uint_t page_base_addr_hi : 20;
+ uint_t available : 11;
+ uint_t no_execute : 1;
+} pte64_t;
+
+/* *************** */
typedef enum { PDE32 } paging_mode_t;
-typedef struct shadow_page_state {
- // these two reflect the top-level page directory
- // of the guest page table
- paging_mode_t guest_mode;
- reg_ex_t guest_cr3; // points to guest's current page table
+pde32_t * create_passthrough_pde32_pts(guest_info_t * guest_info);
+
+
+void delete_page_tables_pde32(pde32_t * pde);
+
+
+pde32_entry_type_t pde32_lookup(pde32_t * pde, addr_t addr, addr_t * entry);
+int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry);
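+
+// pde32_lookup fills *entry with either the base address of the second
+// level page table (returning PTE32) or the translated address itself
+// (returning LARGE_PAGE); it returns NOT_PRESENT for unmapped PDEs.
+// pte32_lookup fills *entry with the translated address and returns 0,
+// or returns -1 if the PTE is not present.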
- // Should thi sbe here
- reg_ex_t guest_cr0;
- // these two reflect the top-level page directory
- // the shadow page table
- paging_mode_t shadow_mode;
- reg_ex_t shadow_cr3;
-} shadow_page_state_t;
-int init_shadow_page_state(shadow_page_state_t * state);
-// This function will cause the shadow page table to be deleted
-// and rewritten to reflect the guest page table and the shadow map
-int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t * mem_map);
-vmm_pde_t * create_passthrough_pde32_pts(shadow_map_t * map);
+void PrintDebugPageTables(pde32_t * pde);
-//void free_guest_page_tables(vmm_pde_t * pde);
-void PrintDebugPageTables(vmm_pde_t * pde);
#endif
--- /dev/null
+#ifndef __VMM_SHADOW_PAGING_H
+#define __VMM_SHADOW_PAGING_H
+
+
+#include <geekos/vmm_paging.h>
+
+#include <geekos/vmm_util.h>
+
+typedef struct shadow_page_state {
+
+ // these two reflect the top-level page directory
+ // of the guest page table
+ paging_mode_t guest_mode;
+ reg_ex_t guest_cr3; // points to guest's current page table
+
+  // Should this be here?
+ reg_ex_t guest_cr0;
+
+ // these two reflect the top-level page directory
+ // the shadow page table
+ paging_mode_t shadow_mode;
+ reg_ex_t shadow_cr3;
+
+
+} shadow_page_state_t;
+
+
+
+int init_shadow_page_state(shadow_page_state_t * state);
+
+// This function will cause the shadow page table to be deleted
+// and rewritten to reflect the guest page table and the shadow map
+int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t * mem_map);
+
+
+
+
+#endif
* Copyright (c) 2001,2003,2004 David H. Hovemeyer <daveho@cs.umd.edu>
* Copyright (c) 2003, Jeffrey K. Hollingsworth <hollings@cs.umd.edu>
* Copyright (c) 2004, Iulian Neamtiu <neamtiu@cs.umd.edu>
- * $Revision: 1.23 $
+ * $Revision: 1.24 $
*
* This is free software. You are permitted to use,
* redistribute, and modify it as specified in the file "COPYING".
#include <geekos/debug.h>
#include <geekos/vmm.h>
+
#include <geekos/gdt.h>
--- /dev/null
+#include <geekos/svm_ctrl_regs.h>
+#include <geekos/vmm_mem.h>
+#include <geekos/vmm.h>
+#include <geekos/vmcb.h>
+#include <geekos/vmm_emulate.h>
+
+
+int handle_cr0_write(guest_info_t * info, ullong_t * new_cr0) {
+ // vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA((vmcb_t *)(info->vmm_data));
+ //vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA((vmcb_t*)(info->vmm_data));
+
+
+
+ /*
+
+ if (info->cpu_mode == REAL) {
+ addr_t host_addr;
+ shadow_region_t * region = get_shadow_region_by_addr(&(info->mem_map), (addr_t)(info->rip));
+ if (!region || (region->host_type != HOST_REGION_PHYSICAL_MEMORY)) {
+ //PANIC
+ return -1;
+ }
+
+ guest_paddr_to_host_paddr(region, (addr_t)(info->rip), &host_addr);
+ // pa to va
+
+
+ PrintDebug("Instr: %.4x\n", *(ushort_t*)host_addr);
+
+ if ((*(ushort_t*)host_addr) == LMSW_EAX) {
+ PrintDebug("lmsw from eax (0x%x)\n", guest_state->rax);
+ }
+ }*/
+ return 0;
+}
+
+
#include <geekos/svm_ctrl_regs.h>
-
int handle_svm_exit(guest_info_t * info) {
vmcb_ctrl_t * guest_ctrl = 0;
vmcb_saved_state_t * guest_state = 0;
--- /dev/null
+#include <geekos/vm_guest_mem.h>
+
+#include <geekos/vmm_paging.h>
+
+extern struct vmm_os_hooks * os_hooks;
+
+
+
+int guest_va_to_guest_pa(guest_info_t * guest_info, addr_t guest_va, addr_t * guest_pa) {
+ if (guest_info->page_mode == SHADOW_PAGING) {
+ switch (guest_info->cpu_mode) {
+ case REAL:
+ case PROTECTED:
+ case LONG:
+ case PROTECTED_PAE:
+ // guest virtual address is the same as the physical
+ *guest_pa = guest_va;
+ return 0;
+ case PROTECTED_PG:
+ {
+ addr_t tmp_pa;
+ pde32_t * pde;
+	addr_t guest_pde = CR3_TO_PDE32(guest_info->shadow_page_state.guest_cr3.e_reg.low);
+
+ if (guest_pa_to_host_va(guest_info, guest_pde, (addr_t *)&pde) == -1) {
+ return -1;
+ }
+
+ switch (pde32_lookup(pde, guest_va, &tmp_pa)) {
+ case NOT_PRESENT:
+	  *guest_pa = 0;
+ return -1;
+ case LARGE_PAGE:
+ *guest_pa = tmp_pa;
+ return 0;
+ case PTE32:
+ {
+ pte32_t * pte;
+
+ if (guest_pa_to_host_va(guest_info, tmp_pa, (addr_t*)&pte) == -1) {
+ return -1;
+ }
+
+ if (pte32_lookup(pte, guest_va, guest_pa) != 0) {
+ return -1;
+ }
+
+ return 0;
+ }
+ default:
+ return -1;
+ }
+ }
+ case PROTECTED_PAE_PG:
+ {
+ // Fill in
+ }
+ case LONG_PG:
+ {
+ // Fill in
+ }
+ default:
+ return -1;
+ }
+ } else if (guest_info->page_mode == NESTED_PAGING) {
+
+ // Fill in
+
+ } else {
+ return -1;
+ }
+
+
+ return 0;
+}
+
+
+
+
+
+
+
+int guest_pa_to_host_va(guest_info_t * guest_info, addr_t guest_pa, addr_t * host_va) {
+ addr_t host_pa;
+
+ if (guest_pa_to_host_pa(guest_info, guest_pa, &host_pa) != 0) {
+ return -1;
+ }
+
+ if (host_pa_to_host_va(host_pa, host_va) != 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+
+int guest_pa_to_host_pa(guest_info_t * guest_info, addr_t guest_pa, addr_t * host_pa) {
+ // we use the shadow map here...
+  if (lookup_shadow_map_addr(&(guest_info->mem_map), guest_pa, host_pa) != HOST_REGION_PHYSICAL_MEMORY) {
+ return -1;
+ }
+
+ return 0;
+}
+
+
+
+
+int host_va_to_host_pa(addr_t host_va, addr_t * host_pa) {
+ *host_pa = os_hooks->vaddr_to_paddr(host_va);
+
+ if (*host_pa == 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+
+int host_pa_to_host_va(addr_t host_pa, addr_t * host_va) {
+ *host_va = os_hooks->paddr_to_vaddr(host_pa);
+
+ if (*host_va == 0) {
+ return -1;
+ }
+
+ return 0;
+}
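+
+
+/* A hedged sketch of read_guest_pa_memory (declared in vm_guest_mem.h):
+ * copy count bytes starting at guest_pa into dest, one page at a time,
+ * since contiguous guest physical pages need not map to contiguous host
+ * memory. Returns the number of bytes copied, or -1 on a failed
+ * translation. Assumes a memcpy() implementation is available
+ * (e.g. from <geekos/string.h>).
+ */
+int read_guest_pa_memory(guest_info_t * guest_info, addr_t guest_pa, int count, char * dest) {
+  int bytes_read = 0;
+
+  while (bytes_read < count) {
+    addr_t cur_pa = guest_pa + bytes_read;
+    addr_t host_va;
+    int page_left = PAGE_SIZE - PAGE_OFFSET(cur_pa);
+    int to_copy = ((count - bytes_read) < page_left) ? (count - bytes_read) : page_left;
+
+    // translate this page's guest physical address into a host virtual one
+    if (guest_pa_to_host_va(guest_info, cur_pa, &host_va) != 0) {
+      return -1;
+    }
+
+    memcpy(dest + bytes_read, (void *)host_va, to_copy);
+    bytes_read += to_copy;
+  }
+
+  return bytes_read;
+}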
+host_region_type_t lookup_shadow_map_addr(shadow_map_t * map, addr_t guest_addr, addr_t * host_addr) {
+ shadow_region_t * reg = get_shadow_region_by_addr(map, guest_addr);
+
+ if (!reg) {
+ // No mapping exists
+ return HOST_REGION_INVALID;
+ } else {
+ switch (reg->host_type) {
+ case HOST_REGION_PHYSICAL_MEMORY:
+ *host_addr = (guest_addr - reg->guest_start) + reg->host_addr.phys_addr.host_start;
+ return reg->host_type;
+ case HOST_REGION_MEMORY_MAPPED_DEVICE:
+ case HOST_REGION_UNALLOCATED:
+ // ...
+ default:
+ *host_addr = 0;
+ return reg->host_type;
+ }
+ }
+}
+
+/*
int guest_paddr_to_host_paddr(shadow_region_t * entry,
addr_t guest_addr,
addr_t * host_addr) {
}
}
+*/
+
void print_shadow_map(shadow_map_t * map) {
shadow_region_t * cur = map->head;
#include <geekos/vmm.h>
+#include <geekos/vm_guest_mem.h>
extern struct vmm_os_hooks * os_hooks;
-void delete_page_tables_pde32(vmm_pde_t * pde) {
+void delete_page_tables_pde32(pde32_t * pde) {
int i, j;
if (pde == NULL) {
return;
}
- for (i = 0; (i < MAX_PAGE_DIR_ENTRIES); i++) {
+ for (i = 0; (i < MAX_PDE32_ENTRIES); i++) {
if (pde[i].present) {
- vmm_pte_t * pte = (vmm_pte_t *)(pde[i].pt_base_addr << PAGE_POWER);
+ pte32_t * pte = (pte32_t *)(pde[i].pt_base_addr << PAGE_POWER);
- for (j = 0; (j < MAX_PAGE_TABLE_ENTRIES); j++) {
+ for (j = 0; (j < MAX_PTE32_ENTRIES); j++) {
if ((pte[j].present)) {
os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
}
}
-int init_shadow_page_state(shadow_page_state_t * state) {
- state->guest_mode = PDE32;
- state->shadow_mode = PDE32;
-
- state->guest_cr3.r_reg = 0;
- state->shadow_cr3.r_reg = 0;
-
- return 0;
-}
-
-
-int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t * mem_map) {
- unsigned i, j;
- vmm_pde_t * guest_pde;
- vmm_pde_t * shadow_pde;
-
-
- // For now, we'll only work with PDE32
- if (state->guest_mode != PDE32) {
- return -1;
- }
-
-
-
- shadow_pde = (vmm_pde_t *)(CR3_TO_PDE(state->shadow_cr3.e_reg.low));
- guest_pde = (vmm_pde_t *)(os_hooks->paddr_to_vaddr((void*)CR3_TO_PDE(state->guest_cr3.e_reg.low)));
-
- // Delete the current page table
- delete_page_tables_pde32(shadow_pde);
-
- shadow_pde = os_hooks->allocate_pages(1);
-
-
- state->shadow_cr3.e_reg.low = (addr_t)shadow_pde;
-
- state->shadow_mode = PDE32;
-
-
- for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
- shadow_pde[i] = guest_pde[i];
-
- // The shadow can be identical to the guest if it's not present
- if (!shadow_pde[i].present) {
- continue;
- }
-
- if (shadow_pde[i].large_pages) {
- // large page - just map it through shadow map to generate its physical location
- addr_t guest_addr = PAGE_ADDR(shadow_pde[i].pt_base_addr);
- addr_t host_addr;
- shadow_region_t * ent;
-
- ent = get_shadow_region_by_addr(mem_map, guest_addr);
-
- if (!ent) {
- // FIXME Panic here - guest is trying to map to physical memory
- // it does not own in any way!
- return -1;
- }
-
- // FIXME Bounds check here to see if it's trying to trick us
-
- switch (ent->host_type) {
- case HOST_REGION_PHYSICAL_MEMORY:
- // points into currently allocated physical memory, so we just
- // set up the shadow to point to the mapped location
- if (guest_paddr_to_host_paddr(ent, guest_addr, &host_addr)) {
- // Panic here
- return -1;
- }
-
- shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(host_addr);
- // FIXME set vmm_info bits here
- break;
- case HOST_REGION_UNALLOCATED:
- // points to physical memory that is *allowed* but that we
- // have not yet allocated. We mark as not present and set a
- // bit to remind us to allocate it later
- shadow_pde[i].present = 0;
- // FIXME Set vminfo bits here so that we know that we will be
- // allocating it later
- break;
- case HOST_REGION_NOTHING:
- // points to physical memory that is NOT ALLOWED.
- // We will mark it as not present and set a bit to remind
- // us that it's bad later and insert a GPF then
- shadow_pde[i].present = 0;
- break;
- case HOST_REGION_MEMORY_MAPPED_DEVICE:
- case HOST_REGION_REMOTE:
- case HOST_REGION_SWAPPED:
- default:
- // Panic. Currently unhandled
- return -1;
- break;
- }
- } else {
- vmm_pte_t * guest_pte;
- vmm_pte_t * shadow_pte;
- addr_t guest_addr;
- addr_t guest_pte_host_addr;
- shadow_region_t * ent;
-
- // small page - set PDE and follow down to the child table
- shadow_pde[i] = guest_pde[i];
-
- guest_addr = PAGE_ADDR(guest_pde[i].pt_base_addr);
-
- // Allocate a new second level page table for the shadow
- shadow_pte = os_hooks->allocate_pages(1);
-
- // make our first level page table in the shadow point to it
- shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(shadow_pte);
-
- ent = get_shadow_region_by_addr(mem_map, guest_addr);
-
-
- /* JRL: This is bad.... */
- // For now the guest Page Table must always be mapped to host physical memory
- /* If we swap out a page table or if it isn't present for some reason, this turns real ugly */
-
- if ((!ent) || (ent->host_type != HOST_REGION_PHYSICAL_MEMORY)) {
- // FIXME Panic here - guest is trying to map to physical memory
- // it does not own in any way!
- return -1;
- }
-
- // Address of the relevant second level page table in the guest
- if (guest_paddr_to_host_paddr(ent, guest_addr, &guest_pte_host_addr)) {
- // Panic here
- return -1;
- }
-
-
- // host_addr now contains the host physical address for the guest's 2nd level page table
- // Now we transform it to relevant virtual address
- guest_pte = os_hooks->paddr_to_vaddr((void *)guest_pte_host_addr);
-
- // Now we walk through the second level guest page table
- // and clone it into the shadow
- for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
- shadow_pte[j] = guest_pte[j];
-
- addr_t guest_addr = PAGE_ADDR(shadow_pte[j].page_base_addr);
-
- shadow_region_t * ent;
-
- ent = get_shadow_region_by_addr(mem_map, guest_addr);
-
- if (!ent) {
- // FIXME Panic here - guest is trying to map to physical memory
- // it does not own in any way!
- return -1;
- }
-
- switch (ent->host_type) {
- case HOST_REGION_PHYSICAL_MEMORY:
- {
- addr_t host_addr;
-
- // points into currently allocated physical memory, so we just
- // set up the shadow to point to the mapped location
- if (guest_paddr_to_host_paddr(ent, guest_addr, &host_addr)) {
- // Panic here
- return -1;
- }
-
- shadow_pte[j].page_base_addr = PAGE_ALIGNED_ADDR(host_addr);
- // FIXME set vmm_info bits here
- break;
- }
- case HOST_REGION_UNALLOCATED:
- // points to physical memory that is *allowed* but that we
- // have not yet allocated. We mark as not present and set a
- // bit to remind us to allocate it later
- shadow_pte[j].present = 0;
- // FIXME Set vminfo bits here so that we know that we will be
- // allocating it later
- break;
- case HOST_REGION_NOTHING:
- // points to physical memory that is NOT ALLOWED.
- // We will mark it as not present and set a bit to remind
- // us that it's bad later and insert a GPF then
- shadow_pte[j].present = 0;
- break;
- case HOST_REGION_MEMORY_MAPPED_DEVICE:
- case HOST_REGION_REMOTE:
- case HOST_REGION_SWAPPED:
- default:
- // Panic. Currently unhandled
- return -1;
- break;
- }
- }
- }
- }
- return 0;
-}
-
-
-
/* We generate a page table to correspond to a given memory layout
* pulling pages from the mem_list when necessary
* If there are any gaps in the layout, we add them as unmapped pages
*/
-vmm_pde_t * create_passthrough_pde32_pts(shadow_map_t * map) {
+pde32_t * create_passthrough_pde32_pts(guest_info_t * guest_info) {
ullong_t current_page_addr = 0;
int i, j;
+  shadow_map_t * map = &(guest_info->mem_map);
- vmm_pde_t * pde = os_hooks->allocate_pages(1);
+ pde32_t * pde = os_hooks->allocate_pages(1);
- for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
+ for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
int pte_present = 0;
- vmm_pte_t * pte = os_hooks->allocate_pages(1);
+ pte32_t * pte = os_hooks->allocate_pages(1);
- for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
+ for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
shadow_region_t * region = get_shadow_region_by_addr(map, current_page_addr);
if (!region ||
pte[j].global_page = 0;
pte[j].vmm_info = 0;
- if (guest_paddr_to_host_paddr(region, current_page_addr, &host_addr) == -1) {
+ if (guest_pa_to_host_pa(guest_info, current_page_addr, &host_addr) == -1) {
// BIG ERROR
// PANIC
return NULL;
+/* We can't do a full lookup because we don't know what context the page tables are in...
+ * The entry addresses could be pointing to either guest physical memory or host physical memory
+ * Instead we just return the entry address, and a flag to show if it points to a pte or a large page...
+ */
+pde32_entry_type_t pde32_lookup(pde32_t * pde, addr_t addr, addr_t * entry) {
+  pde32_t * pde_entry = &(pde[PDE32_INDEX(addr)]);
+
+ if (!pde_entry->present) {
+ *entry = 0;
+ return NOT_PRESENT;
+ } else {
+    *entry = (pde_entry->pt_base_addr << PAGE_POWER);
+
+ if (pde_entry->large_pages) {
+ *entry += PAGE_OFFSET(addr);
+ return LARGE_PAGE;
+ } else {
+ return PTE32;
+ }
+ }
+ return NOT_PRESENT;
+}
+
+
+int pte32_lookup(pte32_t * pte, addr_t addr, addr_t * entry) {
+  pte32_t * pte_entry = &(pte[PTE32_INDEX(addr)]);
+
+ if (!pte_entry->present) {
+ *entry = 0;
+ return -1;
+ } else {
+    *entry = (pte_entry->page_base_addr << PAGE_POWER);
+ *entry += PAGE_OFFSET(addr);
+ return 0;
+ }
-void PrintPDE(void * virtual_address, vmm_pde_t * pde)
+ return -1;
+}
+
+
+
+
+
+
+
+
+
+void PrintPDE32(void * virtual_address, pde32_t * pde)
{
PrintDebug("PDE %p -> %p : present=%x, flags=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
virtual_address,
pde->vmm_info);
}
-void PrintPTE(void * virtual_address, vmm_pte_t * pte)
+void PrintPTE32(void * virtual_address, pte32_t * pte)
{
PrintDebug("PTE %p -> %p : present=%x, flags=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
virtual_address,
-void PrintPD(vmm_pde_t * pde)
+void PrintPD32(pde32_t * pde)
{
int i;
PrintDebug("Page Directory at %p:\n", pde);
- for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) {
- PrintPDE((void*)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
+ for (i = 0; (i < MAX_PDE32_ENTRIES) && pde[i].present; i++) {
+ PrintPDE32((void*)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
}
}
-void PrintPT(void * starting_address, vmm_pte_t * pte)
+void PrintPT32(void * starting_address, pte32_t * pte)
{
int i;
PrintDebug("Page Table at %p:\n", pte);
- for (i = 0; (i < MAX_PAGE_TABLE_ENTRIES) && pte[i].present; i++) {
- PrintPTE(starting_address + (PAGE_SIZE * i), &(pte[i]));
+ for (i = 0; (i < MAX_PTE32_ENTRIES) && pte[i].present; i++) {
+ PrintPTE32(starting_address + (PAGE_SIZE * i), &(pte[i]));
}
}
-void PrintDebugPageTables(vmm_pde_t * pde)
+void PrintDebugPageTables(pde32_t * pde)
{
int i;
PrintDebug("Dumping the pages starting with the pde page at %p\n", pde);
- for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) {
- PrintPDE((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
- PrintPT((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), (void *)(pde[i].pt_base_addr << PAGE_POWER));
+ for (i = 0; (i < MAX_PDE32_ENTRIES) && pde[i].present; i++) {
+ PrintPDE32((void *)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), &(pde[i]));
+ PrintPT32((void *)(PAGE_SIZE * MAX_PTE32_ENTRIES * i), (void *)(pde[i].pt_base_addr << PAGE_POWER));
}
}
-
-
-#if 0
-
-pml4e64_t * generate_guest_page_tables_64(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
- pml4e64_t * pml = os_hooks->allocate_pages(1);
- int i, j, k, m;
- ullong_t current_page_addr = 0;
- uint_t layout_index = 0;
- uint_t list_index = 0;
- ullong_t layout_addr = 0;
- uint_t num_entries = layout->num_pages; // The number of pages left in the layout
-
- for (m = 0; m < MAX_PAGE_MAP_ENTRIES_64; m++ ) {
- if (num_entries == 0) {
- pml[m].present = 0;
- pml[m].writable = 0;
- pml[m].user = 0;
- pml[m].pwt = 0;
- pml[m].pcd = 0;
- pml[m].accessed = 0;
- pml[m].reserved = 0;
- pml[m].zero = 0;
- pml[m].vmm_info = 0;
- pml[m].pdp_base_addr_lo = 0;
- pml[m].pdp_base_addr_hi = 0;
- pml[m].available = 0;
- pml[m].no_execute = 0;
- } else {
- pdpe64_t * pdpe = os_hooks->allocate_pages(1);
-
- pml[m].present = 1;
- pml[m].writable = 1;
- pml[m].user = 1;
- pml[m].pwt = 0;
- pml[m].pcd = 0;
- pml[m].accessed = 0;
- pml[m].reserved = 0;
- pml[m].zero = 0;
- pml[m].vmm_info = 0;
- pml[m].pdp_base_addr_lo = PAGE_ALLIGNED_ADDR(pdpe) & 0xfffff;
- pml[m].pdp_base_addr_hi = 0;
- pml[m].available = 0;
- pml[m].no_execute = 0;
-
- for (k = 0; k < MAX_PAGE_DIR_PTR_ENTRIES_64; k++) {
- if (num_entries == 0) {
- pdpe[k].present = 0;
- pdpe[k].writable = 0;
- pdpe[k].user = 0;
- pdpe[k].pwt = 0;
- pdpe[k].pcd = 0;
- pdpe[k].accessed = 0;
- pdpe[k].reserved = 0;
- pdpe[k].large_pages = 0;
- pdpe[k].zero = 0;
- pdpe[k].vmm_info = 0;
- pdpe[k].pd_base_addr_lo = 0;
- pdpe[k].pd_base_addr_hi = 0;
- pdpe[k].available = 0;
- pdpe[k].no_execute = 0;
- } else {
- pde64_t * pde = os_hooks->allocate_pages(1);
-
- pdpe[k].present = 1;
- pdpe[k].writable = 1;
- pdpe[k].user = 1;
- pdpe[k].pwt = 0;
- pdpe[k].pcd = 0;
- pdpe[k].accessed = 0;
- pdpe[k].reserved = 0;
- pdpe[k].large_pages = 0;
- pdpe[k].zero = 0;
- pdpe[k].vmm_info = 0;
- pdpe[k].pd_base_addr_lo = PAGE_ALLIGNED_ADDR(pde) & 0xfffff;
- pdpe[k].pd_base_addr_hi = 0;
- pdpe[k].available = 0;
- pdpe[k].no_execute = 0;
-
-
-
- for (i = 0; i < MAX_PAGE_DIR_ENTRIES_64; i++) {
- if (num_entries == 0) {
- pde[i].present = 0;
- pde[i].flags = 0;
- pde[i].accessed = 0;
- pde[i].reserved = 0;
- pde[i].large_pages = 0;
- pde[i].reserved2 = 0;
- pde[i].vmm_info = 0;
- pde[i].pt_base_addr_lo = 0;
- pde[i].pt_base_addr_hi = 0;
- pde[i].available = 0;
- pde[i].no_execute = 0;
- } else {
- pte64_t * pte = os_hooks->allocate_pages(1);
-
- pde[i].present = 1;
- pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
- pde[i].accessed = 0;
- pde[i].reserved = 0;
- pde[i].large_pages = 0;
- pde[i].reserved2 = 0;
- pde[i].vmm_info = 0;
- pde[i].pt_base_addr_lo = PAGE_ALLIGNED_ADDR(pte) & 0xfffff;
- pde[i].pt_base_addr_hi = 0;
- pde[i].available = 0;
- pde[i].no_execute = 0;
-
-
- for (j = 0; j < MAX_PAGE_TABLE_ENTRIES_64; j++) {
- layout_addr = get_mem_layout_addr(layout, layout_index);
-
- if ((current_page_addr < layout_addr) || (num_entries == 0)) {
- // We have a gap in the layout, fill with unmapped page
- pte[j].present = 0;
- pte[j].flags = 0;
- pte[j].accessed = 0;
- pte[j].dirty = 0;
- pte[j].pte_attr = 0;
- pte[j].global_page = 0;
- pte[j].vmm_info = 0;
- pte[j].page_base_addr_lo = 0;
- pte[j].page_base_addr_hi = 0;
- pte[j].available = 0;
- pte[j].no_execute = 0;
-
- current_page_addr += PAGE_SIZE;
- } else if (current_page_addr == layout_addr) {
- // Set up the Table entry to map correctly to the layout region
- layout_region_t * page_region = get_mem_layout_region(layout, layout_addr);
-
- if (page_region->type == UNMAPPED) {
- pte[j].present = 0;
- pte[j].flags = 0;
- } else {
- pte[j].present = 1;
- pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
- }
-
- pte[j].accessed = 0;
- pte[j].dirty = 0;
- pte[j].pte_attr = 0;
- pte[j].global_page = 0;
- pte[j].vmm_info = 0;
- pte[j].available = 0;
- pte[j].no_execute = 0;
-
- if (page_region->type == UNMAPPED) {
- pte[j].page_base_addr_lo = 0;
- pte[j].page_base_addr_hi = 0;
- } else if (page_region->type == SHARED) {
- addr_t host_addr = page_region->host_addr + (layout_addr - page_region->start);
-
- pte[j].page_base_addr_lo = PAGE_ALLIGNED_ADDR(host_addr) & 0xfffff;
- pte[j].page_base_addr_hi = 0;
- pte[j].vmm_info = SHARED_PAGE;
- } else if (page_region->type == GUEST) {
- addr_t list_addr = get_mem_list_addr(list, list_index++);
-
- if (list_addr == -1) {
- // error
- // cleanup...
- //free_guest_page_tables(pde);
- return NULL;
- }
- PrintDebug("Adding guest page (%x)\n", list_addr);
- pte[j].page_base_addr_lo = PAGE_ALLIGNED_ADDR(list_addr) & 0xfffff;
- pte[j].page_base_addr_hi = 0;
-
- // Reset this when we move over to dynamic page allocation
- // pte[j].vmm_info = GUEST_PAGE;
- pte[j].vmm_info = SHARED_PAGE;
- }
-
- num_entries--;
- current_page_addr += PAGE_SIZE;
- layout_index++;
- } else {
- // error
- PrintDebug("Error creating page table...\n");
- // cleanup
- // free_guest_page_tables64(pde);
- return NULL;
- }
- }
- }
- }
- }
- }
- }
- }
- return pml;
-}
-
-#endif
--- /dev/null
+#include <geekos/vmm_shadow_paging.h>
+
+#include <geekos/vmm.h>
+
+extern struct vmm_os_hooks * os_hooks;
+
+
+int init_shadow_page_state(shadow_page_state_t * state) {
+ state->guest_mode = PDE32;
+ state->shadow_mode = PDE32;
+
+ state->guest_cr3.r_reg = 0;
+ state->shadow_cr3.r_reg = 0;
+
+ return 0;
+}
+
+
+int wholesale_update_shadow_page_state(shadow_page_state_t * state, shadow_map_t * mem_map) {
+ unsigned i, j;
+  pde32_t * guest_pde;
+  pde32_t * shadow_pde;
+
+
+ // For now, we'll only work with PDE32
+ if (state->guest_mode != PDE32) {
+ return -1;
+ }
+
+
+
+  shadow_pde = (pde32_t *)(CR3_TO_PDE32(state->shadow_cr3.e_reg.low));
+  guest_pde = (pde32_t *)(os_hooks->paddr_to_vaddr((void*)CR3_TO_PDE32(state->guest_cr3.e_reg.low)));
+
+ // Delete the current page table
+ delete_page_tables_pde32(shadow_pde);
+
+ shadow_pde = os_hooks->allocate_pages(1);
+
+
+ state->shadow_cr3.e_reg.low = (addr_t)shadow_pde;
+
+ state->shadow_mode = PDE32;
+
+
+ for (i = 0; i < MAX_PDE32_ENTRIES; i++) {
+ shadow_pde[i] = guest_pde[i];
+
+ // The shadow can be identical to the guest if it's not present
+ if (!shadow_pde[i].present) {
+ continue;
+ }
+
+ if (shadow_pde[i].large_pages) {
+ // large page - just map it through shadow map to generate its physical location
+ addr_t guest_addr = PAGE_ADDR(shadow_pde[i].pt_base_addr);
+ addr_t host_addr;
+ shadow_region_t * ent;
+
+ ent = get_shadow_region_by_addr(mem_map, guest_addr);
+
+ if (!ent) {
+ // FIXME Panic here - guest is trying to map to physical memory
+ // it does not own in any way!
+ return -1;
+ }
+
+ // FIXME Bounds check here to see if it's trying to trick us
+
+ switch (ent->host_type) {
+ case HOST_REGION_PHYSICAL_MEMORY:
+ // points into currently allocated physical memory, so we just
+ // set up the shadow to point to the mapped location
+	if (lookup_shadow_map_addr(mem_map, guest_addr, &host_addr) != HOST_REGION_PHYSICAL_MEMORY) {
+ // Panic here
+ return -1;
+ }
+
+ shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(host_addr);
+ // FIXME set vmm_info bits here
+ break;
+ case HOST_REGION_UNALLOCATED:
+ // points to physical memory that is *allowed* but that we
+ // have not yet allocated. We mark as not present and set a
+ // bit to remind us to allocate it later
+ shadow_pde[i].present = 0;
+ // FIXME Set vminfo bits here so that we know that we will be
+ // allocating it later
+ break;
+ case HOST_REGION_NOTHING:
+ // points to physical memory that is NOT ALLOWED.
+ // We will mark it as not present and set a bit to remind
+ // us that it's bad later and insert a GPF then
+ shadow_pde[i].present = 0;
+ break;
+ case HOST_REGION_MEMORY_MAPPED_DEVICE:
+ case HOST_REGION_REMOTE:
+ case HOST_REGION_SWAPPED:
+ default:
+ // Panic. Currently unhandled
+ return -1;
+ break;
+ }
+ } else {
+      pte32_t * guest_pte;
+      pte32_t * shadow_pte;
+ addr_t guest_addr;
+ addr_t guest_pte_host_addr;
+ shadow_region_t * ent;
+
+ // small page - set PDE and follow down to the child table
+ shadow_pde[i] = guest_pde[i];
+
+ guest_addr = PAGE_ADDR(guest_pde[i].pt_base_addr);
+
+ // Allocate a new second level page table for the shadow
+ shadow_pte = os_hooks->allocate_pages(1);
+
+ // make our first level page table in the shadow point to it
+ shadow_pde[i].pt_base_addr = PAGE_ALIGNED_ADDR(shadow_pte);
+
+ ent = get_shadow_region_by_addr(mem_map, guest_addr);
+
+
+ /* JRL: This is bad.... */
+ // For now the guest Page Table must always be mapped to host physical memory
+ /* If we swap out a page table or if it isn't present for some reason, this turns real ugly */
+
+ if ((!ent) || (ent->host_type != HOST_REGION_PHYSICAL_MEMORY)) {
+ // FIXME Panic here - guest is trying to map to physical memory
+ // it does not own in any way!
+ return -1;
+ }
+
+ // Address of the relevant second level page table in the guest
+      if (lookup_shadow_map_addr(mem_map, guest_addr, &guest_pte_host_addr) != HOST_REGION_PHYSICAL_MEMORY) {
+ // Panic here
+ return -1;
+ }
+
+
+ // host_addr now contains the host physical address for the guest's 2nd level page table
+ // Now we transform it to relevant virtual address
+ guest_pte = os_hooks->paddr_to_vaddr((void *)guest_pte_host_addr);
+
+ // Now we walk through the second level guest page table
+ // and clone it into the shadow
+ for (j = 0; j < MAX_PTE32_ENTRIES; j++) {
+ shadow_pte[j] = guest_pte[j];
+
+ addr_t guest_addr = PAGE_ADDR(shadow_pte[j].page_base_addr);
+
+ shadow_region_t * ent;
+
+ ent = get_shadow_region_by_addr(mem_map, guest_addr);
+
+ if (!ent) {
+ // FIXME Panic here - guest is trying to map to physical memory
+ // it does not own in any way!
+ return -1;
+ }
+
+ switch (ent->host_type) {
+ case HOST_REGION_PHYSICAL_MEMORY:
+ {
+ addr_t host_addr;
+
+ // points into currently allocated physical memory, so we just
+ // set up the shadow to point to the mapped location
+	    if (lookup_shadow_map_addr(mem_map, guest_addr, &host_addr) != HOST_REGION_PHYSICAL_MEMORY) {
+ // Panic here
+ return -1;
+ }
+
+ shadow_pte[j].page_base_addr = PAGE_ALIGNED_ADDR(host_addr);
+ // FIXME set vmm_info bits here
+ break;
+ }
+ case HOST_REGION_UNALLOCATED:
+ // points to physical memory that is *allowed* but that we
+ // have not yet allocated. We mark as not present and set a
+ // bit to remind us to allocate it later
+ shadow_pte[j].present = 0;
+ // FIXME Set vminfo bits here so that we know that we will be
+ // allocating it later
+ break;
+ case HOST_REGION_NOTHING:
+ // points to physical memory that is NOT ALLOWED.
+ // We will mark it as not present and set a bit to remind
+ // us that it's bad later and insert a GPF then
+ shadow_pte[j].present = 0;
+ break;
+ case HOST_REGION_MEMORY_MAPPED_DEVICE:
+ case HOST_REGION_REMOTE:
+ case HOST_REGION_SWAPPED:
+ default:
+ // Panic. Currently unhandled
+ return -1;
+ break;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+