v3_paging_mode_t shdw_pg_mode;
struct shadow_page_state shdw_pg_state;
addr_t direct_map_pt;
- // nested_paging_t nested_page_state;
// This structure is how we get interrupts for the guest
// These are the types of physical memory address regions
// from the perspective of the HOST
typedef enum shdw_region_type {
- SHDW_REGION_INVALID, // This region is INVALID (this is a return type to denote errors)
SHDW_REGION_WRITE_HOOK, // This region is mapped as read-only (page faults on write)
SHDW_REGION_FULL_HOOK, // This region is mapped as not present (always generate page faults)
SHDW_REGION_ALLOCATED, // Region is a section of host memory
} v3_shdw_region_type_t;
-typedef struct v3_shdw_map {
- addr_t hook_hva;
-
- struct rb_root shdw_regions;
-} v3_shdw_map_t;
struct v3_shadow_region {
};
+// Guest physical memory map: one always-present base region backing all of
+// guest RAM, plus an rb-tree of specially registered shadow regions that
+// override it for particular address ranges.
+typedef struct v3_shdw_map {
+ // Region covering guest physical addresses [0, mem_size); used as the
+ // fallback when no rb-tree region matches (initialized in v3_init_shadow_map)
+ struct v3_shadow_region base_region;
+
+
+ // HVA of a single host page allocated at init time
+ // NOTE(review): presumably a scratch mapping for hooked accesses -- confirm
+ addr_t hook_hva;
+
+ // Root of the rb-tree of registered shadow regions
+ struct rb_root shdw_regions;
+} v3_shdw_map_t;
+
+
+
void v3_init_shadow_map(struct guest_info * info);
void v3_delete_shadow_map(struct guest_info * info);
return -1;
}
- if ((shdw_reg->host_type == SHDW_REGION_INVALID) ||
- (shdw_reg->host_type == SHDW_REGION_FULL_HOOK)) {
+ if (shdw_reg->host_type == SHDW_REGION_FULL_HOOK) {
PrintError("In GPA->HPA: Could not find address in shadow map (addr=%p) (reg_type=%s)\n",
(void *)guest_pa, v3_shdw_region_type_to_str(shdw_reg->host_type));
return -1;
int v3_config_guest(struct guest_info * info, struct v3_vm_config * config_ptr) {
extern v3_cpu_arch_t v3_cpu_type;
+ // Amount of ram the Guest will have, rounded to a 4K page boundary
+ info->mem_size = config_ptr->mem_size & ~(addr_t)0xfff;
+
// Initialize the subsystem data strutures
v3_init_time(info);
v3_init_io_map(info);
v3_init_decoder(info);
+ v3_init_hypercall_map(info);
+
+
+ // Initialize the memory map
v3_init_shadow_map(info);
- v3_init_hypercall_map(info);
if ((v3_cpu_type == V3_SVM_REV3_CPU) &&
(config_ptr->enable_nested_paging == 1)) {
info->cpu_mode = REAL;
info->mem_mode = PHYSICAL_MEM;
- // Amount of ram the Guest will have, rounded to a 4K page boundary
- info->mem_size = config_ptr->mem_size & ~(addr_t)0xfff;
-
// Configure the memory map for the guest
if (setup_memory_map(info, config_ptr) == -1) {
PrintError("Setting up guest memory map failed...\n");
// Configure the devices for the guest
setup_devices(info, config_ptr);
-
-
if (config_ptr->enable_profiling) {
info->enable_profiler = 1;
v3_init_profiler(info);
* We need to make sure the memory map extends to cover it
*/
static int setup_memory_map(struct guest_info * info, struct v3_vm_config * config_ptr) {
- addr_t mem_pages = info->mem_size >> 12;
-
PrintDebug("Setting up memory map (memory size=%dMB)\n", (uint_t)(info->mem_size / (1024 * 1024)));
- // Fill up to the 640K hole
- if (mem_pages >= 160) {
- if (v3_add_shadow_mem(info, 0x0, 0xa0000, (addr_t)V3_AllocPages(160)) == -1) {
- PrintError("Could not map full conventional memory\n");
- return -1;
- }
- } else {
- // Less than 640k of memory
- if (v3_add_shadow_mem(info, 0x0, (mem_pages * PAGE_SIZE), (addr_t)V3_AllocPages(mem_pages)) == -1) {
- PrintError("Could not map subset of conventional memory\n");
- return -1;
- };
- }
-
-
-#define VGABIOS_START 0x000c0000
-#define ROMBIOS_START 0x000f0000
-
// VGA frame buffer
if (1) {
if (v3_add_shadow_mem(info, 0xa0000, 0xc0000, 0xa0000) == -1) {
} else {
v3_hook_write_mem(info, 0xa0000, 0xc0000, 0xa0000, passthrough_mem_write, NULL);
}
-
-
+
+#define VGABIOS_START 0x000c0000
+#define ROMBIOS_START 0x000f0000
+
/* layout vgabios */
{
- uint_t num_pages = (config_ptr->vgabios_size + PAGE_SIZE - 1) / PAGE_SIZE;
- void * guest_mem = V3_AllocPages(num_pages);
- addr_t vgabios_end = VGABIOS_START + (num_pages * PAGE_SIZE);
-
- PrintDebug("Layout Region %d bytes\n", config_ptr->vgabios_size);
- memcpy(V3_VAddr(guest_mem), config_ptr->vgabios, config_ptr->vgabios_size);
-
- if (v3_add_shadow_mem(info, VGABIOS_START, vgabios_end, (addr_t)guest_mem) == -1) {
- PrintError("Could not map VGABIOS\n");
- return -1;
- }
-
- PrintDebug("Adding Shadow Region (0x%p-0x%p) -> 0x%p\n",
- (void *)VGABIOS_START,
- (void *)vgabios_end,
- (void *)guest_mem);
-
-
- // Fill in the space between the VGABIOS and the ROMBIOS
- // We'll just back this to shadow memory for now....
- if (v3_add_shadow_mem(info, vgabios_end, ROMBIOS_START,
- (addr_t)V3_AllocPages((ROMBIOS_START - vgabios_end) / PAGE_SIZE)) == -1) {
- PrintError("Could not map VGABIOS->ROMBIOS gap\n");
- return -1;
- }
+ addr_t vgabios_dst = v3_get_shadow_addr(&(info->mem_map.base_region), VGABIOS_START);
+ memcpy(V3_VAddr((void *)vgabios_dst), config_ptr->vgabios, config_ptr->vgabios_size);
}
/* layout rombios */
{
- uint_t num_pages = (config_ptr->rombios_size + PAGE_SIZE - 1) / PAGE_SIZE;
- void * guest_mem = V3_AllocPages(num_pages);
- addr_t rombios_end = ROMBIOS_START + (num_pages * PAGE_SIZE);
-
- PrintDebug("Layout Region %d bytes\n", config_ptr->rombios_size);
- memcpy(V3_VAddr(guest_mem), config_ptr->rombios, config_ptr->rombios_size);
-
- if (v3_add_shadow_mem(info, ROMBIOS_START, rombios_end, (addr_t)guest_mem) == -1) {
- PrintError("Could not map ROMBIOS\n");
- return -1;
- }
-
- PrintDebug("Adding Shadow Region (0x%p-0x%p) -> 0x%p\n",
- (void *)ROMBIOS_START,
- (void *)rombios_end,
- (void *)guest_mem);
-
- if (rombios_end != 0x100000) {
- PrintError("ROMBIOS must reach the 1MB barrier....\n");
- return -1;
- }
+ addr_t rombios_dst = v3_get_shadow_addr(&(info->mem_map.base_region), ROMBIOS_START);
+ memcpy(V3_VAddr((void *)rombios_dst), config_ptr->rombios, config_ptr->rombios_size);
}
#ifdef CRAY_XT
{
#define SEASTAR_START 0xffe00000
#define SEASTAR_END 0xffffffff
-
- // Fill in generic memory below the seastar
- addr_t top_of_mem = (SEASTAR_START < info->mem_size) ? SEASTAR_START : info->mem_size;
- int num_low_pages = (top_of_mem - 0x100000) / PAGE_SIZE;
-
- if (v3_add_shadow_mem(info, 0x100000, top_of_mem, (addr_t)V3_AllocPages(num_low_pages)) == -1) {
- PrintError("Could not extended memory below 4G\n");
- return -1;
- }
-
// Map the Seastar straight through
if (v3_add_shadow_mem(info, SEASTAR_START, SEASTAR_END, SEASTAR_START) == -1) {
PrintError("Could not map through the seastar\n");
return -1;
}
-
-
- // Add memory above the seastar
- if (info->mem_size > SEASTAR_END) {
- int num_high_pages = mem_pages - (SEASTAR_END / PAGE_SIZE);
-
- if (v3_add_shadow_mem(info, SEASTAR_END, info->mem_size, (addr_t)V3_AllocPages(num_high_pages)) == -1) {
- PrintError("Could not map extended memory above 4G\n");
- return -1;
- }
- }
- }
-#else
- // Fill in the extended memory map....
- {
- int num_ext_pages = mem_pages - (0x100000 / PAGE_SIZE);
-
- if (num_ext_pages > 0) {
- if (v3_add_shadow_mem(info, 0x100000, info->mem_size, (addr_t)V3_AllocPages(num_ext_pages)) == -1) {
- PrintError("Could not allocate extended shadow memory\n");
- return -1;
- }
- }
}
#endif
struct v3_shadow_region * region = v3_get_shadow_region(info, fault_addr);
- if ((region == NULL) ||
- (region->host_type == SHDW_REGION_INVALID)) {
+ if (region == NULL) {
PrintError("Invalid region in passthrough page fault 32, addr=%p\n",
(void *)fault_addr);
return -1;
struct v3_shadow_region * region = v3_get_shadow_region(info, fault_addr);
- if ((region == NULL) ||
- (region->host_type == SHDW_REGION_INVALID)) {
+ if (region == NULL) {
PrintError("Invalid region in passthrough page fault 32PAE, addr=%p\n",
(void *)fault_addr);
return -1;
struct v3_shadow_region * region = v3_get_shadow_region(info, fault_addr);
- if ((region == NULL) ||
- (region->host_type == SHDW_REGION_INVALID)) {
+ if (region == NULL) {
PrintError("Invalid region in passthrough page fault 64, addr=%p\n",
(void *)fault_addr);
return -1;
#include <palacios/vmm_mem.h>
#include <palacios/vmm.h>
#include <palacios/vmm_util.h>
-//#include <palacios/vmm_decoder.h>
#include <palacios/vmm_emulator.h>
-
+#define MEM_OFFSET_HCALL 0x1000
struct v3_shadow_region * region);
+// Handler for MEM_OFFSET_HCALL: hands the guest the host address of its
+// base memory region (as obtained from V3_AllocPages) in RBX.
+// hcall_id and private_data are unused; always returns 0 (success).
+static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) {
+ info->vm_regs.rbx = info->mem_map.base_region.host_addr;
+
+ return 0;
+}
+
+// Initialize the guest's shadow memory map: empty the region rb-tree,
+// allocate the hook scratch page, back all of guest RAM with a single
+// allocated base region, and register the MEM_OFFSET_HCALL hypercall so
+// the guest can query the base region's host address.
void v3_init_shadow_map(struct guest_info * info) {
v3_shdw_map_t * map = &(info->mem_map);
+ // mem_size was rounded down to a 4K boundary in v3_config_guest,
+ // so the shift loses nothing
+ addr_t mem_pages = info->mem_size >> 12;
map->shdw_regions.rb_node = NULL;
map->hook_hva = (addr_t)V3_VAddr(V3_AllocPages(1));
+ // There is an underlying region that contains all of the guest memory
+ map->base_region.guest_start = 0;
+ map->base_region.guest_end = info->mem_size;
+ map->base_region.host_type = SHDW_REGION_ALLOCATED;
+ map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
+
+ v3_register_hypercall(info, MEM_OFFSET_HCALL, mem_offset_hypercall, NULL);
}
void v3_delete_shadow_map(struct guest_info * info) {
-
-
-
-
int handle_special_page_fault(struct guest_info * info,
addr_t fault_gva, addr_t fault_gpa,
pf_error_t access_info)
}
}
- return NULL;
-}
-
+ // There is no registered region, so we check if it's a valid address in the base region
-addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, addr_t guest_addr) {
- if ( (reg) &&
- (reg->host_type != SHDW_REGION_FULL_HOOK) &&
- (reg->host_type != SHDW_REGION_INVALID) ) {
- return (guest_addr - reg->guest_start) + reg->host_addr;
- } else {
- PrintDebug("MEM Region Invalid\n");
- return 0;
+ if (guest_addr > info->mem_map.base_region.guest_end) {
+ PrintError("Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
+ (void *)guest_addr, (void *)info->mem_map.base_region.guest_end);
+ return NULL;
}
+
+ return &(info->mem_map.base_region);
}
-
void v3_delete_shadow_region(struct guest_info * info, struct v3_shadow_region * reg) {
if (reg != NULL) {
v3_rb_erase(&(reg->tree_node), &(info->mem_map.shdw_regions));
+// Translate a guest physical address within 'reg' into the corresponding
+// host address.  Returns 0 for a NULL region or a FULL_HOOK region (full
+// hooks have no backing host memory); callers must treat 0 as failure.
+// NOTE(review): guest_addr is assumed to lie inside reg's range -- no
+// bounds check is performed here; confirm callers guarantee this.
+addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, addr_t guest_addr) {
+ if ( (reg) &&
+ (reg->host_type != SHDW_REGION_FULL_HOOK)) {
+ return (guest_addr - reg->guest_start) + reg->host_addr;
+ } else {
+ PrintDebug("MEM Region Invalid\n");
+ return 0;
+ }
+
+}
+
+
+
void print_shadow_map(struct guest_info * info) {
struct rb_node * node = v3_rb_first(&(info->mem_map.shdw_regions));
- struct v3_shadow_region * reg;
+ struct v3_shadow_region * reg = &(info->mem_map.base_region);
int i = 0;
PrintDebug("Memory Layout:\n");
+
+ PrintDebug("Base Region: 0x%p - 0x%p -> 0x%p\n",
+ (void *)(reg->guest_start),
+ (void *)(reg->guest_end - 1),
+ (void *)(reg->host_addr));
+
do {
reg = rb_entry(node, struct v3_shadow_region, tree_node);
}
-static const uchar_t SHDW_REGION_INVALID_STR[] = "SHDW_REGION_INVALID";
static const uchar_t SHDW_REGION_WRITE_HOOK_STR[] = "SHDW_REGION_WRITE_HOOK";
static const uchar_t SHDW_REGION_FULL_HOOK_STR[] = "SHDW_REGION_FULL_HOOK";
static const uchar_t SHDW_REGION_ALLOCATED_STR[] = "SHDW_REGION_ALLOCATED";
-
-
const uchar_t * v3_shdw_region_type_to_str(v3_shdw_region_type_t type) {
switch (type) {
case SHDW_REGION_WRITE_HOOK:
case SHDW_REGION_ALLOCATED:
return SHDW_REGION_ALLOCATED_STR;
default:
- return SHDW_REGION_INVALID_STR;
+ return (uchar_t *)"SHDW_REGION_INVALID";
}
}
struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_fault_pa);
- if ((shdw_reg == NULL) ||
- (shdw_reg->host_type == SHDW_REGION_INVALID)) {
+ if (shdw_reg == NULL) {
// Inject a machine check in the guest
PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
v3_raise_exception(info, MC_EXCEPTION);
struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_pa);
- if ((shdw_reg == NULL) ||
- (shdw_reg->host_type == SHDW_REGION_INVALID)) {
+ if (shdw_reg == NULL) {
// Inject a machine check in the guest
PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
v3_raise_exception(info, MC_EXCEPTION);
- if ((shdw_reg == NULL) ||
- (shdw_reg->host_type == SHDW_REGION_INVALID)) {
+ if (shdw_reg == NULL) {
// Inject a machine check in the guest
PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_pa);
v3_raise_exception(info, MC_EXCEPTION);
struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_fault_pa);
- if ((shdw_reg == NULL) ||
- (shdw_reg->host_type == SHDW_REGION_INVALID)) {
+ if (shdw_reg == NULL) {
// Inject a machine check in the guest
PrintDebug("Invalid Guest Address in page table (0x%p)\n", (void *)guest_fault_pa);
v3_raise_exception(info, MC_EXCEPTION);