// There is an underlying region that contains all of the guest memory
// PrintDebug("Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)info->mem_size);
+ // 2MB page alignment needed for 2MB hardware nested paging
map->base_region.guest_start = 0;
map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;
return -1;
}
- PrintDebug("%s: page [%p,%p) contains address\n", __FUNCTION__, (void *)pg_start, (void *)pg_end);
+ //PrintDebug("%s: page [%p,%p) contains address\n", __FUNCTION__, (void *)pg_start, (void *)pg_end);
pg_next_reg = v3_get_next_mem_region(core->vm_info, core->cpu_id, pg_start);
if (pg_next_reg->flags.base == 1) {
page_size = req_size; // State A
- PrintDebug("%s: base region [%p,%p) contains page.\n", __FUNCTION__,
- (void *)pg_next_reg->guest_start, (void *)pg_next_reg->guest_end);
+ //PrintDebug("%s: base region [%p,%p) contains page.\n", __FUNCTION__,
+ // (void *)pg_next_reg->guest_start, (void *)pg_next_reg->guest_end);
} else {
#if 0 // State B/C and D optimization
if ((pg_next_reg->guest_end >= pg_end) &&
return page_size;
}
+// Compute the largest hardware page size (1GB, 4MB, 2MB, or 4KB) that
+// page_addr is naturally aligned to, i.e. the actual alignment of the
+// physical page an address maps to. Returns 0 (after logging an error)
+// if page_addr is not even 4KB-aligned.
+uint32_t v3_compute_page_alignment(addr_t page_addr)
+{
+    // Test from largest to smallest: an address aligned to 1GB is also
+    // aligned to 4MB/2MB/4KB, so the first match is the maximal alignment.
+    if (PAGE_OFFSET_1GB(page_addr) == 0) {
+        return PAGE_SIZE_1GB;
+    } else if (PAGE_OFFSET_4MB(page_addr) == 0) {
+        return PAGE_SIZE_4MB;
+    } else if (PAGE_OFFSET_2MB(page_addr) == 0) {
+        return PAGE_SIZE_2MB;
+    } else if (PAGE_OFFSET_4KB(page_addr) == 0) {
+        return PAGE_SIZE_4KB;
+    } else {
+        // Caller passed a non-page-aligned address; 0 signals "no alignment".
+        PrintError("Non-page aligned address passed to %s.\n", __FUNCTION__);
+        return 0;
+    }
+}
void v3_print_mem_map(struct v3_vm_info * vm) {
struct rb_node * node = v3_rb_first(&(vm->mem_map.mem_regions));