This updates the page allocation os hook to take a "constraints" parameter. Currently,
there is only one constraint, allocation below 4 GB. The buddy allocator and other elements
of the support code now obey this constraint. I have vetted page allocations in Palacios
so that with a guest using shadow paging, any allocation that could become part of the
guest memory map, and any allocation done in 32 bit shadow paging (page tables) is done
with this constraint. This includes direct paging. Nested paging should be unaffected.
return -1;
}
- mp = palacios_alloc_node_extended(sizeof(struct buddy_mempool), GFP_KERNEL, zone->node_id);
+ mp = palacios_alloc_extended(sizeof(struct buddy_mempool), GFP_KERNEL, zone->node_id);
if (IS_ERR(mp)) {
ERROR("Could not allocate mempool\n");
/* Allocate a bitmap with 1 bit per minimum-sized block */
mp->num_blocks = (1UL << pool_order) / (1UL << zone->min_order);
- mp->tag_bits = palacios_alloc_node_extended(
+ mp->tag_bits = palacios_alloc_extended(
BITS_TO_LONGS(mp->num_blocks) * sizeof(long), GFP_KERNEL, zone->node_id
);
* Arguments:
* [IN] mp: Buddy system memory allocator object.
* [IN] order: Block size to allocate (2^order bytes).
- *
+ * [IN] constraints: bitmask of restrictions for the scan. currently: 0=none, or LWK_BUDDY_CONSTRAINT_4GB
* Returns:
* Success: Pointer to the start of the allocated memory block.
* Failure: NULL
*/
uintptr_t
-buddy_alloc(struct buddy_memzone *zone, unsigned long order)
+buddy_alloc(struct buddy_memzone *zone, unsigned long order, int constraints)
{
unsigned long j;
struct buddy_mempool * mp = NULL;
struct list_head * list = NULL;
+ struct list_head * cur = NULL;
struct block * block = NULL;
struct block * buddy_block = NULL;
unsigned long flags = 0;
+ if (constraints && constraints!=LWK_BUDDY_CONSTRAINT_4GB) {
+ ERROR("Do not know how to satisfy constraint mask 0x%x\n", constraints);
+ return (uintptr_t) NULL;
+ }
+
BUG_ON(zone == NULL);
BUG_ON(order > zone->max_order);
INFO("Order iter=%lu\n", j);
- /* Try to allocate the first block in the order j list */
- list = &zone->avail[j];
+ block=NULL;
+
+ list = &(zone->avail[j]);
+
+ if (list_empty(list)) {
+ continue;
+ }
+
+ list_for_each(cur, list) {
+ block = list_entry(cur, struct block, link);
- if (list_empty(list))
+ if (!constraints) {
+ // without a constraint, we just want the first one
+ break;
+ }
+
+ if (constraints & LWK_BUDDY_CONSTRAINT_4GB) {
+ // under this constraint, we will only use the block if the entirety
+ // of the allocation within the block falls below 4 GB
+ void *block_pa = (void*)__pa(block);
+ if ((block_pa + (1ULL<<order)) <= (void*)(0x100000000ULL)) {
+ // this block will work
+ break;
+ } else {
+ // look for the next block
+ block=NULL;
+ continue;
+ }
+ }
+ }
+
+ if (!block) {
+ // uh oh, no block, look to next order
continue;
+ }
+
+ // have appropriate block, will allocate
- block = list_entry(list->next, struct block, link);
list_del(&(block->link));
mp = block->mp;
INFO("pool=%p, block=%p, order=%lu, j=%lu\n", mp, block, order, j);
- /*
- palacios_spinlock_unlock_irqrestore(&(zone->lock), flags);
- return 0;
- */
-
/* Trim if a higher order block than necessary was allocated */
while (j > order) {
--j;
if (min_order > max_order)
return NULL;
- zone = palacios_alloc_node_extended(sizeof(struct buddy_memzone), GFP_KERNEL, node_id);
+ zone = palacios_alloc_extended(sizeof(struct buddy_memzone), GFP_KERNEL, node_id);
INFO("Allocated zone at %p\n", zone);
- if (IS_ERR(zone)) {
+ if (!zone) {
ERROR("Could not allocate memzone\n");
return NULL;
}
zone->node_id = node_id;
/* Allocate a list for every order up to the maximum allowed order */
- zone->avail = palacios_alloc_node_extended((max_order + 1) * sizeof(struct list_head), GFP_KERNEL, zone->node_id);
+ zone->avail = palacios_alloc_extended((max_order + 1) * sizeof(struct list_head), GFP_KERNEL, zone->node_id);
+
+ if (!(zone->avail)) {
+ ERROR("Unable to allocate space for zone list\n");
+ palacios_free(zone);
+ return NULL;
+ }
INFO("Allocated free lists at %p\n", zone->avail);
void **user_metadata);
+// constraint=0 => no constraints, otherwise bitmask of:
+#define LWK_BUDDY_CONSTRAINT_4GB 0x1
+
/* Allocate pages, returns physical address */
extern uintptr_t
buddy_alloc(struct buddy_memzone * zone,
- unsigned long order);
+ unsigned long order,
+ int constraints);
/* Free a physical address */
int add_global_ctrl(unsigned int cmd,
int (*handler)(unsigned int cmd, unsigned long arg)) {
- struct global_ctrl * ctrl = palacios_alloc_extended(sizeof(struct global_ctrl), GFP_KERNEL);
+ struct global_ctrl * ctrl = palacios_alloc_extended(sizeof(struct global_ctrl), GFP_KERNEL,-1);
if (ctrl == NULL) {
printk("Error: Could not allocate global ctrl %d\n", cmd);
{
struct proc_dir_entry *entry;
- INFO("palacios_proc_dir=%p before v3-guests\n",palacios_proc_dir);
+ //INFO("palacios_proc_dir=%p before v3-guests\n",palacios_proc_dir);
entry = create_proc_read_entry("v3-guests", 0444, palacios_proc_dir, read_guests, NULL);
if (entry) {
INFO("/proc/v3vee/v3-guests successfully created\n");
void * addr = NULL;
mod_allocs++;
- addr = palacios_alloc_extended(size, flags);
+ addr = palacios_alloc_extended(size, flags, -1);
return addr;
}
#include "mm.h"
#include "buddy.h"
#include "numa.h"
+#include "palacios/vmm.h"
static struct buddy_memzone ** memzones = NULL;
// alignment is in bytes
-uintptr_t alloc_palacios_pgs(u64 num_pages, u32 alignment, int node_id) {
+uintptr_t alloc_palacios_pgs(u64 num_pages, u32 alignment, int node_id, int constraints) {
uintptr_t addr = 0;
int any = node_id==-1; // can allocate on any
+ int buddy_constraints=0;
+
+ if (constraints && constraints!=V3_ALLOC_PAGES_CONSTRAINT_4GB) {
+ ERROR("Unknown constraint mask 0x%x\n",constraints);
+ return 0;
+ }
+
+ if (constraints & V3_ALLOC_PAGES_CONSTRAINT_4GB) {
+ buddy_constraints |= LWK_BUDDY_CONSTRAINT_4GB;
+ }
+
if (node_id == -1) {
int cpu_id = get_cpu();
return 0;
}
- addr = buddy_alloc(memzones[node_id], get_order(num_pages * PAGE_SIZE) + PAGE_SHIFT);
+ addr = buddy_alloc(memzones[node_id], get_order(num_pages * PAGE_SIZE) + PAGE_SHIFT, buddy_constraints);
if (!addr && any) {
int i;
// do a scan to see if we can satisfy request on any node
for (i=0; i< numa_num_nodes(); i++) {
if (i!=node_id) {
- addr = buddy_alloc(memzones[i], get_order(num_pages * PAGE_SIZE) + PAGE_SHIFT);
+ addr = buddy_alloc(memzones[i], get_order(num_pages * PAGE_SIZE) + PAGE_SHIFT, buddy_constraints);
if (addr) {
break;
}
return -1;
}
+ if ((node_id != r->node) && (r->node!=-1)) {
+ INFO("Memory add request is for node %d, but memory is in node %d\n",r->node,node_id);
+ }
+
pool_order = get_order(r->num_pages * PAGE_SIZE) + PAGE_SHIFT;
if (buddy_add_pool(memzones[node_id], r->base_addr, pool_order, keep)) {
INFO("memory manager init: MAX_ORDER=%d (%llu bytes)\n",MAX_ORDER, PAGE_SIZE*pow2(MAX_ORDER));
- memzones = palacios_alloc_extended(sizeof(struct buddy_memzone *) * num_nodes, GFP_KERNEL);
+ memzones = palacios_alloc_extended(sizeof(struct buddy_memzone *) * num_nodes, GFP_KERNEL,-1);
if (!memzones) {
ERROR("Cannot allocate space for memory zones\n");
memset(memzones, 0, sizeof(struct buddy_memzone *) * num_nodes);
- seed_addrs = palacios_alloc_extended(sizeof(uintptr_t) * num_nodes, GFP_KERNEL);
+ seed_addrs = palacios_alloc_extended(sizeof(uintptr_t) * num_nodes, GFP_KERNEL,-1);
if (!seed_addrs) {
ERROR("Cannot allocate space for seed addrs\n");
-uintptr_t alloc_palacios_pgs(u64 num_pages, u32 alignment, int node_id);
+uintptr_t alloc_palacios_pgs(u64 num_pages, u32 alignment, int node_id, int constraints);
void free_palacios_pg(uintptr_t base_addr);
void free_palacios_pgs(uintptr_t base_addr, u64 num_pages);
* Allocates a contiguous region of pages of the requested size.
* Returns the physical address of the first page in the region.
*/
-void *palacios_allocate_pages(int num_pages, unsigned int alignment, int node_id) {
+void *palacios_allocate_pages(int num_pages, unsigned int alignment, int node_id, int constraints) {
void * pg_addr = NULL;
if (num_pages<=0) {
return NULL;
}
- pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment, node_id);
+ pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment, node_id, constraints);
if (!pg_addr) {
ERROR("ALERT ALERT Page allocation has FAILED Warning\n");
void *
-palacios_alloc_extended(unsigned int size, unsigned int flags) {
+palacios_alloc_extended(unsigned int size, unsigned int flags, int node) {
void * addr = NULL;
if (size==0) {
return NULL;
}
- addr = kmalloc(size+2*ALLOC_PAD, flags);
+ if (node==-1) {
+ addr = kmalloc(size+2*ALLOC_PAD, flags);
+ } else {
+ addr = kmalloc_node(size+2*ALLOC_PAD, flags, node);
+ }
if (!addr) {
ERROR("ALERT ALERT kmalloc has FAILED FAILED FAILED\n");
// module, both in places where interrupts are off and where they are on
// a GFP_KERNEL call, when done with interrupts off can lead to DEADLOCK
if (irqs_disabled()) {
- return palacios_alloc_extended(size,GFP_ATOMIC);
+ return palacios_alloc_extended(size,GFP_ATOMIC,-1);
} else {
- return palacios_alloc_extended(size,GFP_KERNEL);
+ return palacios_alloc_extended(size,GFP_KERNEL,-1);
}
}
// The idea is that everything uses the same stubs
void palacios_print_scoped(void *vm, int vcore, const char *fmt, ...);
#define palacios_print(...) palacios_print_scoped(0,-1, __VA_ARGS__)
-void *palacios_allocate_pages(int num_pages, unsigned int alignment, int node_id);
+// node_id=-1 => no node constraint
+void *palacios_allocate_pages(int num_pages, unsigned int alignment, int node_id, int constraints);
void palacios_free_pages(void *page_addr, int num_pages);
void *palacios_alloc(unsigned int size);
-void *palacios_alloc_extended(unsigned int size, unsigned int flags);
-// FIX
-// NEED A palacios_alloc_node wrapper
-//
-#define palacios_alloc_node_extended(size, flags, node) kmalloc_node(size,flags,node)
+// node_id=-1 => no node constraint
+void *palacios_alloc_extended(unsigned int size, unsigned int flags, int node_id);
void palacios_free(void *);
void *palacios_valloc(unsigned int size); // use instead of vmalloc
void palacios_vfree(void *); // use instead of vfree
-
#ifdef __V3VEE__
#include <palacios/vmm_mem.h>
#include <palacios/vmm_types.h>
extern struct v3_os_hooks * os_hooks; \
void * ptr = 0; \
if ((os_hooks) && (os_hooks)->allocate_pages) { \
- ptr = (os_hooks)->allocate_pages(num_pages,PAGE_SIZE_4KB,-1); \
+ ptr = (os_hooks)->allocate_pages(num_pages,PAGE_SIZE_4KB,-1,0); \
} \
ptr; \
})
+// Allocate pages that are safe to expose to a guest that may be using
+// shadow paging: if any core of the vm runs in shadow-paging mode, the
+// allocation is constrained below 4 GB so that 32-bit shadow page table
+// entries can reference it. Should be used for shadow page tables and
+// any physical memory mapped into the vm.
+// Note: every use of the vm macro argument is parenthesized so that a
+// non-trivial argument expression expands correctly.
+#define V3_AllocShadowSafePages(vm,num_pages)			\
+    ({								\
+	extern struct v3_os_hooks * os_hooks;			\
+	void * ptr = 0;						\
+	int c; int shadow=0;					\
+	for (c=0;c<(vm)->num_cores && !shadow;c++) {		\
+	    shadow |= (vm)->cores[c].shdw_pg_mode==SHADOW_PAGING;	\
+	}							\
+	if ((os_hooks) && (os_hooks)->allocate_pages) {		\
+	    ptr = (os_hooks)->allocate_pages(num_pages,PAGE_SIZE_4KB,-1,\
+		 shadow ? V3_ALLOC_PAGES_CONSTRAINT_4GB : 0);	\
+	}							\
+	ptr;							\
+    })
+
#define V3_AllocAlignedPages(num_pages, align) \
({ \
extern struct v3_os_hooks * os_hooks; \
void * ptr = 0; \
if ((os_hooks) && (os_hooks)->allocate_pages) { \
- ptr = (os_hooks)->allocate_pages(num_pages,align,-1); \
+ ptr = (os_hooks)->allocate_pages(num_pages,align,-1,0); \
} \
ptr; \
})
extern struct v3_os_hooks * os_hooks; \
void * ptr = 0; \
if ((os_hooks) && (os_hooks)->allocate_pages) { \
- ptr = (os_hooks)->allocate_pages(num_pages, PAGE_SIZE_4KB, node_id); \
+ ptr = (os_hooks)->allocate_pages(num_pages, PAGE_SIZE_4KB, node_id,0); \
+ } \
+ ptr; \
+ })
+
+// Fully general page allocation: caller chooses the alignment, the NUMA
+// node (-1 => any node), and the constraint bitmask (0 => none, or a
+// bitwise-or of V3_ALLOC_PAGES_CONSTRAINT_* flags).
+#define V3_AllocPagesExtended(num_pages, align, node_id, constraints) \
+ ({ \
+ extern struct v3_os_hooks * os_hooks; \
+ void * ptr = 0; \
+ if ((os_hooks) && (os_hooks)->allocate_pages) { \
+ ptr = (os_hooks)->allocate_pages(num_pages, align, node_id,constraints); \
+ } \
+ ptr; \
+ })
/* This will contain function pointers that provide OS services */
struct v3_os_hooks {
- // the vm pointer is the host os's "priv_data" from v3_create_vm
- // if vm is null, this is a general palacios printout
- // if vm is not null, and vcore is negative, this is a general print form the vm
- // if vm is not null, and vcore is non-negative, this is a print from a specific vcore
+ // the vm pointer is the host os's "priv_data" from v3_create_vm
+ // if vm is null, this is a general palacios printout
+ // if vm is not null, and vcore is negative, this is a general print form the vm
+ // if vm is not null, and vcore is non-negative, this is a print from a specific vcore
void (*print)(void *vm, int vcore, const char * format, ...)
__attribute__ ((format (printf, 3, 4)));
-
- void *(*allocate_pages)(int num_pages, unsigned int alignment, int node_id);
+
+ // For page allocation:
+ // - node_id -1 => any node, otherwise the numa node we want to alloc from
+ // - constraint = 0 => no constraints, otherwise a bitwise-or of the following flags
+#define V3_ALLOC_PAGES_CONSTRAINT_4GB 1
+ void *(*allocate_pages)(int num_pages, unsigned int alignment, int node_id, int constraint);
void (*free_pages)(void * page, int num_pages);
void *(*malloc)(unsigned int size);
/* 4KB-aligned */
static inline void * Vnet_AllocPages(int num_pages){
if ((host_hooks) && host_hooks->allocate_pages) {
- return host_hooks->allocate_pages(num_pages, PAGE_SIZE_4KB);
+ return host_hooks->allocate_pages(num_pages, PAGE_SIZE_4KB,-1,0); // any zone, no constraints
}
return NULL;
video_state->dev = dev;
- video_state->framebuf_pa = (addr_t)V3_AllocPages(FRAMEBUF_SIZE / 4096);
+ video_state->framebuf_pa = (addr_t)V3_AllocShadowSafePages(vm,FRAMEBUF_SIZE / 4096);
if (!video_state->framebuf_pa) {
PrintError(vm, VCORE_NONE, "Cannot allocate frame buffer\n");
PrintDebug(info->vm_info, info, "video: init_device\n");
PrintDebug(info->vm_info, info, "Num Pages=%d\n", SIZE_OF_REGION / 4096);
- video_state->video_memory_pa = (addr_t)V3_AllocPages(SIZE_OF_REGION / 4096);
+ video_state->video_memory_pa = (addr_t)V3_AllocShadowSafePages(vm, SIZE_OF_REGION / 4096);
if (!video_state->video_memory_pa) {
PrintError(info->vm_info, info, "Cannot allocate video memory\n");
V3_Free(video_state);
if (state->mode==MEM || state->mode==GCONS_MEM) {
state->mem_size=MAXX*MAXY*MAXBPP;
PrintDebug(vm, VCORE_NONE, "paragraph: allocating %llu bytes for local framebuffer\n", state->mem_size);
- state->mem_paddr = V3_AllocPages(ceil_pages(state->mem_size));
+ state->mem_paddr = V3_AllocShadowSafePages(vm,ceil_pages(state->mem_size));
if (!state->mem_paddr) {
PrintError(state->vm, VCORE_NONE, "paragraph: Cannot allocate memory for framebuffer\n");
paragraph_free_internal(state);
swap->active = 0;
swap->hdr = (union swap_header *)swap;
- swap->swap_base_addr = (addr_t)V3_AllocPages(swap->capacity / 4096);
+ swap->swap_base_addr = (addr_t)V3_AllocShadowSafePages(vm, swap->capacity / 4096);
if (!swap->swap_base_addr) {
PrintError(vm, VCORE_NONE, "Cannot allocate swap space\n");
swap->active = 0;
- swap->cache_base_addr = (addr_t)V3_AllocPages(swap->cache_size / 4096);
+ swap->cache_base_addr = (addr_t)V3_AllocShadowSafePages(vm,swap->cache_size / 4096);
if (!swap->cache_base_addr) {
PrintError(vm, VCORE_NONE, "Cannot allocate cache space\n");
#include <palacios/vmm.h>
#include <palacios/vmm_dev_mgr.h>
-
+#include <palacios/vm_guest.h>
struct blk_state {
uint64_t capacity;
blk->capacity = capacity;
- blk->blk_base_addr = (addr_t)V3_AllocPages(blk->capacity / 4096);
+ blk->blk_base_addr = (addr_t)V3_AllocShadowSafePages(vm,blk->capacity / 4096);
if (!blk->blk_base_addr) {
PrintError(vm, VCORE_NONE, "Cannot allocate block space\n");
for (i=0;i<MAP_NUM;i++) {
void *temp;
- temp = (void*)V3_AllocPages(MAP_SIZE/4096);
+ temp = (void*)V3_AllocShadowSafePages(vm,MAP_SIZE/4096);
if (!temp) {
PrintError(vm, VCORE_NONE, "vga: cannot allocate maps\n");
free_vga(vga);
return NULL;
}
- pg_data->hpa = (addr_t)V3_AllocPages(1);
+ pg_data->hpa = (addr_t)V3_AllocPagesExtended(1,PAGE_SIZE_4KB,-1,
+ V3_ALLOC_PAGES_CONSTRAINT_4GB);
+
if (!pg_data->hpa) {
PrintError(vm, VCORE_NONE, "Cannot allocate page for shadow page table\n");
}
+
static void shadow_free_page (struct guest_info * core, struct shadow_page_cache_data * page)
{
list_del(&page->link);
V3_FreePages((void *)page->page_pa, 1);
- page->page_pa=(addr_t)V3_AllocPages(1);
+
+ // presumably the same page could be used for 32 or 64 bit tables, so, we'll make it
+ // uniformly compatible
+ page->page_pa=(addr_t)V3_AllocPagesExtended(1,PAGE_SIZE_4KB,-1,V3_ALLOC_PAGES_CONSTRAINT_4GB);
if (!page->page_pa) {
PrintError(info->vm_info, info, "Freeing shadow page failed on allocation\n");
page_header = &core->page_header_buf[i];
INIT_LIST_HEAD(&page_header->link);
- if (!(page_header->page_pa = (addr_t)V3_AllocPages(1))) {
+ // presumably the same page could be used for 32 or 64 bit tables, so, we'll make it
+ // uniformly compatible
+ page_header->page_pa=(addr_t)V3_AllocPagesExtended(1,PAGE_SIZE_4KB,-1,
+ V3_ALLOC_PAGES_CONSTRAINT_4GB);
+ if (!(page_header->page_pa)) {
PrintError(info->vm_info, info, "Allocation failed in allocating shadow page\n");
goto error_1;
}
#include "vmm_shdw_pg_swapbypass_32pae.h"
#include "vmm_shdw_pg_swapbypass_64.h"
+// Map the guest core's current CPU mode to a page-allocation constraint:
+// 32-bit modes (and, defensively, any unrecognized mode) require pages
+// below 4 GB so 32-bit shadow page table entries can reference them;
+// 64-bit (long) modes need no constraint.
+static inline int get_constraints(struct guest_info *core)
+{
+ switch (v3_get_vm_cpu_mode(core)) {
+ case PROTECTED:
+ case PROTECTED_PAE:
+ return V3_ALLOC_PAGES_CONSTRAINT_4GB;
+ break;
+ case LONG:
+ case LONG_32_COMPAT:
+ case LONG_16_COMPAT:
+ return 0;
+ break;
+ default:
+ return V3_ALLOC_PAGES_CONSTRAINT_4GB;
+ break;
+ }
+ // unreachable; conservative default
+ return V3_ALLOC_PAGES_CONSTRAINT_4GB;
+}
+
static struct shadow_page_data * create_new_shadow_pt(struct guest_info * core) {
struct v3_shdw_pg_state * state = &(core->shdw_pg_state);
return NULL;
}
- page_tail->page_pa = (addr_t)V3_AllocPages(1);
+ page_tail->page_pa = (addr_t)V3_AllocPagesExtended(1,PAGE_SIZE_4KB,-1,get_constraints(core));
if (!page_tail->page_pa) {
PrintError(core->vm_info, core, "Cannot allocate page\n");
#include "vmm_shdw_pg_tlb_64.h"
+// Map the guest core's current CPU mode to a page-allocation constraint:
+// 32-bit modes (and, defensively, any unrecognized mode) require pages
+// below 4 GB so 32-bit shadow page table entries can reference them;
+// 64-bit (long) modes need no constraint.
+static inline int get_constraints(struct guest_info *core)
+{
+ switch (v3_get_vm_cpu_mode(core)) {
+ case PROTECTED:
+ case PROTECTED_PAE:
+ return V3_ALLOC_PAGES_CONSTRAINT_4GB;
+ break;
+ case LONG:
+ case LONG_32_COMPAT:
+ case LONG_16_COMPAT:
+ return 0;
+ break;
+ default:
+ return V3_ALLOC_PAGES_CONSTRAINT_4GB;
+ break;
+ }
+ // unreachable; conservative default
+ return V3_ALLOC_PAGES_CONSTRAINT_4GB;
+}
+
+
static struct shadow_page_data * create_new_shadow_pt(struct guest_info * core) {
struct v3_shdw_pg_state * state = &(core->shdw_pg_state);
struct vtlb_local_state * impl_state = (struct vtlb_local_state *)(state->local_impl_data);
return NULL;
}
- page_tail->page_pa = (addr_t)V3_AllocPages(1);
+ page_tail->page_pa = (addr_t)V3_AllocPagesExtended(1,PAGE_SIZE_4KB,-1,get_constraints(core));
if (!page_tail->page_pa) {
PrintError(core->vm_info, core, "Cannot allocate page\n");
case PROTECTED:
return activate_shadow_pt_32(core);
+ break;
case PROTECTED_PAE:
return activate_shadow_pt_32pae(core);
+ break;
case LONG:
case LONG_32_COMPAT:
case LONG_16_COMPAT:
return activate_shadow_pt_64(core);
+ break;
default:
PrintError(core->vm_info, core, "Invalid CPU mode: %s\n", v3_cpu_mode_to_str(v3_get_vm_cpu_mode(core)));
return -1;
+ break;
}
return 0;
static vmcb_t * Allocate_VMCB() {
vmcb_t * vmcb_page = NULL;
- addr_t vmcb_pa = (addr_t)V3_AllocPages(1);
+ addr_t vmcb_pa = (addr_t)V3_AllocPages(1); // need not be shadow safe, not exposed to guest
if ((void *)vmcb_pa == NULL) {
PrintError(VM_NONE, VCORE_NONE, "Error allocating VMCB\n");
V3_Print(VM_NONE, VCORE_NONE, "SVM Enabled\n");
// Setup the host state save area
- host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4);
+ host_vmcbs[cpu_id] = (addr_t)V3_AllocPages(4); // need not be shadow-safe, not exposed to guest
if (!host_vmcbs[cpu_id]) {
PrintError(VM_NONE, VCORE_NONE, "Failed to allocate VMCB\n");
ctrl_area->instrs.IOIO_PROT = 1;
- ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3);
+ ctrl_area->IOPM_BASE_PA = (uint_t)V3_AllocPages(3); // need not be shadow-safe, not exposed to guest
if (!ctrl_area->IOPM_BASE_PA) {
PrintError(core->vm_info, core, "Cannot allocate IO bitmap\n");
vm->io_map.update_map = update_map;
- temp = V3_AllocPages(3);
+ temp = V3_AllocPages(3); // need not be shadow-safe, not exposed to guest
if (!temp) {
PrintError(vm, VCORE_NONE, "Cannot allocate io bitmap\n");
msr_map->update_map = update_map;
- temp = V3_AllocPages(2);
+ temp = V3_AllocPages(2); // need not be shadow-safe, not exposed to guest
if (!temp) {
PrintError(vm, VCORE_NONE, "Cannot allocate msr bitmap\n");
int V3_deinit_devices() {
- v3_free_htable(master_dev_table, 0, 0);
+ if (master_dev_table) {
+ v3_free_htable(master_dev_table, 0, 0);
+ master_dev_table=0;
+ }
return 0;
}
free_frontends(vm, mgr);
- v3_free_htable(mgr->dev_table, 0, 0);
+ if (mgr->dev_table) {
+ v3_free_htable(mgr->dev_table, 0, 0);
+ mgr->dev_table=0;
+ }
return 0;
}
#endif
-static addr_t create_generic_pt_page() {
+static addr_t create_generic_pt_page(struct guest_info *core) {
void * page = 0;
void *temp;
- temp = V3_AllocPages(1);
+ temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1,
+ core->shdw_pg_mode==SHADOW_PAGING ? V3_ALLOC_PAGES_CONSTRAINT_4GB : 0);
if (!temp) {
PrintError(VM_NONE, VCORE_NONE,"Cannot allocate page\n");
return 0;
#include "vmm_direct_paging_64.h"
int v3_init_passthrough_pts(struct guest_info * info) {
- info->direct_map_pt = (addr_t)V3_PAddr((void *)create_generic_pt_page());
+ info->direct_map_pt = (addr_t)V3_PAddr((void *)create_generic_pt_page(info));
return 0;
}
// Fix up the PDE entry
if (pde[pde_index].present == 0) {
- pte = (pte32_t *)create_generic_pt_page();
+ pte = (pte32_t *)create_generic_pt_page(info);
pde[pde_index].present = 1;
pde[pde_index].writable = 1;
// Fix up the PDPE entry
if (pdpe[pdpe_index].present == 0) {
- pde = (pde32pae_t *)create_generic_pt_page();
+ pde = (pde32pae_t *)create_generic_pt_page(info);
pdpe[pdpe_index].present = 1;
// Set default PDPE Flags...
// Fix up the PDE entry
if (pde[pde_index].present == 0) {
- pte = (pte32pae_t *)create_generic_pt_page();
+ pte = (pte32pae_t *)create_generic_pt_page(info);
pde[pde_index].present = 1;
pde[pde_index].writable = 1;
//Fix up the PML entry
if (pml[pml_index].present == 0) {
- pdpe = (pdpe64_t *)create_generic_pt_page();
+ pdpe = (pdpe64_t *)create_generic_pt_page(core);
// Set default PML Flags...
pml[pml_index].present = 1;
// Fix up the PDPE entry
if (pdpe[pdpe_index].present == 0) {
- pde = (pde64_t *)create_generic_pt_page();
+ pde = (pde64_t *)create_generic_pt_page(core);
// Set default PDPE Flags...
pdpe[pdpe_index].present = 1;
// Fix up the PDE entry
if (pde[pde_index].present == 0) {
- pte = (pte64_t *)create_generic_pt_page();
+ pte = (pte64_t *)create_generic_pt_page(core);
pde[pde_index].present = 1;
pde[pde_index].writable = 1;
return -1;
}
+//
+// This code parallels that in vmm_shadow_paging.c:v3_init_shdw_impl()
+// and vmm_config.c:determine_paging_mode. The determination of which
+// paging mode will be used is determined much later than the allocation of
+// the guest memory regions, so we need to do this here to decide if they
+// need to be below 4 GB or not.
+static int will_use_shadow_paging(struct v3_vm_info *vm)
+{
+ v3_cfg_tree_t * pg_cfg = v3_cfg_subtree(vm->cfg_data->cfg, "paging");
+ char * pg_mode = v3_cfg_val(pg_cfg, "mode");
+
+ if (pg_mode == NULL) {
+ return 1; // did not ask, get shadow
+ } else {
+ if (strcasecmp(pg_mode, "nested") == 0) {
+ extern v3_cpu_arch_t v3_mach_type;
+ if ((v3_mach_type == V3_SVM_REV3_CPU) ||
+ (v3_mach_type == V3_VMX_EPT_CPU) ||
+ (v3_mach_type == V3_VMX_EPT_UG_CPU)) {
+ return 0; // asked for nested, get nested
+ } else {
+ return 1; // asked for nested, get shadow (hw can't do nested)
+ }
+ } else if (strcasecmp(pg_mode, "shadow") != 0) {
+ return 1; // asked for something other than nested/shadow, get shadow
+ } else {
+ return 1; // asked for shadow, get shadow
+ }
+ }
+}
int v3_init_mem_map(struct v3_vm_info * vm) {
node_id = gpa_to_node_from_cfg(vm, region->guest_start);
V3_Print(vm, VCORE_NONE, "Allocating block %d on node %d\n", i, node_id);
-
- if (node_id != -1) {
- region->host_addr = (addr_t)V3_AllocPagesNode(block_pages, node_id);
- } else {
- region->host_addr = (addr_t)V3_AllocPages(block_pages);
- }
+ region->host_addr = (addr_t)V3_AllocPagesExtended(block_pages,
+ PAGE_SIZE_4KB,
+ node_id,
+ will_use_shadow_paging(vm) ?
+ V3_ALLOC_PAGES_CONSTRAINT_4GB : 0 );
+
if ((void *)region->host_addr == NULL) {
PrintError(vm, VCORE_NONE, "Could not allocate guest memory\n");
return -1;
struct v3_mem_hooks * hooks = &(vm->mem_hooks);
- temp = V3_AllocPages(vm->num_cores);
+ temp = V3_AllocShadowSafePages(vm,vm->num_cores);
if (!temp) {
PrintError(vm, VCORE_NONE, "Cannot allocate space for mem hooks\n");
hooks->hook_hvas_1 = V3_VAddr(temp);
- temp = V3_AllocPages(vm->num_cores);
+ temp = V3_AllocShadowSafePages(vm,vm->num_cores);
if (!temp) {
PrintError(vm, VCORE_NONE,"Cannot allocate space for mem hooks\n");
int v3_init_symspy_vm(struct v3_vm_info * vm, struct v3_symspy_global_state * state) {
- state->global_page_pa = (addr_t)V3_AllocPages(1);
+ state->global_page_pa = (addr_t)V3_AllocShadowSafePages(vm, 1);
if (!state->global_page_pa) {
PrintError(vm, VCORE_NONE, "Cannot allocate page\n");
return -1;
int v3_init_symspy_core(struct guest_info * core, struct v3_symspy_local_state * state) {
- state->local_page_pa = (addr_t)V3_AllocPages(1);
+ state->local_page_pa = (addr_t)V3_AllocShadowSafePages(core->vm_info, 1);
if (!state->local_page_pa) {
PrintError(core->vm_info, core, "Cannot allocate page\n");
PrintDebug(VM_NONE, VCORE_NONE, "Allocating page\n");
- temp = V3_AllocPages(1);
+ temp = V3_AllocPages(1); // need not be shadow-safe, not exposed to guest
if (!temp) {
PrintError(VM_NONE, VCORE_NONE, "Cannot allocate VMCS\n");
return -1;
return -1;
}
- vmx_state->msr_area_paddr = (addr_t)V3_AllocPages(1);
+ vmx_state->msr_area_paddr = (addr_t)V3_AllocPages(1); // need not be shadow-safe, not exposed to guest
if (vmx_state->msr_area_paddr == (addr_t)NULL) {
PrintError(core->vm_info, core, "could not allocate msr load/store area\n");
struct cr0_32 * shadow_cr0;
addr_t vmcs_page_paddr; //HPA
- vmcs_page_paddr = (addr_t) V3_AllocPages(1);
+ vmcs_page_paddr = (addr_t) V3_AllocPages(1); // need not be shadow-safe, not exposed to guest
if (!vmcs_page_paddr) {
PrintError(core->vm_info, core, "Could not allocate space for a vmcs in VMX\n");
void * temp;
void * page = 0;
- temp = V3_AllocPages(1);
+ temp = V3_AllocPages(1); // need not be shadow-safe, not exposed to guest
if (!temp) {
PrintError(VM_NONE, VCORE_NONE, "Cannot allocate EPT page\n");
return 0;
vm->io_map.update_map = update_map;
- temp = V3_AllocPages(2);
+ temp = V3_AllocPages(2); // need not be shadow-safe, not exposed to guest
if (!temp) {
PrintError(vm, VCORE_NONE, "Cannot allocate io bitmap\n");
return -1;
msr_map->update_map = update_map;
- temp = V3_AllocPages(1);
+ temp = V3_AllocPages(1); // need not be shadow-safe, not exposed to guest
if (!temp) {
PrintError(vm, VCORE_NONE, "Cannot allocat MSR bitmap\n");
return -1;