};
-extern struct proc_dir_entry * palacios_proc_dir;
-
// Tear down the per-zone /proc/v3vee/v3-mem<N> entry when a buddy memzone
// is destroyed.  Must rebuild the exact file name that buddy_init created.
// NOTE(review): this is a diff hunk — the declaration of proc_file_name and
// whatever uses 'flags' (presumably a spinlock save/restore) are elided
// context lines; 'flags' looks unused only because of the elision — confirm.
void buddy_deinit(struct buddy_memzone * zone) {
unsigned long flags;
memset(proc_file_name, 0, 128);
// NOTE(review): uses %d here while the creation side was changed to %u —
// the names diverge if node_id is ever negative; confirm they stay in sync.
snprintf(proc_file_name, 128, "v3-mem%d", zone->node_id);
// Patch: go through the accessor now that palacios_proc_dir is static
// to its defining file instead of a shared extern.
- remove_proc_entry(proc_file_name, palacios_proc_dir);
+ remove_proc_entry(proc_file_name, palacios_get_procdir());
}
// Fragment (interior of buddy_init): publish the zone's statistics file as
// /proc/v3vee/v3-mem<node>.  Failure is logged but non-fatal — the zone
// still works without its proc file.
char proc_file_name[128];
memset(proc_file_name, 0, 128);
// NOTE(review): format switched to %u here but buddy_deinit still uses %d;
// equivalent for non-negative node ids, but the pair should match — confirm.
- snprintf(proc_file_name, 128, "v3-mem%d", zone->node_id);
+ snprintf(proc_file_name, 128, "v3-mem%u", zone->node_id);
// Patch: use the accessor rather than touching the (now static) global.
- zone_entry = create_proc_entry(proc_file_name, 0444, palacios_proc_dir);
+ zone_entry = create_proc_entry(proc_file_name, 0444, palacios_get_procdir());
if (zone_entry) {
// Hook up read handlers and stash the zone for the fops to find.
zone_entry->proc_fops = &zone_proc_ops;
zone_entry->data = zone;
+ INFO("Successfully created /proc/v3vee/v3-mem%d\n", zone->node_id);
} else {
- ERROR("Error creating memory zone proc file\n");
+ ERROR("Cannot create /proc/v3vee/v3-mem%d\n", zone->node_id);
}
}
// Module-wide state for the V3 control device and guest bookkeeping.
// v3_major_num is filled in from MAJOR(dev) after the chrdev region is
// registered (see init below).
static int v3_major_num = 0;
// Designated-initializer sweep sets every guest slot to NULL.
static struct v3_guest * guest_map[MAX_VMS] = {[0 ... MAX_VMS - 1] = 0};
// Patch: the /proc/v3vee directory handle is now private to this file;
// other translation units must use palacios_get_procdir() instead of the
// old extern declaration.
-struct proc_dir_entry * palacios_proc_dir = NULL;
+static struct proc_dir_entry * palacios_proc_dir = NULL;
struct class * v3_class = NULL;
static struct cdev ctrl_dev;
/*
 * Accessor for the module-private /proc/v3vee directory entry.
 * Returns whatever proc_mkdir() produced at init time (NULL if the
 * directory has not been created yet or creation failed).
 */
struct proc_dir_entry *palacios_get_procdir(void)
{
    struct proc_dir_entry *dir = palacios_proc_dir;

    INFO("Returning procdir=%p\n", dir);
    return dir;
}
// Fragment (interior of the module init function; its signature and the
// declarations of 'ret' and 'dev', plus the chrdev/cdev registration calls,
// are elided diff context).  The patch reworks init ordering so that the
// /proc/v3vee directory and the memory manager come up first, and renumbers
// the goto-cleanup ladder so each failureN label unwinds exactly the
// resources acquired before it.
LOCKCHECK_INIT();
MEMCHECK_INIT();
// Patch: create /proc/v3vee before the memory manager, since
// palacios_init_mm() now populates per-zone files under it; also check
// palacios_init_mm()'s return value instead of ignoring it.
- palacios_init_mm();
+ palacios_proc_dir = proc_mkdir("v3vee", NULL);
+ if (!palacios_proc_dir) {
+ ERROR("Could not create proc entry\n");
+ ret = -1;
+ goto failure1;
+ }
+
+ // this will populate the v3vee tree...
+ if (palacios_init_mm()) {
+ goto failure2;
+ }
if (allow_devmem) {
palacios_allow_devmem();
// Initialize Palacios
palacios_vmm_init(options);
-
// initialize extensions
init_lnx_extensions();
v3_class = class_create(THIS_MODULE, "vms");
if (IS_ERR(v3_class)) {
ERROR("Failed to register V3 VM device class\n");
// Patch: unwind via the ladder instead of returning and leaking the
// proc dir / memory manager / devmem state set up above.
- return PTR_ERR(v3_class);
+ ret = PTR_ERR(v3_class);
+ goto failure3;
}
// NOTE(review): typo "intializing" in this runtime string — fix in a code
// change, not here.
INFO("intializing V3 Control device\n");
if (ret < 0) {
ERROR("Error registering device region for V3 devices\n");
- goto failure2;
+ goto failure4;
}
v3_major_num = MAJOR(dev);
if (ret != 0) {
ERROR("Error adding v3 control device\n");
- goto failure1;
+ goto failure5;
}
// Patch: proc_mkdir moved to the top of init; this is now just a scope
// block for creating the v3-guests listing file.
- palacios_proc_dir = proc_mkdir("v3vee", NULL);
- if (palacios_proc_dir) {
+ {
struct proc_dir_entry *entry;
- entry = create_proc_read_entry("v3-guests", 0444, palacios_proc_dir,
- read_guests, NULL);
+ INFO("palacios_proc_dir=%p before v3-guests\n",palacios_proc_dir);
+ entry = create_proc_read_entry("v3-guests", 0444, palacios_proc_dir, read_guests, NULL);
if (entry) {
INFO("/proc/v3vee/v3-guests successfully created\n");
} else {
ERROR("Could not create proc entry\n");
- goto failure1;
+ goto failure6;
}
-
- } else {
- ERROR("Could not create proc entry\n");
- goto failure1;
}
return 0;
// Unwind ladder: each label releases the resource acquired just before
// the corresponding failure point, in reverse acquisition order.
- failure1:
+ failure6:
+ // NOTE(review): failure6 is only reached when creating "v3-guests"
+ // FAILED, yet it removes that very entry — this remove targets a file
+ // that was never created; confirm whether this label should skip the
+ // remove (or whether a later acquisition is meant to jump here).
+ remove_proc_entry("v3-guests", palacios_proc_dir);
+ failure5:
unregister_chrdev_region(MKDEV(v3_major_num, 0), MAX_VMS + 1);
- failure2:
+ failure4:
class_destroy(v3_class);
+ failure3:
+ if (allow_devmem) {
+ palacios_restore_devmem();
+ }
+ palacios_deinit_mm();
+ failure2:
+ remove_proc_entry("v3vee", NULL);
+ failure1:
+ // Deinit in reverse of the LOCKCHECK_INIT()/MEMCHECK_INIT() order above.
+ MEMCHECK_DEINIT();
+ LOCKCHECK_DEINIT();
return ret;
}
// alignment is in bytes
// Allocate num_pages physically contiguous pages from the buddy memzones.
// node_id == -1 means "no preference": try the calling core's NUMA node
// first, then (patch) fall back to scanning every other node.
// NOTE(review): diff hunk — lines are elided between the node_id range
// check and the buddy_alloc call (the out-of-range error path is missing
// from this view), and 'alignment' is not used in the visible code.
uintptr_t alloc_palacios_pgs(u64 num_pages, u32 alignment, int node_id) {
uintptr_t addr = 0;
// Patch: remember whether the caller allowed any-node allocation BEFORE
// node_id is rewritten to a concrete node below.
+ int any = node_id==-1; // can allocate on any
if (node_id == -1) {
int cpu_id = get_cpu();
put_cpu();
- node_id = numa_cpu_to_node(cpu_id);
+ node_id = numa_cpu_to_node(cpu_id); // try first preferentially for the calling pcore
+
} else if (numa_num_nodes() == 1) {
node_id = 0;
} else if (node_id >= numa_num_nodes()) {
// buddy_alloc takes a power-of-two order in bits of the request size.
addr = buddy_alloc(memzones[node_id], get_order(num_pages * PAGE_SIZE) + PAGE_SHIFT);
// Patch: if the preferred node couldn't satisfy the request and the
// caller didn't pin a node, sweep the remaining zones for a fit.
+ if (!addr && any) {
+ int i;
+ // do a scan to see if we can satisfy request on any node
+ for (i=0; i< numa_num_nodes(); i++) {
+ if (i!=node_id) {
+ addr = buddy_alloc(memzones[i], get_order(num_pages * PAGE_SIZE) + PAGE_SHIFT);
+ if (addr) {
+ break;
+ }
+ }
+ }
+ }
+
+
//DEBUG("Returning from alloc addr=%p, vaddr=%p\n", (void *)addr, __va(addr));
return addr;
}
// Fragment (interior of the memory-add ioctl path; both ends of the
// enclosing function are elided diff context).  Validates and rounds a
// user memory request, then allocates pages for REQUESTED/REQUESTED32.
struct v3_mem_region *keep;
+ INFO("Palacios Memory Add Request: type=%d, node=%d, base_addr=0x%llx, num_pages=%llu\n",r->type,r->node,r->base_addr,r->num_pages);
+
// fixup request regardless of its type
// NOTE(review): hard-coded 4096 here vs PAGE_SIZE elsewhere — confirm
// this path is x86-only or switch to PAGE_SIZE for consistency.
if (r->num_pages*4096 < V3_CONFIG_MEM_BLOCK_SIZE) {
WARNING("Allocating a memory pool smaller than the Palacios block size - may not be useful\n");
}
// Patch: compare/report in PAGES, not bytes — pow2(get_order(bytes)) is a
// page count, so the old byte-valued comparison was always unequal.
// NOTE(review): get_order() rounds UP to the next power of two, but the
// message says "rounded down" — confirm which behavior is intended.
- if (pow2(get_order(r->num_pages*PAGE_SIZE)) != r->num_pages*PAGE_SIZE) {
- WARNING("Allocating a memory pool that is not a power of two (is %llu) - it will be rounded down!\n", r->num_pages*PAGE_SIZE);
+ if (pow2(get_order(r->num_pages*PAGE_SIZE)) != r->num_pages) {
+ WARNING("Allocating a memory pool that is not a power of two (is %llu) - it will be rounded down!\n", r->num_pages);
r->num_pages=pow2(get_order(r->num_pages*PAGE_SIZE));
- WARNING("Rounded power Allocating a memory pool that is not a power of two (rounded to %llu)\n", r->num_pages*PAGE_SIZE);
+ WARNING("Rounded request is for %llu pages\n", r->num_pages);
}
if (r->type==REQUESTED || r->type==REQUESTED32) {
- struct page * pgs = alloc_pages_node(r->node,
- r->type==REQUESTED ? GFP_KERNEL :
- r->type==REQUESTED32 ? GFP_DMA32 : GFP_KERNEL,
- get_order(r->num_pages));
+ struct page *pgs;
+
+ INFO("Attempting to allocate %llu pages of %s memory\n", r->num_pages,
+ r->type==REQUESTED ? "64 bit (unrestricted)" :
+ r->type==REQUESTED32 ? "32 bit (restricted)" : "unknown (assuming 64 bit unrestricted)");
+
+ // Patch fixes a real bug: get_order() takes a size in BYTES, so the
+ // old get_order(r->num_pages) under-allocated by a factor of PAGE_SIZE.
+ pgs = alloc_pages_node(r->node,
+ r->type==REQUESTED ? GFP_KERNEL :
+ r->type==REQUESTED32 ? GFP_DMA32 : GFP_KERNEL,
+ get_order(r->num_pages*PAGE_SIZE));
if (!pgs) {
ERROR("Unable to satisfy allocation request\n");
palacios_free(keep);
// Fragment (interior of palacios_init_mm; both ends elided).  Allocates the
// per-node memzone pointer table, then seeds each node with one MAX_ORDER
// block, preferring memory below 4 GB for 32-bit shadow-paging guests.
int num_nodes = numa_num_nodes();
int node_id = 0;
// NOTE(review): %llu with PAGE_SIZE*pow2(MAX_ORDER) — PAGE_SIZE is
// unsigned long on Linux; confirm pow2() yields a 64-bit type or the
// format specifier is wrong on 32-bit builds.
+ INFO("memory manager init: MAX_ORDER=%d (%llu bytes)\n",MAX_ORDER, PAGE_SIZE*pow2(MAX_ORDER));
+
memzones = palacios_alloc_extended(sizeof(struct buddy_memzone *) * num_nodes, GFP_KERNEL);
if (!memzones) {
// See: alloc_pages_node()
{
- struct page * pgs = alloc_pages_node(node_id, GFP_KERNEL, MAX_ORDER - 1);
+ struct page * pgs;
+
+ // attempt to first allocate below 4 GB for compatibility with
+ // 32 bit shadow paging
+ pgs = alloc_pages_node(node_id, GFP_DMA32, MAX_ORDER - 1);
+ if (!pgs) {
- ERROR("Could not allocate initial memory block for node %d\n", node_id);
- BUG_ON(!pgs);
- palacios_deinit_mm();
- return -1;
+ INFO("Could not allocate initial memory block for node %d beloew 4GB\n", node_id);
+
+ pgs = alloc_pages_node(node_id, GFP_KERNEL, MAX_ORDER - 1);
+
+ if (!pgs) {
+ // NOTE(review): this message is wrong twice — "beloew" is a typo
+ // and this branch is the UNRESTRICTED retry, not the below-4GB
+ // attempt; also the nested 'if (!pgs)' just below is redundant
+ // (pgs is already known NULL here).  Needs a code fix.
+ INFO("Could not allocate initial memory block for node %d beloew 4GB\n", node_id);
+ if (!pgs) {
+ ERROR("Could not allocate initial memory block for node %d without restrictions\n", node_id);
+ BUG_ON(!pgs);
+ palacios_deinit_mm();
+ return -1;
+ }
+ }
+ }
}
// Record the seed block's physical address for this node's memzone.
seed_addrs[node_id] = page_to_pfn(pgs) << PAGE_SHIFT;