//static struct list_head pools;
#include "palacios.h"
+#include "mm.h"
+#include "buddy.h"
+#include "numa.h"
+#include "palacios/vmm.h"
-struct mempool {
- uintptr_t base_addr;
- u64 num_pages;
- u8 * bitmap;
-};
+static struct buddy_memzone ** memzones = NULL;
+static uintptr_t * seed_addrs = NULL;
-static struct mempool pool;
+// alignment is in bytes
+uintptr_t alloc_palacios_pgs(u64 num_pages, u32 alignment, int node_id, int constraints) {
+ uintptr_t addr = 0;
+ int any = node_id==-1; // can allocate on any
+ int buddy_constraints=0;
+
+ if (constraints && constraints!=V3_ALLOC_PAGES_CONSTRAINT_4GB) {
+ ERROR("Unknown constraint mask 0x%x\n",constraints);
+ return 0;
+ }
+
+ if (constraints & V3_ALLOC_PAGES_CONSTRAINT_4GB) {
+ buddy_constraints |= LWK_BUDDY_CONSTRAINT_4GB;
+ }
+
+
+ if (node_id == -1) {
+ int cpu_id = get_cpu();
+ put_cpu();
+
+ node_id = numa_cpu_to_node(cpu_id); // try first preferentially for the calling pcore
+
+ } else if (numa_num_nodes() == 1) {
+ node_id = 0;
+ } else if (node_id >= numa_num_nodes()) {
+ ERROR("Requesting memory from an invalid NUMA node. (Node: %d) (%d nodes on system)\n",
+ node_id, numa_num_nodes());
+ return 0;
+ }
-static inline int get_page_bit(int index) {
- int major = index / 8;
- int minor = index % 8;
+ addr = buddy_alloc(memzones[node_id], get_order(num_pages * PAGE_SIZE) + PAGE_SHIFT, buddy_constraints);
- return (pool.bitmap[major] & (0x1 << minor));
+ if (!addr && any) {
+ int i;
+ // do a scan to see if we can satisfy request on any node
+ for (i=0; i< numa_num_nodes(); i++) {
+ if (i!=node_id) {
+ addr = buddy_alloc(memzones[i], get_order(num_pages * PAGE_SIZE) + PAGE_SHIFT, buddy_constraints);
+ if (addr) {
+ break;
+ }
+ }
+ }
+ }
+
+
+ //DEBUG("Returning from alloc addr=%p, vaddr=%p\n", (void *)addr, __va(addr));
+ return addr;
}
-static inline void set_page_bit(int index) {
- int major = index / 8;
- int minor = index % 8;
- pool.bitmap[major] |= (0x1 << minor);
+
+void free_palacios_pgs(uintptr_t pg_addr, u64 num_pages) {
+ int node_id = numa_addr_to_node(pg_addr);
+
+ //DEBUG("Freeing Memory page %p\n", (void *)pg_addr);
+ if (buddy_free(memzones[node_id], pg_addr, get_order(num_pages * PAGE_SIZE) + PAGE_SHIFT)) {
+ // it is possible that the allocation was actually on a different zone,
+ // so, just to be sure, we'll try to dellocate on each
+ for (node_id=0;node_id<numa_num_nodes();node_id++) {
+ if (!buddy_free(memzones[node_id], pg_addr, get_order(num_pages * PAGE_SIZE) + PAGE_SHIFT)) {
+ // successfully freed on different zone, which is also OK
+ break;
+ }
+ }
+ if (node_id==numa_num_nodes()) {
+ ERROR("Unable to free pages -addr=%p, numpages=%llu on any node\n",(void*)pg_addr,num_pages);
+ }
+ }
+
+ return;
}
/* Compute 2^i as an unsigned 64-bit value by repeated doubling. */
unsigned long long pow2(int i)
{
    unsigned long long result = 1;

    while (i != 0) {
	result <<= 1;
	i--;
    }

    return result;
}
+static unsigned long long get_palacios_mem_block_size(void)
+{
+ char *s = v3_lookup_option("mem_block_size");
+ if (!s) {
+ return V3_CONFIG_MEM_BLOCK_SIZE;
+ } else {
+ unsigned long long temp;
-static uintptr_t alloc_contig_pgs(u64 num_pages, u32 alignment) {
- int step = 1;
- int i = 0;
- int start = 0;
+ if (strict_strtoull(s,0,&temp)) {
+ return V3_CONFIG_MEM_BLOCK_SIZE; // odd...
+ } else {
+ return temp;
+ }
+ }
+}
- DEBUG("Allocating %llu pages (align=%lu)\n",
- num_pages, (unsigned long)alignment);
+int add_palacios_memory(struct v3_mem_region *r) {
+ int pool_order = 0;
+ int node_id = 0;
- if (pool.bitmap == NULL) {
- ERROR("ERROR: Attempting to allocate from non initialized memory\n");
- return 0;
+
+ struct v3_mem_region *keep;
+
+ INFO("Palacios Memory Add Request: type=%d, node=%d, base_addr=0x%llx, num_pages=%llu\n",r->type,r->node,r->base_addr,r->num_pages);
+
+ // fixup request regardless of its type
+ if (r->num_pages*4096 < get_palacios_mem_block_size()) {
+ WARNING("Allocating a memory pool smaller than the Palacios block size - may not be useful\n");
}
- if (alignment > 0) {
- step = alignment / 4096;
+ if (pow2(get_order(r->num_pages*PAGE_SIZE)) != r->num_pages) {
+ WARNING("Allocating a memory pool that is not a power of two (is %llu) - it will be rounded down!\n", r->num_pages);
+ r->num_pages=pow2(get_order(r->num_pages*PAGE_SIZE));
+ WARNING("Rounded request is for %llu pages\n", r->num_pages);
}
- // Start the search at the correct alignment
- if (pool.base_addr % alignment) {
- start = ((alignment - (pool.base_addr % alignment)) >> 12);
+
+ if (!(keep=palacios_alloc(sizeof(struct v3_mem_region)))) {
+ ERROR("Error allocating space for tracking region\n");
+ return -1;
}
- ERROR("\t Start idx %d (base_addr=%p)\n", start, (void *)(u64)pool.base_addr);
- for (i = start; i < (pool.num_pages - num_pages); i += step) {
- if (get_page_bit(i) == 0) {
- int j = 0;
- int collision = 0;
+ if (r->type==REQUESTED || r->type==REQUESTED32) {
+ struct page *pgs;
+
+ INFO("Attempting to allocate %llu pages of %s memory\n", r->num_pages,
+ r->type==REQUESTED ? "64 bit (unrestricted)" :
+ r->type==REQUESTED32 ? "32 bit (restricted)" : "unknown (assuming 64 bit unrestricted)");
+
+ pgs = alloc_pages_node(r->node,
+ r->type==REQUESTED ? GFP_KERNEL :
+ r->type==REQUESTED32 ? GFP_DMA32 : GFP_KERNEL,
+ get_order(r->num_pages*PAGE_SIZE));
+ if (!pgs) {
+ ERROR("Unable to satisfy allocation request\n");
+ palacios_free(keep);
+ return -1;
+ }
+ r->base_addr = page_to_pfn(pgs) << PAGE_SHIFT;
+ }
+
- for (j = i; (j - i) < num_pages; j++) {
- if (get_page_bit(j) == 1) {
- collision = 1;
- break;
- }
- }
+ *keep = *r;
- if (collision == 1) {
- break;
- }
+ node_id = numa_addr_to_node(r->base_addr);
- for (j = i; (j - i) < num_pages; j++) {
- set_page_bit(j);
- }
+ if (node_id == -1) {
+ ERROR("Error locating node for addr %p\n", (void *)(r->base_addr));
+ return -1;
+ }
- return pool.base_addr + (i * 4096);
- }
+ if ((node_id != r->node) && (r->node!=-1)) {
+ INFO("Memory add request is for node %d, but memory is in node %d\n",r->node,node_id);
}
- /* ERROR("PALACIOS BAD: LARGE PAGE ALLOCATION FAILED\n"); */
+ pool_order = get_order(r->num_pages * PAGE_SIZE) + PAGE_SHIFT;
+
+ if (buddy_add_pool(memzones[node_id], r->base_addr, pool_order, keep)) {
+ ERROR("ALERT ALERT ALERT Unable to add pool to buddy allocator...\n");
+ if (r->type==REQUESTED || r->type==REQUESTED32) {
+ free_pages((uintptr_t)__va(r->base_addr), get_order(r->num_pages*PAGE_SIZE));
+ }
+ palacios_free(keep);
+ return -1;
+ }
return 0;
}
-// alignment is in bytes
-uintptr_t alloc_palacios_pgs(u64 num_pages, u32 alignment) {
- uintptr_t addr = 0;
- if ((num_pages < 12)) {
- struct page * pgs = NULL;
- int order = get_order(num_pages * PAGE_SIZE);
-
- pgs = alloc_pages(GFP_DMA32, order);
-
- WARN(!pgs, "Could not allocate pages\n");
-
- /* if (!pgs) { ERROR("PALACIOS BAD: SMALL PAGE ALLOCATION FAILED\n"); } */
-
- /* DEBUG("%llu pages (order=%d) aquired from alloc_pages\n",
- num_pages, order); */
-
- addr = page_to_pfn(pgs) << PAGE_SHIFT;
- } else {
- //DEBUG("Allocating %llu pages from bitmap allocator\n", num_pages);
- //addr = pool.base_addr;
- addr = alloc_contig_pgs(num_pages, alignment);
+int remove_palacios_memory(struct v3_mem_region *req) {
+ int node_id = numa_addr_to_node(req->base_addr);
+ struct v3_mem_region *r;
+
+ if (buddy_remove_pool(memzones[node_id], req->base_addr, 0, (void**)(&r))) { //unforced remove
+ ERROR("Cannot remove memory at base address 0x%p\n", (void*)(req->base_addr));
+ return -1;
}
+ if (r) {
+ if (r->type==REQUESTED || r->type==REQUESTED32) {
+ free_pages((uintptr_t)__va(r->base_addr), get_order(r->num_pages*PAGE_SIZE));
+ } else {
+ // user space responsible for onlining
+ }
+ palacios_free(r);
+ }
- //DEBUG("Returning from alloc addr=%p, vaddr=%p\n", (void *)addr, __va(addr));
- return addr;
+ return 0;
}
+static int handle_free(void *meta)
+{
+ struct v3_mem_region *r = (struct v3_mem_region *)meta;
-void free_palacios_pgs(uintptr_t pg_addr, int num_pages) {
- //DEBUG("Freeing Memory page %p\n", (void *)pg_addr);
+ if (r) {
+ if (r->type==REQUESTED || r->type==REQUESTED32) {
+ //INFO("Freeing %llu pages at %p\n",r->num_pages,(void*)(r->base_addr));
+ free_pages((uintptr_t)__va(r->base_addr), get_order(r->num_pages*PAGE_SIZE));
+ } else {
+ // user space responsible for onlining
+ }
+ palacios_free(r);
+ }
+
+ return 0;
+}
- if ((pg_addr >= pool.base_addr) &&
- (pg_addr < pool.base_addr + (4096 * pool.num_pages))) {
- int pg_idx = (pg_addr - pool.base_addr) / 4096;
- int i = 0;
+
- if ((pg_idx + num_pages) > pool.num_pages) {
- ERROR("Freeing memory bounds exceeded\n");
- return;
- }
- for (i = 0; i < num_pages; i++) {
- WARN(get_page_bit(pg_idx + i) == 0, "Trying to free unallocated page\n");
+int palacios_deinit_mm( void ) {
- clear_page_bit(pg_idx + i);
+ int i = 0;
+
+ if (memzones) {
+ for (i = 0; i < numa_num_nodes(); i++) {
+
+ if (memzones[i]) {
+ INFO("Deiniting memory zone %d\n",i);
+ buddy_deinit(memzones[i],handle_free);
+ }
+
+ // note that the memory is not onlined here - offlining and onlining
+ // is the resposibility of the caller
+
+ if (seed_addrs[i]) {
+ // free the seed regions
+ INFO("Freeing seed addrs %d\n",i);
+ free_pages((uintptr_t)__va(seed_addrs[i]), MAX_ORDER - 1);
+ }
}
- } else {
- __free_pages(pfn_to_page(pg_addr >> PAGE_SHIFT), get_order(num_pages * PAGE_SIZE));
+
+ palacios_free(memzones);
+ palacios_free(seed_addrs);
}
+
+ return 0;
}
+int palacios_init_mm( void ) {
+ int num_nodes = numa_num_nodes();
+ int node_id = 0;
-int add_palacios_memory(uintptr_t base_addr, u64 num_pages) {
- /* JRL: OK.... so this is horrible, terrible and if anyone else did it I would yell at them.
- * But... the fact that you can do this in C is so ridiculous that I can't help myself.
- * Note that we're repurposing "true" to be 1 here
- */
+ INFO("memory manager init: MAX_ORDER=%d (%llu bytes)\n",MAX_ORDER, PAGE_SIZE*pow2(MAX_ORDER));
- int bitmap_size = (num_pages / 8) + ((num_pages % 8) > 0);
+ memzones = palacios_alloc_extended(sizeof(struct buddy_memzone *) * num_nodes, GFP_KERNEL,-1);
- if (pool.num_pages != 0) {
- ERROR("ERROR: Memory has already been added\n");
+ if (!memzones) {
+ ERROR("Cannot allocate space for memory zones\n");
+ palacios_deinit_mm();
return -1;
}
- DEBUG("Managing %dMB of memory starting at %llu (%lluMB)\n",
- (unsigned int)(num_pages * 4096) / (1024 * 1024),
- (unsigned long long)base_addr,
- (unsigned long long)(base_addr / (1024 * 1024)));
+ memset(memzones, 0, sizeof(struct buddy_memzone *) * num_nodes);
+ seed_addrs = palacios_alloc_extended(sizeof(uintptr_t) * num_nodes, GFP_KERNEL,-1);
- pool.bitmap = kmalloc(bitmap_size, GFP_KERNEL);
-
- if (IS_ERR(pool.bitmap)) {
- WARNING("Error allocating Palacios MM bitmap\n");
+ if (!seed_addrs) {
+ ERROR("Cannot allocate space for seed addrs\n");
+ palacios_deinit_mm();
return -1;
}
-
- memset(pool.bitmap, 0, bitmap_size);
- pool.base_addr = base_addr;
- pool.num_pages = num_pages;
+ memset(seed_addrs, 0, sizeof(uintptr_t) * num_nodes);
+
+ for (node_id = 0; node_id < num_nodes; node_id++) {
+ struct buddy_memzone * zone = NULL;
+
+ // Seed the allocator with a small set of pages to allow initialization to complete.
+ // For now we will just grab some random pages, but in the future we will need to grab NUMA specific regions
+ // See: alloc_pages_node()
+
+ {
+ struct page * pgs;
+ int actual_node;
+
+ // attempt to first allocate below 4 GB for compatibility with
+ // 32 bit shadow paging
+ pgs = alloc_pages_node(node_id, GFP_DMA32, MAX_ORDER - 1);
+
+
+ if (!pgs) {
+ INFO("Could not allocate initial memory block for node %d below 4GB\n", node_id);
+
+ pgs = alloc_pages_node(node_id, GFP_KERNEL, MAX_ORDER - 1);
+
+ if (!pgs) {
+ INFO("Could not allocate initial memory block for node %d beloew 4GB\n", node_id);
+ if (!pgs) {
+ ERROR("Could not allocate initial memory block for node %d without restrictions\n", node_id);
+ BUG_ON(!pgs);
+ palacios_deinit_mm();
+ return -1;
+ }
+ } else {
+ actual_node=numa_addr_to_node((uintptr_t)(page_to_pfn(pgs) << PAGE_SHIFT));
+ if (actual_node != node_id) {
+ WARNING("Initial 64 bit allocation attempt for node %d resulted in allocation on node %d\n",node_id,actual_node);
+ }
+ }
+
+ } else {
+ actual_node=numa_addr_to_node((uintptr_t)(page_to_pfn(pgs) << PAGE_SHIFT));
+ if (actual_node != node_id) {
+ WARNING("Initial 32bit-limited allocation attempt for node %d resulted in allocation on node %d\n",node_id,actual_node);
+ }
+ }
- return 0;
-}
+ seed_addrs[node_id] = page_to_pfn(pgs) << PAGE_SHIFT;
+ }
+ // Initialization is done using the compile-time memory block size since
+ // at this point, we do not yet know what the run-time size is
+ zone = buddy_init(get_order(V3_CONFIG_MEM_BLOCK_SIZE) + PAGE_SHIFT, PAGE_SHIFT, node_id);
+ if (zone == NULL) {
+ ERROR("Could not initialization memory management for node %d\n", node_id);
+ palacios_deinit_mm();
+ return -1;
+ }
-int palacios_init_mm( void ) {
- // INIT_LIST_HEAD(&(pools));
- pool.base_addr = 0;
- pool.num_pages = 0;
- pool.bitmap = NULL;
+ printk("Zone initialized, Adding seed region (order=%d)\n",
+ (MAX_ORDER - 1) + PAGE_SHIFT);
- return 0;
-}
+ if (buddy_add_pool(zone, seed_addrs[node_id], (MAX_ORDER - 1) + PAGE_SHIFT,0)) {
+ ERROR("Could not add pool to buddy allocator\n");
+ palacios_deinit_mm();
+ return -1;
+ }
+
+ memzones[node_id] = zone;
+ }
-int palacios_deinit_mm( void ) {
- kfree(pool.bitmap);
-
return 0;
+
}
+