1 /* Palacios memory manager
5 #include <asm/page_64_types.h>
6 #include <linux/kernel.h>
7 #include <linux/list.h>
8 #include <linux/slab.h>
10 //static struct list_head pools;
// Per-NUMA-node buddy allocator zones, indexed by node id.
// Allocated and populated in palacios_init_mm(), freed in palacios_deinit_mm().
static struct buddy_memzone ** memzones = NULL;
// Physical base address of each node's initial "seed" page block
// (grabbed in palacios_init_mm(), returned to the kernel in palacios_deinit_mm()).
static uintptr_t * seed_addrs = NULL;
// alignment is in bytes
// Allocate num_pages of physically contiguous memory from the per-node buddy
// allocator and return the physical base address.
// NOTE(review): this listing is elided — the declaration of 'addr', the
// "node_id == -1" branch header, the bodies of the validation branches, and
// the return statement(s) are not visible here; confirm against full source.
uintptr_t alloc_palacios_pgs(u64 num_pages, u32 alignment, int node_id) {
    int cpu_id = get_cpu();
    // presumably the node_id == -1 ("don't care") path: bind the request to
    // the NUMA node of the CPU we are currently running on — TODO confirm
    node_id = numa_cpu_to_node(cpu_id);
    } else if (numa_num_nodes() == 1) {
        // single-node system: body elided (presumably forces node_id = 0)
    } else if (node_id >= numa_num_nodes()) {
        // caller passed a node index beyond what the system has
        ERROR("Requesting memory from an invalid NUMA node. (Node: %d) (%d nodes on system)\n",
              node_id, numa_num_nodes());

    // buddy_alloc takes an order in bits of the byte size: page-count order
    // (get_order of the byte size) plus PAGE_SHIFT
    addr = buddy_alloc(memzones[node_id], get_order(num_pages * PAGE_SIZE) + PAGE_SHIFT);

    //DEBUG("Returning from alloc addr=%p, vaddr=%p\n", (void *)addr, __va(addr));
// Return num_pages previously obtained from alloc_palacios_pgs() at physical
// address pg_addr to the buddy allocator of the node that owns that address.
// The order passed to buddy_free mirrors the one used by buddy_alloc in
// alloc_palacios_pgs(), so the same (pg_addr, num_pages) pair must be used.
void free_palacios_pgs(uintptr_t pg_addr, u64 num_pages) {
    int node_id = numa_addr_to_node(pg_addr);

    //DEBUG("Freeing Memory page %p\n", (void *)pg_addr);
    buddy_free(memzones[node_id], pg_addr, get_order(num_pages * PAGE_SIZE) + PAGE_SHIFT);
    // NOTE(review): fragment of an elided helper (used below as "pow2") whose
    // header is not visible in this listing — computes x *= 2 i times,
    // i.e. presumably returns 2^i for the initial i. Confirm against full source.
    for (;i>0;i--) { x*=2; }
// Register the memory region described by *r with the buddy allocator of the
// NUMA node the region's base address belongs to. For REQUESTED/REQUESTED32
// regions the backing pages are allocated here via alloc_pages_node(); other
// types are assumed to describe memory the caller already controls.
// A copy of the region descriptor ("keep") is stored with the pool so it can
// be recovered by palacios_remove_memory().
// NOTE(review): listing is elided — return statements, several branch
// headers/closing braces, and the copy into 'keep' are not visible here.
int add_palacios_memory(struct v3_mem_region *r) {
    struct v3_mem_region *keep;

    // fixup request regardless of its type
    if (r->num_pages*4096 < V3_CONFIG_MEM_BLOCK_SIZE) {
        WARNING("Allocating a memory pool smaller than the Palacios block size - may not be useful\n");

    // NOTE(review): get_order() maps a BYTE count to a page order; applying it
    // to a page count and round-tripping through pow2() as a power-of-two
    // check looks inconsistent — confirm intended semantics against full source.
    if (pow2(get_order(r->num_pages)) != r->num_pages) {
        WARNING("Allocating a memory pool that is not a power of two - it will be rounded down!\n");
        r->num_pages=pow2(get_order(r->num_pages));

    // descriptor copy handed to the buddy allocator for later retrieval
    if (!(keep=palacios_alloc(sizeof(struct v3_mem_region)))) {
        ERROR("Error allocating space for tracking region\n");

    if (r->type==REQUESTED || r->type==REQUESTED32) {
        // REQUESTED32 uses GFP_DMA32, presumably to keep the allocation below
        // 4 GiB — TODO confirm
        struct page * pgs = alloc_pages_node(r->node,
                                             r->type==REQUESTED ? GFP_KERNEL :
                                             r->type==REQUESTED32 ? GFP_DMA32 : GFP_KERNEL,
                                             get_order(r->num_pages));

            ERROR("Unable to satisfy allocation request\n");

        // record the physical base of the freshly allocated pages
        r->base_addr = page_to_pfn(pgs) << PAGE_SHIFT;

    node_id = numa_addr_to_node(r->base_addr);

        ERROR("Error locating node for addr %p\n", (void *)(r->base_addr));

    pool_order = get_order(r->num_pages * PAGE_SIZE) + PAGE_SHIFT;

    if (buddy_add_pool(memzones[node_id], r->base_addr, pool_order, keep)) {
        ERROR("ALERT ALERT ALERT Unable to add pool to buddy allocator...\n");
        // undo the allocation we made above before failing
        if (r->type==REQUESTED || r->type==REQUESTED32) {
            free_pages((uintptr_t)__va(r->base_addr), get_order(r->num_pages));
// Remove the pool previously registered at base_addr from its node's buddy
// allocator. Uses a non-forced remove, so it fails if any of the pool's
// memory is still allocated. On success, pages we allocated ourselves
// (REQUESTED/REQUESTED32) are handed back to the kernel page allocator.
// NOTE(review): listing is elided — return statements and closing braces for
// the branches below are not visible here.
int palacios_remove_memory(uintptr_t base_addr) {
    int node_id = numa_addr_to_node(base_addr);
    struct v3_mem_region *r;

    if (buddy_remove_pool(memzones[node_id], base_addr, 0, (void**)(&r))) { //unforced remove
        ERROR("Cannot remove memory at base address 0x%p because it is in use\n", (void*)base_addr);

    // regions allocated by add_palacios_memory() go back to the kernel
    if (r->type==REQUESTED || r->type==REQUESTED32) {
        free_pages((uintptr_t)__va(r->base_addr), get_order(r->num_pages));

    // user space responsible for onlining
// Tear down the memory manager: deinitialize each node's buddy zone, return
// the seed pages grabbed in palacios_init_mm() to the kernel, and free the
// per-node tracking tables. Also used as the unwind path when
// palacios_init_mm() fails partway through.
// NOTE(review): listing is elided — the declaration of 'i', NULL guards, and
// the return statement are not visible here.
int palacios_deinit_mm( void ) {

    for (i = 0; i < numa_num_nodes(); i++) {

        buddy_deinit(memzones[i]);

        // note that the memory is not onlined here - offlining and onlining
        // is the responsibility of the caller

        // free the seed regions
        free_pages((uintptr_t)__va(seed_addrs[i]), MAX_ORDER - 1);

    palacios_free(memzones);
    palacios_free(seed_addrs);
175 int palacios_init_mm( void ) {
176 int num_nodes = numa_num_nodes();
179 memzones = palacios_alloc_extended(sizeof(struct buddy_memzone *) * num_nodes, GFP_KERNEL);
182 ERROR("Cannot allocate space for memory zones\n");
183 palacios_deinit_mm();
187 memset(memzones, 0, sizeof(struct buddy_memzone *) * num_nodes);
189 seed_addrs = palacios_alloc_extended(sizeof(uintptr_t) * num_nodes, GFP_KERNEL);
192 ERROR("Cannot allocate space for seed addrs\n");
193 palacios_deinit_mm();
197 memset(seed_addrs, 0, sizeof(uintptr_t) * num_nodes);
199 for (node_id = 0; node_id < num_nodes; node_id++) {
200 struct buddy_memzone * zone = NULL;
202 // Seed the allocator with a small set of pages to allow initialization to complete.
203 // For now we will just grab some random pages, but in the future we will need to grab NUMA specific regions
204 // See: alloc_pages_node()
207 struct page * pgs = alloc_pages_node(node_id, GFP_KERNEL, MAX_ORDER - 1);
210 ERROR("Could not allocate initial memory block for node %d\n", node_id);
212 palacios_deinit_mm();
216 seed_addrs[node_id] = page_to_pfn(pgs) << PAGE_SHIFT;
219 zone = buddy_init(get_order(V3_CONFIG_MEM_BLOCK_SIZE) + PAGE_SHIFT, PAGE_SHIFT, node_id);
222 ERROR("Could not initialization memory management for node %d\n", node_id);
223 palacios_deinit_mm();
227 printk("Zone initialized, Adding seed region (order=%d)\n",
228 (MAX_ORDER - 1) + PAGE_SHIFT);
230 if (buddy_add_pool(zone, seed_addrs[node_id], (MAX_ORDER - 1) + PAGE_SHIFT,0)) {
231 ERROR("Could not add pool to buddy allocator\n");
232 palacios_deinit_mm();
236 memzones[node_id] = zone;