1 /* Palacios memory manager
5 #include <asm/page_64_types.h>
6 #include <linux/kernel.h>
7 #include <linux/list.h>
8 #include <linux/slab.h>
10 //static struct list_head pools;
// Per-NUMA-node buddy allocator zones, indexed by node id.
// Allocated and populated in palacios_init_mm(), torn down in palacios_deinit_mm().
18 static struct buddy_memzone ** memzones = NULL;
// Physical address of the initial "seed" block (order MAX_ORDER-1) handed to
// each node's zone during init; remembered so deinit can return it to Linux.
19 static uintptr_t * seed_addrs = NULL;
// NOTE(review): alignment below is presumably a power of two in bytes — the
// elided lines here do not show it being validated or passed to buddy_alloc.
22 // alignment is in bytes
// Allocate num_pages contiguous physical pages for Palacios from the
// per-node buddy allocator.  node_id selects the NUMA zone; the elided
// branch apparently maps an "any node" sentinel to the current CPU's node.
// Returns the physical address of the allocation (0/failure path is in
// elided lines — TODO confirm).
// NOTE(review): several original source lines are missing from this listing,
// so the control flow between the numbered statements is incomplete here.
23 uintptr_t alloc_palacios_pgs(u64 num_pages, u32 alignment, int node_id) {
27 int cpu_id = get_cpu();
// Resolve the caller's node request against the topology (guard condition elided).
30 node_id = numa_cpu_to_node(cpu_id);
31 } else if (numa_num_nodes() == 1) {
33 } else if (node_id >= numa_num_nodes()) {
34 ERROR("Requesting memory from an invalid NUMA node. (Node: %d) (%d nodes on system)\n",
35 node_id, numa_num_nodes());
// buddy_alloc takes an order in bits of total size, hence
// get_order(bytes) + PAGE_SHIFT rather than a page count.
39 addr = buddy_alloc(memzones[node_id], get_order(num_pages * PAGE_SIZE) + PAGE_SHIFT);
41 //DEBUG("Returning from alloc addr=%p, vaddr=%p\n", (void *)addr, __va(addr));
// Return pages previously obtained from alloc_palacios_pgs() to the buddy
// allocator of the node owning pg_addr.  The order computation must mirror
// the one used at allocation time (get_order(bytes) + PAGE_SHIFT).
// NOTE(review): no visible check that numa_addr_to_node succeeded before
// indexing memzones — the check may be in elided lines; confirm.
47 void free_palacios_pgs(uintptr_t pg_addr, u64 num_pages) {
48 int node_id = numa_addr_to_node(pg_addr);
50 //DEBUG("Freeing Memory page %p\n", (void *)pg_addr);
51 buddy_free(memzones[node_id], pg_addr, get_order(num_pages * PAGE_SIZE) + PAGE_SHIFT);
// Compute 2^i by repeated doubling (the return statement is in elided lines).
// Used below to check whether a pool size is an exact power of two.
// NOTE(review): i < 0 would loop a very long time — callers only pass
// get_order() results, which are non-negative.
57 unsigned long long pow2(int i)
59 unsigned long long x=1;
60 for (;i!=0;i--) { x*=2; }
// Register a memory region with the per-node buddy allocator.  For
// REQUESTED/REQUESTED32 regions the backing pages are allocated here from
// Linux (REQUESTED32 uses GFP_DMA32 for below-4GB physical memory); other
// region types are expected to carry a pre-populated base_addr.
// The region descriptor is duplicated into 'keep' so the allocator owns a
// stable copy that is handed back on pool removal.
// NOTE(review): many original lines are elided — error-return statements and
// several closing braces are not visible in this listing.
64 int add_palacios_memory(struct v3_mem_region *r) {
68 struct v3_mem_region *keep;
70 // fixup request regardless of its type
// NOTE(review): 4096 here is presumably PAGE_SIZE — confirm and unify.
71 if (r->num_pages*4096 < V3_CONFIG_MEM_BLOCK_SIZE) {
72 WARNING("Allocating a memory pool smaller than the Palacios block size - may not be useful\n");
// The buddy allocator needs a power-of-two pool; round the request DOWN to
// the nearest power of two (get_order rounds up, pow2 re-expands, and the
// comparison detects inexactness).
75 if (pow2(get_order(r->num_pages*PAGE_SIZE)) != r->num_pages*PAGE_SIZE) {
76 WARNING("Allocating a memory pool that is not a power of two (is %llu) - it will be rounded down!\n", r->num_pages*PAGE_SIZE);
77 r->num_pages=pow2(get_order(r->num_pages*PAGE_SIZE));
78 WARNING("Rounded power Allocating a memory pool that is not a power of two (rounded to %llu)\n", r->num_pages*PAGE_SIZE);
// Private copy of the region record; ownership passes to buddy_add_pool.
82 if (!(keep=palacios_alloc(sizeof(struct v3_mem_region)))) {
83 ERROR("Error allocating space for tracking region\n");
88 if (r->type==REQUESTED || r->type==REQUESTED32) {
// NOTE(review): get_order() normally takes a size in BYTES; passing a raw
// page count here looks wrong, but the matching free_pages() calls below use
// the same expression, so alloc/free are at least mutually consistent.
89 struct page * pgs = alloc_pages_node(r->node,
90 r->type==REQUESTED ? GFP_KERNEL :
91 r->type==REQUESTED32 ? GFP_DMA32 : GFP_KERNEL,
92 get_order(r->num_pages));
94 ERROR("Unable to satisfy allocation request\n");
// Record the physical base of the freshly allocated pages.
98 r->base_addr = page_to_pfn(pgs) << PAGE_SHIFT;
104 node_id = numa_addr_to_node(r->base_addr);
107 ERROR("Error locating node for addr %p\n", (void *)(r->base_addr));
// Pool order in bits of total size, matching alloc/free above.
111 pool_order = get_order(r->num_pages * PAGE_SIZE) + PAGE_SHIFT;
113 if (buddy_add_pool(memzones[node_id], r->base_addr, pool_order, keep)) {
114 ERROR("ALERT ALERT ALERT Unable to add pool to buddy allocator...\n");
// Undo our own page allocation on failure before bailing out.
115 if (r->type==REQUESTED || r->type==REQUESTED32) {
116 free_pages((uintptr_t)__va(r->base_addr), get_order(r->num_pages));
// Unregister the pool based at base_addr from its node's buddy allocator
// (unforced: fails if any of the pool is still allocated).  On success the
// saved region record 'r' is handed back; pages we allocated ourselves
// (REQUESTED/REQUESTED32) are returned to Linux, while offline-sourced
// memory is left for user space to re-online.
127 int palacios_remove_memory(uintptr_t base_addr) {
128 int node_id = numa_addr_to_node(base_addr);
129 struct v3_mem_region *r;
131 if (buddy_remove_pool(memzones[node_id], base_addr, 0, (void**)(&r))) { //unforced remove
132 ERROR("Cannot remove memory at base address 0x%p because it is in use\n", (void*)base_addr);
// Same alloc/free pairing as in add_palacios_memory() — the order argument
// must match what alloc_pages_node() was given.
136 if (r->type==REQUESTED || r->type==REQUESTED32) {
137 free_pages((uintptr_t)__va(r->base_addr), get_order(r->num_pages));
139 // user space responsible for onlining
// Tear down the memory manager: deinitialize each node's buddy zone, give
// each node's seed block back to Linux, then release the bookkeeping arrays.
// Safe to call from the init error paths below (the elided guards presumably
// skip nodes that were never initialized — TODO confirm).
149 int palacios_deinit_mm( void ) {
154 for (i = 0; i < numa_num_nodes(); i++) {
157 buddy_deinit(memzones[i]);
160 // note that the memory is not onlined here - offlining and onlining
161 // is the responsibility of the caller
// The seed block was allocated with alloc_pages_node(..., MAX_ORDER - 1)
// in palacios_init_mm(), so it is freed with the same order here.
164 // free the seed regions
165 free_pages((uintptr_t)__va(seed_addrs[i]), MAX_ORDER - 1);
169 palacios_free(memzones);
170 palacios_free(seed_addrs);
// Initialize the memory manager: allocate the per-node zone and seed-address
// tables, then for each NUMA node grab one maximum-order block from Linux,
// create a buddy zone sized to the Palacios block size, and seed the zone
// with that block so early allocations can succeed.
// NOTE(review): the tail of this function (success return) is beyond the
// visible portion of this listing.
176 int palacios_init_mm( void ) {
177 int num_nodes = numa_num_nodes();
180 memzones = palacios_alloc_extended(sizeof(struct buddy_memzone *) * num_nodes, GFP_KERNEL);
183 ERROR("Cannot allocate space for memory zones\n");
// palacios_deinit_mm() tolerates the partially-built state on error paths.
184 palacios_deinit_mm();
188 memset(memzones, 0, sizeof(struct buddy_memzone *) * num_nodes);
190 seed_addrs = palacios_alloc_extended(sizeof(uintptr_t) * num_nodes, GFP_KERNEL);
193 ERROR("Cannot allocate space for seed addrs\n");
194 palacios_deinit_mm();
198 memset(seed_addrs, 0, sizeof(uintptr_t) * num_nodes);
200 for (node_id = 0; node_id < num_nodes; node_id++) {
201 struct buddy_memzone * zone = NULL;
203 // Seed the allocator with a small set of pages to allow initialization to complete.
204 // For now we will just grab some random pages, but in the future we will need to grab NUMA specific regions
205 // See: alloc_pages_node()
// MAX_ORDER - 1 is the largest order Linux will hand out in one call.
208 struct page * pgs = alloc_pages_node(node_id, GFP_KERNEL, MAX_ORDER - 1);
211 ERROR("Could not allocate initial memory block for node %d\n", node_id);
213 palacios_deinit_mm();
// Remember the seed's physical base so deinit can free it later.
217 seed_addrs[node_id] = page_to_pfn(pgs) << PAGE_SHIFT;
// Zone's max order covers one Palacios memory block; min order is one page.
220 zone = buddy_init(get_order(V3_CONFIG_MEM_BLOCK_SIZE) + PAGE_SHIFT, PAGE_SHIFT, node_id);
223 ERROR("Could not initialization memory management for node %d\n", node_id);
224 palacios_deinit_mm();
228 printk("Zone initialized, Adding seed region (order=%d)\n",
229 (MAX_ORDER - 1) + PAGE_SHIFT);
// Seed pool carries no region record (last arg 0) — it is Linux-owned.
231 if (buddy_add_pool(zone, seed_addrs[node_id], (MAX_ORDER - 1) + PAGE_SHIFT,0)) {
232 ERROR("Could not add pool to buddy allocator\n");
233 palacios_deinit_mm();
237 memzones[node_id] = zone;