/* Copyright (c) 2007, Sandia National Laboratories */
/* Modified by Jack Lange, 2012 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
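
/* Local module headers: the buddy_* structures and the palacios_* allocation,
 * locking, and logging wrappers (ERROR/WARNING/INFO/DEBUG) used throughout
 * this file are assumed to be declared here. */
#include "palacios.h"
#include "buddy.h"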
/**
 * Converts a block address to its block index in the specified buddy allocator.
 * A block's index is used to find the block's tag bit, mp->tag_bits[block_id].
 */
static unsigned long
block_to_id(struct buddy_mempool *mp, struct block *block)
{
    unsigned long block_id =
        ((unsigned long)__pa(block) - mp->base_addr) >> mp->zone->min_order;

    BUG_ON(block_id >= mp->num_blocks);

    return block_id;
}
/**
 * Marks a block as free by setting its tag bit to one.
 */
static void
mark_available(struct buddy_mempool *mp, struct block *block)
{
    __set_bit(block_to_id(mp, block), mp->tag_bits);
}

/**
 * Marks a block as allocated by setting its tag bit to zero.
 */
static void
mark_allocated(struct buddy_mempool *mp, struct block *block)
{
    __clear_bit(block_to_id(mp, block), mp->tag_bits);
}

/**
 * Returns true if block is free, false if it is allocated.
 */
static int
is_available(struct buddy_mempool *mp, struct block *block)
{
    return test_bit(block_to_id(mp, block), mp->tag_bits);
}
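
/*
 * Note on the tag bitmap: there is one bit per minimum-sized (2^min_order
 * byte) chunk of a pool, and only the chunk at the *start* of a block carries
 * the block's tag.  A set bit therefore means "a free block begins here";
 * interior chunks and allocated blocks keep their bits cleared.
 */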
/**
 * Returns the address of the block's buddy block.
 */
static void *
find_buddy(struct buddy_mempool *mp, struct block *block, unsigned long order)
{
    unsigned long _block;
    unsigned long _buddy;

    BUG_ON((unsigned long)__pa(block) < mp->base_addr);

    /* Fixup block address to be zero-relative */
    _block = (unsigned long)__pa(block) - mp->base_addr;

    /* Calculate buddy in zero-relative space */
    _buddy = _block ^ (1UL << order);

    /* Return the buddy's address */
    return (void *)(_buddy + __va(mp->base_addr));
}
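
/*
 * The XOR in find_buddy() works because a block of size 2^order always starts
 * on a 2^order-aligned offset within its pool, so flipping bit 'order' of the
 * zero-relative offset selects the other half of the enclosing 2^(order+1)
 * region.  For example, an order-12 block at pool offset 0x3000 has its buddy
 * at 0x3000 ^ 0x1000 = 0x2000, and the order-12 block at 0x2000 points back
 * to 0x3000.
 */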
static inline uintptr_t pool_end_addr(struct buddy_mempool * pool) {
    return pool->base_addr + (1UL << pool->pool_order);
}
static struct buddy_mempool *
find_mempool(struct buddy_memzone * zone, uintptr_t addr) {
    struct rb_node * n = zone->mempools.rb_node;
    struct buddy_mempool * pool = NULL;

    /* Walk the rb-tree of pools, keyed by address range */
    while (n) {
        pool = rb_entry(n, struct buddy_mempool, tree_node);

        if (addr < pool->base_addr) {
            n = n->rb_left;
        } else if (addr >= pool_end_addr(pool)) {
            n = n->rb_right;
        } else {
            return pool;
        }
    }

    return NULL;
}
static int
insert_mempool(struct buddy_memzone * zone,
               struct buddy_mempool * pool) {
    struct rb_node ** p = &(zone->mempools.rb_node);
    struct rb_node * parent = NULL;
    struct buddy_mempool * tmp_pool;

    /* Find the insertion point, rejecting pools that overlap an existing one */
    while (*p) {
        parent = *p;
        tmp_pool = rb_entry(parent, struct buddy_mempool, tree_node);

        if (pool_end_addr(pool) <= tmp_pool->base_addr) {
            p = &(*p)->rb_left;
        } else if (pool->base_addr >= pool_end_addr(tmp_pool)) {
            p = &(*p)->rb_right;
        } else {
            return -1;
        }
    }

    rb_link_node(&(pool->tree_node), parent, p);
    rb_insert_color(&(pool->tree_node), &(zone->mempools));

    return 0;
}
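
/*
 * Pools are kept in a red-black tree ordered by their non-overlapping
 * physical address ranges [base_addr, base_addr + 2^pool_order).  This is
 * what lets find_mempool() map any address handed to buddy_free() back to
 * the pool it came from in logarithmic time.
 */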
/* This adds a pool of a given size to a buddy allocator zone. */
int buddy_add_pool(struct buddy_memzone * zone,
                   unsigned long base_addr,
                   unsigned long pool_order,
                   void *user_metadata) {
    struct buddy_mempool * mp = NULL;
    unsigned long flags = 0;
    int ret = 0;

    if (pool_order > zone->max_order) {
        ERROR("Pool order size is larger than max allowable zone size (pool_order=%lu) (max_order=%lu)\n", pool_order, zone->max_order);
        return -1;
    } else if (pool_order < zone->min_order) {
        ERROR("Pool order is smaller than min allowable zone size (pool_order=%lu) (min_order=%lu)\n", pool_order, zone->min_order);
        return -1;
    }

    mp = palacios_alloc_node_extended(sizeof(struct buddy_mempool), GFP_KERNEL, zone->node_id);

    if (mp == NULL) {
        ERROR("Could not allocate mempool\n");
        return -1;
    }
    mp->base_addr   = base_addr;
    mp->pool_order  = pool_order;
    mp->zone        = zone;
    mp->num_free_blocks = 0;

    mp->user_metadata = user_metadata;

    /* Allocate a bitmap with 1 bit per minimum-sized block */
    mp->num_blocks = (1UL << pool_order) / (1UL << zone->min_order);

    mp->tag_bits = palacios_alloc_node_extended(
                       BITS_TO_LONGS(mp->num_blocks) * sizeof(long), GFP_KERNEL, zone->node_id
                   );

    if (mp->tag_bits == NULL) {
        ERROR("Could not allocate tag bitmap\n");
        palacios_free(mp);
        return -1;
    }

    /* Initially mark all minimum-sized blocks as allocated */
    bitmap_zero(mp->tag_bits, mp->num_blocks);

    palacios_spinlock_lock_irqsave(&(zone->lock), flags);
    ret = insert_mempool(zone, mp);
    palacios_spinlock_unlock_irqrestore(&(zone->lock), flags);

    if (ret == -1) {
        ERROR("Error: Could not insert mempool into zone\n");
        palacios_free(mp->tag_bits);
        palacios_free(mp);
        return -1;
    }

    zone->num_pools++;

    buddy_free(zone, base_addr, pool_order);

    INFO("Added memory pool (addr=%p), order=%lu\n", (void *)base_addr, pool_order);

    return 0;
}
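
/*
 * Note that buddy_add_pool() seeds the allocator by calling buddy_free() on
 * the entire pool_order-sized range: that one call marks the top-level block
 * available and puts it on the zone's free list, which is why the tag bitmap
 * starts out all-zero ("all allocated") above.
 */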
/**
 * Removes a mempool from a zone.
 * Assumes the zone lock is already held.
 */
static int __buddy_remove_mempool(struct buddy_memzone * zone,
                                  unsigned long base_addr,
                                  unsigned char force,
                                  void **user_metadata)
{
    struct buddy_mempool * pool = NULL;
    struct block * block = NULL;

    pool = find_mempool(zone, base_addr);

    if (pool == NULL) {
        ERROR("Could not find mempool with base address (%p)\n", (void *)base_addr);
        return -1;
    }
    /* The pool can only be removed once its entire range has coalesced back
     * into a single free block of pool_order size. */
    block = (struct block *)__va(pool->base_addr);

    if (!is_available(pool, block) || (block->order != pool->pool_order)) {
        ERROR("Trying to remove an in use memory pool\n");
        return -1;
    }

    *user_metadata = pool->user_metadata;

    /* Take the free block off its free list and the pool out of the zone */
    list_del(&(block->link));
    rb_erase(&(pool->tree_node), &(zone->mempools));
    zone->num_pools--;

    palacios_free(pool->tag_bits);
    palacios_free(pool);

    return 0;
}
int buddy_remove_pool(struct buddy_memzone * zone,
                      unsigned long base_addr,
                      unsigned char force,
                      void **user_metadata)
{
    unsigned long flags = 0;
    int ret = 0;

    palacios_spinlock_lock_irqsave(&(zone->lock), flags);
    ret = __buddy_remove_mempool(zone, base_addr, force, user_metadata);
    palacios_spinlock_unlock_irqrestore(&(zone->lock), flags);

    return ret;
}
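
/*
 * Illustrative call sequence (a sketch, not code from this module; the pool
 * base/order values and the pool_base_pa/meta variables are hypothetical):
 *
 *     struct buddy_memzone * zone = buddy_init(30, 12, 0);
 *     void * meta = NULL;
 *     uintptr_t pa;
 *
 *     buddy_add_pool(zone, pool_base_pa, 26, NULL);   // donate a 64 MB pool
 *     pa = buddy_alloc(zone, 21);                     // take a 2 MB block
 *     buddy_free(zone, pa, 21);                       // give it back
 *     buddy_remove_pool(zone, pool_base_pa, 0, &meta);
 *     buddy_deinit(zone);
 */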
/**
 * Allocates a block of memory of the requested size (2^order bytes).
 *
 * Arguments:
 *       [IN] zone:  Buddy system memory zone to allocate from.
 *       [IN] order: Block size to allocate (2^order bytes).
 *
 * Returns:
 *       Success: Physical address of the start of the allocated memory block.
 *       Failure: NULL
 */
uintptr_t
buddy_alloc(struct buddy_memzone *zone, unsigned long order)
{
    unsigned long j;
    struct buddy_mempool * mp = NULL;
    struct list_head * list = NULL;
    struct block * block = NULL;
    struct block * buddy_block = NULL;
    unsigned long flags = 0;
    BUG_ON(zone == NULL);
    BUG_ON(order > zone->max_order);

    /* Fixup requested order to be at least the minimum supported */
    if (order < zone->min_order) {
        order = zone->min_order;
    }

    INFO("zone=%p, order=%lu\n", zone, order);

    palacios_spinlock_lock_irqsave(&(zone->lock), flags);
    for (j = order; j <= zone->max_order; j++) {

        INFO("Order iter=%lu\n", j);

        /* Try to allocate the first block in the order j list */
        list = &zone->avail[j];

        if (list_empty(list)) {
            continue;
        }

        block = list_entry(list->next, struct block, link);
        list_del(&(block->link));

        mp = block->mp;

        mark_allocated(mp, block);
        INFO("pool=%p, block=%p, order=%lu, j=%lu\n", mp, block, order, j);

        /* Trim if a higher order block than necessary was allocated */
        while (j > order) {
            --j;
            buddy_block = (struct block *)((unsigned long)block + (1UL << j));
            buddy_block->mp = mp;
            buddy_block->order = j;
            mark_available(mp, buddy_block);
            list_add(&(buddy_block->link), &(zone->avail[j]));
        }

        mp->num_free_blocks -= (1UL << (order - zone->min_order));

        palacios_spinlock_unlock_irqrestore(&(zone->lock), flags);

        return __pa(block);
    }

    palacios_spinlock_unlock_irqrestore(&(zone->lock), flags);

    return (uintptr_t)NULL;
}
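
/*
 * Allocation strategy, as implemented above: scan the free lists from the
 * requested order upward, take the first block found, and repeatedly split it
 * ("trim"), returning the upper halves to the free lists, until it is exactly
 * 2^order bytes.  num_free_blocks is accounted in minimum-order units, which
 * is why it changes by 1 << (order - min_order).
 */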
/**
 * Returns a block of memory to the buddy system memory allocator.
 */
void
buddy_free(
    //! Buddy system memory allocator object.
    struct buddy_memzone * zone,
    //! Physical address of the memory block to free.
    uintptr_t addr,
    //! Size of the memory block (2^order bytes).
    unsigned long order
)
{
    struct block * block = NULL;
    struct buddy_mempool * pool = NULL;
    unsigned long flags = 0;

    BUG_ON(zone == NULL);
    BUG_ON(order > zone->max_order);

    if ((addr & ((1UL << zone->min_order) - 1)) != 0) {
        ERROR("Attempting to free an invalid memory address (%p)\n", (void *)addr);
        return;
    }

    /* Fixup requested order to be at least the minimum supported */
    if (order < zone->min_order) {
        order = zone->min_order;
    }
    palacios_spinlock_lock_irqsave(&(zone->lock), flags);

    pool = find_mempool(zone, addr);

    if ((pool == NULL) || (order > pool->pool_order)) {
        WARNING("Attempted to free an invalid page address (%p)\n", (void *)addr);
        palacios_spinlock_unlock_irqrestore(&(zone->lock), flags);
        return;
    }

    /* Overlay block structure on the memory block being freed */
    block = (struct block *) __va(addr);

    if (is_available(pool, block)) {
        ERROR("Error: Freeing an available block\n");
        palacios_spinlock_unlock_irqrestore(&(zone->lock), flags);
        return;
    }
    pool->num_free_blocks += (1UL << (order - zone->min_order));

    /* Coalesce as much as possible with adjacent free buddy blocks */
    while (order < pool->pool_order) {
        /* Determine our buddy block's address */
        struct block * buddy = find_buddy(pool, block, order);

        /* Make sure buddy is available and has the same size as us */
        if (!is_available(pool, buddy))
            break;
        if (buddy->order != order)
            break;

        /* OK, we're good to go... buddy merge! */
        list_del(&buddy->link);

        /* Keep the lower of the two buddies and move up one order */
        if (buddy < block)
            block = buddy;

        ++order;
        block->order = order;
    }
    /* Add the (possibly coalesced) block to the appropriate free list */
    block->order = order;
    block->mp = pool;

    mark_available(pool, block);
    list_add(&(block->link), &(zone->avail[order]));

    palacios_spinlock_unlock_irqrestore(&(zone->lock), flags);
}
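
/*
 * Design note: the free-list metadata (struct block, with its list link,
 * order, and owning pool) is overlaid directly on the freed memory itself,
 * so apart from the tag bitmap no separate per-block bookkeeping is needed.
 * This is also why buddy_init() below forces min_order to be at least large
 * enough to hold a struct block.
 */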
/**
 * Dumps the state of a buddy system memory allocator object to the console.
 */
static int
zone_mem_show(struct seq_file * s, void * v) {
    struct buddy_memzone * zone = s->private;
    unsigned long i;
    unsigned long num_blocks;
    struct list_head * entry = NULL;
    unsigned long flags = 0;

    if (!zone) {
        seq_printf(s, "Null Zone Pointer!!\n");
        return 0;
    }
    seq_printf(s, "DUMP OF BUDDY MEMORY ZONE:\n");
    seq_printf(s, "  Zone Max Order=%lu, Min Order=%lu\n",
               zone->max_order, zone->min_order);

    palacios_spinlock_lock_irqsave(&(zone->lock), flags);

    for (i = zone->min_order; i <= zone->max_order; i++) {

        /* Count the number of memory blocks in the list */
        num_blocks = 0;

        list_for_each(entry, &zone->avail[i]) {
            num_blocks++;
        }

        seq_printf(s, "  order %2lu: %lu free blocks\n", i, num_blocks);
    }

    seq_printf(s, "  %lu memory pools\n", zone->num_pools);
460 // list pools in zone
462 struct rb_node * node = rb_first(&(zone->mempools));
463 struct buddy_mempool * pool = NULL;
466 pool = rb_entry(node, struct buddy_mempool, tree_node);
468 seq_printf(s, " Base Addr=%p, order=%lu, size=%lu, free=%lu\n",
469 (void *)pool->base_addr, pool->pool_order, (1UL << pool->pool_order),
470 pool->num_free_blocks << zone->min_order);
473 node = rb_next(node);
477 palacios_spinlock_unlock_irqrestore(&(zone->lock), flags);
static int zone_proc_open(struct inode * inode, struct file * filp) {
    struct proc_dir_entry * proc_entry = PDE(inode);
    INFO("proc_entry at %p, data at %p\n", proc_entry, proc_entry->data);
    return single_open(filp, zone_mem_show, proc_entry->data);
}
static struct file_operations zone_proc_ops = {
    .owner   = THIS_MODULE,
    .open    = zone_proc_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};
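
/*
 * Each zone exposes a read-only /proc file (v3-mem<node>) generated by
 * zone_mem_show() through the standard seq_file single_open() pattern; the
 * zone pointer travels in the proc entry's private data field.
 */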
extern struct proc_dir_entry * palacios_proc_dir;

void buddy_deinit(struct buddy_memzone * zone) {
    unsigned long flags = 0;

    palacios_spinlock_lock_irqsave(&(zone->lock), flags);

    // for each pool, free it
#warning We really need to free the memory pools here

    palacios_spinlock_unlock_irqrestore(&(zone->lock), flags);

    /* Remove the zone's /proc entry */
    {
        char proc_file_name[128];

        memset(proc_file_name, 0, 128);
        snprintf(proc_file_name, 128, "v3-mem%d", zone->node_id);

        remove_proc_entry(proc_file_name, palacios_proc_dir);
    }

    palacios_free(zone->avail);
    palacios_free(zone);
}
/**
 * Initializes a buddy system memory allocator object.
 *
 * Arguments:
 *       [IN] max_order: Maximum block size the zone will manage (2^max_order bytes).
 *       [IN] min_order: Minimum allocatable block size (2^min_order bytes).
 *       [IN] node_id:   NUMA node the zone and its metadata are associated with.
 *
 * Returns:
 *       Success: Pointer to an initialized buddy system memory allocator.
 *       Failure: NULL
 *
 * NOTE: The min_order argument is provided as an optimization. Since one tag
 *       bit is required for each minimum-sized block, large memory pools that
 *       allow order 0 allocations will use large amounts of memory. Specifying
 *       a min_order of 5 (32 bytes), for example, reduces the number of tag
 *       bits by a factor of 32.
 */
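
/*
 * For scale: a 1 GB pool with min_order = 12 (4 KB blocks) needs
 * 2^30 / 2^12 = 262144 tag bits, i.e. a 32 KB bitmap, whereas allowing
 * order-0 allocations would require 2^30 bits (a 128 MB bitmap).
 */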
struct buddy_memzone *
buddy_init(unsigned long max_order,
           unsigned long min_order,
           int node_id)
{
    struct buddy_memzone * zone = NULL;
    unsigned long i = 0;

    DEBUG("Initializing Memory zone with up to %lu bit blocks on Node %d\n", max_order, node_id);
    /* Smallest block size must be big enough to hold a block structure */
    if ((1UL << min_order) < sizeof(struct block))
        min_order = ilog2( roundup_pow_of_two(sizeof(struct block)) );

    /* The minimum block order must be smaller than the pool order */
    if (min_order > max_order)
        return NULL;

    zone = palacios_alloc_node_extended(sizeof(struct buddy_memzone), GFP_KERNEL, node_id);

    INFO("Allocated zone at %p\n", zone);

    if (!zone) {
        ERROR("Could not allocate memzone\n");
        return NULL;
    }
    memset(zone, 0, sizeof(struct buddy_memzone));

    zone->max_order = max_order;
    zone->min_order = min_order;
    zone->node_id   = node_id;

    /* Allocate a list for every order up to the maximum allowed order */
    zone->avail = palacios_alloc_node_extended((max_order + 1) * sizeof(struct list_head), GFP_KERNEL, zone->node_id);

    INFO("Allocated free lists at %p\n", zone->avail);

    if (!zone->avail) {
        ERROR("Could not allocate free lists\n");
        palacios_free(zone);
        return NULL;
    }
    /* Initially all lists are empty */
    for (i = 0; i <= max_order; i++) {
        INIT_LIST_HEAD(&zone->avail[i]);
    }

    palacios_spinlock_init(&(zone->lock));

    zone->mempools.rb_node = NULL;

    INFO("Allocated zone at %p\n", zone);
    /* Create a read-only /proc entry (v3-mem<node>) for inspecting this zone */
    {
        struct proc_dir_entry * zone_entry = NULL;
        char proc_file_name[128];

        memset(proc_file_name, 0, 128);
        snprintf(proc_file_name, 128, "v3-mem%d", zone->node_id);

        zone_entry = create_proc_entry(proc_file_name, 0444, palacios_proc_dir);

        if (zone_entry) {
            zone_entry->proc_fops = &zone_proc_ops;
            zone_entry->data = zone;
        } else {
            ERROR("Error creating memory zone proc file\n");
        }
    }

    return zone;
}