/* Copyright (c) 2007,2008 Sandia National Laboratories */

#include <lwk/kernel.h>
#include <lwk/task.h>
#include <lwk/spinlock.h>
#include <lwk/string.h>
#include <lwk/aspace.h>
#include <lwk/idspace.h>
#include <lwk/htable.h>
#include <lwk/log2.h>
#include <lwk/cpuinfo.h>
#include <lwk/pmem.h>
#include <arch/uaccess.h>

/**
 * ID space used to allocate address space IDs.
 */
static idspace_t idspace;

/**
 * Hash table used to lookup address space structures by ID.
 */
static htable_t htable;

/**
 * Lock for serializing access to the htable.
 */
static DEFINE_SPINLOCK(htable_lock);

/**
 * Memory region structure. A memory region represents a contiguous region
 * [start, end) of valid memory addresses in an address space.
 */
struct region {
        struct aspace *  aspace;   /* Address space this region belongs to */
        struct list_head link;     /* Linkage in the aspace->region_list */

        vaddr_t          start;    /* Starting address of the region */
        vaddr_t          end;      /* 1st byte after end of the region */
        vmflags_t        flags;    /* Permissions, caching, etc. */
        vmpagesize_t     pagesz;   /* Allowed page sizes... 2^bit */
        id_t             smartmap; /* If (flags & VM_SMARTMAP), ID of the
                                      aspace this region is mapped to */
        char             name[16]; /* Human-readable name of the region */
};

/**
 * This calculates a region's end address. Normally end is the address of the
 * first byte after the region. However if the region extends to the end of
 * memory, that is not possible so set end to the last valid address,
 * ULONG_MAX.
 */
static vaddr_t
calc_end(vaddr_t start, size_t extent)
{
        vaddr_t end = start + extent;

        /* The add wrapped around, region extends to the end of memory */
        if (end == 0)
                end = ULONG_MAX;

        return end;
}

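/*
 * For example (illustrative values, assuming 64-bit addresses):
 *
 *      calc_end(0x1000, 0x2000) returns 0x3000.
 *      calc_end(ULONG_MAX - 0xFFF, 0x1000) wraps around to 0, so it
 *      returns ULONG_MAX instead.
 */
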
/**
 * Locates the region covering the specified address.
 */
static struct region *
find_region(struct aspace *aspace, vaddr_t addr)
{
        struct region *rgn;

        list_for_each_entry(rgn, &aspace->region_list, link) {
                if ((rgn->start <= addr) && (rgn->end > addr))
                        return rgn;
        }
        return NULL;
}

/**
 * Finds a region that overlaps the specified interval [start, end).
 */
static struct region *
find_overlapping_region(struct aspace *aspace, vaddr_t start, vaddr_t end)
{
        struct region *rgn;

        list_for_each_entry(rgn, &aspace->region_list, link) {
                if ((start < rgn->end) && (end > rgn->start))
                        return rgn;
        }
        return NULL;
}

/**
 * Locates the region that is SMARTMAP'ed to the specified aspace ID.
 */
static struct region *
find_smartmap_region(struct aspace *aspace, id_t src_aspace)
{
        struct region *rgn;

        list_for_each_entry(rgn, &aspace->region_list, link) {
                if ((rgn->flags & VM_SMARTMAP) && (rgn->smartmap == src_aspace))
                        return rgn;
        }
        return NULL;
}

/**
 * Looks up an aspace object by ID and returns it with its spinlock locked.
 */
static struct aspace *
lookup_and_lock(id_t id)
{
        struct aspace *aspace;

        /* Lock the hash table, lookup aspace object by ID */
        spin_lock(&htable_lock);
        if ((aspace = htable_lookup(htable, id)) == NULL) {
                spin_unlock(&htable_lock);
                return NULL;
        }

        /* Lock the identified aspace */
        spin_lock(&aspace->lock);

        /* Unlock the hash table, others may now use it */
        spin_unlock(&htable_lock);

        return aspace;
}

/**
 * Like lookup_and_lock(), but looks up two address spaces instead of one.
 * Returns 0 on success, with both aspace objects locked.
 */
static int
lookup_and_lock_two(id_t a, id_t b,
                    struct aspace **aspace_a, struct aspace **aspace_b)
{
        /* Lock the hash table, lookup aspace objects by ID */
        spin_lock(&htable_lock);
        if ((*aspace_a = htable_lookup(htable, a)) == NULL) {
                spin_unlock(&htable_lock);
                return -ENOENT;
        }

        if ((*aspace_b = htable_lookup(htable, b)) == NULL) {
                spin_unlock(&htable_lock);
                return -ENOENT;
        }

        /* Lock the identified aspaces */
        spin_lock(&(*aspace_a)->lock);
        spin_lock(&(*aspace_b)->lock);

        /* Unlock the hash table, others may now use it */
        spin_unlock(&htable_lock);

        return 0;
}

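/*
 * Note: both aspace locks are acquired while htable_lock is held. Because
 * every multi-aspace lock acquisition is serialized by htable_lock, two
 * callers locking the same pair of aspaces in opposite orders cannot
 * deadlock.
 */
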
/**
 * Returns true if id is a valid user-visible address space ID.
 */
static bool
id_ok(id_t id)
{
        return ((id >= ASPACE_MIN_ID) && (id <= ASPACE_MAX_ID));
}

/**
 * Initializes the address space subsystem.
 */
int
aspace_subsys_init(void)
{
        int status;

        /* Create an ID space for allocating address space IDs */
        if ((status = idspace_create(__ASPACE_MIN_ID, __ASPACE_MAX_ID, &idspace)))
                panic("Failed to create aspace ID space (status=%d).", status);

        /* Create a hash table that will be used for quick ID->aspace lookups */
        if ((status = htable_create(7 /* 2^7 bins */,
                                    offsetof(struct aspace, id),
                                    offsetof(struct aspace, ht_link),
                                    &htable)))
                panic("Failed to create aspace hash table (status=%d).", status);

        /* Create an aspace for use by kernel threads */
        if ((status = aspace_create(KERNEL_ASPACE_ID, "kernel", NULL)))
                panic("Failed to create kernel aspace (status=%d).", status);

        /* Switch to the newly created kernel address space */
        if ((current->aspace = aspace_acquire(KERNEL_ASPACE_ID)) == NULL)
                panic("Failed to acquire kernel aspace.");
        arch_aspace_activate(current->aspace);

        return 0;
}

/**
 * Returns the ID of the address space the caller is currently executing in.
 */
int
aspace_get_myid(id_t *id)
{
        *id = current->aspace->id;
        return 0;
}

int
sys_aspace_get_myid(id_t __user *id)
{
        int status;
        id_t _id;

        if ((status = aspace_get_myid(&_id)) != 0)
                return status;

        if (id && copy_to_user(id, &_id, sizeof(*id)))
                return -EFAULT;

        return 0;
}

/**
 * Creates a new address space and optionally returns its ID via *id.
 * If id_request is ANY_ID, any available ID may be allocated.
 */
int
aspace_create(id_t id_request, const char *name, id_t *id)
{
        int status;
        id_t new_id;
        struct aspace *aspace;
        unsigned long flags;

        if ((status = idspace_alloc_id(idspace, id_request, &new_id)) != 0)
                return status;

        if ((aspace = kmem_alloc(sizeof(*aspace))) == NULL) {
                idspace_free_id(idspace, new_id);
                return -ENOMEM;
        }

        /*
         * Initialize the address space. kmem_alloc() allocates zeroed memory
         * so fields with an initial state of zero do not need to be explicitly
         * initialized.
         */
        aspace->id = new_id;
        spin_lock_init(&aspace->lock);
        list_head_init(&aspace->region_list);
        hlist_node_init(&aspace->ht_link);
        strlcpy(aspace->name, name, sizeof(aspace->name));

        /* Create a region for the kernel portion of the address space */
        if ((status =
                __aspace_add_region(
                        aspace,
                        PAGE_OFFSET,             /* kernel starts here */
                        ULONG_MAX-PAGE_OFFSET+1, /* # bytes to end of memory */
                        VM_KERNEL,
                        PAGE_SIZE,
                        "kernel"
                )
        ) != 0)
                goto error1;

        /* Do architecture-specific initialization */
        if ((status = arch_aspace_create(aspace)) != 0)
                goto error2;

        /* Add new address space to a hash table, for quick lookups by ID */
        spin_lock_irqsave(&htable_lock, flags);
        BUG_ON(htable_add(htable, aspace));
        spin_unlock_irqrestore(&htable_lock, flags);

        if (id)
                *id = aspace->id;

        return 0;

error2:
        BUG_ON(__aspace_del_region(aspace, PAGE_OFFSET, ULONG_MAX-PAGE_OFFSET+1));
error1:
        idspace_free_id(idspace, aspace->id);
        kmem_free(aspace);
        return status;
}

int
sys_aspace_create(id_t id_request, const char __user *name, id_t __user *id)
{
        int status;
        id_t _id;
        char _name[16];

        if (current->uid != 0)
                return -EPERM;

        if ((id_request != ANY_ID) && !id_ok(id_request))
                return -EINVAL;

        if (strncpy_from_user(_name, name, sizeof(_name)) < 0)
                return -EFAULT;
        _name[sizeof(_name) - 1] = '\0';

        if ((status = aspace_create(id_request, _name, &_id)) != 0)
                return status;

        /* Return the ID of the new aspace to user-space */
        if (id && copy_to_user(id, &_id, sizeof(*id)))
                return -EFAULT;

        return 0;
}

/**
 * Destroys an address space. Fails if the aspace is still in use
 * (i.e., its reference count is non-zero).
 */
int
aspace_destroy(id_t id)
{
        struct aspace *aspace;
        struct list_head *pos, *tmp;
        struct region *rgn;
        unsigned long irqstate;

        /* Lock the hash table, lookup aspace object by ID */
        spin_lock_irqsave(&htable_lock, irqstate);
        if ((aspace = htable_lookup(htable, id)) == NULL) {
                spin_unlock_irqrestore(&htable_lock, irqstate);
                return -EINVAL;
        }

        /* Lock the identified aspace */
        spin_lock(&aspace->lock);

        if (aspace->refcnt) {
                spin_unlock(&aspace->lock);
                spin_unlock_irqrestore(&htable_lock, irqstate);
                return -EBUSY;
        }

        /* Remove aspace from hash table, preventing others from finding it */
        BUG_ON(htable_del(htable, aspace));

        /* Unlock the hash table, others may now use it */
        spin_unlock_irqrestore(&htable_lock, irqstate);
        spin_unlock(&aspace->lock);

        /* Finish up destroying the aspace, we have the only reference */
        list_for_each_safe(pos, tmp, &aspace->region_list) {
                rgn = list_entry(pos, struct region, link);

                /* Must drop our reference on all SMARTMAP'ed aspaces */
                if (rgn->flags & VM_SMARTMAP) {
                        struct aspace *src;

                        spin_lock_irqsave(&htable_lock, irqstate);
                        src = htable_lookup(htable, rgn->smartmap);
                        BUG_ON(src == NULL);
                        spin_lock(&src->lock);
                        --src->refcnt;
                        spin_unlock(&src->lock);
                        spin_unlock_irqrestore(&htable_lock, irqstate);
                }

                list_del(&rgn->link);
                kmem_free(rgn);
        }

        arch_aspace_destroy(aspace);
        BUG_ON(idspace_free_id(idspace, aspace->id));
        kmem_free(aspace);

        return 0;
}

int
sys_aspace_destroy(id_t id)
{
        if (current->uid != 0)
                return -EPERM;
        if (!id_ok(id))
                return -EINVAL;
        return aspace_destroy(id);
}

/**
 * Acquires an address space object. The object is guaranteed not to be
 * deleted until it is released via aspace_release().
 */
struct aspace *
aspace_acquire(id_t id)
{
        struct aspace *aspace;
        unsigned long irqstate;

        local_irq_save(irqstate);
        if ((aspace = lookup_and_lock(id)) != NULL) {
                ++aspace->refcnt;
                spin_unlock(&aspace->lock);
        }
        local_irq_restore(irqstate);

        return aspace;
}

/**
 * Releases an aspace object that was previously acquired via aspace_acquire().
 * The aspace object passed in must be unlocked.
 */
void
aspace_release(struct aspace *aspace)
{
        unsigned long irqstate;

        spin_lock_irqsave(&aspace->lock, irqstate);
        --aspace->refcnt;
        spin_unlock_irqrestore(&aspace->lock, irqstate);
}

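/*
 * Typical usage of the acquire/release pair (an illustrative sketch, not
 * code from this file; my_id is a placeholder):
 *
 *      struct aspace *aspace;
 *
 *      if ((aspace = aspace_acquire(my_id)) == NULL)
 *              return -ENOENT;
 *      ... use aspace; it cannot be destroyed until released ...
 *      aspace_release(aspace);
 */
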
/**
 * Finds a free chunk of address space at least extent bytes long, aligned
 * to alignment, searching upward from start_hint.
 */
int
__aspace_find_hole(struct aspace *aspace,
                   vaddr_t start_hint, size_t extent, size_t alignment,
                   vaddr_t *start)
{
        struct region *rgn;
        vaddr_t hole;

        if (!aspace || !extent || !is_power_of_2(alignment))
                return -EINVAL;

        hole = round_up(start_hint, alignment);
        while ((rgn = find_overlapping_region(aspace, hole, hole + extent))) {
                /* Ran into the region at the end of memory, no hole exists */
                if (rgn->end == ULONG_MAX)
                        return -ENOENT;
                hole = round_up(rgn->end, alignment);
        }

        *start = hole;
        return 0;
}

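/*
 * For example (illustrative): in an aspace whose only region is the kernel
 * region at [PAGE_OFFSET, ULONG_MAX], the call
 *
 *      __aspace_find_hole(aspace, 0x1000, 0x4000, 0x200000, &start)
 *
 * rounds the hint up to the 2 MB boundary 0x200000, finds no overlap with
 * an existing region, and returns 0 with start = 0x200000.
 */
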
int
aspace_find_hole(id_t id,
                 vaddr_t start_hint, size_t extent, size_t alignment,
                 vaddr_t *start)
{
        int status;
        struct aspace *aspace;
        unsigned long irqstate;

        local_irq_save(irqstate);
        aspace = lookup_and_lock(id);
        status = __aspace_find_hole(aspace, start_hint, extent, alignment,
                                    start);
        if (aspace) spin_unlock(&aspace->lock);
        local_irq_restore(irqstate);

        return status;
}

int
sys_aspace_find_hole(id_t id,
                     vaddr_t start_hint, size_t extent, size_t alignment,
                     vaddr_t __user *start)
{
        int status;
        vaddr_t _start;

        if (current->uid != 0)
                return -EPERM;

        if (!id_ok(id))
                return -EINVAL;

        status = aspace_find_hole(id, start_hint, extent, alignment, &_start);
        if (status)
                return status;

        if (start && copy_to_user(start, &_start, sizeof(_start)))
                return -EFAULT;

        return 0;
}

int
__aspace_add_region(struct aspace *aspace,
                    vaddr_t start, size_t extent,
                    vmflags_t flags, vmpagesize_t pagesz,
                    const char *name)
{
        struct region *rgn;
        struct region *cur;
        struct list_head *pos;
        vaddr_t end = calc_end(start, extent);

        if (!aspace || !start)
                return -EINVAL;

        /* Region must have non-zero size */
        if (!extent) {
                printk(KERN_WARNING "Extent must be non-zero.\n");
                return -EINVAL;
        }

        /* Region must have a positive size */
        if (start >= end) {
                printk(KERN_WARNING
                        "Invalid region size (start=0x%lx, extent=0x%lx).\n",
                        start, extent);
                return -EINVAL;
        }

        /* Architecture must support the page size specified */
        if ((pagesz & cpu_info[0].pagesz_mask) == 0) {
                printk(KERN_WARNING
                        "Invalid page size specified (pagesz=0x%lx).\n",
                        pagesz);
                return -EINVAL;
        }
        pagesz &= cpu_info[0].pagesz_mask;

        /* Only one page size may be specified */
        if (!is_power_of_2(pagesz)) {
                printk(KERN_WARNING
                        "More than one page size specified (pagesz=0x%lx).\n",
                        pagesz);
                return -EINVAL;
        }

        /* Region must be aligned to at least the specified page size */
        if ((start & (pagesz-1)) || ((end!=ULONG_MAX) && (end & (pagesz-1)))) {
                printk(KERN_WARNING
                        "Region is misaligned (start=0x%lx, end=0x%lx).\n",
                        start, end);
                return -EINVAL;
        }

        /* Region must not overlap with any existing regions */
        list_for_each_entry(cur, &aspace->region_list, link) {
                if ((start < cur->end) && (end > cur->start)) {
                        printk(KERN_WARNING
                                "Region overlaps with existing region.\n");
                        return -EINVAL;
                }
        }

        /* Allocate and initialize a new region object */
        if ((rgn = kmem_alloc(sizeof(struct region))) == NULL)
                return -ENOMEM;

        rgn->aspace = aspace;
        rgn->start  = start;
        rgn->end    = end;
        rgn->flags  = flags;
        rgn->pagesz = pagesz;
        strlcpy(rgn->name, name, sizeof(rgn->name));

        /* The heap region is special, remember its bounds */
        if (flags & VM_HEAP) {
                aspace->heap_start = start;
                aspace->heap_end   = end;
                aspace->brk        = aspace->heap_start;
                aspace->mmap_brk   = aspace->heap_end;
        }

        /* Insert region into address space's sorted region list */
        list_for_each(pos, &aspace->region_list) {
                cur = list_entry(pos, struct region, link);
                if (cur->start > rgn->start)
                        break;
        }
        list_add_tail(&rgn->link, pos);

        return 0;
}

int
aspace_add_region(id_t id,
                  vaddr_t start, size_t extent,
                  vmflags_t flags, vmpagesize_t pagesz,
                  const char *name)
{
        int status;
        struct aspace *aspace;
        unsigned long irqstate;

        local_irq_save(irqstate);
        aspace = lookup_and_lock(id);
        status = __aspace_add_region(aspace, start, extent, flags, pagesz, name);
        if (aspace) spin_unlock(&aspace->lock);
        local_irq_restore(irqstate);

        return status;
}

int
sys_aspace_add_region(id_t id,
                      vaddr_t start, size_t extent,
                      vmflags_t flags, vmpagesize_t pagesz,
                      const char __user *name)
{
        char _name[16];

        if (current->uid != 0)
                return -EPERM;

        if (!id_ok(id))
                return -EINVAL;

        if (strncpy_from_user(_name, name, sizeof(_name)) < 0)
                return -EFAULT;
        _name[sizeof(_name) - 1] = '\0';

        return aspace_add_region(id, start, extent, flags, pagesz, _name);
}

int
__aspace_del_region(struct aspace *aspace, vaddr_t start, size_t extent)
{
        int status;
        struct region *rgn;
        vaddr_t end = calc_end(start, extent);

        if (!aspace)
                return -EINVAL;

        /* Locate the region to delete */
        rgn = find_region(aspace, start);
        if (!rgn || (rgn->start != start) || (rgn->end != end)
                 || (rgn->flags & VM_KERNEL))
                return -EINVAL;

        if (!(rgn->flags & VM_SMARTMAP)) {
                /* Unmap all of the memory that was mapped to the region */
                status = __aspace_unmap_pmem(aspace, start, extent);
                if (status)
                        return status;
        }

        /* Remove the region from the address space */
        list_del(&rgn->link);
        kmem_free(rgn);

        return 0;
}

int
aspace_del_region(id_t id, vaddr_t start, size_t extent)
{
        int status;
        struct aspace *aspace;
        unsigned long irqstate;

        local_irq_save(irqstate);
        aspace = lookup_and_lock(id);
        status = __aspace_del_region(aspace, start, extent);
        if (aspace) spin_unlock(&aspace->lock);
        local_irq_restore(irqstate);

        return status;
}

int
sys_aspace_del_region(id_t id, vaddr_t start, size_t extent)
{
        if (current->uid != 0)
                return -EPERM;
        if (!id_ok(id))
                return -EINVAL;
        return aspace_del_region(id, start, extent);
}

/**
 * Maps physical memory to the specified virtual address range. If umem_only
 * is true, the physical memory must be user memory (UMEM).
 */
static int
map_pmem(struct aspace *aspace,
         paddr_t pmem, vaddr_t start, size_t extent,
         bool umem_only)
{
        int status;
        struct region *rgn;

        if (!aspace)
                return -EINVAL;

        /* When mapping on behalf of user-space, only UMEM may be mapped */
        if (umem_only && !pmem_is_umem(pmem, extent)) {
                printk(KERN_WARNING
                        "User-space tried to map non-UMEM "
                        "(pmem=0x%lx, extent=0x%lx).\n",
                        pmem, extent);
                return -EPERM;
        }

        while (extent) {
                /* Find region covering the address */
                rgn = find_region(aspace, start);
                if (!rgn) {
                        printk(KERN_WARNING
                                "Failed to find region covering addr=0x%lx.\n",
                                start);
                        return -EINVAL;
                }

                /* Can't map anything to kernel or SMARTMAP regions */
                if ((rgn->flags & VM_KERNEL) || (rgn->flags & VM_SMARTMAP)) {
                        printk(KERN_WARNING
                                "Trying to map memory to protected region.\n");
                        return -EINVAL;
                }

                /* addresses must be aligned to region's page size */
                if ((start & (rgn->pagesz-1)) || (pmem & (rgn->pagesz-1))) {
                        printk(KERN_WARNING
                                "Misalignment "
                                "(start=0x%lx, pmem=0x%lx, pagesz=0x%lx).\n",
                                start, pmem, rgn->pagesz);
                        return -EINVAL;
                }

                /* Map until full extent mapped or end of region is reached */
                while (extent && (start < rgn->end)) {
                        if ((status =
                                arch_aspace_map_page(
                                        aspace,
                                        start,
                                        pmem,
                                        rgn->flags,
                                        rgn->pagesz
                                )
                        ) != 0)
                                return status;

                        extent -= rgn->pagesz;
                        start  += rgn->pagesz;
                        pmem   += rgn->pagesz;
                }
        }

        return 0;
}

static int
map_pmem_locked(id_t id,
                paddr_t pmem, vaddr_t start, size_t extent,
                bool umem_only)
{
        int status;
        struct aspace *aspace;
        unsigned long irqstate;

        local_irq_save(irqstate);
        aspace = lookup_and_lock(id);
        status = map_pmem(aspace, pmem, start, extent, umem_only);
        if (aspace) spin_unlock(&aspace->lock);
        local_irq_restore(irqstate);

        return status;
}

int
__aspace_map_pmem(struct aspace *aspace,
                  paddr_t pmem, vaddr_t start, size_t extent)
{
        return map_pmem(aspace, pmem, start, extent, false);
}

int
aspace_map_pmem(id_t id, paddr_t pmem, vaddr_t start, size_t extent)
{
        return map_pmem_locked(id, pmem, start, extent, false);
}

int
sys_aspace_map_pmem(id_t id, paddr_t pmem, vaddr_t start, size_t extent)
{
        if (current->uid != 0)
                return -EPERM;
        if (!id_ok(id))
                return -EINVAL;
        return map_pmem_locked(id, pmem, start, extent, true);
}

int
__aspace_unmap_pmem(struct aspace *aspace, vaddr_t start, size_t extent)
{
        int status;
        struct region *rgn;

        if (!aspace)
                return -EINVAL;

        while (extent) {
                /* Find region covering the address */
                rgn = find_region(aspace, start);
                if (!rgn) {
                        printk(KERN_WARNING
                                "Failed to find region covering addr=0x%lx.\n",
                                start);
                        return -EINVAL;
                }

                /* Can't unmap anything from kernel or SMARTMAP regions */
                if ((rgn->flags & VM_KERNEL) || (rgn->flags & VM_SMARTMAP)) {
                        printk(KERN_WARNING
                                "Trying to unmap memory from protected region.\n");
                        return -EINVAL;
                }

                /* address must be aligned to region's page size */
                if (start & (rgn->pagesz-1)) {
                        printk(KERN_WARNING
                                "Misalignment (start=0x%lx, pagesz=0x%lx).\n",
                                start, rgn->pagesz);
                        return -EINVAL;
                }

                /* Unmap until full extent unmapped or end of region is reached */
                while (extent && (start < rgn->end)) {
                        if ((status =
                                arch_aspace_unmap_page(
                                        aspace,
                                        start,
                                        rgn->pagesz
                                )
                        ) != 0)
                                return status;

                        extent -= rgn->pagesz;
                        start  += rgn->pagesz;
                }
        }

        return 0;
}

int
aspace_unmap_pmem(id_t id, vaddr_t start, size_t extent)
{
        int status;
        struct aspace *aspace;
        unsigned long irqstate;

        local_irq_save(irqstate);
        aspace = lookup_and_lock(id);
        status = __aspace_unmap_pmem(aspace, start, extent);
        if (aspace) spin_unlock(&aspace->lock);
        local_irq_restore(irqstate);

        return status;
}

int
sys_aspace_unmap_pmem(id_t id, vaddr_t start, size_t extent)
{
        if (current->uid != 0)
                return -EPERM;
        if (!id_ok(id))
                return -EINVAL;
        return aspace_unmap_pmem(id, start, extent);
}

int
__aspace_smartmap(struct aspace *src, struct aspace *dst,
                  vaddr_t start, size_t extent)
{
        int status;
        vaddr_t end = start + extent;
        struct region *rgn;
        char name[16];

        /* Can only SMARTMAP a given aspace in once */
        if (find_smartmap_region(dst, src->id))
                return -EINVAL;

        /* SMARTMAP regions must be aligned to SMARTMAP_ALIGN */
        if ((start & (SMARTMAP_ALIGN-1)) || (end & (SMARTMAP_ALIGN-1)))
                return -EINVAL;

        snprintf(name, sizeof(name), "SMARTMAP-%u", (unsigned int)src->id);
        if ((status = __aspace_add_region(dst, start, extent,
                                          VM_SMARTMAP, PAGE_SIZE, name)))
                return status;

        /* Do architecture-specific SMARTMAP initialization */
        if ((status = arch_aspace_smartmap(src, dst, start, extent))) {
                BUG_ON(__aspace_del_region(dst, start, extent));
                return status;
        }

        /* Remember the source aspace that the SMARTMAP region is mapped to */
        rgn = find_region(dst, start);
        BUG_ON(!rgn);
        rgn->smartmap = src->id;

        /* Ensure source aspace doesn't go away while we have it SMARTMAP'ed */
        ++src->refcnt;

        return 0;
}

int
aspace_smartmap(id_t src, id_t dst, vaddr_t start, size_t extent)
{
        int status;
        struct aspace *src_spc, *dst_spc;
        unsigned long irqstate;

        /* Don't allow self SMARTMAP'ing */
        if (src == dst)
                return -EINVAL;

        local_irq_save(irqstate);
        if ((status = lookup_and_lock_two(src, dst, &src_spc, &dst_spc))) {
                local_irq_restore(irqstate);
                return status;
        }

        status = __aspace_smartmap(src_spc, dst_spc, start, extent);
        spin_unlock(&src_spc->lock);
        spin_unlock(&dst_spc->lock);
        local_irq_restore(irqstate);

        return status;
}

int
sys_aspace_smartmap(id_t src, id_t dst, vaddr_t start, size_t extent)
{
        if (current->uid != 0)
                return -EPERM;
        if (!id_ok(src) || !id_ok(dst))
                return -EINVAL;
        return aspace_smartmap(src, dst, start, extent);
}

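/*
 * A minimal SMARTMAP usage sketch (illustrative; src_id and dst_id are
 * placeholders for existing aspace IDs, and start/extent must be
 * SMARTMAP_ALIGN aligned):
 *
 *      aspace_smartmap(src_id, dst_id, start, extent);
 *      ... dst can now directly address src's memory ...
 *      aspace_unsmartmap(src_id, dst_id);
 */
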
int
__aspace_unsmartmap(struct aspace *src, struct aspace *dst)
{
        size_t extent;
        struct region *rgn;

        if ((rgn = find_smartmap_region(dst, src->id)) == NULL)
                return -EINVAL;
        extent = rgn->end - rgn->start;

        /* Do architecture-specific SMARTMAP unmapping */
        BUG_ON(arch_aspace_unsmartmap(src, dst, rgn->start, extent));

        /* Delete the SMARTMAP region and release our reference on the source */
        BUG_ON(__aspace_del_region(dst, rgn->start, extent));
        --src->refcnt;

        return 0;
}

int
aspace_unsmartmap(id_t src, id_t dst)
{
        int status;
        struct aspace *src_spc, *dst_spc;
        unsigned long irqstate;

        /* Don't allow self SMARTMAP'ing */
        if (src == dst)
                return -EINVAL;

        local_irq_save(irqstate);
        if ((status = lookup_and_lock_two(src, dst, &src_spc, &dst_spc))) {
                local_irq_restore(irqstate);
                return status;
        }

        status = __aspace_unsmartmap(src_spc, dst_spc);
        spin_unlock(&src_spc->lock);
        spin_unlock(&dst_spc->lock);
        local_irq_restore(irqstate);

        return status;
}

int
sys_aspace_unsmartmap(id_t src, id_t dst)
{
        if (current->uid != 0)
                return -EPERM;
        if (!id_ok(src) || !id_ok(dst))
                return -EINVAL;
        return aspace_unsmartmap(src, dst);
}

int
aspace_dump2console(id_t id)
{
        struct aspace *aspace;
        struct region *rgn;
        unsigned long irqstate;

        local_irq_save(irqstate);
        if ((aspace = lookup_and_lock(id)) == NULL) {
                local_irq_restore(irqstate);
                return -EINVAL;
        }

        printk(KERN_DEBUG "DUMP OF ADDRESS SPACE %u:\n", aspace->id);
        printk(KERN_DEBUG "  name:    %s\n", aspace->name);
        printk(KERN_DEBUG "  refcnt:  %d\n", aspace->refcnt);
        printk(KERN_DEBUG "  regions:\n");
        list_for_each_entry(rgn, &aspace->region_list, link) {
                printk(KERN_DEBUG
                        "    [0x%016lx, 0x%016lx%c %s\n",
                        rgn->start,
                        rgn->end,
                        (rgn->end == ULONG_MAX) ? ']' : ')',
                        rgn->name
                );
        }

        spin_unlock(&aspace->lock);
        local_irq_restore(irqstate);

        return 0;
}

int
sys_aspace_dump2console(id_t id)
{
        return aspace_dump2console(id);
}
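
/*
 * End-to-end lifecycle sketch built from the kernel-level API above
 * (illustrative only; error handling is omitted, len is a placeholder,
 * pmem is assumed to be physical memory allocated elsewhere, and
 * VM_READ|VM_WRITE is an assumed permission flag combination):
 *
 *      id_t id;
 *      vaddr_t va;
 *
 *      aspace_create(ANY_ID, "example", &id);
 *      aspace_find_hole(id, 0, len, PAGE_SIZE, &va);
 *      aspace_add_region(id, va, len, VM_READ|VM_WRITE, PAGE_SIZE, "buf");
 *      aspace_map_pmem(id, pmem, va, len);
 *      aspace_unmap_pmem(id, va, len);
 *      aspace_del_region(id, va, len);
 *      aspace_destroy(id);
 */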