/* Copyright (c) 2008, Sandia National Laboratories */
#include <lwk/kernel.h>
#include <lwk/spinlock.h>
#include <lwk/string.h>
#include <lwk/list.h>
#include <lwk/log2.h>
#include <lwk/kmem.h>
#include <lwk/pmem.h>
#include <lwk/task.h>
#include <arch/uaccess.h>
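
/*
 * The kernel's master list of physical memory regions.  Entries are kept
 * sorted by start address and are protected by pmem_list_lock.
 */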
static LIST_HEAD(pmem_list);
static DEFINE_SPINLOCK(pmem_list_lock);
struct pmem_list_entry {
	struct list_head	link;
	struct pmem_region	rgn;
};
static struct pmem_list_entry *
alloc_pmem_list_entry(void)
{
	return kmem_alloc(sizeof(struct pmem_list_entry));
}
static void
free_pmem_list_entry(struct pmem_list_entry *entry)
{
	kmem_free(entry);
}
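
/*
 * Calculates the overlap between regions 'a' and 'b'.  Returns false if
 * the regions do not overlap; otherwise returns true and, if 'dst' is
 * non-NULL, stores the overlapping range in *dst.
 */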
static bool
calc_overlap(const struct pmem_region *a, const struct pmem_region *b,
             struct pmem_region *dst)
{
	if (!((a->start < b->end) && (a->end > b->start)))
		return false;

	if (dst) {
		dst->start = max(a->start, b->start);
		dst->end   = min(a->end, b->end);
	}

	return true;
}
static bool
regions_overlap(const struct pmem_region *a, const struct pmem_region *b)
{
	return calc_overlap(a, b, NULL);
}
static bool
region_is_unique(const struct pmem_region *rgn)
{
	struct pmem_list_entry *entry;

	list_for_each_entry(entry, &pmem_list, link) {
		if (regions_overlap(rgn, &entry->rgn))
			return false;
	}

	return true;
}
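
/*
 * Performs basic sanity checks on a region: the pointer must be non-NULL,
 * the region must have a positive extent (end > start), and its name
 * field must be NULL-terminated.
 */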
static bool
region_is_sane(const struct pmem_region *rgn)
{
	size_t i;

	if (rgn == NULL)
		return false;

	if (rgn->end <= rgn->start)
		return false;

	for (i = 0; i < sizeof(rgn->name); i++) {
		if (rgn->name[i] == '\0')
			break;
	}
	if (i == sizeof(rgn->name))
		return false;

	return true;
}
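
/*
 * Returns true if every byte in [rgn->start, rgn->end) is covered by
 * entries already present in pmem_list.
 */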
static bool
region_is_known(const struct pmem_region *rgn)
{
	struct pmem_list_entry *entry;
	struct pmem_region overlap;
	size_t size;

	/* Subtract off each known subsection of the region */
	size = rgn->end - rgn->start;
	list_for_each_entry(entry, &pmem_list, link) {
		if (!calc_overlap(rgn, &entry->rgn, &overlap))
			continue;

		size -= (overlap.end - overlap.start);
	}

	/* If anything is left over, part of the region is unknown */
	return (size == 0);
}
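
/*
 * Inserts 'entry' into pmem_list, keeping the list sorted by ascending
 * region start address.
 */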
static void
insert_pmem_list_entry(struct pmem_list_entry *entry)
{
	struct list_head *pos;
	struct pmem_list_entry *cur;

	/* Locate the entry that the new entry should be inserted before */
	list_for_each(pos, &pmem_list) {
		cur = list_entry(pos, struct pmem_list_entry, link);
		if (cur->rgn.start > entry->rgn.start)
			break;
	}

	/* Insert just before 'pos' (at the list tail if no entry follows) */
	list_add_tail(&entry->link, pos);
}
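
/*
 * Two regions are mergeable if they are physically adjacent and all of
 * their attribute flags and values (type, lgroup, allocated, name) agree.
 */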
static bool
regions_are_mergeable(const struct pmem_region *a, const struct pmem_region *b)
{
	/* The regions must be physically adjacent */
	if ((a->end != b->start) && (b->end != a->start))
		return false;

	if (a->type_is_set != b->type_is_set)
		return false;
	if (a->type_is_set && (a->type != b->type))
		return false;

	if (a->lgroup_is_set != b->lgroup_is_set)
		return false;
	if (a->lgroup_is_set && (a->lgroup != b->lgroup))
		return false;

	if (a->allocated_is_set != b->allocated_is_set)
		return false;
	if (a->allocated_is_set && (a->allocated != b->allocated))
		return false;

	if (a->name_is_set != b->name_is_set)
		return false;
	if (a->name_is_set && strcmp(a->name, b->name))
		return false;

	return true;
}
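
/*
 * Returns true if 'rgn' overlaps 'query' and satisfies every field that
 * the query marks as set; fields the query leaves unset act as wildcards.
 */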
static bool
region_matches(const struct pmem_region *query, const struct pmem_region *rgn)
{
	if (!regions_overlap(query, rgn))
		return false;

	if (query->type_is_set
	      && (!rgn->type_is_set || (rgn->type != query->type)))
		return false;

	if (query->lgroup_is_set
	      && (!rgn->lgroup_is_set || (rgn->lgroup != query->lgroup)))
		return false;

	if (query->allocated_is_set
	      && (!rgn->allocated_is_set || (rgn->allocated != query->allocated)))
		return false;

	if (query->name_is_set
	      && (!rgn->name_is_set || strcmp(rgn->name, query->name)))
		return false;

	return true;
}
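
/*
 * Walks pmem_list and coalesces adjacent entries whose attributes are
 * identical (see regions_are_mergeable()), shrinking the list in place.
 */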
static void
merge_pmem_list(void)
{
	struct pmem_list_entry *entry, *prev, *tmp;

	prev = NULL;
	list_for_each_entry_safe(entry, tmp, &pmem_list, link) {
		if (prev && regions_are_mergeable(&prev->rgn, &entry->rgn)) {
			prev->rgn.end = entry->rgn.end;
			list_del(&entry->link);
			free_pmem_list_entry(entry);
		} else {
			prev = entry;
		}
	}
}
static void
zero_pmem(const struct pmem_region *rgn)
{
	/* access pmem region via the kernel's identity map */
	memset(__va(rgn->start), 0, rgn->end - rgn->start);
}
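
/*
 * Adds a new region to the kernel's physical memory map.  The region must
 * be sane and must not overlap any region already being tracked.  The
 * caller must hold pmem_list_lock; pmem_add() below is the locking
 * wrapper intended for external callers.
 */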
static int
__pmem_add(const struct pmem_region *rgn)
{
	struct pmem_list_entry *entry;

	if (!region_is_sane(rgn))
		return -EINVAL;

	if (!region_is_unique(rgn))
		return -EEXIST;

	if (!(entry = alloc_pmem_list_entry()))
		return -ENOMEM;

	entry->rgn = *rgn;
	insert_pmem_list_entry(entry);
	merge_pmem_list();

	return 0;
}
int
pmem_add(const struct pmem_region *rgn)
{
	int status;
	unsigned long irqstate;

	spin_lock_irqsave(&pmem_list_lock, irqstate);
	status = __pmem_add(rgn);
	spin_unlock_irqrestore(&pmem_list_lock, irqstate);

	return status;
}
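
/*
 * System call handler for adding a physical memory region.  Only root
 * (uid 0) may call this; the region descriptor is copied in from user
 * space before being handed to pmem_add().
 */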
int
sys_pmem_add(const struct pmem_region __user *rgn)
{
	struct pmem_region _rgn;

	if (current->uid != 0)
		return -EPERM;

	if (copy_from_user(&_rgn, rgn, sizeof(_rgn)))
		return -EINVAL;

	return pmem_add(&_rgn);
}
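
/*
 * Updates the attributes of the portion of the memory map covered by
 * 'update', which must already be fully known.  Each overlapped entry is
 * split into non-overlapping head and tail pieces as needed, the
 * overlapping piece takes on the update's attributes, and compatible
 * neighbors are then re-merged.  When 'umem_only' is true (the user-space
 * path), both the existing entry and the update must be PMEM_TYPE_UMEM.
 * The caller must hold pmem_list_lock.
 */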
static int
__pmem_update(const struct pmem_region *update, bool umem_only)
{
	struct pmem_list_entry *entry, *head, *tail;
	struct pmem_region overlap;

	if (!region_is_sane(update))
		return -EINVAL;

	if (!region_is_known(update))
		return -ENOENT;

	list_for_each_entry(entry, &pmem_list, link) {
		if (!calc_overlap(update, &entry->rgn, &overlap))
			continue;

		/* Jail user-space to PMEM_TYPE_UMEM regions */
		if (umem_only) {
			if (!entry->rgn.type_is_set
			      || (entry->rgn.type != PMEM_TYPE_UMEM))
				return -EPERM;
			if (!update->type_is_set
			      || (update->type != PMEM_TYPE_UMEM))
				return -EPERM;
		}

		/* Handle head of entry non-overlap */
		if (entry->rgn.start < overlap.start) {
			if (!(head = alloc_pmem_list_entry()))
				return -ENOMEM;
			head->rgn     = entry->rgn;
			head->rgn.end = overlap.start;
			list_add_tail(&head->link, &entry->link);
		}

		/* Handle tail of entry non-overlap */
		if (entry->rgn.end > overlap.end) {
			if (!(tail = alloc_pmem_list_entry()))
				return -ENOMEM;
			tail->rgn       = entry->rgn;
			tail->rgn.start = overlap.end;
			list_add(&tail->link, &entry->link);
		}

		/* Update entry to reflect the overlap */
		entry->rgn       = *update;
		entry->rgn.start = overlap.start;
		entry->rgn.end   = overlap.end;
	}

	merge_pmem_list();

	return 0;
}
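
/*
 * Locking wrappers around __pmem_update().  pmem_update() is the
 * kernel-internal interface; sys_pmem_update() below uses the restricted
 * umem_only path on behalf of user space.
 */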
static int
_pmem_update(const struct pmem_region *update, bool umem_only)
{
	int status;
	unsigned long irqstate;

	spin_lock_irqsave(&pmem_list_lock, irqstate);
	status = __pmem_update(update, umem_only);
	spin_unlock_irqrestore(&pmem_list_lock, irqstate);

	return status;
}
int
pmem_update(const struct pmem_region *update)
{
	return _pmem_update(update, false);
}
int
sys_pmem_update(const struct pmem_region __user *update)
{
	struct pmem_region _update;

	if (current->uid != 0)
		return -EPERM;

	if (copy_from_user(&_update, update, sizeof(_update)))
		return -EINVAL;

	return _pmem_update(&_update, true);
}
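
/*
 * Searches pmem_list for the first entry matching 'query'.  On success,
 * *result is set to the matching entry's attributes with its extent
 * clipped to the query window; returns -ENOENT if nothing matches.  The
 * caller must hold pmem_list_lock.
 */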
static int
__pmem_query(const struct pmem_region *query, struct pmem_region *result)
{
	struct pmem_list_entry *entry;
	struct pmem_region *rgn;

	if (!region_is_sane(query))
		return -EINVAL;

	list_for_each_entry(entry, &pmem_list, link) {
		rgn = &entry->rgn;
		if (!region_matches(query, rgn))
			continue;

		/* match found, update result */
		if (result) {
			*result = *rgn;
			calc_overlap(query, rgn, result);
		}
		return 0;
	}

	return -ENOENT;
}
int
pmem_query(const struct pmem_region *query, struct pmem_region *result)
{
	int status;
	unsigned long irqstate;

	spin_lock_irqsave(&pmem_list_lock, irqstate);
	status = __pmem_query(query, result);
	spin_unlock_irqrestore(&pmem_list_lock, irqstate);

	return status;
}
int
sys_pmem_query(const struct pmem_region __user *query,
               struct pmem_region __user *result)
{
	struct pmem_region _query, _result;
	int status;

	if (current->uid != 0)
		return -EPERM;

	if (copy_from_user(&_query, query, sizeof(_query)))
		return -EINVAL;

	if ((status = pmem_query(&_query, &_result)) != 0)
		return status;

	if (result && copy_to_user(result, &_result, sizeof(*result)))
		return -EINVAL;

	return 0;
}
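
/*
 * Allocates physical memory.  The search is first-fit over the sorted
 * region list: each candidate returned by __pmem_query() is aligned up,
 * checked for sufficient size, and, if it fits, marked allocated via
 * __pmem_update(), zeroed, and returned through 'result'.  'alignment'
 * must be zero or a power of two.  The caller must hold pmem_list_lock;
 * external callers should use pmem_alloc().
 *
 * Rough usage sketch (illustrative only; the constraint setup shown here
 * is an assumption, not taken from this file):
 *
 *	struct pmem_region constraint, result;
 *	memset(&constraint, 0, sizeof(constraint));
 *	constraint.start            = 0;
 *	constraint.end              = (size_t)-1;
 *	constraint.type_is_set      = true;
 *	constraint.type             = PMEM_TYPE_UMEM;
 *	constraint.allocated_is_set = true;
 *	constraint.allocated        = false;
 *	status = pmem_alloc(PAGE_SIZE, PAGE_SIZE, &constraint, &result);
 */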
static int
__pmem_alloc(size_t size, size_t alignment,
             const struct pmem_region *constraint,
             struct pmem_region *result)
{
	int status;
	struct pmem_region query;
	struct pmem_region candidate;

	if (size == 0)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	if (!region_is_sane(constraint))
		return -EINVAL;

	if (constraint->allocated_is_set && constraint->allocated)
		return -EINVAL;

	query = *constraint;

	/* First-fit search over all regions matching the constraint */
	while ((status = __pmem_query(&query, &candidate)) == 0) {
		if (alignment) {
			candidate.start = round_up(candidate.start, alignment);
			if (candidate.start >= candidate.end)
				goto next;
		}

		if ((candidate.end - candidate.start) >= size) {
			candidate.end              = candidate.start + size;
			candidate.allocated_is_set = true;
			candidate.allocated        = true;
			status = __pmem_update(&candidate, false);
			BUG_ON(status);
			zero_pmem(&candidate);
			if (result)
				*result = candidate;
			return 0;
		}
next:
		query.start = candidate.end;
	}
	BUG_ON(status != -ENOENT);

	return -ENOMEM;
}
int
pmem_alloc(size_t size, size_t alignment,
           const struct pmem_region *constraint,
           struct pmem_region *result)
{
	int status;
	unsigned long irqstate;

	spin_lock_irqsave(&pmem_list_lock, irqstate);
	status = __pmem_alloc(size, alignment, constraint, result);
	spin_unlock_irqrestore(&pmem_list_lock, irqstate);

	return status;
}
int
sys_pmem_alloc(size_t size, size_t alignment,
               const struct pmem_region __user *constraint,
               struct pmem_region __user *result)
{
	struct pmem_region _constraint, _result;
	int status;

	if (current->uid != 0)
		return -EPERM;

	if (copy_from_user(&_constraint, constraint, sizeof(_constraint)))
		return -EINVAL;

	if ((status = pmem_alloc(size, alignment, &_constraint, &_result)) != 0)
		return status;

	if (result && copy_to_user(result, &_result, sizeof(*result)))
		return -EINVAL;

	return 0;
}