1 #include <lwk/kernel.h>
3 #include <lwk/htable.h>
4 #include <lwk/aspace.h>
7 #include <arch/uaccess.h>
/* File-scope state for the task subsystem; all private to this file. */
10 * ID space used to allocate task IDs.
12 static idspace_t idspace;
15 * Hash table used to lookup task structures by ID.
17 static htable_t htable;
20 * Lock for serializing access to the htable.
/* Taken with spin_lock_irqsave() around htable_add() in task_create(). */
22 static DEFINE_SPINLOCK(htable_lock);
/*
 * Boot-time initialization of the task subsystem: creates the ID space
 * from which task IDs are allocated and the ID -> task_struct hash
 * table. Panics on either failure — the kernel cannot manage tasks
 * without these structures.
 * NOTE(review): this listing is elided; the return-type line, braces,
 * and the remaining htable_create() arguments are not shown.
 */
25 task_subsys_init(void)
27 if (idspace_create(__TASK_MIN_ID, __TASK_MAX_ID, &idspace))
28 panic("Failed to create task ID space.");
/* 2^7 = 128 bins; keys are task IDs, links are embedded hlist nodes. */
30 if (htable_create(7 /* 2^7 bins */,
31 offsetof(struct task_struct, id),
32 offsetof(struct task_struct, ht_link),
34 panic("Failed to create task hash table.");
/*
 * Returns the calling task's ID through *id.
 * NOTE(review): return-type line and body are elided from this listing;
 * presumably stores current->id into *id and returns 0 — confirm
 * against the full source.
 */
40 task_get_myid(id_t *id)
/*
 * System-call entry: fetches the caller's task ID via task_get_myid()
 * and, when the user passed a non-NULL pointer, copies the result out
 * to user-space. (Local declarations, error returns, and braces are
 * elided from this listing.)
 */
47 sys_task_get_myid(id_t __user *id)
/* Propagate any failure from the in-kernel lookup. */
52 if ((status = task_get_myid(&_id)) != 0)
/* NULL id means the caller does not want the value copied out. */
55 if (id && copy_to_user(id, &_id, sizeof(*id)))
/*
 * Reserves the specific task ID 'id' in the global task ID space,
 * returning the status from idspace_alloc_id(). No task structure is
 * created; the NULL third argument indicates the allocated ID value
 * itself is not needed back.
 */
62 __task_reserve_id(id_t id)
64 return idspace_alloc_id(idspace, id, NULL);
/*
 * Allocates and initializes a new task structure with the given ID and
 * name. start_state supplies the credentials (uid/gid), the address
 * space to attach to, an optional user-supplied CPU mask, and the
 * requested CPU; arch_task_create() performs the architecture-specific
 * setup. On success *task receives the new task. The task_struct and
 * its kernel stack share one kmem_get_pages(TASK_ORDER) allocation;
 * the error path releases the aspace reference and frees the pages.
 * NOTE(review): many lines are elided from this listing (declarations,
 * error returns, braces); comments below cover only what is visible.
 */
68 __task_create(id_t id, const char *name,
69 const start_state_t *start_state,
70 struct task_struct **task)
73 union task_union *task_union;
74 struct task_struct *tsk;
/* One page-order allocation holds both task_struct and kernel stack. */
76 if ((task_union = kmem_get_pages(TASK_ORDER)) == NULL)
79 tsk = &task_union->task_info;
82 * Initialize the new task. kmem_alloc() allocates zeroed memory
83 * so fields with an initial state of zero do not need to be explicitly
/* NOTE(review): the comment above says kmem_alloc(), but the allocation
 * is kmem_get_pages() — presumably also zero-filled; confirm. */
88 strlcpy(tsk->name, name, sizeof(tsk->name));
89 hlist_node_init(&tsk->ht_link);
90 tsk->state = TASKSTATE_READY;
91 tsk->uid = start_state->uid;
92 tsk->gid = start_state->gid;
/* Takes a reference on the target aspace; dropped on the error path. */
93 tsk->aspace = aspace_acquire(start_state->aspace_id);
/* A user-supplied CPU mask must be a subset of the creator's mask. */
99 if (start_state->cpumask) {
100 cpumask_user2kernel(start_state->cpumask, &tsk->cpumask);
101 if (!cpus_subset(tsk->cpumask, current->cpumask)) {
/* No mask supplied: inherit the creator's CPU mask. */
106 tsk->cpumask = current->cpumask;
/* The requested CPU must exist and be permitted by the task's mask. */
108 if ((start_state->cpu_id >= NR_CPUS)
109 || !cpu_isset(start_state->cpu_id, tsk->cpumask)) {
113 tsk->cpu_id = start_state->cpu_id;
114 list_head_init(&tsk->sched_link);
117 tsk->exit_status = 0;
119 /* Do architecture-specific initialization */
120 if ((status = arch_task_create(tsk, start_state)) != 0)
/* Error unwind: drop the aspace reference and free the task pages. */
129 aspace_release(tsk->aspace);
131 kmem_free_pages(task_union, TASK_ORDER);
/*
 * Creates a new task: allocates an ID (honoring id_request via
 * idspace_alloc_id()), builds the task with __task_create(), publishes
 * it in the ID hash table, and hands it to the scheduler. The ID is
 * freed again if task construction fails.
 * NOTE(review): elided lines include the declarations of status/new_id,
 * the error returns, the *id out-parameter store, and braces.
 */
136 task_create(id_t id_request, const char *name,
137 const start_state_t *start_state, id_t *id)
140 struct task_struct *new_task;
142 unsigned long irqstate;
144 /* Allocate an ID for the new task */
145 if ((status = idspace_alloc_id(idspace, id_request, &new_id)) != 0)
148 /* Create and initialize a new task */
149 if ((status = __task_create(new_id, name, start_state, &new_task))) {
/* Construction failed: give the reserved ID back. */
150 idspace_free_id(idspace, new_id);
154 /* Add new task to a hash table, for quick lookups by ID */
/* IRQ-safe lock: task lookups may happen in interrupt context. */
155 spin_lock_irqsave(&htable_lock, irqstate);
/* The ID was freshly allocated, so insertion must not collide. */
156 BUG_ON(htable_add(htable, new_task));
157 spin_unlock_irqrestore(&htable_lock, irqstate);
159 /* Add the new task to the target CPU's run queue */
160 sched_add_task(new_task);
/*
 * System-call entry for task creation. Verifies the caller is root
 * (uid 0), copies the start_state (and optional CPU mask and name)
 * in from user-space, rejects attempts to attach to the kernel address
 * space, then defers to task_create() and copies the new ID back out.
 * NOTE(review): elided lines include local _name/_id/status
 * declarations, the error returns, and braces.
 */
168 sys_task_create(id_t id_request, const char __user *name,
169 const start_state_t __user *start_state, id_t __user *id)
172 start_state_t _start_state;
173 user_cpumask_t _cpumask;
/* Only root may create tasks. */
177 if (current->uid != 0)
180 if (copy_from_user(&_start_state, start_state, sizeof(_start_state)))
/* User tasks may never be created in the kernel address space. */
183 if (_start_state.aspace_id == KERNEL_ASPACE_ID)
186 if (_start_state.cpumask) {
187 if (copy_from_user(&_cpumask, _start_state.cpumask, sizeof(_cpumask)))
/* Re-point the mask at the kernel-resident copy. */
189 _start_state.cpumask = &_cpumask;
/* Copy the name, then force NUL termination in case it was truncated. */
192 if (name && (strncpy_from_user(_name, name, sizeof(_name)) < 0))
194 _name[sizeof(_name) - 1] = '\0';
196 if ((status = task_create(id_request, _name, &_start_state, &_id)) != 0)
/* NULL id means the caller does not want the new ID copied out. */
199 if (id && copy_to_user(id, &_id, sizeof(*id)))
/*
 * Terminates the calling task: records its exit status, marks it a
 * zombie, and calls schedule(), which removes it from the run queue.
 * Does not return.
 */
206 task_exit(int status)
208 /* Mark the task as exited...
209 * schedule() will remove it from the run queue */
210 current->exit_status = status;
211 current->state = TASKSTATE_EXIT_ZOMBIE;
212 schedule(); /* task is dead, so this should never return */
/*
 * System-call entry for task exit; thin wrapper around task_exit(),
 * which does not return.
 */
218 sys_task_exit(int status)
220 return task_exit(status);
227 * Nothing to do, schedule() will be automatically
228 * called before returning to user-space