#include <lwk/kernel.h>
#include <lwk/spinlock.h>
#include <lwk/percpu.h>
#include <lwk/aspace.h>
#include <lwk/task.h>
#include <lwk/sched.h>
#include <lwk/xcall.h>
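
/*
 * Per-CPU run queue. Each CPU schedules only tasks on its own queue;
 * the lock serializes local scheduling against remote task additions,
 * deletions, and wakeups, and idle_task runs whenever no other task
 * on the queue is ready.
 */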
struct run_queue {
        spinlock_t           lock;
        struct list_head     task_list;
        struct task_struct * idle_task;
};

static DEFINE_PER_CPU(struct run_queue, run_queue);
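
/*
 * Loop executed by each CPU's idle task. arch_idle_task_loop_body()
 * typically halts the CPU until the next interrupt arrives; schedule()
 * then looks for any task that has become ready in the meantime.
 */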
static void
idle_task_loop(void) {
        while (1) {
                arch_idle_task_loop_body();
                schedule();
        }
}
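
/*
 * Initializes the scheduler subsystem. Reserves the shared idle task
 * ID, then sets up a run queue and creates an idle task for every
 * present CPU.
 */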
int __init
sched_subsys_init(void)
{
        id_t cpu_id;
        struct run_queue *runq;
        struct task_struct *idle_task;
        start_state_t start_state;
        int status;

        /* Reserve the idle tasks' ID. All idle tasks share the same ID. */
        status = __task_reserve_id(IDLE_TASK_ID);
        if (status)
                panic("Failed to reserve IDLE_TASK_ID (status=%d).", status);

        /* Initialize each CPU's run queue */
        for_each_cpu_mask(cpu_id, cpu_present_map) {
                runq = &per_cpu(run_queue, cpu_id);

                spin_lock_init(&runq->lock);
                list_head_init(&runq->task_list);

                /*
                 * Create this CPU's idle task. When a CPU has no
                 * other work to do, it runs the idle task.
                 */
                start_state.aspace_id   = KERNEL_ASPACE_ID;
                start_state.entry_point = (vaddr_t)idle_task_loop;
                start_state.stack_ptr   = 0; /* will be set automatically */
                start_state.cpu_id      = cpu_id;
                start_state.cpumask     = NULL;

                status = __task_create(IDLE_TASK_ID, "idle_task", &start_state,
                                       &idle_task);
                if (status)
                        panic("Failed to create idle_task (status=%d).", status);

                runq->idle_task = idle_task;
        }

        return 0;
}
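
/*
 * Makes a task eligible to run by appending it to the tail of its
 * assigned CPU's run queue, then pokes that CPU to reschedule.
 */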
void
sched_add_task(struct task_struct *task)
{
        id_t cpu = task->cpu_id;
        struct run_queue *runq;
        unsigned long irqstate;

        runq = &per_cpu(run_queue, cpu);
        spin_lock_irqsave(&runq->lock, irqstate);
        list_add_tail(&task->sched_link, &runq->task_list);
        spin_unlock_irqrestore(&runq->lock, irqstate);

        /* Tell the target CPU to run the scheduler */
        xcall_reschedule(cpu);
}
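
/*
 * Removes a task from its assigned CPU's run queue so that it is no
 * longer considered by the scheduler.
 */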
void
sched_del_task(struct task_struct *task)
{
        struct run_queue *runq;
        unsigned long irqstate;

        runq = &per_cpu(run_queue, task->cpu_id);
        spin_lock_irqsave(&runq->lock, irqstate);
        list_del(&task->sched_link);
        spin_unlock_irqrestore(&runq->lock, irqstate);
}
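
/*
 * Wakes up a task, marking it TASKSTATE_READY, but only if its current
 * state is one of the caller-supplied valid_states. Returns 0 on
 * success or nonzero if the task was not in a wakeable state.
 */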
int
sched_wakeup_task(struct task_struct *task, taskstate_t valid_states)
{
        id_t cpu;
        struct run_queue *runq;
        int status;
        unsigned long irqstate;

        /* Protect against the task being migrated to a different CPU */
repeat_lock_runq:
        cpu  = task->cpu_id;
        runq = &per_cpu(run_queue, cpu);
        spin_lock_irqsave(&runq->lock, irqstate);
        if (cpu != task->cpu_id) {
                spin_unlock_irqrestore(&runq->lock, irqstate);
                goto repeat_lock_runq;
        }

        if (task->state & valid_states) {
                set_mb(task->state, TASKSTATE_READY);
                status = 0;
        } else {
                status = -EINVAL;  /* task was not in a wakeable state */
        }
        spin_unlock_irqrestore(&runq->lock, irqstate);

        if (!status && (cpu != this_cpu))
                xcall_reschedule(cpu);

        return status;
}
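
/*
 * Switches this CPU from running prev to running next. Called from
 * schedule() with the local run queue lock held and interrupts
 * disabled.
 */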
static void
context_switch(struct task_struct *prev, struct task_struct *next)
{
        /* Switch to the next task's address space */
        if (prev->aspace != next->aspace)
                arch_aspace_activate(next->aspace);

        /*
         * Switch to the next task's register state and kernel stack.
         * There are three tasks involved in a context switch:
         *     1. The previous task
         *     2. The next task
         *     3. The task that was running when next was suspended
         * arch_context_switch() returns task 1 so that we can maintain
         * the correct value of prev. Otherwise, the restored register
         * state of next would have prev set to task 3, which we don't
         * care about (it may have moved CPUs, been destroyed, etc.).
         */
        prev = arch_context_switch(prev, next);

        /* Prevent the compiler from optimizing beyond this point */
        barrier();
}
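
/*
 * The main scheduler entry point. Round-robins the current task to the
 * back of this CPU's run queue, picks the first ready task (or the
 * idle task if none is ready), and context switches to it.
 */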
void
schedule(void)
{
        struct run_queue *runq = &per_cpu(run_queue, this_cpu);
        struct task_struct *prev = current, *next = NULL, *task;

        spin_lock_irq(&runq->lock);

        /* Move the currently running task to the end of the run queue */
        if (!list_empty(&prev->sched_link)) {
                list_del(&prev->sched_link);

                /* If the task has exited, don't re-link it */
                if (prev->state != TASKSTATE_EXIT_ZOMBIE)
                        list_add_tail(&prev->sched_link, &runq->task_list);
        }

        /* Look for a ready-to-execute task */
        list_for_each_entry(task, &runq->task_list, sched_link) {
                if (task->state == TASKSTATE_READY) {
                        next = task;
                        break;
                }
        }

        /* If no tasks are ready to run, run the idle task */
        if (next == NULL)
                next = runq->idle_task;

        if (prev != next) {
                context_switch(prev, next);

                /* next is now running. Since it may have changed CPUs while
                 * it was sleeping, refresh the local run queue pointer. */
                runq = &per_cpu(run_queue, this_cpu);
        }

        spin_unlock_irq(&runq->lock);
}
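
/*
 * Executed in the context of a newly created task the first time it is
 * scheduled. It releases the run queue lock that schedule() acquired on
 * the new task's behalf, leaving IRQs disabled for the arch startup
 * code.
 */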
void
schedule_new_task_tail(void)
{
        struct run_queue *runq = &per_cpu(run_queue, this_cpu);

        BUG_ON(irqs_enabled());

        spin_unlock(&runq->lock);       /* Keep IRQs disabled; the arch code
                                         * will re-enable them as part of
                                         * starting the new task. */
}