Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; substitute the desired branch name for devel.
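For example, a release branch can be tracked the same way. The branch name below is only an illustration; list the repository's actual branches first to see what is available:

  # Show the branches available on the remote
  git branch -r

  # Track and switch to a release branch (the name here is an example)
  git checkout --track -b Release-1.2 origin/Release-1.2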


kitten/kernel/sched.c:
#include <lwk/kernel.h>
#include <lwk/spinlock.h>
#include <lwk/percpu.h>
#include <lwk/aspace.h>
#include <lwk/sched.h>
#include <lwk/xcall.h>

/* Per-CPU run queue. The task list and count are protected by ->lock. */
struct run_queue {
        spinlock_t           lock;
        size_t               num_tasks;
        struct list_head     task_list;
        struct task_struct * idle_task;
};

/* Each CPU gets its own private run queue. */
static DEFINE_PER_CPU(struct run_queue, run_queue);

/* Loop executed by each CPU's idle task: perform the architecture-specific
 * idle operation, then call schedule() to look for runnable work. */
static void
idle_task_loop(void) {
        while (1) {
                arch_idle_task_loop_body();
                schedule();
        }
}

/* Initializes the scheduler: reserves the shared idle task ID, then sets up
 * a run queue and an idle task for each present CPU. */
int __init
sched_subsys_init(void)
{
        id_t cpu_id;
        struct run_queue *runq;
        struct task_struct *idle_task;
        start_state_t start_state;
        int status;

        /* Reserve the idle tasks' ID. All idle tasks share the same ID. */
        status = __task_reserve_id(IDLE_TASK_ID);
        if (status)
                panic("Failed to reserve IDLE_TASK_ID (status=%d).", status);

        /* Initialize each CPU's run queue */
        for_each_cpu_mask(cpu_id, cpu_present_map) {
                runq = &per_cpu(run_queue, cpu_id);

                spin_lock_init(&runq->lock);
                runq->num_tasks = 0;
                list_head_init(&runq->task_list);

                /*
                 * Create this CPU's idle task. When a CPU has no
                 * other work to do, it runs the idle task.
                 */
                start_state.uid         = 0;
                start_state.gid         = 0;
                start_state.aspace_id   = KERNEL_ASPACE_ID;
                start_state.entry_point = (vaddr_t)idle_task_loop;
                start_state.stack_ptr   = 0; /* will be set automatically */
                start_state.cpu_id      = cpu_id;
                start_state.cpumask     = NULL;

                status = __task_create(IDLE_TASK_ID, "idle_task", &start_state,
                                       &idle_task);
                if (status)
                        panic("Failed to create idle_task (status=%d).", status);

                runq->idle_task = idle_task;
        }

        return 0;
}

/* Adds a task to the tail of its CPU's run queue. If the task belongs to a
 * remote CPU, that CPU is asked to reschedule via a cross-call. */
void
sched_add_task(struct task_struct *task)
{
        id_t cpu = task->cpu_id;
        struct run_queue *runq;
        unsigned long irqstate;

        runq = &per_cpu(run_queue, cpu);
        spin_lock_irqsave(&runq->lock, irqstate);
        list_add_tail(&task->sched_link, &runq->task_list);
        ++runq->num_tasks;
        spin_unlock_irqrestore(&runq->lock, irqstate);

        if (cpu != this_cpu)
                xcall_reschedule(cpu);
}

/* Removes a task from its CPU's run queue. */
void
sched_del_task(struct task_struct *task)
{
        struct run_queue *runq;
        unsigned long irqstate;

        runq = &per_cpu(run_queue, task->cpu_id);
        spin_lock_irqsave(&runq->lock, irqstate);
        list_del(&task->sched_link);
        --runq->num_tasks;
        spin_unlock_irqrestore(&runq->lock, irqstate);
}

/* Marks a task ready to run if its current state is one of valid_states.
 * Returns 0 on success or -EINVAL if the task was not in a wakeable state. */
int
sched_wakeup_task(struct task_struct *task, taskstate_t valid_states)
{
        id_t cpu;
        struct run_queue *runq;
        int status;
        unsigned long irqstate;

        /* Protect against the task being migrated to a different CPU */
repeat_lock_runq:
        cpu  = task->cpu_id;
        runq = &per_cpu(run_queue, cpu);
        spin_lock_irqsave(&runq->lock, irqstate);
        if (cpu != task->cpu_id) {
                spin_unlock_irqrestore(&runq->lock, irqstate);
                goto repeat_lock_runq;
        }
        if (task->state & valid_states) {
                set_mb(task->state, TASKSTATE_READY);
                status = 0;
        } else {
                status = -EINVAL;
        }
        spin_unlock_irqrestore(&runq->lock, irqstate);

        if (!status && (cpu != this_cpu))
                xcall_reschedule(cpu);

        return status;
}

static void
context_switch(struct task_struct *prev, struct task_struct *next)
{
        /* Switch to the next task's address space */
        if (prev->aspace != next->aspace)
                arch_aspace_activate(next->aspace);

        /**
         * Switch to the next task's register state and kernel stack.
         * There are three tasks involved in a context switch:
         *     1. The previous task
         *     2. The next task
         *     3. The task that was running when next was suspended
         * arch_context_switch() returns task 1 (the previous task) so that
         * we can maintain the correct value of prev.  Otherwise, the
         * restored register state of next would have prev set to 3, which
         * we don't care about (it may have moved CPUs, been destroyed, etc.).
         */
        prev = arch_context_switch(prev, next);

        /* Prevent compiler from optimizing beyond this point */
        barrier();
}

/* Selects the next task to run on this CPU and switches to it. */
void
schedule(void)
{
        struct run_queue *runq = &per_cpu(run_queue, this_cpu);
        struct task_struct *prev = current, *next = NULL, *task;

        spin_lock_irq(&runq->lock);

        /* Move the currently running task to the end of the run queue */
        if (!list_empty(&prev->sched_link)) {
                list_del(&prev->sched_link);
                /* If the task has exited, don't re-link it */
                if (prev->state != TASKSTATE_EXIT_ZOMBIE)
                        list_add_tail(&prev->sched_link, &runq->task_list);
        }

        /* Look for a ready to execute task */
        list_for_each_entry(task, &runq->task_list, sched_link) {
                if (task->state == TASKSTATE_READY) {
                        next = task;
                        break;
                }
        }

        /* If no tasks are ready to run, run the idle task */
        if (next == NULL)
                next = runq->idle_task;

        if (prev != next) {
                context_switch(prev, next);
                /* next is now running. Since it may have changed CPUs while
                 * it was sleeping, we need to refresh local variables. */
                runq = &per_cpu(run_queue, this_cpu);
        }

        spin_unlock_irq(&runq->lock);
}

/* Run the first time a newly created task is scheduled in; drops the run
 * queue lock that schedule() acquired before switching to it. */
void
schedule_new_task_tail(void)
{
        struct run_queue *runq = &per_cpu(run_queue, this_cpu);
        BUG_ON(irqs_enabled());
        spin_unlock(&runq->lock);  /* keep IRQs disabled, arch code will
                                    * re-enable IRQs as part of starting
                                    * the new task */
}