/*
 * Kernel threads
 * Copyright (c) 2001,2003 David H. Hovemeyer <daveho@cs.umd.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "COPYING".
 */
10 #include <geekos/kassert.h>
11 #include <geekos/defs.h>
12 #include <geekos/screen.h>
13 #include <geekos/int.h>
14 #include <geekos/mem.h>
15 #include <geekos/symbol.h>
16 #include <geekos/string.h>
17 #include <geekos/kthread.h>
18 #include <geekos/malloc.h>
19 #include <geekos/serial.h>
21 /* ----------------------------------------------------------------------
23 * ---------------------------------------------------------------------- */
26 * List of all threads in the system.
28 static struct All_Thread_List s_allThreadList;
31 * Queue of runnable threads.
33 static struct Thread_Queue s_runQueue;
38 struct Kernel_Thread* g_currentThread;
41 * Boolean flag indicating that we need to choose a new runnable thread.
42 * It is checked by the interrupt return code (Handle_Interrupt,
43 * in lowlevel.asm) before returning from an interrupt.
48 * Boolean flag indicating that preemption is disabled.
49 * When set, external interrupts (such as the timer tick)
50 * will not cause a new thread to be selected.
52 volatile int g_preemptionDisabled;
55 * Queue of finished threads needing disposal,
56 * and a wait queue used for communication between exited threads
57 * and the reaper thread.
59 static struct Thread_Queue s_graveyardQueue;
60 static struct Thread_Queue s_reaperWaitQueue;
63 * Counter for keys that access thread-local data, and an array
64 * of destructors for freeing that data when the thread dies. This is
65 * based on POSIX threads' thread-specific data functionality.
67 static unsigned int s_tlocalKeyCounter = 0;
68 static tlocal_destructor_t s_tlocalDestructors[MAX_TLOCAL_KEYS];
/* ----------------------------------------------------------------------
 * Private functions
 * ---------------------------------------------------------------------- */
75 * Initialize a new Kernel_Thread.
77 static void Init_Thread(struct Kernel_Thread* kthread, void* stackPage,
78 int priority, bool detached)
80 static int nextFreePid = 1;
82 struct Kernel_Thread* owner = detached ? (struct Kernel_Thread*)0 : g_currentThread;
84 memset(kthread, '\0', sizeof(*kthread));
85 kthread->stackPage = stackPage;
86 kthread->esp = ((ulong_t) kthread->stackPage) + PAGE_SIZE;
87 kthread->numTicks = 0;
88 kthread->priority = priority;
89 kthread->userContext = 0;
90 kthread->owner = owner;
93 * The thread has an implicit self-reference.
94 * If the thread is not detached, then its owner
95 * also has a reference to it.
97 kthread->refCount = detached ? 1 : 2;
99 kthread->alive = true;
100 Clear_Thread_Queue(&kthread->joinQueue);
101 kthread->pid = nextFreePid++;
106 * Create a new raw thread object.
107 * Returns a null pointer if there isn't enough memory.
109 static struct Kernel_Thread* Create_Thread(int priority, bool detached)
111 struct Kernel_Thread* kthread;
115 * For now, just allocate one page each for the thread context
116 * object and the thread's stack.
118 kthread = Alloc_Page();
120 stackPage = Alloc_Page();
122 /* Make sure that the memory allocations succeeded. */
125 if (stackPage == 0) {
130 /*Print("New thread @ %x, stack @ %x\n", kthread, stackPage); */
133 * Initialize the stack pointer of the new thread
134 * and accounting info
136 Init_Thread(kthread, stackPage, priority, detached);
138 /* Add to the list of all threads in the system. */
139 Add_To_Back_Of_All_Thread_List(&s_allThreadList, kthread);
145 * Push a dword value on the stack of given thread.
146 * We use this to set up some context for the thread before
147 * we make it runnable.
149 static __inline__ void Push(struct Kernel_Thread* kthread, ulong_t value)
152 *((ulong_t *) kthread->esp) = value;
156 * Destroy given thread.
157 * This function should perform all cleanup needed to
158 * reclaim the resources used by a thread.
159 * Called with interrupts enabled.
161 static void Destroy_Thread(struct Kernel_Thread* kthread)
164 /* Dispose of the thread's memory. */
165 Disable_Interrupts();
166 Free_Page(kthread->stackPage);
169 /* Remove from list of all threads */
170 Remove_From_All_Thread_List(&s_allThreadList, kthread);
177 * Hand given thread to the reaper for destruction.
178 * Must be called with interrupts disabled!
180 static void Reap_Thread(struct Kernel_Thread* kthread)
182 KASSERT(!Interrupts_Enabled());
183 Enqueue_Thread(&s_graveyardQueue, kthread);
184 Wake_Up(&s_reaperWaitQueue);
188 * Called when a reference to the thread is broken.
190 static void Detach_Thread(struct Kernel_Thread* kthread)
192 KASSERT(!Interrupts_Enabled());
193 KASSERT(kthread->refCount > 0);
196 if (kthread->refCount == 0) {
197 Reap_Thread(kthread);
/*
 * This function performs any needed initialization before
 * a thread start function is executed. Currently we just use
 * it to enable interrupts (since Schedule() always activates
 * a thread with interrupts disabled).
 */
static void Launch_Thread(void)
{
    Enable_Interrupts();
}
/*
 * Push initial values for general purpose registers.
 */
static void Push_General_Registers(struct Kernel_Thread* kthread)
{
    /*
     * Push initial values for saved general-purpose registers.
     * (The actual values are not important.)
     * NOTE(review): the third slot was mislabeled "edx" (twice); labeled
     * "ecx" here — confirm ordering against Save_Registers in lowlevel.asm.
     */
    Push(kthread, 0);  /* eax */
    Push(kthread, 0);  /* ebx */
    Push(kthread, 0);  /* ecx */
    Push(kthread, 0);  /* edx */
    Push(kthread, 0);  /* esi */
    Push(kthread, 0);  /* edi */
    Push(kthread, 0);  /* ebp */
}
/*
 * Shutdown a kernel thread.
 * This is called if a kernel thread exits by falling off
 * the end of its start function.
 */
static void Shutdown_Thread(void)
{
    Exit(0);
}
241 * Set up the initial context for a kernel-mode-only thread.
243 static void Setup_Kernel_Thread(
244 struct Kernel_Thread* kthread,
245 Thread_Start_Func startFunc,
249 * Push the argument to the thread start function, and the
250 * return address (the Shutdown_Thread function, so the thread will
251 * go away cleanly when the start function returns).
254 Push(kthread, (ulong_t) &Shutdown_Thread);
256 /* Push the address of the start function. */
257 Push(kthread, (ulong_t) startFunc);
260 * To make the thread schedulable, we need to make it look
261 * like it was suspended by an interrupt. This means pushing
262 * an "eflags, cs, eip" sequence onto the stack,
263 * as well as int num, error code, saved registers, etc.
267 * The EFLAGS register will have all bits clear.
268 * The important constraint is that we want to have the IF
269 * bit clear, so that interrupts are disabled when the
272 Push(kthread, 0UL); /* EFLAGS */
275 * As the "return address" specifying where the new thread will
276 * start executing, use the Launch_Thread() function.
278 Push(kthread, KERNEL_CS);
279 Push(kthread, (ulong_t) &Launch_Thread);
281 /* Push fake error code and interrupt number. */
285 /* Push initial values for general-purpose registers. */
286 Push_General_Registers(kthread);
289 * Push values for saved segment registers.
290 * Only the ds and es registers will contain valid selectors.
291 * The fs and gs registers are not used by any instruction
294 Push(kthread, KERNEL_DS); /* ds */
295 Push(kthread, KERNEL_DS); /* es */
296 Push(kthread, 0); /* fs */
297 Push(kthread, 0); /* gs */
303 * This is the body of the idle thread. Its job is to preserve
304 * the invariant that a runnable thread always exists,
305 * i.e., the run queue is never empty.
307 static void Idle(ulong_t arg)
314 * The reaper thread. Its job is to de-allocate memory
315 * used by threads which have finished.
317 static void Reaper(ulong_t arg)
319 struct Kernel_Thread *kthread;
321 Disable_Interrupts();
324 /* See if there are any threads needing disposal. */
325 if ((kthread = s_graveyardQueue.head) == 0) {
326 /* Graveyard is empty, so wait for a thread to die. */
327 Wait(&s_reaperWaitQueue);
330 /* Make the graveyard queue empty. */
331 Clear_Thread_Queue(&s_graveyardQueue);
334 * Now we can re-enable interrupts, since we
335 * have removed all the threads needing disposal.
338 Yield(); /* allow other threads to run? */
340 /* Dispose of the dead threads. */
341 while (kthread != 0) {
342 struct Kernel_Thread* next = Get_Next_In_Thread_Queue(kthread);
344 Print("Reaper: disposing of thread @ %x, stack @ %x\n",
345 kthread, kthread->stackPage);
347 Destroy_Thread(kthread);
352 * Disable interrupts again, since we're going to
353 * do another iteration.
355 Disable_Interrupts();
361 * Find the best (highest priority) thread in given
362 * thread queue. Returns null if queue is empty.
364 static __inline__ struct Kernel_Thread* Find_Best(struct Thread_Queue* queue)
366 /* Pick the highest priority thread */
367 struct Kernel_Thread *kthread = queue->head, *best = 0;
368 while (kthread != 0) {
369 if (best == 0 || kthread->priority > best->priority)
371 kthread = Get_Next_In_Thread_Queue(kthread);
377 * Acquires pointer to thread-local data from the current thread
378 * indexed by the given key. Assumes interrupts are off.
380 static __inline__ const void** Get_Tlocal_Pointer(tlocal_key_t k)
382 struct Kernel_Thread* current = g_currentThread;
384 KASSERT(k < MAX_TLOCAL_KEYS);
386 return ¤t->tlocalData[k];
390 * Clean up any thread-local data upon thread exit. Assumes
391 * this is called with interrupts disabled. We follow the POSIX style
392 * of possibly invoking a destructor more than once, because a
393 * destructor to some thread-local data might cause other thread-local
394 * data to become alive once again. If everything is NULL by the end
395 * of an iteration, we are done.
397 static void Tlocal_Exit(struct Kernel_Thread* curr) {
398 int i, j, called = 0;
400 KASSERT(!Interrupts_Enabled());
402 for (j = 0; j<MIN_DESTRUCTOR_ITERATIONS; j++) {
404 for (i = 0; i<MAX_TLOCAL_KEYS; i++) {
406 void *x = (void *)curr->tlocalData[i];
407 if (x != NULL && s_tlocalDestructors[i] != NULL) {
409 curr->tlocalData[i] = NULL;
413 s_tlocalDestructors[i](x);
414 Disable_Interrupts();
/* ----------------------------------------------------------------------
 * Public functions
 * ---------------------------------------------------------------------- */
426 void Init_Scheduler(void)
428 struct Kernel_Thread* mainThread = (struct Kernel_Thread *) KERNEL_THREAD_OBJECT;
430 PrintBoth("Initializing Scheduler\n");
433 * Create initial kernel thread context object and stack,
434 * and make them current.
436 Init_Thread(mainThread, (void *) KERNEL_STACK, PRIORITY_NORMAL, true);
437 g_currentThread = mainThread;
438 Add_To_Back_Of_All_Thread_List(&s_allThreadList, mainThread);
441 * Create the idle thread.
443 /*Print("starting idle thread\n");*/
444 Start_Kernel_Thread(Idle, 0, PRIORITY_IDLE, true);
447 * Create the reaper thread.
449 /*Print("starting reaper thread\n");*/
450 Start_Kernel_Thread(Reaper, 0, PRIORITY_NORMAL, true);
454 * Start a kernel-mode-only thread, using given function as its body
455 * and passing given argument as its parameter. Returns pointer
456 * to the new thread if successful, null otherwise.
458 * startFunc - is the function to be called by the new thread
459 * arg - is a paramter to pass to the new function
460 * priority - the priority of this thread (use PRIORITY_NORMAL) for
462 * detached - use false for kernel threads
464 struct Kernel_Thread* Start_Kernel_Thread(
465 Thread_Start_Func startFunc,
471 struct Kernel_Thread* kthread = Create_Thread(priority, detached);
474 * Create the initial context for the thread to make
477 Setup_Kernel_Thread(kthread, startFunc, arg);
480 /* Atomically put the thread on the run queue. */
481 Make_Runnable_Atomic(kthread);
488 * Add given thread to the run queue, so that it
489 * may be scheduled. Must be called with interrupts disabled!
491 void Make_Runnable(struct Kernel_Thread* kthread)
493 KASSERT(!Interrupts_Enabled());
495 Enqueue_Thread(&s_runQueue, kthread);
/*
 * Atomically make a thread runnable.
 * Assumes interrupts are currently enabled.
 */
void Make_Runnable_Atomic(struct Kernel_Thread* kthread)
{
    /* Bracket the queue update with an interrupt-disabled window. */
    Disable_Interrupts();
    Make_Runnable(kthread);
    Enable_Interrupts();
}
510 * Get the thread that currently has the CPU.
512 struct Kernel_Thread* Get_Current(void)
514 return g_currentThread;
518 * Get the next runnable thread from the run queue.
519 * This is the scheduler.
521 struct Kernel_Thread* Get_Next_Runnable(void)
523 struct Kernel_Thread* best = 0;
525 best = Find_Best(&s_runQueue);
527 Remove_Thread(&s_runQueue, best);
530 * Print("Scheduling %x\n", best);
536 * Schedule a thread that is waiting to run.
537 * Must be called with interrupts off!
538 * The current thread should already have been placed
539 * on whatever queue is appropriate (i.e., either the
540 * run queue if it is still runnable, or a wait queue
541 * if it is waiting for an event to occur).
545 struct Kernel_Thread* runnable;
547 /* Make sure interrupts really are disabled */
548 KASSERT(!Interrupts_Enabled());
550 /* Preemption should not be disabled. */
551 KASSERT(!g_preemptionDisabled);
553 /* Get next thread to run from the run queue */
554 runnable = Get_Next_Runnable();
557 * Activate the new thread, saving the context of the current thread.
558 * Eventually, this thread will get re-activated and Switch_To_Thread()
559 * will "return", and then Schedule() will return to wherever
560 * it was called from.
562 Switch_To_Thread(runnable);
566 * Voluntarily give up the CPU to another thread.
567 * Does nothing if no other threads are ready to run.
571 Disable_Interrupts();
572 Make_Runnable(g_currentThread);
578 * Exit the current thread.
579 * Calling this function initiates a context switch.
581 void Exit(int exitCode)
583 struct Kernel_Thread* current = g_currentThread;
585 if (Interrupts_Enabled())
586 Disable_Interrupts();
589 current->exitCode = exitCode;
590 current->alive = false;
592 /* Clean up any thread-local memory */
593 Tlocal_Exit(g_currentThread);
595 /* Notify the thread's owner, if any */
596 Wake_Up(¤t->joinQueue);
598 /* Remove the thread's implicit reference to itself. */
599 Detach_Thread(g_currentThread);
602 * Schedule a new thread.
603 * Since the old thread wasn't placed on any
604 * thread queue, it won't get scheduled again.
608 /* Shouldn't get here */
610 // the build dies on a warning for a 'noreturn' violation
611 // This ensures that this function never returns
616 * Wait for given thread to die.
617 * Interrupts must be enabled.
618 * Returns the thread exit code.
620 int Join(struct Kernel_Thread* kthread)
624 KASSERT(Interrupts_Enabled());
626 /* It is only legal for the owner to join */
627 KASSERT(kthread->owner == g_currentThread);
629 Disable_Interrupts();
631 /* Wait for it to die */
632 while (kthread->alive) {
633 Wait(&kthread->joinQueue);
636 /* Get thread exit code. */
637 exitCode = kthread->exitCode;
639 /* Release our reference to the thread */
640 Detach_Thread(kthread);
648 * Look up a thread by its process id.
649 * The caller must be the thread's owner.
651 struct Kernel_Thread* Lookup_Thread(int pid)
653 struct Kernel_Thread *result = 0;
655 bool iflag = Begin_Int_Atomic();
658 * TODO: we could remove the requirement that the caller
659 * needs to be the thread's owner by specifying that another
660 * reference is added to the thread before it is returned.
663 result = Get_Front_Of_All_Thread_List(&s_allThreadList);
664 while (result != 0) {
665 if (result->pid == pid) {
666 if (g_currentThread != result->owner)
670 result = Get_Next_In_All_Thread_List(result);
673 End_Int_Atomic(iflag);
680 * Wait on given wait queue.
681 * Must be called with interrupts disabled!
682 * Note that the function will return with interrupts
683 * disabled. This is desirable, because it allows us to
684 * atomically test a condition that can be affected by an interrupt
685 * and wait for it to be satisfied (if necessary).
686 * See the Wait_For_Key() function in keyboard.c
689 void Wait(struct Thread_Queue* waitQueue)
691 struct Kernel_Thread* current = g_currentThread;
693 KASSERT(!Interrupts_Enabled());
695 /* Add the thread to the wait queue. */
696 Enqueue_Thread(waitQueue, current);
698 /* Find another thread to run. */
703 * Wake up all threads waiting on given wait queue.
704 * Must be called with interrupts disabled!
705 * See Keyboard_Interrupt_Handler() function in keyboard.c
708 void Wake_Up(struct Thread_Queue* waitQueue)
710 struct Kernel_Thread *kthread = waitQueue->head, *next;
712 KASSERT(!Interrupts_Enabled());
715 * Walk throught the list of threads in the wait queue,
716 * transferring each one to the run queue.
718 while (kthread != 0) {
719 next = Get_Next_In_Thread_Queue(kthread);
720 Make_Runnable(kthread);
724 /* The wait queue is now empty. */
725 Clear_Thread_Queue(waitQueue);
/*
 * Wake up a single thread waiting on given wait queue
 * (if there are any threads waiting). Chooses the highest priority thread.
 * Interrupts must be disabled!
 */
void Wake_Up_One(struct Thread_Queue* waitQueue)
{
    struct Kernel_Thread* best;

    KASSERT(!Interrupts_Enabled());

    best = Find_Best(waitQueue);

    /* Find_Best() returns null for an empty queue — guard before using it. */
    if (best != 0) {
        Remove_Thread(waitQueue, best);
        Make_Runnable(best);
        /*Print("Wake_Up_One: waking up %x from %x\n", best, g_currentThread); */
    }
}
749 * Allocate a key for accessing thread-local data.
751 int Tlocal_Create(tlocal_key_t *key, tlocal_destructor_t destructor)
755 bool iflag = Begin_Int_Atomic();
757 if (s_tlocalKeyCounter == MAX_TLOCAL_KEYS) return -1;
758 s_tlocalDestructors[s_tlocalKeyCounter] = destructor;
759 *key = s_tlocalKeyCounter++;
761 End_Int_Atomic(iflag);
767 * Store a value for a thread-local item
769 void Tlocal_Put(tlocal_key_t k, const void *v)
773 KASSERT(k < s_tlocalKeyCounter);
775 pv = Get_Tlocal_Pointer(k);
780 * Acquire a thread-local value
782 void *Tlocal_Get(tlocal_key_t k)
786 KASSERT(k < s_tlocalKeyCounter);
788 pv = Get_Tlocal_Pointer(k);
793 * Print list of all threads in system.
796 void Dump_All_Thread_List(void)
798 struct Kernel_Thread *kthread;
800 bool iflag = Begin_Int_Atomic();
802 kthread = Get_Front_Of_All_Thread_List(&s_allThreadList);
805 while (kthread != 0) {
807 Print("<%lx,%lx,%lx>",
808 (ulong_t) Get_Prev_In_All_Thread_List(kthread),
810 (ulong_t) Get_Next_In_All_Thread_List(kthread));
811 KASSERT(kthread != Get_Next_In_All_Thread_List(kthread));
812 kthread = Get_Next_In_All_Thread_List(kthread);
815 Print("%d threads are running\n", count);
817 End_Int_Atomic(iflag);