/*
 * Kernel threads
 * Copyright (c) 2001,2003 David H. Hovemeyer <daveho@cs.umd.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "COPYING".
 */
#include <geekos/kassert.h>
#include <geekos/defs.h>
#include <geekos/screen.h>
#include <geekos/int.h>
#include <geekos/mem.h>
#include <geekos/symbol.h>
#include <geekos/string.h>
#include <geekos/kthread.h>
#include <geekos/malloc.h>
#include <geekos/serial.h>
#include <geekos/debug.h>
/* ----------------------------------------------------------------------
 * Private data
 * ---------------------------------------------------------------------- */
/*
 * List of all threads in the system.
 */
static struct All_Thread_List s_allThreadList;
/*
 * Queue of runnable threads.
 */
static struct Thread_Queue s_runQueue;
/*
 * Current thread.
 */
struct Kernel_Thread* g_currentThread;
/*
 * Boolean flag indicating that we need to choose a new runnable thread.
 * It is checked by the interrupt return code (Handle_Interrupt,
 * in lowlevel.asm) before returning from an interrupt.
 */
int g_needReschedule;
/*
 * Boolean flag indicating that preemption is disabled.
 * When set, external interrupts (such as the timer tick)
 * will not cause a new thread to be selected.
 */
volatile int g_preemptionDisabled;
/*
 * Queue of finished threads needing disposal,
 * and a wait queue used for communication between exited threads
 * and the reaper thread.
 */
static struct Thread_Queue s_graveyardQueue;
static struct Thread_Queue s_reaperWaitQueue;
/*
 * Counter for keys that access thread-local data, and an array
 * of destructors for freeing that data when the thread dies. This is
 * based on POSIX threads' thread-specific data functionality.
 */
static unsigned int s_tlocalKeyCounter = 0;
static tlocal_destructor_t s_tlocalDestructors[MAX_TLOCAL_KEYS];
/* ----------------------------------------------------------------------
 * Private functions
 * ---------------------------------------------------------------------- */
/*
 * Initialize a new Kernel_Thread.
 */
static void Init_Thread(struct Kernel_Thread* kthread, void* stackPage,
    int priority, bool detached)
{
    static int nextFreePid = 1;

    struct Kernel_Thread* owner = detached ? (struct Kernel_Thread*)0 : g_currentThread;

    memset(kthread, '\0', sizeof(*kthread));
    kthread->stackPage = stackPage;
    kthread->esp = ((ulong_t) kthread->stackPage) + PAGE_SIZE;
    kthread->numTicks = 0;
    kthread->priority = priority;
    kthread->userContext = 0;
    kthread->owner = owner;

    /*
     * The thread has an implicit self-reference.
     * If the thread is not detached, then its owner
     * also has a reference to it.
     */
    kthread->refCount = detached ? 1 : 2;

    kthread->alive = true;
    Clear_Thread_Queue(&kthread->joinQueue);
    kthread->pid = nextFreePid++;
}
/*
 * Create a new raw thread object.
 * Returns a null pointer if there isn't enough memory.
 */
static struct Kernel_Thread* Create_Thread(int priority, bool detached)
{
    struct Kernel_Thread* kthread;
    void* stackPage = 0;

    /*
     * For now, just allocate one page each for the thread context
     * object and the thread's stack.
     */
    kthread = Alloc_Page();
    if (kthread != 0)
        stackPage = Alloc_Page();

    /* Make sure that the memory allocations succeeded. */
    if (kthread == 0)
        return 0;
    if (stackPage == 0) {
        Free_Page(kthread);
        return 0;
    }

    /*Print("New thread @ %x, stack @ %x\n", kthread, stackPage); */

    /*
     * Initialize the stack pointer of the new thread
     * and accounting info
     */
    Init_Thread(kthread, stackPage, priority, detached);

    /* Add to the list of all threads in the system. */
    Add_To_Back_Of_All_Thread_List(&s_allThreadList, kthread);

    return kthread;
}
/*
 * Push a dword value on the stack of given thread.
 * We use this to set up some context for the thread before
 * we make it runnable.
 */
static __inline__ void Push(struct Kernel_Thread* kthread, ulong_t value)
{
    kthread->esp -= 4;
    *((ulong_t *) kthread->esp) = value;
}
/*
 * Destroy given thread.
 * This function should perform all cleanup needed to
 * reclaim the resources used by a thread.
 * Called with interrupts enabled.
 */
static void Destroy_Thread(struct Kernel_Thread* kthread)
{
    /* Dispose of the thread's memory. */
    Disable_Interrupts();
    Free_Page(kthread->stackPage);
    Free_Page(kthread);

    /* Remove from list of all threads */
    Remove_From_All_Thread_List(&s_allThreadList, kthread);

    Enable_Interrupts();
}
/*
 * Hand given thread to the reaper for destruction.
 * Must be called with interrupts disabled!
 */
static void Reap_Thread(struct Kernel_Thread* kthread)
{
    KASSERT(!Interrupts_Enabled());
    Enqueue_Thread(&s_graveyardQueue, kthread);
    Wake_Up(&s_reaperWaitQueue);
}
/*
 * Called when a reference to the thread is broken.
 */
static void Detach_Thread(struct Kernel_Thread* kthread)
{
    KASSERT(!Interrupts_Enabled());
    KASSERT(kthread->refCount > 0);

    --kthread->refCount;
    if (kthread->refCount == 0) {
        Reap_Thread(kthread);
    }
}
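/*
 * Reference-count lifecycle, as implemented above: a non-detached thread
 * starts with refCount == 2 (its implicit self-reference plus the owner's
 * reference). Exit() drops the self-reference via Detach_Thread(), and the
 * owner's Join() drops the other; whichever call brings refCount to zero
 * hands the thread to the reaper. A detached thread starts at 1 and is
 * reaped as soon as it exits.
 */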
/*
 * This function performs any needed initialization before
 * a thread start function is executed. Currently we just use
 * it to enable interrupts (since Schedule() always activates
 * a thread with interrupts disabled).
 */
static void Launch_Thread(void)
{
    Enable_Interrupts();
}
/*
 * Push initial values for general purpose registers.
 */
static void Push_General_Registers(struct Kernel_Thread* kthread)
{
    /*
     * Push initial values for saved general-purpose registers.
     * (The actual values are not important.)
     */
    Push(kthread, 0);  /* eax */
    Push(kthread, 0);  /* ebx */
    Push(kthread, 0);  /* ecx */
    Push(kthread, 0);  /* edx */
    Push(kthread, 0);  /* esi */
    Push(kthread, 0);  /* edi */
    Push(kthread, 0);  /* ebp */
}
/*
 * Shutdown a kernel thread.
 * This is called if a kernel thread exits by falling off
 * the end of its start function.
 */
static void Shutdown_Thread(void)
{
    Exit(0);
}
/*
 * Set up the initial context for a kernel-mode-only thread.
 */
static void Setup_Kernel_Thread(
    struct Kernel_Thread* kthread,
    Thread_Start_Func startFunc,
    ulong_t arg)
{
    /*
     * Push the argument to the thread start function, and the
     * return address (the Shutdown_Thread function, so the thread will
     * go away cleanly when the start function returns).
     */
    Push(kthread, arg);
    Push(kthread, (ulong_t) &Shutdown_Thread);

    /* Push the address of the start function. */
    Push(kthread, (ulong_t) startFunc);

    /*
     * To make the thread schedulable, we need to make it look
     * like it was suspended by an interrupt. This means pushing
     * an "eflags, cs, eip" sequence onto the stack,
     * as well as int num, error code, saved registers, etc.
     */

    /*
     * The EFLAGS register will have all bits clear.
     * The important constraint is that we want to have the IF
     * bit clear, so that interrupts are disabled when the
     * thread starts.
     */
    Push(kthread, 0UL);  /* EFLAGS */

    /*
     * As the "return address" specifying where the new thread will
     * start executing, use the Launch_Thread() function.
     */
    Push(kthread, KERNEL_CS);
    Push(kthread, (ulong_t) &Launch_Thread);

    /* Push fake error code and interrupt number. */
    Push(kthread, 0);
    Push(kthread, 0);

    /* Push initial values for general-purpose registers. */
    Push_General_Registers(kthread);

    /*
     * Push values for saved segment registers.
     * Only the ds and es registers will contain valid selectors.
     * The fs and gs registers are not used by any instruction
     * generated by gcc.
     */
    Push(kthread, KERNEL_DS);  /* ds */
    Push(kthread, KERNEL_DS);  /* es */
    Push(kthread, 0);  /* fs */
    Push(kthread, 0);  /* gs */
}
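/*
 * Resulting initial stack layout (a sketch derived from the pushes above;
 * higher addresses first, with esp left pointing at the gs slot):
 *
 *   arg                   argument seen by startFunc
 *   &Shutdown_Thread      startFunc's return address
 *   startFunc             popped when Launch_Thread() returns
 *   eflags = 0            IF clear: interrupts off at first
 *   KERNEL_CS
 *   &Launch_Thread        the "interrupted" eip
 *   error code = 0
 *   interrupt number = 0
 *   eax ... ebp           seven general-purpose registers
 *   ds, es, fs, gs        <- esp
 *
 * This mirrors the frame that the interrupt return path in lowlevel.asm
 * unwinds, which is how Switch_To_Thread() can "resume" the new thread.
 */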
/*
 * This is the body of the idle thread. Its job is to preserve
 * the invariant that a runnable thread always exists,
 * i.e., the run queue is never empty.
 */
static void Idle(ulong_t arg)
{
    while (true)
        Yield();
}
/*
 * The reaper thread. Its job is to de-allocate memory
 * used by threads which have finished.
 */
static void Reaper(ulong_t arg)
{
    struct Kernel_Thread *kthread;

    Disable_Interrupts();

    while (true) {
        /* See if there are any threads needing disposal. */
        if ((kthread = s_graveyardQueue.head) == 0) {
            /* Graveyard is empty, so wait for a thread to die. */
            Wait(&s_reaperWaitQueue);
        }
        else {
            /* Make the graveyard queue empty. */
            Clear_Thread_Queue(&s_graveyardQueue);

            /*
             * Now we can re-enable interrupts, since we
             * have removed all the threads needing disposal.
             */
            Enable_Interrupts();
            Yield();  /* allow other threads to run? */

            /* Dispose of the dead threads. */
            while (kthread != 0) {
                struct Kernel_Thread* next = Get_Next_In_Thread_Queue(kthread);
                Print("Reaper: disposing of thread @ %x, stack @ %x\n",
                    kthread, kthread->stackPage);
                Destroy_Thread(kthread);
                kthread = next;
            }

            /*
             * Disable interrupts again, since we're going to
             * do another iteration.
             */
            Disable_Interrupts();
        }
    }
}
/*
 * Find the best (highest priority) thread in given
 * thread queue. Returns null if queue is empty.
 */
static __inline__ struct Kernel_Thread* Find_Best(struct Thread_Queue* queue)
{
    /* Pick the highest priority thread */
    struct Kernel_Thread *kthread = queue->head, *best = 0;
    while (kthread != 0) {
        if (best == 0 || kthread->priority > best->priority)
            best = kthread;
        kthread = Get_Next_In_Thread_Queue(kthread);
    }
    return best;
}
/*
 * Acquires pointer to thread-local data from the current thread
 * indexed by the given key. Assumes interrupts are off.
 */
static __inline__ const void** Get_Tlocal_Pointer(tlocal_key_t k)
{
    struct Kernel_Thread* current = g_currentThread;

    KASSERT(k < MAX_TLOCAL_KEYS);

    return &current->tlocalData[k];
}
/*
 * Clean up any thread-local data upon thread exit. Assumes
 * this is called with interrupts disabled. We follow the POSIX style
 * of possibly invoking a destructor more than once, because a
 * destructor to some thread-local data might cause other thread-local
 * data to become alive once again. If everything is NULL by the end
 * of an iteration, we are done.
 */
static void Tlocal_Exit(struct Kernel_Thread* curr) {
    int i, j;

    KASSERT(!Interrupts_Enabled());

    for (j = 0; j < MIN_DESTRUCTOR_ITERATIONS; j++) {
        int called = 0;

        for (i = 0; i < MAX_TLOCAL_KEYS; i++) {
            void *x = (void *)curr->tlocalData[i];
            if (x != NULL && s_tlocalDestructors[i] != NULL) {
                curr->tlocalData[i] = NULL;
                called = 1;

                /* Run the destructor with interrupts enabled. */
                Enable_Interrupts();
                s_tlocalDestructors[i](x);
                Disable_Interrupts();
            }
        }

        /* No destructor ran this pass, so everything is NULL; we are done. */
        if (!called)
            break;
    }
}
/* ----------------------------------------------------------------------
 * Public functions
 * ---------------------------------------------------------------------- */
/*
 * Initialize the scheduler.
 * Must be called before any other threads are created.
 */
void Init_Scheduler(void)
{
    struct Kernel_Thread* mainThread = (struct Kernel_Thread *) KERNEL_THREAD_OBJECT;

    PrintBoth("Initializing Scheduler\n");

    /*
     * Create initial kernel thread context object and stack,
     * and make them current.
     */
    Init_Thread(mainThread, (void *) KERNEL_STACK, PRIORITY_NORMAL, true);
    g_currentThread = mainThread;
    Add_To_Back_Of_All_Thread_List(&s_allThreadList, mainThread);

    /*
     * Create the idle thread.
     */
    /*Print("starting idle thread\n");*/
    Start_Kernel_Thread(Idle, 0, PRIORITY_IDLE, true);

    /*
     * Create the reaper thread.
     */
    /*Print("starting reaper thread\n");*/
    Start_Kernel_Thread(Reaper, 0, PRIORITY_NORMAL, true);
}
/*
 * Start a kernel-mode-only thread, using given function as its body
 * and passing given argument as its parameter. Returns pointer
 * to the new thread if successful, null otherwise.
 *
 * startFunc - is the function to be called by the new thread
 * arg - is a parameter to pass to the new function
 * priority - the priority of this thread (use PRIORITY_NORMAL for
 *    most purposes)
 * detached - use false for kernel threads
 */
struct Kernel_Thread* Start_Kernel_Thread(
    Thread_Start_Func startFunc,
    ulong_t arg,
    int priority,
    bool detached)
{
    struct Kernel_Thread* kthread = Create_Thread(priority, detached);
    if (kthread != 0) {
        /*
         * Create the initial context for the thread to make
         * it schedulable.
         */
        Setup_Kernel_Thread(kthread, startFunc, arg);

        /* Atomically put the thread on the run queue. */
        Make_Runnable_Atomic(kthread);
    }

    return kthread;
}
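/*
 * Example usage, a minimal sketch (Print_Ticks and its argument are
 * hypothetical, not part of this file):
 *
 *   static void Print_Ticks(ulong_t howMany)
 *   {
 *       ulong_t i;
 *       for (i = 0; i < howMany; ++i)
 *           Print("tick %lu\n", i);
 *   }
 *
 *   struct Kernel_Thread* t =
 *       Start_Kernel_Thread(Print_Ticks, 10, PRIORITY_NORMAL, false);
 *   if (t != 0)
 *       Join(t);  // not detached, so the owner must Join to release it
 */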
/*
 * Add given thread to the run queue, so that it
 * may be scheduled. Must be called with interrupts disabled!
 */
void Make_Runnable(struct Kernel_Thread* kthread)
{
    KASSERT(!Interrupts_Enabled());

    Enqueue_Thread(&s_runQueue, kthread);
}
/*
 * Atomically make a thread runnable.
 * Assumes interrupts are currently enabled.
 */
void Make_Runnable_Atomic(struct Kernel_Thread* kthread)
{
    Disable_Interrupts();
    Make_Runnable(kthread);
    Enable_Interrupts();
}
/*
 * Get the thread that currently has the CPU.
 */
struct Kernel_Thread* Get_Current(void)
{
    return g_currentThread;
}
/*
 * Get the next runnable thread from the run queue.
 * This is the scheduler.
 */
struct Kernel_Thread* Get_Next_Runnable(void)
{
    struct Kernel_Thread* best = 0;

    best = Find_Best(&s_runQueue);
    KASSERT(best != 0);
    Remove_Thread(&s_runQueue, best);

    /*
     * Print("Scheduling %x\n", best);
     */

    return best;
}
/*
 * Schedule a thread that is waiting to run.
 * Must be called with interrupts off!
 * The current thread should already have been placed
 * on whatever queue is appropriate (i.e., either the
 * run queue if it is still runnable, or a wait queue
 * if it is waiting for an event to occur).
 */
void Schedule(void)
{
    struct Kernel_Thread* runnable;

    /* Make sure interrupts really are disabled */
    KASSERT(!Interrupts_Enabled());

    /* Preemption should not be disabled. */
    KASSERT(!g_preemptionDisabled);

    /* Get next thread to run from the run queue */
    runnable = Get_Next_Runnable();

    /*
     * Activate the new thread, saving the context of the current thread.
     * Eventually, this thread will get re-activated and Switch_To_Thread()
     * will "return", and then Schedule() will return to wherever
     * it was called from.
     */
    //SerialPrint("Switch_To_Thread() in Schedule()\n");
    Switch_To_Thread(runnable);
}
/*
 * Voluntarily give up the CPU to another thread.
 * Does nothing if no other threads are ready to run.
 */
void Yield(void)
{
    Disable_Interrupts();
    Make_Runnable(g_currentThread);
    Schedule();
    Enable_Interrupts();
}
/*
 * Exit the current thread.
 * Calling this function initiates a context switch.
 */
void Exit(int exitCode)
{
    struct Kernel_Thread* current = g_currentThread;

    if (Interrupts_Enabled())
        Disable_Interrupts();

    /* Thread is dead */
    current->exitCode = exitCode;
    current->alive = false;

    /* Clean up any thread-local memory */
    Tlocal_Exit(g_currentThread);

    /* Notify the thread's owner, if any */
    Wake_Up(&current->joinQueue);

    /* Remove the thread's implicit reference to itself. */
    Detach_Thread(g_currentThread);

    /*
     * Schedule a new thread.
     * Since the old thread wasn't placed on any
     * thread queue, it won't get scheduled again.
     */
    Schedule();

    /* Shouldn't get here */
    KASSERT(false);

    // Otherwise the build dies on a warning for a 'noreturn' violation.
    // This loop ensures that this function never returns.
    while (1);
}
/*
 * Wait for given thread to die.
 * Interrupts must be enabled.
 * Returns the thread exit code.
 */
int Join(struct Kernel_Thread* kthread)
{
    int exitCode;

    KASSERT(Interrupts_Enabled());

    /* It is only legal for the owner to join */
    KASSERT(kthread->owner == g_currentThread);

    Disable_Interrupts();

    /* Wait for it to die */
    while (kthread->alive) {
        Wait(&kthread->joinQueue);
    }

    /* Get thread exit code. */
    exitCode = kthread->exitCode;

    /* Release our reference to the thread */
    Detach_Thread(kthread);

    Enable_Interrupts();

    return exitCode;
}
/*
 * Look up a thread by its process id.
 * The caller must be the thread's owner.
 */
struct Kernel_Thread* Lookup_Thread(int pid)
{
    struct Kernel_Thread *result = 0;

    bool iflag = Begin_Int_Atomic();

    /*
     * TODO: we could remove the requirement that the caller
     * needs to be the thread's owner by specifying that another
     * reference is added to the thread before it is returned.
     */

    result = Get_Front_Of_All_Thread_List(&s_allThreadList);
    while (result != 0) {
        if (result->pid == pid) {
            /* Found it; only the owner may look it up. */
            if (g_currentThread != result->owner)
                result = 0;
            break;
        }
        result = Get_Next_In_All_Thread_List(result);
    }

    End_Int_Atomic(iflag);

    return result;
}
/*
 * Wait on given wait queue.
 * Must be called with interrupts disabled!
 * Note that the function will return with interrupts
 * disabled. This is desirable, because it allows us to
 * atomically test a condition that can be affected by an interrupt
 * and wait for it to be satisfied (if necessary).
 * See the Wait_For_Key() function in keyboard.c
 * for an example.
 */
void Wait(struct Thread_Queue* waitQueue)
{
    struct Kernel_Thread* current = g_currentThread;

    KASSERT(!Interrupts_Enabled());

    /* Add the thread to the wait queue. */
    Enqueue_Thread(waitQueue, current);

    /* Find another thread to run. */
    Schedule();
}
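/*
 * Typical usage, a sketch modeled on Wait_For_Key() in keyboard.c
 * ("condition" stands for any state changed by an interrupt handler):
 *
 *   Disable_Interrupts();
 *   while (!condition)
 *       Wait(&queue);   // returns with interrupts still disabled
 *   ...consume the condition...
 *   Enable_Interrupts();
 *
 * Because Wait() is entered and exited with interrupts disabled, testing
 * the condition and deciding to sleep are atomic with respect to the
 * interrupt handler that will call Wake_Up().
 */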
/*
 * Wake up all threads waiting on given wait queue.
 * Must be called with interrupts disabled!
 * See Keyboard_Interrupt_Handler() function in keyboard.c
 * for an example.
 */
void Wake_Up(struct Thread_Queue* waitQueue)
{
    struct Kernel_Thread *kthread = waitQueue->head, *next;

    KASSERT(!Interrupts_Enabled());

    /*
     * Walk through the list of threads in the wait queue,
     * transferring each one to the run queue.
     */
    while (kthread != 0) {
        next = Get_Next_In_Thread_Queue(kthread);
        Make_Runnable(kthread);
        kthread = next;
    }

    /* The wait queue is now empty. */
    Clear_Thread_Queue(waitQueue);
}
/*
 * Wake up a single thread waiting on given wait queue
 * (if there are any threads waiting). Chooses the highest priority thread.
 * Interrupts must be disabled!
 */
void Wake_Up_One(struct Thread_Queue* waitQueue)
{
    struct Kernel_Thread* best;

    KASSERT(!Interrupts_Enabled());

    best = Find_Best(waitQueue);

    if (best != 0) {
        Remove_Thread(waitQueue, best);
        Make_Runnable(best);
        /*Print("Wake_Up_One: waking up %x from %x\n", best, g_currentThread); */
    }
}
/*
 * Wake up the thread with the given pid, if it is waiting on the
 * given wait queue.
 * Interrupts must be disabled!
 */
void Wake_Up_Thread(struct Thread_Queue* waitQueue, int pid)
{
    struct Kernel_Thread* thread = Lookup_Thread(pid);

    KASSERT(!Interrupts_Enabled());

    if (thread != 0) {
        Remove_Thread(waitQueue, thread);
        Make_Runnable(thread);
        /*Print("Wake_Up_Thread: waking up %x from %x\n", thread, g_currentThread); */
    }
}
/*
 * Allocate a key for accessing thread-local data.
 */
int Tlocal_Create(tlocal_key_t *key, tlocal_destructor_t destructor)
{
    KASSERT(key);

    bool iflag = Begin_Int_Atomic();

    if (s_tlocalKeyCounter == MAX_TLOCAL_KEYS) {
        End_Int_Atomic(iflag);
        return -1;
    }
    s_tlocalDestructors[s_tlocalKeyCounter] = destructor;
    *key = s_tlocalKeyCounter++;

    End_Int_Atomic(iflag);

    return 0;
}
/*
 * Store a value for a thread-local item
 */
void Tlocal_Put(tlocal_key_t k, const void *v)
{
    const void **pv;

    KASSERT(k < s_tlocalKeyCounter);

    pv = Get_Tlocal_Pointer(k);
    *pv = v;
}
/*
 * Acquire a thread-local value
 */
void *Tlocal_Get(tlocal_key_t k)
{
    const void **pv;

    KASSERT(k < s_tlocalKeyCounter);

    pv = Get_Tlocal_Pointer(k);
    return (void *)*pv;
}
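/*
 * Example usage of the thread-local API, a sketch (s_bufKey and
 * Destroy_Buffer are hypothetical):
 *
 *   static tlocal_key_t s_bufKey;
 *
 *   static void Destroy_Buffer(void* buf) { Free(buf); }
 *
 *   // Once, at initialization time:
 *   if (Tlocal_Create(&s_bufKey, &Destroy_Buffer) != 0)
 *       ...no keys left...
 *
 *   // In any thread afterwards:
 *   Tlocal_Put(s_bufKey, Malloc(256));
 *   char* buf = (char*) Tlocal_Get(s_bufKey);
 *
 * When the thread exits, Tlocal_Exit() invokes Destroy_Buffer() on the
 * stored pointer if it is still non-NULL.
 */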
/*
 * Print list of all threads in system.
 * For debugging.
 */
void Dump_All_Thread_List(void)
{
    struct Kernel_Thread *kthread;
    int count = 0;
    bool iflag = Begin_Int_Atomic();

    kthread = Get_Front_Of_All_Thread_List(&s_allThreadList);

    while (kthread != 0) {
        ++count;
        Print("<%lx,%lx,%lx>",
            (ulong_t) Get_Prev_In_All_Thread_List(kthread),
            (ulong_t) kthread,
            (ulong_t) Get_Next_In_All_Thread_List(kthread));
        KASSERT(kthread != Get_Next_In_All_Thread_List(kthread));
        kthread = Get_Next_In_All_Thread_List(kthread);
    }
    Print("%d threads are running\n", count);

    End_Int_Atomic(iflag);
}