1 // Code for manipulating stack locations.
3 // Copyright (C) 2009-2010 Kevin O'Connor <kevin@koconnor.net>
5 // This file may be distributed under the terms of the GNU LGPLv3 license.
7 #include "biosvar.h" // get_ebda_seg
8 #include "util.h" // dprintf
9 #include "bregs.h" // CR0_PE
11 // Thread info - stored at bottom of each thread stack - don't change
12 // without also updating the inline assembler below.
// NOTE(review): listing is elided — the 'struct thread_info {' header and
// at least one field (a stackpos pointer, per the asm's "4(%%eax)" use)
// are not visible here.
14 struct thread_info *next;
16 struct thread_info **pprev;
// MainThread describes the boot-time stack.  It links to itself
// (next == &MainThread, pprev == &MainThread.next), so a self-linked list
// means "no secondary threads" — see the checks against MainThread.next
// elsewhere in this file.
18 struct thread_info VAR32FLATVISIBLE MainThread = {
19 &MainThread, NULL, &MainThread.next
23 /****************************************************************
25 ****************************************************************/
// Store the CPU's current GDT base/limit into *desc (SGDT instruction).
27 static inline void sgdt(struct descloc_s *desc) {
28 asm("sgdtl %0" : "=m"(*desc));
// NOTE(review): closing brace elided from this listing.
// Load the GDT register from *desc (LGDT); the "memory" clobber keeps the
// compiler from moving memory accesses across the descriptor-table reload.
30 static inline void lgdt(struct descloc_s *desc) {
31 asm("lgdtl %0" : : "m"(*desc) : "memory");
// NOTE(review): closing brace elided from this listing.
34 // Call a 32bit SeaBIOS function from a 16bit SeaBIOS function.
// NOTE(review): heavily elided — the return-type/qualifier line, the
// protected-mode check body, the ss/esp handling, most of the inline asm,
// and the gdt/fs/gs restore statements are missing from this listing.
// Visible contract: saves CMOS index (with NMI disabled), fs/gs and
// ss/esp around a 16bit -> 32bit -> 16bit transition that calls 'func'
// with 'eax' as argument; 'errret' is presumably the value returned when
// the transition cannot be performed — confirm against the elided checks.
36 call32(void *func, u32 eax, u32 errret)
41 // Called in 16bit protected mode?!
44 // Backup cmos index register and disable nmi
45 u8 cmosindex = inb(PORT_CMOS_INDEX);
46 outb(cmosindex | NMI_DISABLE_BIT, PORT_CMOS_INDEX);
49 // Backup fs/gs and gdt
50 u16 fs = GET_SEG(FS), gs = GET_SEG(GS);
54 u32 bkup_ss, bkup_esp;
56 // Backup ss/esp / set esp to flat stack location
63 // Transition to 32bit mode, call func, return to 16bit
// %edx holds the flat (BUILD_BIOS_ADDR-offset) address of local label 1,
// presumably the resume point used after the mode switch — confirm.
64 " movl $(" __stringify(BUILD_BIOS_ADDR) " + 1f), %%edx\n"
69 " jmp transition16big\n"
76 : "=&r" (bkup_ss), "=&r" (bkup_esp), "+a" (eax)
78 : "ecx", "edx", "cc", "memory");
80 // Restore gdt and fs/gs
85 // Restore cmos index register
86 outb(cmosindex, PORT_CMOS_INDEX);
91 // 16bit trampoline for enabling irqs from 32bit mode.
93 " .global trampoline_checkirqs\n"
94 "trampoline_checkirqs:\n"
// NOTE(review): the trampoline's asm body and the enclosing C function
// that invokes it are mostly elided from this listing.
112 extern void trampoline_checkirqs();
// Point the bregs code far-pointer at the trampoline in the BIOS segment
// (presumably for a farcall16-style invocation — confirm elided lines).
115 br.code.seg = SEG_BIOS;
116 br.code.offset = (u32)&trampoline_checkirqs;
120 // 16bit trampoline for waiting for an irq from 32bit mode.
122 " .global trampoline_waitirq\n"
123 "trampoline_waitirq:\n"
// NOTE(review): trampoline body and the enclosing function headers are
// elided from this listing.
129 // Wait for next irq to occur.
// Enable interrupts, halt until one fires, then disable again; "cld"
// leaves the direction flag cleared and the "memory" clobber stops the
// compiler reordering memory accesses around the wait.
134 asm volatile("sti ; hlt ; cli ; cld": : :"memory");
// Don't halt while secondary threads are still linked on MainThread's
// list — yield instead so they can make progress.
137 if (CONFIG_THREADS && MainThread.next != &MainThread) {
138 // Threads still active - do a yield instead.
142 extern void trampoline_waitirq();
// Invoke the 16bit trampoline via a bregs far-pointer in the BIOS segment.
145 br.code.seg = SEG_BIOS;
146 br.code.offset = (u32)&trampoline_waitirq;
151 /****************************************************************
153 ****************************************************************/
155 // Switch to the extra stack in ebda and call a function.
// NOTE(review): return-type line, inline-asm body, and the %ss/%esp
// save/restore instructions are elided from this listing.
157 stack_hop(u32 eax, u32 edx, void *func)
160 u16 ebda_seg = get_ebda_seg(), bkup_ss;
163 // Backup current %ss/%esp values.
166 // Copy ebda seg to %ds/%ss and set %esp
172 // Restore segments and stack
// Operands: eax/edx are passed through to 'func' (in %eax/%edx), func is
// called via %ecx; bkup_ss/bkup_esp receive the saved stack pointer, and
// EBDA_OFFSET_TOP_STACK is presumably the %esp value used on the EBDA
// stack — confirm against the elided asm.
176 : "+a" (eax), "+d" (edx), "+c" (func), "=&r" (bkup_ss), "=&r" (bkup_esp)
177 : "i" (EBDA_OFFSET_TOP_STACK), "r" (ebda_seg)
183 /****************************************************************
185 ****************************************************************/
// Size (and alignment) of each secondary thread's stack.
187 #define THREADSTACKSIZE 4096
// Nonzero while option-rom preemption is active (read from 16bit code).
188 int VAR16VISIBLE CanPreempt;
190 // Return the 'struct thread_info' for the currently running thread.
// Thread stacks are allocated THREADSTACKSIZE-aligned with the
// thread_info at the bottom (see run_thread's memalign call), so rounding
// %esp down recovers the struct.  The boot stack lies at/below
// BUILD_STACK_ADDR and is special-cased — the elided branch presumably
// returns &MainThread; confirm.
195 if (esp <= BUILD_STACK_ADDR)
197 return (void*)ALIGN_DOWN(esp, THREADSTACKSIZE);
200 // Switch to next thread stack.
202 switch_next(struct thread_info *cur)
204 struct thread_info *next = cur->next;
// NOTE(review): an early-return when cur == next is presumably elided
// here — confirm.  The asm saves this thread's resume context (return pc
// + %ebp) on its own stack, stores %esp into cur (offset 4 is the
// stackpos field, per run_thread's annotations), loads next's saved
// %esp, then pops %ebp and returns on the new stack.
209 " pushl $1f\n" // store return pc
210 " pushl %%ebp\n" // backup %ebp
211 " movl %%esp, 4(%%eax)\n" // cur->stackpos = %esp
212 " movl 4(%%ecx), %%esp\n" // %esp = next->stackpos
213 " popl %%ebp\n" // restore %ebp
214 " retl\n" // restore pc
216 : "+a"(cur), "+c"(next)
218 : "ebx", "edx", "esi", "edi", "cc", "memory");
221 // Briefly permit irqs to occur.
// NOTE(review): function header and the bodies of each branch are elided
// from this listing.
225 if (MODESEGMENT || !CONFIG_THREADS) {
226 // Just directly check irqs.
230 struct thread_info *cur = getCurThread();
// Only the main thread services irqs directly; secondary threads instead
// hand control to the next thread on the list.
231 if (cur == &MainThread)
232 // Permit irqs to fire
235 // Switch to the next thread
239 // Last thing called from a thread (called on "next" stack).
241 __end_thread(struct thread_info *old)
// Unlink 'old' from the circular thread list: the successor inherits
// old's back-link, and whatever pointed at 'old' now points at the
// successor.
243 old->next->pprev = old->pprev;
244 *old->pprev = old->next;
246 dprintf(DEBUG_thread, "\\%08x/ End thread\n", (u32)old);
// A self-linked MainThread means the list is empty — all workers done.
247 if (MainThread.next == &MainThread)
248 dprintf(1, "All threads complete.\n");
251 // Create a new thread and start executing 'func' in it.
// NOTE(review): the function header line and the allocation-failure check
// after memalign_tmphigh are elided from this listing.
253 run_thread(void (*func)(void*), void *data)
256 if (! CONFIG_THREADS)
258 struct thread_info *thread;
// Stack is THREADSTACKSIZE bytes, THREADSTACKSIZE-aligned, so that
// getCurThread() can recover the thread_info by rounding %esp down.
259 thread = memalign_tmphigh(THREADSTACKSIZE, THREADSTACKSIZE);
// New thread starts with its stack pointer at the top of the allocation.
263 thread->stackpos = (void*)thread + THREADSTACKSIZE;
264 struct thread_info *cur = getCurThread();
// Link 'thread' into the circular list just before 'cur'.  The use of
// &thread->next below implies an elided "thread->next = cur;" assignment
// — confirm.
266 thread->pprev = cur->pprev;
267 cur->pprev = &thread->next;
268 *thread->pprev = thread;
270 dprintf(DEBUG_thread, "/%08x\\ Start thread\n", (u32)thread);
// Save the current context on this stack, switch %esp to the new
// thread's stack and call func(data).  When func returns, hop to the
// next thread's stack, unlink this thread via __end_thread, and resume.
273 " pushl $1f\n" // store return pc
274 " pushl %%ebp\n" // backup %ebp
275 " movl %%esp, 4(%%edx)\n" // cur->stackpos = %esp
276 " movl 4(%%ebx), %%esp\n" // %esp = thread->stackpos
277 " calll *%%ecx\n" // Call func
280 " movl (%%ebx), %%ecx\n" // %ecx = thread->next
281 " movl 4(%%ecx), %%esp\n" // %esp = next->stackpos
282 " movl %%ebx, %%eax\n"
283 " calll %4\n" // call __end_thread(thread)
284 " popl %%ebp\n" // restore %ebp
285 " retl\n" // restore pc
287 : "+a"(data), "+c"(func), "+b"(thread), "+d"(cur)
288 : "m"(*(u8*)__end_thread)
289 : "esi", "edi", "cc", "memory");
296 // Wait for all threads (other than the main thread) to complete.
// NOTE(review): function header, the early-return body, and the loop body
// (presumably a yield) are elided from this listing.
301 if (! CONFIG_THREADS)
// Spin until the thread list is empty (MainThread links to itself).
303 while (MainThread.next != &MainThread)
// Acquire a cooperative mutex.  NOTE(review): function headers, the
// isLocked assignments, and the loop body (presumably a yield) are elided
// from this listing.
308 mutex_lock(struct mutex_s *mutex)
311 if (! CONFIG_THREADS)
// Busy-wait while another thread holds the lock.
313 while (mutex->isLocked)
// Release the mutex (clearing of isLocked is elided here).
319 mutex_unlock(struct mutex_s *mutex)
322 if (! CONFIG_THREADS)
328 /****************************************************************
330 ****************************************************************/
// Count of preemption checks performed (reported when preempt finishes).
332 static u32 PreemptCount;
334 // Turn on RTC irqs and arrange for them to check the 32bit threads.
// NOTE(review): both function headers and bodies (CanPreempt handling,
// RTC programming) are elided from this listing.
338 if (! CONFIG_THREADS || ! CONFIG_THREAD_OPTIONROMS)
345 // Turn off RTC irqs / stop checking for thread execution.
349 if (! CONFIG_THREADS || ! CONFIG_THREAD_OPTIONROMS) {
355 dprintf(9, "Done preempt - %d checks\n", PreemptCount);
359 // Check if preemption is on, and wait for it to complete if so.
// NOTE(review): function headers, the rest of this condition, and the
// body between the two fragments are elided from this listing.
363 if (MODESEGMENT || !CONFIG_THREADS || !CONFIG_THREAD_OPTIONROMS
371 // Try to execute 32bit threads.
// Hand the (32bit) main thread's control to the next runnable thread.
376 switch_next(&MainThread);
379 // 16bit code that checks if threads are pending and executes them if so.
// NOTE(review): the function header and early-return body are elided.
// Bail out unless threading+preemption are configured, preemption is
// currently enabled, and at least one secondary thread is linked.
383 if (! CONFIG_THREADS || ! CONFIG_THREAD_OPTIONROMS
384 || !GET_GLOBAL(CanPreempt)
385 || GET_FLATPTR(MainThread.next) == &MainThread)
// Jump to the 32bit flat-mode yield_preempt entry point via call32.
388 extern void _cfunc32flat_yield_preempt(void);
389 call32(_cfunc32flat_yield_preempt, 0, 0);