1 #include <lwk/kernel.h>
3 #include <lwk/cpuinfo.h>
4 #include <lwk/bootmem.h>
6 #include <arch/bootsetup.h>
9 #include <arch/sections.h>
10 #include <arch/proto.h>
11 #include <arch/mpspec.h>
13 #include <arch/io_apic.h>
16 * Bitmap of of PTE/PMD entry flags that are supported.
17 * This is AND'ed with a PTE/PMD entry before it is installed.
19 unsigned long __supported_pte_mask __read_mostly = ~0UL;
22 * Bitmap of features enabled in the CR4 register.
24 unsigned long mmu_cr4_features;
27 * Start and end addresses of the initrd image.
29 paddr_t __initdata initrd_start;
30 paddr_t __initdata initrd_end;
33 * The init_task ELF image.
35 paddr_t __initdata init_elf_image;
38 * Base address and size of the Extended BIOS Data Area.
40 paddr_t __initdata ebda_addr;
41 size_t __initdata ebda_size;
42 #define EBDA_ADDR_POINTER 0x40E
45 * Finds the address and length of the Extended BIOS Data Area.
51 * There is a real-mode segmented pointer pointing to the
52 * 4K EBDA area at 0x40E
54 ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
57 ebda_size = *(unsigned short *)__va(ebda_addr);
59 /* Round EBDA up to pages */
63 ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
64 if (ebda_size > 64*1024)
69 * This sets up the bootstrap memory allocator. It is a simple
70 * bitmap based allocator that tracks memory at a page grandularity.
71 * Once the bootstrap process is complete, each unallocated page
72 * is added to the real memory allocator's free pool. Memory allocated
73 * during bootstrap remains allocated forever, unless explicitly
74 * freed before turning things over to the real memory allocator.
77 setup_bootmem_allocator(
78 unsigned long start_pfn,
82 unsigned long bootmap_size, bootmap;
84 bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
85 bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
87 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
88 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
89 e820_bootmem_free(0, end_pfn << PAGE_SHIFT);
90 reserve_bootmem(bootmap, bootmap_size);
94 * Mark in-use memory regions as reserved.
95 * This prevents the bootmem allocator from allocating them.
100 /* Reserve the kernel page table memory */
101 reserve_bootmem(table_start << PAGE_SHIFT,
102 (table_end - table_start) << PAGE_SHIFT);
104 /* Reserve kernel memory */
105 reserve_bootmem(__pa_symbol(&_text),
106 __pa_symbol(&_end) - __pa_symbol(&_text));
108 /* Reserve physical page 0... it's a often a special BIOS page */
109 reserve_bootmem(0, PAGE_SIZE);
111 /* Reserve the Extended BIOS Data Area memory */
113 reserve_bootmem(ebda_addr, ebda_size);
115 /* Reserve SMP trampoline */
116 reserve_bootmem(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
118 /* Find and reserve boot-time SMP configuration */
121 /* Reserve memory used by the initrd image */
122 if (LOADER_TYPE && INITRD_START) {
123 if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
125 "reserving memory used by initrd image\n");
127 " INITRD_START=0x%lx, INITRD_SIZE=%ld bytes\n",
128 (unsigned long) INITRD_START,
129 (unsigned long) INITRD_SIZE);
130 reserve_bootmem(INITRD_START, INITRD_SIZE);
131 initrd_start = INITRD_START;
132 initrd_end = initrd_start+INITRD_SIZE;
133 init_elf_image = initrd_start;
136 "initrd extends beyond end of memory "
137 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
138 (unsigned long)(INITRD_START + INITRD_SIZE),
139 (unsigned long)(end_pfn << PAGE_SHIFT));
146 * This initializes a per-CPU area for each CPU.
148 * TODO: The PDA and per-CPU areas are pretty tightly wound. It should be
149 * possible to make the per-CPU area *be* the PDA, or put another way,
150 * point %GS at the per-CPU area rather than the PDA. All of the PDA's
151 * current contents would become normal per-CPU variables.
154 setup_per_cpu_areas(void)
160 * There is an ELF section containing all per-CPU variables
161 * surrounded by __per_cpu_start and __per_cpu_end symbols.
162 * We create a copy of this ELF section for each CPU.
164 size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
166 for_each_cpu_mask (i, cpu_present_map) {
169 ptr = alloc_bootmem_aligned(size, PAGE_SIZE);
171 panic("Cannot allocate cpu data for CPU %d\n", i);
174 * Pre-bias data_offset by subtracting its offset from
175 * __per_cpu_start. Later, per_cpu() will calculate a
176 * per_cpu variable's address with:
178 * addr = offset_in_percpu_ELF_section + data_offset
179 * = (__per_cpu_start + offset) + (ptr - __per_cpu_start)
182 cpu_pda(i)->data_offset = ptr - __per_cpu_start;
184 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
/**
 * Extracts the CPU family from CPUID leaf 1's EAX value.
 *
 * The base family lives in bits 11:8 and the extended family in
 * bits 27:20; per the CPUID specification the extended field is
 * only added when the base family is 0xf.
 */
static inline int get_family(int cpuid)
{
	int base = (cpuid >> 8) & 0xf;
	int extended = (cpuid >> 20) & 0xff;

	return (0xf == base) ? base + extended : base;
}
/*
 * NOTE(review): this region is garbled — the setup_arch() signature, its
 * braces, several call statements, and the comment delimiters were lost in
 * extraction (the embedded line numbering jumps, e.g. 266 -> 299, showing
 * whole runs of statements are missing). The lines below are preserved
 * exactly as found; recover the original from version control before
 * editing this function.
 */
197 * Architecture specific initialization.
198 * This is called from start_kernel() in init/main.c.
200 * NOTE: Ordering is usually important. Do not move things
201 * around unless you know what you are doing.
207 * Figure out which memory regions are usable and which are reserved.
208 * This builds the "e820" map of memory from info provided by the
211 setup_memory_region();
214 * Get the bare minimum info about the bootstrap CPU... the
215 * one we're executing on right now. Later on, the full
216 * boot_cpu_data and cpu_info[boot_cpu_id] structures will be
217 * filled in completely.
219 boot_cpu_data.logical_id = 0;
220 early_identify_cpu(&boot_cpu_data);
223 * Find the Extended BIOS Data Area.
224 * (Not sure why exactly we need this, probably don't.)
/* NOTE(review): the call statement for EBDA discovery is missing here. */
229 * Initialize the kernel page tables.
230 * The kernel page tables map an "identity" map of all physical memory
231 * starting at virtual address PAGE_OFFSET. When the kernel executes,
232 * it runs inside of the identity map... memory below PAGE_OFFSET is
233 * from whatever task was running when the kernel got invoked.
235 init_kernel_pgtables(0, (end_pfn_map << PAGE_SHIFT));
238 * Initialize the bootstrap dynamic memory allocator.
239 * alloc_bootmem() will work after this.
241 setup_bootmem_allocator(0, end_pfn);
245 * Get the multiprocessor configuration...
246 * number of CPUs, PCI bus info, APIC info, etc.
/* NOTE(review): the MP-configuration call statement is missing here. */
251 * Initialize resources. Resources reserve sections of normal memory
252 * (iomem) and I/O ports (ioport) for devices and other system
253 * resources. For each resource type, there is a tree which tracks
254 * which regions are in use. This eliminates the possibility of
255 * conflicts... e.g., two devices trying to use the same iomem region.
260 * Initialize per-CPU areas, one per CPU.
261 * Variables defined with DEFINE_PER_CPU() end up in the per-CPU area.
262 * This provides a mechanism for different CPUs to refer to their
263 * private copy of the variable using the same name
264 * (e.g., get_cpu_var(foo)).
266 setup_per_cpu_areas();
269 * Initialize the IDT table and interrupt handlers.
274 * Map the APICs into the kernel page tables.
276 * Each CPU has its own Local APIC. All Local APICs are memory mapped
277 * to the same virtual address region. A CPU accesses its Local APIC by
278 * accessing the region. A CPU cannot access another CPU's Local APIC.
280 * Each Local APIC is connected to all IO APICs in the system. Each IO
281 * APIC is mapped to a different virtual address region. A CPU accesses
282 * a given IO APIC by accessing the appropriate region. All CPUs can
283 * access all IO APICs.
289 * Initialize the virtual system call code/data page.
290 * The vsyscall page is mapped into every task's address space at a
291 * well-known address. User code can call functions in this page
292 * directly, providing a light-weight mechanism for read-only system
293 * calls such as gettimeofday().
/*
 * NOTE(review): original lines 294-298 are missing; the two statements
 * below may belong to a later function rather than setup_arch().
 */
299 current->cpumask = cpu_present_map;
303 lapic_set_timer(1000000000);