/* Copyright (c) 2007,2008 Sandia National Laboratories */

#include <lwk/kernel.h>
#include <lwk/aspace.h>
#include <lwk/init_task.h>
#include <arch/page.h>       /* TODO: remove */
#include <arch/pgtable.h>    /* TODO: remove */
#include <arch/page_table.h>

/**
 * Architecture-specific address space initialization. This allocates a new
 * page table root for the aspace and copies the kernel page tables into it.
 */
int
arch_aspace_create(
        struct aspace *  aspace
)
{
        unsigned int i;

        /* Allocate a root page table for the address space */
        if ((aspace->arch.pgd = kmem_get_pages(0)) == NULL)
                return -ENOMEM;

        /* Copy the current kernel page tables into the address space */
        for (i = pgd_index(PAGE_OFFSET); i < PTRS_PER_PGD; i++)
                aspace->arch.pgd[i] = bootstrap_task.aspace->arch.pgd[i];

        return 0;
}
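
/*
 * NOTE: only the top-level entries for the kernel half (PAGE_OFFSET and up)
 * are copied, so every aspace shares the kernel's lower-level page tables.
 * This assumes the kernel never installs a new top-level entry after the
 * bootstrap page tables are built.
 */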

/**
 * Architecture-specific address space destruction. This frees all page table
 * memory that the aspace was using.
 */
void
arch_aspace_destroy(
        struct aspace *  aspace
)
{
        unsigned int i, j, k;

        xpte_t *pgd;    /* Page Global Directory: level 0 (root of tree) */
        xpte_t *pud;    /* Page Upper Directory:  level 1 */
        xpte_t *pmd;    /* Page Middle Directory: level 2 */
        xpte_t *ptd;    /* Page Table Directory:  level 3 */

        /* Walk and then free the Page Global Directory.
         * Only the user half is walked; kernel page tables are shared
         * with all other address spaces and must not be freed. */
        pgd = aspace->arch.pgd;
        for (i = 0; i < pgd_index(PAGE_OFFSET); i++) {
                if (!pgd[i].present)
                        continue;

                /* Walk and then free the Page Upper Directory */
                pud = __va(pgd[i].base_paddr << 12);
                for (j = 0; j < 512; j++) {
                        if (!pud[j].present || pud[j].pagesize)
                                continue;

                        /* Walk and then free the Page Middle Directory */
                        pmd = __va(pud[j].base_paddr << 12);
                        for (k = 0; k < 512; k++) {
                                if (!pmd[k].present || pmd[k].pagesize)
                                        continue;

                                /* Free the last level Page Table Directory */
                                ptd = __va(pmd[k].base_paddr << 12);
                                kmem_free_pages(ptd, 0);
                        }
                        kmem_free_pages(pmd, 0);
                }
                kmem_free_pages(pud, 0);
        }
        kmem_free_pages(pgd, 0);
}

/**
 * Loads the address space object's root page table pointer into the calling
 * CPU's CR3 register, causing the aspace to become active.
 */
void
arch_aspace_activate(
        struct aspace *  aspace
)
{
        asm volatile(
                "movq %0,%%cr3" :: "r" (__pa(aspace->arch.pgd)) : "memory"
        );
}
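
/*
 * NOTE: on x86-64, writing CR3 also flushes all non-global TLB entries,
 * so activating an aspace implicitly discards stale user translations;
 * kernel mappings survive only if their PTEs have the global bit set.
 */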

/**
 * Allocates a new page table and links it to a parent page table entry.
 * Returns a pointer to the new page table, or NULL on allocation failure.
 */
static xpte_t *
alloc_page_table(
        xpte_t *  parent_pte
)
{
        xpte_t *new_table;

        new_table = kmem_get_pages(0);
        if (!new_table)
                return NULL;

        /* Link the new table to the parent entry, if one was provided */
        if (parent_pte) {
                xpte_t _pte;

                memset(&_pte, 0, sizeof(_pte));
                _pte.present    = 1;
                _pte.write      = 1;
                _pte.user       = 1;
                _pte.base_paddr = __pa(new_table) >> 12;

                *parent_pte = _pte;
        }

        return new_table;
}

/**
 * Locates an existing page table entry or creates a new one if none exists.
 * Returns a pointer to the page table entry, or NULL on allocation failure.
 */
static xpte_t *
find_or_create_pte(
        struct aspace *  aspace,
        vaddr_t          vaddr,
        vmpagesize_t     pagesz
)
{
        xpte_t *pgd;    /* Page Global Directory: level 0 (root of tree) */
        xpte_t *pud;    /* Page Upper Directory:  level 1 */
        xpte_t *pmd;    /* Page Middle Directory: level 2 */
        xpte_t *ptd;    /* Page Table Directory:  level 3 */

        xpte_t *pge;    /* Page Global Directory Entry */
        xpte_t *pue;    /* Page Upper Directory Entry */
        xpte_t *pme;    /* Page Middle Directory Entry */
        xpte_t *pte;    /* Page Table Directory Entry */

        /* Calculate indices into the directories based on the vaddr specified */
        const unsigned int pgd_index = (vaddr >> 39) & 0x1FF;
        const unsigned int pud_index = (vaddr >> 30) & 0x1FF;
        const unsigned int pmd_index = (vaddr >> 21) & 0x1FF;
        const unsigned int ptd_index = (vaddr >> 12) & 0x1FF;
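
        /*
         * For reference, the x86-64 4-level decoding used above:
         *   bits 47-39: PGD index    bits 38-30: PUD index
         *   bits 29-21: PMD index    bits 20-12: PTD index
         *   bits 11-0:  byte offset within the 4 KB page
         *   (bits 63-48 must be the sign extension of bit 47)
         */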

        /* Traverse the Page Global Directory */
        pgd = aspace->arch.pgd;
        pge = &pgd[pgd_index];
        if (!pge->present && !alloc_page_table(pge))
                return NULL;

        /* Traverse the Page Upper Directory */
        pud = __va(pge->base_paddr << 12);
        pue = &pud[pud_index];
        if (pagesz == VM_PAGE_1GB)
                return pue;
        else if (!pue->present && !alloc_page_table(pue))
                return NULL;
        else if (pue->pagesize)
                panic("BUG: Can't follow PUD entry, pagesize bit set.");

        /* Traverse the Page Middle Directory */
        pmd = __va(pue->base_paddr << 12);
        pme = &pmd[pmd_index];
        if (pagesz == VM_PAGE_2MB)
                return pme;
        else if (!pme->present && !alloc_page_table(pme))
                return NULL;
        else if (pme->pagesize)
                panic("BUG: Can't follow PMD entry, pagesize bit set.");

        /* Traverse the Page Table Directory */
        ptd = __va(pme->base_paddr << 12);
        pte = &ptd[ptd_index];

        return pte;
}

/**
 * Examines a page table to determine if it has any active entries. If not,
 * the page table is freed and the parent page table entry is zeroed.
 * Returns 0 if the table was freed, -1 if it is still in use.
 */
static int
try_to_free_table(
        xpte_t *  table,
        xpte_t *  parent_pte
)
{
        int i;

        /* Determine if the table can be freed */
        for (i = 0; i < 512; i++) {
                if (table[i].present)
                        return -1; /* Nope */
        }

        /* Yup, free the page table */
        kmem_free_pages(table, 0);
        memset(parent_pte, 0, sizeof(xpte_t));
        return 0;
}

/**
 * Zeros a page table entry. If the page table that the PTE was in becomes
 * empty (contains no active mappings), it is freed. Page table freeing
 * continues up to the top of the page table tree (e.g., a single call may
 * result in a PTD, PMD, and PUD being freed; the PGD is never freed by this
 * function).
 */
static void
find_and_delete_pte(
        struct aspace *  aspace,
        vaddr_t          vaddr,
        vmpagesize_t     pagesz
)
{
        xpte_t *pgd;    /* Page Global Directory: level 0 (root of tree) */
        xpte_t *pud;    /* Page Upper Directory:  level 1 */
        xpte_t *pmd;    /* Page Middle Directory: level 2 */
        xpte_t *ptd;    /* Page Table Directory:  level 3 */

        xpte_t *pge;    /* Page Global Directory Entry */
        xpte_t *pue;    /* Page Upper Directory Entry */
        xpte_t *pme;    /* Page Middle Directory Entry */
        xpte_t *pte;    /* Page Table Directory Entry */

        /* Calculate indices into the directories based on the vaddr specified */
        const unsigned int pgd_index = (vaddr >> 39) & 0x1FF;
        const unsigned int pud_index = (vaddr >> 30) & 0x1FF;
        const unsigned int pmd_index = (vaddr >> 21) & 0x1FF;
        const unsigned int ptd_index = (vaddr >> 12) & 0x1FF;

        /* Traverse the Page Global Directory */
        pgd = aspace->arch.pgd;
        pge = &pgd[pgd_index];
        if (!pge->present)
                return;

        /* Traverse the Page Upper Directory */
        pud = __va(pge->base_paddr << 12);
        pue = &pud[pud_index];
        if (!pue->present) {
                return;
        } else if (pagesz == VM_PAGE_1GB) {
                if (!pue->pagesize)
                        panic("BUG: 1GB PTE has child page table attached.\n");

                /* Unmap the 1GB page that this PTE was mapping */
                memset(pue, 0, sizeof(xpte_t));

                /* Try to free the PUD that the PTE was in */
                try_to_free_table(pud, pge);
                return;
        }

        /* Traverse the Page Middle Directory */
        pmd = __va(pue->base_paddr << 12);
        pme = &pmd[pmd_index];
        if (!pme->present) {
                return;
        } else if (pagesz == VM_PAGE_2MB) {
                if (!pme->pagesize)
                        panic("BUG: 2MB PTE has child page table attached.\n");

                /* Unmap the 2MB page that this PTE was mapping */
                memset(pme, 0, sizeof(xpte_t));

                /* Try to free the PMD that the PTE was in */
                if (try_to_free_table(pmd, pue))
                        return; /* nope, couldn't free it */

                /* Try to free the PUD that contained the PMD just freed */
                try_to_free_table(pud, pge);
                return;
        }

        /* Traverse the Page Table Directory */
        ptd = __va(pme->base_paddr << 12);
        pte = &ptd[ptd_index];
        if (!pte->present)
                return;

        /* Unmap the 4KB page that this PTE was mapping */
        memset(pte, 0, sizeof(xpte_t));

        /* Try to free the PTD that the PTE was in */
        if (try_to_free_table(ptd, pme))
                return; /* nope, couldn't free it */

        /* Try to free the PMD that contained the PTD just freed */
        if (try_to_free_table(pmd, pue))
                return; /* nope, couldn't free it */

        /* Try to free the PUD that contained the PMD just freed */
        try_to_free_table(pud, pge);
}

/**
 * Writes a new value to a PTE.
 * TODO: Determine if this is atomic enough.
 */
static void
write_pte(
        xpte_t *      pte,
        paddr_t       paddr,
        vmflags_t     flags,
        vmpagesize_t  pagesz
)
{
        xpte_t _pte;

        memset(&_pte, 0, sizeof(_pte));

        _pte.present = 1;
        if (flags & VM_WRITE)
                _pte.write = 1;
        if (flags & VM_USER)
                _pte.user = 1;
        if (flags & VM_GLOBAL)
                _pte.global = 1;
        if ((flags & VM_EXEC) == 0)
                _pte.no_exec = 1;

        if (pagesz == VM_PAGE_4KB) {
                _pte.base_paddr = paddr >> 12;
        } else if (pagesz == VM_PAGE_2MB) {
                _pte.pagesize   = 1;
                _pte.base_paddr = paddr >> 21;
        } else if (pagesz == VM_PAGE_1GB) {
                _pte.pagesize   = 1;
                _pte.base_paddr = paddr >> 30;
        } else {
                panic("Invalid page size 0x%lx.", pagesz);
        }

        *pte = _pte;
}
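
/*
 * Worked example: write_pte(pte, 0x40200000, VM_WRITE, VM_PAGE_2MB) yields
 * present = 1, write = 1, no_exec = 1 (since VM_EXEC is absent), pagesize = 1,
 * and base_paddr = 0x40200000 >> 21 = 0x201, the 2 MB frame number.
 */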

/**
 * Maps a page into an address space.
 *
 * Arguments:
 *       [IN] aspace: Address space to map page into.
 *       [IN] start:  Address in aspace to map page to.
 *       [IN] paddr:  Physical address of the page to map.
 *       [IN] flags:  Protection and memory type flags.
 *       [IN] pagesz: Size of the page being mapped, in bytes.
 *
 * Returns:
 *       Success: 0
 *       Failure: Error code, the page was not mapped.
 */
int
arch_aspace_map_page(
        struct aspace *  aspace,
        vaddr_t          start,
        paddr_t          paddr,
        vmflags_t        flags,
        vmpagesize_t     pagesz
)
{
        xpte_t *pte;

        /* Locate the page table entry that needs to be updated to map the page */
        pte = find_or_create_pte(aspace, start, pagesz);
        if (!pte)
                return -ENOMEM;

        /* Update the page table entry */
        write_pte(pte, paddr, flags, pagesz);

        return 0;
}
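
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * identity-mapping a 4 MB region with 2 MB pages, assuming VM_PAGE_2MB
 * is the page size in bytes as documented above:
 *
 *      vaddr_t va;
 *      for (va = region_base; va < region_base + 0x400000; va += VM_PAGE_2MB) {
 *              int rc = arch_aspace_map_page(aspace, va, va,
 *                                            VM_WRITE, VM_PAGE_2MB);
 *              if (rc)
 *                      return rc;
 *      }
 */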

/**
 * Unmaps a page from an address space.
 *
 * Arguments:
 *       [IN] aspace: Address space to unmap page from.
 *       [IN] start:  Address in aspace to unmap page from.
 *       [IN] pagesz: Size of the page to unmap.
 */
void
arch_aspace_unmap_page(
        struct aspace *  aspace,
        vaddr_t          start,
        vmpagesize_t     pagesz
)
{
        find_and_delete_pte(aspace, start, pagesz);
}
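
/*
 * NOTE: find_and_delete_pte() only edits the page tables. Stale TLB entries
 * for the unmapped page must be invalidated separately (e.g., via invlpg or
 * a CR3 reload); nothing on the unmap path in this file does that.
 */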

int
arch_aspace_smartmap(struct aspace *src, struct aspace *dst,
                     vaddr_t start, size_t extent)
{
        size_t n = extent / SMARTMAP_ALIGN;
        size_t i;
        xpte_t *src_pgd = src->arch.pgd;
        xpte_t *dst_pgd = dst->arch.pgd;
        xpte_t *src_pge, *dst_pge;

        /* Make sure all of the source PGD entries are present */
        for (i = 0; i < n; i++) {
                src_pge = &src_pgd[i];
                if (!src_pge->present && !alloc_page_table(src_pge))
                        return -ENOMEM;
        }

        /* Perform the SMARTMAP... just copy src PGEs to the dst PGD */
        for (i = 0; i < n; i++) {
                src_pge = &src_pgd[i];
                dst_pge = &dst_pgd[(start >> 39) & 0x1FF];
                BUG_ON(dst_pge->present);
                *dst_pge = *src_pge;
        }

        return 0;
}
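
/*
 * NOTE: a PGD entry spans 512 GB (1 << 39 bytes), so copying one source PGE
 * into the destination slot for 'start' aliases an entire 512 GB window of
 * src into dst. This presumes SMARTMAP_ALIGN is that same 1 << 39 window
 * size, making n equal to 1 for any extent up to one window.
 */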

int
arch_aspace_unsmartmap(struct aspace *src, struct aspace *dst,
                       vaddr_t start, size_t extent)
{
        size_t n = extent / SMARTMAP_ALIGN;
        size_t i;
        xpte_t *dst_pgd = dst->arch.pgd;
        xpte_t *dst_pge;

        /* Unmap the SMARTMAP PGEs */
        for (i = 0; i < n; i++) {
                dst_pge = &dst_pgd[(start >> 39) & 0x1FF];
                dst_pge->present = 0;
        }

        return 0;
}