Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


The source listing below was captured from the repository's web viewer:
[palacios.git] / palacios / src / geekos / vmm_paging.c
1 #include <geekos/vmm_paging.h>
2
3 #include <geekos/vmm.h>
4
5
6
7 extern struct vmm_os_hooks * os_hooks;
8
9
10
11
12 /* We generate a page table to correspond to a given memory layout
13  * pulling pages from the mem_list when necessary
14  * If there are any gaps in the layout, we add them as unmapped pages
15  */
16 vmm_pde_t * generate_guest_page_tables(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
17   ullong_t current_page_addr = 0;
18   uint_t layout_index = 0;
19   uint_t list_index = 0;
20   ullong_t layout_addr = 0;
21   int i, j;
22   uint_t num_entries = layout->num_pages;  // The number of pages left in the layout
23
24
25   
26
27   vmm_pde_t * pde = os_hooks->allocate_pages(1);
28
29   for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
30     if (num_entries == 0) { 
31       pde[i].present = 0;
32       pde[i].flags = 0;
33       pde[i].accessed = 0;
34       pde[i].reserved = 0;
35       pde[i].large_pages = 0;
36       pde[i].global_page = 0;
37       pde[i].vmm_info = 0;
38       pde[i].pt_base_addr = 0;
39     } else {
40       vmm_pte_t * pte = os_hooks->allocate_pages(1);
41
42       pde[i].present = 1;
43       pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
44       pde[i].accessed = 0;
45       pde[i].reserved = 0;
46       pde[i].large_pages = 0;
47       pde[i].global_page = 0;
48       pde[i].vmm_info = 0;
49       pde[i].pt_base_addr = PAGE_ALLIGNED_ADDR(pte);
50
51
52
53       for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
54         layout_addr = get_mem_layout_addr(layout, layout_index);
55         
56         if ((current_page_addr < layout_addr) || (num_entries == 0)) {
57           // We have a gap in the layout, fill with unmapped page
58           pte[j].present = 0;
59           pte[j].flags = 0;
60           pte[j].accessed = 0;
61           pte[j].dirty = 0;
62           pte[j].pte_attr = 0;
63           pte[j].global_page = 0;
64           pte[j].vmm_info = 0;
65           pte[j].page_base_addr = 0;
66
67           current_page_addr += PAGE_SIZE;
68         } else if (current_page_addr == layout_addr) {
69           // Set up the Table entry to map correctly to the layout region
70           layout_region_t * page_region = get_mem_layout_region(layout, layout_addr);
71
72           if (page_region->type == UNMAPPED) {
73             pte[j].present = 0;
74             pte[j].flags = 0;
75           } else {
76             pte[j].present = 1;
77             pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
78           }         
79
80           pte[j].accessed = 0;
81           pte[j].dirty = 0;
82           pte[j].pte_attr = 0;
83           pte[j].global_page = 0;
84           pte[j].vmm_info = 0;
85
86           if (page_region->type == UNMAPPED) {
87             pte[j].page_base_addr = 0;
88           } else if (page_region->type == SHARED) {
89             addr_t host_addr = page_region->host_addr + (layout_addr - page_region->start);
90
91             pte[j].page_base_addr = host_addr >> 12;
92             pte[j].vmm_info = SHARED_PAGE;
93           } else if (page_region->type == GUEST) {
94             addr_t list_addr =  get_mem_list_addr(list, list_index++);
95             
96             if (list_addr == -1) {
97               // error
98               // cleanup...
99               free_guest_page_tables(pde);
100               return NULL;
101             }
102             PrintDebug("Adding guest page (%x)\n", list_addr);
103             pte[j].page_base_addr = list_addr >> 12;
104             
105             // Reset this when we move over to dynamic page allocation
106             //      pte[j].vmm_info = GUEST_PAGE;           
107             pte[j].vmm_info = SHARED_PAGE;
108           }
109
110           num_entries--;
111           current_page_addr += PAGE_SIZE;
112           layout_index++;
113         } else {
114           // error
115           PrintDebug("Error creating page table...\n");
116           // cleanup
117           free_guest_page_tables(pde);
118           return NULL;
119         }
120       }
121     }
122   }
123
124   return pde;
125 }
126
127
128 void free_guest_page_tables(vmm_pde_t * pde) {
129   int i, j;
130
131
132   for (i = 0; (i < MAX_PAGE_DIR_ENTRIES); i++) {
133     if (pde[i].present) {
134       vmm_pte_t * pte = (vmm_pte_t *)(pde[i].pt_base_addr << PAGE_POWER);
135       
136       for (j = 0; (j < MAX_PAGE_TABLE_ENTRIES); j++) {
137         if ((pte[j].present) && (pte[j].vmm_info & GUEST_PAGE)){
138           os_hooks->free_page((void *)(pte[j].page_base_addr  << PAGE_POWER));
139         }
140       }
141       
142       os_hooks->free_page(pte);
143     }
144   }
145
146   os_hooks->free_page(pde);
147 }
148
149
150
151
152 void PrintPDE(void * virtual_address, vmm_pde_t * pde)
153 {
154   PrintDebug("PDE %p -> %p : present=%x, flags=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, kernelInfo=%x\n",
155               virtual_address,
156               (void *) (pde->pt_base_addr << PAGE_POWER),
157               pde->present,
158               pde->flags,
159               pde->accessed,
160               pde->reserved,
161               pde->large_pages,
162               pde->global_page,
163               pde->vmm_info);
164 }
165   
166 void PrintPTE(void * virtual_address, vmm_pte_t * pte)
167 {
168   PrintDebug("PTE %p -> %p : present=%x, flags=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
169               virtual_address,
170               (void*)(pte->page_base_addr << PAGE_POWER),
171               pte->present,
172               pte->flags,
173               pte->accessed,
174               pte->dirty,
175               pte->pte_attr,
176               pte->global_page,
177               pte->vmm_info);
178 }
179
180
181
182 void PrintPD(vmm_pde_t * pde)
183 {
184   int i;
185
186   PrintDebug("Page Directory at %p:\n", pde);
187   for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) { 
188     PrintPDE((void*)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
189   }
190 }
191
192 void PrintPT(void * starting_address, vmm_pte_t * pte) 
193 {
194   int i;
195
196   PrintDebug("Page Table at %p:\n", pte);
197   for (i = 0; (i < MAX_PAGE_TABLE_ENTRIES) && pte[i].present; i++) { 
198     PrintPTE(starting_address + (PAGE_SIZE * i), &(pte[i]));
199   }
200 }
201
202
203
204
205
206 void PrintDebugPageTables(vmm_pde_t * pde)
207 {
208   int i;
209   
210   PrintDebug("Dumping the pages starting with the pde page at %p\n", pde);
211
212   for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) { 
213     PrintPDE((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
214     PrintPT((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), (void *)(pde[i].pt_base_addr << PAGE_POWER));
215   }
216 }
217     
218     
219
220
221
/* 64-bit (long mode, 4-level) analogue of generate_guest_page_tables():
 * builds a PML4 -> PDP -> PD -> PT hierarchy covering 'layout',
 * filling gaps with unmapped pages and pulling GUEST pages from 'list'.
 *
 * NOTE(review): none of the four os_hooks->allocate_pages() calls is
 * checked for failure.
 * NOTE(review): both error returns below leak every page allocated so
 * far -- the cleanup calls are commented out and no 64-bit free routine
 * exists in this file.
 */
pml4e64_t * generate_guest_page_tables_64(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
  pml4e64_t * pml = os_hooks->allocate_pages(1);
  int i, j, k, m;
  ullong_t current_page_addr = 0;   // next guest physical address to map
  uint_t layout_index = 0;          // index of the next layout page
  uint_t list_index = 0;            // next page to pull from the mem list
  ullong_t layout_addr = 0;
  uint_t num_entries = layout->num_pages;  // The number of pages left in the layout

  for (m = 0; m < MAX_PAGE_MAP_ENTRIES_64; m++ ) {
    if (num_entries == 0) {
      // Layout exhausted: zeroed, not-present PML4 entry
      pml[m].present = 0;
      pml[m].writable = 0;
      pml[m].user = 0;
      pml[m].pwt = 0;
      pml[m].pcd = 0;
      pml[m].accessed = 0;
      pml[m].reserved = 0;
      pml[m].zero = 0;
      pml[m].vmm_info = 0;
      pml[m].pdp_base_addr_lo = 0;
      pml[m].pdp_base_addr_hi = 0;
      pml[m].available = 0;
      pml[m].no_execute = 0;
    } else {
      pdpe64_t * pdpe = os_hooks->allocate_pages(1);
      
      pml[m].present = 1;
      pml[m].writable = 1;
      pml[m].user = 1;
      pml[m].pwt = 0;
      pml[m].pcd = 0;
      pml[m].accessed = 0;
      pml[m].reserved = 0;
      pml[m].zero = 0;
      pml[m].vmm_info = 0;
      // Only the low 20 bits of the frame number are stored; hi half is
      // forced to 0 (presumably assumes allocations below 4GB -- TODO confirm)
      pml[m].pdp_base_addr_lo = PAGE_ALLIGNED_ADDR(pdpe) & 0xfffff;
      pml[m].pdp_base_addr_hi = 0;
      pml[m].available = 0;
      pml[m].no_execute = 0;

      for (k = 0; k < MAX_PAGE_DIR_PTR_ENTRIES_64; k++) {
        if (num_entries == 0) {
          // Layout exhausted: zeroed, not-present PDP entry
          pdpe[k].present = 0;
          pdpe[k].writable = 0;
          pdpe[k].user = 0;
          pdpe[k].pwt = 0;
          pdpe[k].pcd = 0;
          pdpe[k].accessed = 0;
          pdpe[k].reserved = 0;
          pdpe[k].large_pages = 0;
          pdpe[k].zero = 0;
          pdpe[k].vmm_info = 0;
          pdpe[k].pd_base_addr_lo = 0;
          pdpe[k].pd_base_addr_hi = 0;
          pdpe[k].available = 0;
          pdpe[k].no_execute = 0;
        } else {
          pde64_t * pde = os_hooks->allocate_pages(1);

          pdpe[k].present = 1;
          pdpe[k].writable = 1;
          pdpe[k].user = 1;
          pdpe[k].pwt = 0;
          pdpe[k].pcd = 0;
          pdpe[k].accessed = 0;
          pdpe[k].reserved = 0;
          pdpe[k].large_pages = 0;
          pdpe[k].zero = 0;
          pdpe[k].vmm_info = 0;
          // Same 20-bit truncation as the PML4 level above
          pdpe[k].pd_base_addr_lo = PAGE_ALLIGNED_ADDR(pde) & 0xfffff;
          pdpe[k].pd_base_addr_hi = 0;
          pdpe[k].available = 0;
          pdpe[k].no_execute = 0;



          for (i = 0; i < MAX_PAGE_DIR_ENTRIES_64; i++) {
            if (num_entries == 0) { 
              // Layout exhausted: zeroed, not-present PD entry
              pde[i].present = 0;
              pde[i].flags = 0;
              pde[i].accessed = 0;
              pde[i].reserved = 0;
              pde[i].large_pages = 0;
              pde[i].reserved2 = 0;
              pde[i].vmm_info = 0;
              pde[i].pt_base_addr_lo = 0;
              pde[i].pt_base_addr_hi = 0;
              pde[i].available = 0;
              pde[i].no_execute = 0;
            } else {
              pte64_t * pte = os_hooks->allocate_pages(1);
              
              pde[i].present = 1;
              pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
              pde[i].accessed = 0;
              pde[i].reserved = 0;
              pde[i].large_pages = 0;
              pde[i].reserved2 = 0;
              pde[i].vmm_info = 0;
              pde[i].pt_base_addr_lo = PAGE_ALLIGNED_ADDR(pte) & 0xfffff;
              pde[i].pt_base_addr_hi = 0;
              pde[i].available = 0;
              pde[i].no_execute = 0;

              
              // Leaf level: same three-way logic as the 32-bit generator
              for (j = 0; j < MAX_PAGE_TABLE_ENTRIES_64; j++) {
                layout_addr = get_mem_layout_addr(layout, layout_index);
                
                if ((current_page_addr < layout_addr) || (num_entries == 0)) {
                  // We have a gap in the layout, fill with unmapped page
                  pte[j].present = 0;
                  pte[j].flags = 0;
                  pte[j].accessed = 0;
                  pte[j].dirty = 0;
                  pte[j].pte_attr = 0;
                  pte[j].global_page = 0;
                  pte[j].vmm_info = 0;
                  pte[j].page_base_addr_lo = 0;
                  pte[j].page_base_addr_hi = 0;
                  pte[j].available = 0;
                  pte[j].no_execute = 0;

                  current_page_addr += PAGE_SIZE;
                } else if (current_page_addr == layout_addr) {
                  // Set up the Table entry to map correctly to the layout region
                  layout_region_t * page_region = get_mem_layout_region(layout, layout_addr);
                  
                  if (page_region->type == UNMAPPED) {
                    pte[j].present = 0;
                    pte[j].flags = 0;
                  } else {
                    pte[j].present = 1;
                    pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
                  }         
                  
                  pte[j].accessed = 0;
                  pte[j].dirty = 0;
                  pte[j].pte_attr = 0;
                  pte[j].global_page = 0;
                  pte[j].vmm_info = 0;
                  pte[j].available = 0;
                  pte[j].no_execute = 0;

                  if (page_region->type == UNMAPPED) {
                    pte[j].page_base_addr_lo = 0;
                    pte[j].page_base_addr_hi = 0;
                  } else if (page_region->type == SHARED) {
                    // Host-backed page: offset into the region's host mapping
                    addr_t host_addr = page_region->host_addr + (layout_addr - page_region->start);
                    
                    pte[j].page_base_addr_lo = PAGE_ALLIGNED_ADDR(host_addr) & 0xfffff;
                    pte[j].page_base_addr_hi = 0;
                    pte[j].vmm_info = SHARED_PAGE;
                  } else if (page_region->type == GUEST) {
                    // Guest-owned page: pull the next page from the mem list
                    addr_t list_addr =  get_mem_list_addr(list, list_index++);
                    
                    if (list_addr == -1) {
                      // mem list exhausted before the layout was satisfied
                      // NOTE(review): leaks all pages allocated so far
                      //free_guest_page_tables(pde);
                      return NULL;
                    }
                    PrintDebug("Adding guest page (%x)\n", list_addr);
                    pte[j].page_base_addr_lo = PAGE_ALLIGNED_ADDR(list_addr) & 0xfffff;
                    pte[j].page_base_addr_hi = 0;

                    // Reset this when we move over to dynamic page allocation
                    //      pte[j].vmm_info = GUEST_PAGE;           
                    pte[j].vmm_info = SHARED_PAGE;
                  }
                  
                  num_entries--;
                  current_page_addr += PAGE_SIZE;
                  layout_index++;
                } else {
                  // current_page_addr overshot the layout: layout is malformed
                  PrintDebug("Error creating page table...\n");
                  // NOTE(review): leaks all pages allocated so far
                  //              free_guest_page_tables64(pde);
                  return NULL;
                }
              }
            }
          }
        }
      }
    }
  }
  return pml;
}