Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are similar.
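For example, to track a release branch (the branch name below is illustrative; the first command lists the release branches that actually exist), execute

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2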


palacios/src/geekos/vmm_paging.c:
#include <geekos/vmm_paging.h>

#include <geekos/vmm.h>


extern struct vmm_os_hooks * os_hooks;


/* We generate a page table to correspond to a given memory layout
 * pulling pages from the mem_list when necessary
 * If there are any gaps in the layout, we add them as unmapped pages
 */
vmm_pde_t * generate_guest_page_tables(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
  ullong_t current_page_addr = 0;
  uint_t layout_index = 0;
  uint_t list_index = 0;
  ullong_t layout_addr = 0;
  int i, j;
  uint_t num_entries = layout->num_pages;  // The number of pages left in the layout

  vmm_pde_t * pde = os_hooks->allocate_pages(1);

  for (i = 0; i < MAX_PAGE_DIR_ENTRIES; i++) {
    if (num_entries == 0) { 
      pde[i].present = 0;
      pde[i].flags = 0;
      pde[i].accessed = 0;
      pde[i].reserved = 0;
      pde[i].large_pages = 0;
      pde[i].global_page = 0;
      pde[i].vmm_info = 0;
      pde[i].pt_base_addr = 0;
    } else {
      vmm_pte_t * pte = os_hooks->allocate_pages(1);

      pde[i].present = 1;
      pde[i].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
      pde[i].accessed = 0;
      pde[i].reserved = 0;
      pde[i].large_pages = 0;
      pde[i].global_page = 0;
      pde[i].vmm_info = 0;
      pde[i].pt_base_addr = PAGE_ALLIGNED_ADDR(pte);

      for (j = 0; j < MAX_PAGE_TABLE_ENTRIES; j++) {
        layout_addr = get_mem_layout_addr(layout, layout_index);

        if ((current_page_addr < layout_addr) || (num_entries == 0)) {
          // We have a gap in the layout, fill with unmapped page
          pte[j].present = 0;
          pte[j].flags = 0;
          pte[j].accessed = 0;
          pte[j].dirty = 0;
          pte[j].pte_attr = 0;
          pte[j].global_page = 0;
          pte[j].vmm_info = 0;
          pte[j].page_base_addr = 0;

          current_page_addr += PAGE_SIZE;
        } else if (current_page_addr == layout_addr) {
          // Set up the Table entry to map correctly to the layout region
          layout_region_t * page_region = get_mem_layout_region(layout, layout_addr);

          if (page_region->type == UNMAPPED) {
            pte[j].present = 0;
            pte[j].flags = 0;
          } else {
            pte[j].present = 1;
            pte[j].flags = VM_READ | VM_WRITE | VM_EXEC | VM_USER;
          }

          pte[j].accessed = 0;
          pte[j].dirty = 0;
          pte[j].pte_attr = 0;
          pte[j].global_page = 0;
          pte[j].vmm_info = 0;

          if (page_region->type == UNMAPPED) {
            pte[j].page_base_addr = 0;
          } else if (page_region->type == SHARED) {
            addr_t host_addr = page_region->host_addr + (layout_addr - page_region->start);

            pte[j].page_base_addr = host_addr >> 12;
            pte[j].vmm_info = SHARED_PAGE;
          } else if (page_region->type == GUEST) {
            addr_t list_addr =  get_mem_list_addr(list, list_index++);

            if (list_addr == -1) {
              // error
              // cleanup...
              free_guest_page_tables(pde);
              return NULL;
            }
            PrintDebug("Adding guest page (%x)\n", list_addr);
            pte[j].page_base_addr = list_addr >> 12;

            // Reset this when we move over to dynamic page allocation
            //      pte[j].vmm_info = GUEST_PAGE;
            pte[j].vmm_info = SHARED_PAGE;
          }

          num_entries--;
          current_page_addr += PAGE_SIZE;
          layout_index++;
        } else {
          // error
          PrintDebug("Error creating page table...\n");
          // cleanup
          free_guest_page_tables(pde);
          return NULL;
        }
      }
    }
  }

  return pde;
}
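
/*
 * Note on the address encoding used above (an inference from the shifts in
 * this file, not a statement from the original author): pt_base_addr and
 * page_base_addr hold a 4KB page frame number, i.e. the physical address
 * with its low 12 bits dropped (presumably PAGE_POWER == 12, given the mixed
 * use of ">> 12" and "<< PAGE_POWER").  A small worked example with a
 * hypothetical address:
 *
 *   addr_t phys  = 0x00ABC123;            // arbitrary physical address
 *   addr_t frame = phys >> 12;            // 0x00ABC -- what gets stored
 *   addr_t base  = frame << PAGE_POWER;   // 0x00ABC000, the page base
 *
 * The offset within the page (0x123 here) is intentionally discarded;
 * entries always refer to page-aligned physical addresses.
 */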


/* Walk the page directory, free any guest-owned pages referenced by the
 * page tables, free each page table, and finally free the directory itself.
 */
void free_guest_page_tables(vmm_pde_t * pde) {
  int i, j;

  for (i = 0; (i < MAX_PAGE_DIR_ENTRIES); i++) {
    if (pde[i].present) {
      vmm_pte_t * pte = (vmm_pte_t *)(pde[i].pt_base_addr << PAGE_POWER);

      for (j = 0; (j < MAX_PAGE_TABLE_ENTRIES); j++) {
        if ((pte[j].present) && (pte[j].vmm_info & GUEST_PAGE)){
          os_hooks->free_page((void *)(pte[j].page_base_addr << PAGE_POWER));
        }
      }

      os_hooks->free_page(pte);
    }
  }

  os_hooks->free_page(pde);
}


void PrintPDE(void * virtual_address, vmm_pde_t * pde)
{
  PrintDebug("PDE %p -> %p : present=%x, flags=%x, accessed=%x, reserved=%x, largePages=%x, globalPage=%x, vmm_info=%x\n",
              virtual_address,
              (void *) (pde->pt_base_addr << PAGE_POWER),
              pde->present,
              pde->flags,
              pde->accessed,
              pde->reserved,
              pde->large_pages,
              pde->global_page,
              pde->vmm_info);
}

void PrintPTE(void * virtual_address, vmm_pte_t * pte)
{
  PrintDebug("PTE %p -> %p : present=%x, flags=%x, accessed=%x, dirty=%x, pteAttribute=%x, globalPage=%x, vmm_info=%x\n",
              virtual_address,
              (void*)(pte->page_base_addr << PAGE_POWER),
              pte->present,
              pte->flags,
              pte->accessed,
              pte->dirty,
              pte->pte_attr,
              pte->global_page,
              pte->vmm_info);
}


void PrintPD(vmm_pde_t * pde)
{
  int i;

  PrintDebug("Page Directory at %p:\n", pde);
  for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) { 
    PrintPDE((void*)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
  }
}

void PrintPT(void * starting_address, vmm_pte_t * pte) 
{
  int i;

  PrintDebug("Page Table at %p:\n", pte);
  for (i = 0; (i < MAX_PAGE_TABLE_ENTRIES) && pte[i].present; i++) { 
    PrintPTE(starting_address + (PAGE_SIZE * i), &(pte[i]));
  }
}


void PrintDebugPageTables(vmm_pde_t * pde)
{
  int i;

  PrintDebug("Dumping the pages starting with the pde page at %p\n", pde);

  for (i = 0; (i < MAX_PAGE_DIR_ENTRIES) && pde[i].present; i++) { 
    PrintPDE((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), &(pde[i]));
    PrintPT((void *)(PAGE_SIZE * MAX_PAGE_TABLE_ENTRIES * i), (void *)(pde[i].pt_base_addr << PAGE_POWER));
  }
}
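

/*
 * Usage sketch (illustrative only, not part of the original file): given a
 * memory layout and a free-page list that the caller has already populated,
 * build the guest page tables, dump them for inspection, and tear them down
 * again.  The wrapper function below is hypothetical; only
 * generate_guest_page_tables, PrintDebugPageTables, and
 * free_guest_page_tables come from this file.
 */
static int setup_guest_paging_sketch(vmm_mem_layout_t * layout, vmm_mem_list_t * list) {
  vmm_pde_t * pde = generate_guest_page_tables(layout, list);

  if (pde == NULL) {
    // Generation failed part-way; it has already freed whatever it allocated
    return -1;
  }

  // Dump the directory and every present page table for debugging
  PrintDebugPageTables(pde);

  // ... the caller would point the guest's CR3 at the directory here ...

  // On guest teardown, release the page tables again
  free_guest_page_tables(pde);
  return 0;
}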