1 #include <geekos/vmm_mem.h>
2 #include <geekos/vmm.h>
3 #include <geekos/vmm_util.h>
/* Callback table (malloc/free/print/page allocation) supplied by the host OS
 * that embeds the VMM; defined elsewhere (see the test harness below). */
5 extern struct vmm_os_hooks * os_hooks;
/*
 * init_shadow_region - initialize a shadow map entry: record the guest
 * address range, the guest and host region types, and clear the list links.
 * NOTE(review): this listing is partial -- the function's braces fall on
 * elided source lines.
 */
8 void init_shadow_region(shadow_region_t * entry,
9 addr_t guest_addr_start,
10 addr_t guest_addr_end,
11 guest_region_type_t guest_region_type,
12 host_region_type_t host_region_type)
14 entry->guest_type = guest_region_type;
15 entry->guest_start = guest_addr_start;
16 entry->guest_end = guest_addr_end;
17 entry->host_type = host_region_type;
/* Entry starts detached from any list: both links cleared in one chained assignment. */
18 entry->next=entry->prev = NULL;
/*
 * init_shadow_region_physical - convenience initializer for a region backed
 * by host physical memory: performs the common initialization, then records
 * the base host physical address of the backing range.
 */
21 void init_shadow_region_physical(shadow_region_t * entry,
22 addr_t guest_addr_start,
23 addr_t guest_addr_end,
24 guest_region_type_t guest_region_type,
25 addr_t host_addr_start,
26 host_region_type_t host_region_type)
28 init_shadow_region(entry, guest_addr_start, guest_addr_end, guest_region_type, host_region_type);
/* Base of the host physical range that backs [guest_addr_start, guest_addr_end). */
29 entry->host_addr.phys_addr.host_start = host_addr_start;
/* init_shadow_map - reset a shadow map to the empty state (body elided in this listing). */
34 void init_shadow_map(shadow_map_t * map) {
/*
 * free_shadow_map - walk the region list and release every entry.
 * NOTE(review): the loop header and the free call fall on elided lines;
 * tmp presumably holds the node being released while cursor advances.
 */
41 void free_shadow_map(shadow_map_t * map) {
42 shadow_region_t * cursor = map->head;
43 shadow_region_t * tmp = NULL;
/* Advance before freeing so the current node can be released safely. */
47 cursor = cursor->next;
56 /* This is slightly different semantically from the mem list, in that
57 * we don't allow overlaps. We could probably allow overlapping regions
58 * of the same type... but I'll let someone else deal with that.
/*
 * add_shadow_region - insert a region into the map's address-sorted,
 * doubly linked region list, rejecting overlapping regions.
 * NOTE(review): return statements, the scan loop header, num_regions
 * updates, and closing braces fall on elided lines.
 */
60 int add_shadow_region(shadow_map_t * map,
61 shadow_region_t * region)
63 shadow_region_t * cursor = map->head;
/* Empty list, or the new region lies entirely before the first entry:
 * the new region becomes the head. */
65 if ((!cursor) || (cursor->guest_start >= region->guest_end)) {
67 region->next = cursor;
74 // Check if it overlaps with the current cursor
/* NOTE(review): this test only catches regions whose start lies strictly
 * inside cursor; other overlap shapes are presumably rejected by the
 * branch conditions below -- confirm against the full source. */
75 if ((cursor->guest_end > region->guest_start) && (cursor->guest_start < region->guest_start)) {
76 // overlaps not allowed
80 if (!(cursor->next)) {
81 // add to the end of the list
82 cursor->next = region;
83 region->prev = cursor;
/* New region fits entirely in the gap between cursor and cursor->next:
 * splice it in, fixing both directions of the links. */
87 } else if (cursor->next->guest_start >= region->guest_end) {
89 region->next = cursor->next;
90 region->prev = cursor;
92 cursor->next->prev = region;
93 cursor->next = region;
/* Next entry ends before the new region starts: keep scanning forward.
 * NOTE(review): with half-open ranges, exact adjacency
 * (guest_end == region->guest_start) fails this test and falls through to
 * the error branch -- possibly intended to be <= ; confirm. */
98 } else if (cursor->next->guest_end < region->guest_start) {
99 cursor = cursor->next;
101 // This cannot happen!
102 // we should panic here
107 // This cannot happen
108 // We should panic here
/* delete_shadow_region - remove a region from the map (parameters and body elided in this listing). */
113 int delete_shadow_region(shadow_map_t * map,
/* get_shadow_region_by_index - linear walk from the head to the requested
 * region (index parameter and loop body elided in this listing). */
121 shadow_region_t *get_shadow_region_by_index(shadow_map_t * map,
123 shadow_region_t * reg = map->head;
/*
 * get_shadow_region_by_addr - find the region containing a guest address.
 * NOTE(review): the loop header, return statements, and closing braces
 * fall on elided lines.
 */
137 shadow_region_t * get_shadow_region_by_addr(shadow_map_t * map,
139 shadow_region_t * reg = map->head;
/* Half-open interval test: addr lies in [guest_start, guest_end). */
142 if ((reg->guest_start <= addr) && (reg->guest_end > addr)) {
/* The list is address-sorted, so once guest_start passes addr no later
 * region can contain it -- the search can stop early. */
144 } else if (reg->guest_start > addr) {
/*
 * lookup_shadow_map_addr - translate a guest physical address through the map.
 * For physically backed regions *host_addr receives the translated host
 * address and the region's host_type is returned; HOST_REGION_INVALID is
 * returned when no region contains guest_addr.
 */
155 host_region_type_t lookup_shadow_map_addr(shadow_map_t * map, addr_t guest_addr, addr_t * host_addr) {
156 shadow_region_t * reg = get_shadow_region_by_addr(map, guest_addr);
/* No region covers this guest address. */
160 return HOST_REGION_INVALID;
162 switch (reg->host_type) {
163 case HOST_REGION_PHYSICAL_MEMORY:
/* The offset within the guest range maps linearly onto the host range. */
164 *host_addr = (guest_addr - reg->guest_start) + reg->host_addr.phys_addr.host_start;
165 return reg->host_type;
166 case HOST_REGION_MEMORY_MAPPED_DEVICE:
167 case HOST_REGION_UNALLOCATED:
/* (Intervening lines elided; this return reports the region's type.) */
171 return reg->host_type;
/*
 * guest_paddr_to_host_paddr - translate guest_addr within a single known
 * region entry.  The failure return for an out-of-range address falls on
 * an elided line, as does the guest_addr parameter declaration.
 */
177 int guest_paddr_to_host_paddr(shadow_region_t * entry,
179 addr_t * host_addr) {
/* Bounds check: the address must lie inside [guest_start, guest_end). */
181 if (!((guest_addr >= entry->guest_start) &&
182 (guest_addr < entry->guest_end))) {
186 switch (entry->host_type) {
187 case HOST_REGION_PHYSICAL_MEMORY:
188 case HOST_REGION_MEMORY_MAPPED_DEVICE:
189 case HOST_REGION_UNALLOCATED:
/* Linear offset translation -- same formula as lookup_shadow_map_addr. */
190 *host_addr = (guest_addr-entry->guest_start) + entry->host_addr.phys_addr.host_start;
/*
 * print_shadow_map - dump every region in the map via PrintDebug: guest
 * range and type, host backing address where applicable, and host type.
 * NOTE(review): the loop header, trailing format arguments, and cursor
 * advance fall on elided lines.
 */
202 void print_shadow_map(shadow_map_t * map) {
203 shadow_region_t * cur = map->head;
206 PrintDebug("Memory Layout (regions: %d) \n", map->num_regions);
/* guest_end is exclusive, so guest_end - 1 is printed as the last byte. */
209 PrintDebug("%d: 0x%x - 0x%x (%s) -> ", i, cur->guest_start, cur->guest_end - 1,
210 cur->guest_type == GUEST_REGION_PHYSICAL_MEMORY ? "GUEST_REGION_PHYSICAL_MEMORY" :
211 cur->guest_type == GUEST_REGION_NOTHING ? "GUEST_REGION_NOTHING" :
212 cur->guest_type == GUEST_REGION_MEMORY_MAPPED_DEVICE ? "GUEST_REGION_MEMORY_MAPPED_DEVICE" :
/* Only these host types carry a meaningful phys_addr.host_start to print. */
214 if (cur->host_type == HOST_REGION_PHYSICAL_MEMORY ||
215 cur->host_type == HOST_REGION_UNALLOCATED ||
216 cur->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE) {
217 PrintDebug("0x%x", cur->host_addr.phys_addr.host_start);
220 cur->host_type == HOST_REGION_PHYSICAL_MEMORY ? "HOST_REGION_PHYSICAL_MEMORY" :
/* NOTE(review): "HOST_REGION_UNALLOACTED" below is a misspelling of
 * "UNALLOCATED" in the runtime output string; left untouched here since
 * changing output text is a behavior change. */
221 cur->host_type == HOST_REGION_UNALLOCATED ? "HOST_REGION_UNALLOACTED" :
222 cur->host_type == HOST_REGION_NOTHING ? "HOST_REGION_NOTHING" :
223 cur->host_type == HOST_REGION_MEMORY_MAPPED_DEVICE ? "HOST_REGION_MEMORY_MAPPED_DEVICE" :
224 cur->host_type == HOST_REGION_REMOTE ? "HOST_REGION_REMOTE" :
225 cur->host_type == HOST_REGION_SWAPPED ? "HOST_REGION_SWAPPED" :
/* ---- Standalone test harness (presumably guarded by a test #ifdef in the
 * full source): libc-backed stub implementations for the os_hooks table. ---- */
252 struct vmm_os_hooks * os_hooks;
/* Stub for os_hooks->malloc (body elided in this listing). */
254 void * TestMalloc(uint_t size) {
/* Page-allocator stub: hands out 4096-byte pages straight from malloc. */
258 void * TestAllocatePages(int size) {
259 return malloc(4096 * size);
/* printf-style debug output stub for os_hooks->print_debug (body elided). */
263 void TestPrint(const char * fmt, ...) {
/*
 * mem_list_add_test_1 - exercise add_mem_list_pages with a sequence of page
 * ranges chosen to hit distinct insertion cases (disjoint range, adjacent
 * range, large overlapping range), printing the list after each step.
 * The return value/result check falls on elided lines.
 */
271 int mem_list_add_test_1( vmm_mem_list_t * list) {
275 PrintDebug("\n\nTesting Memory List\n");
/* Initial range: pages 6..15. */
279 offset = PAGE_SIZE * 6;
280 PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + (PAGE_SIZE * 10));
281 add_mem_list_pages(list, offset, 10);
282 print_mem_list(list);
/* (The offset for this 4-page step is set on an elided line.) */
286 PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + PAGE_SIZE * 4);
287 add_mem_list_pages(list, offset, 4);
288 print_mem_list(list);
/* Single page at 20 -- disjoint from the earlier ranges. */
290 offset = PAGE_SIZE * 20;
291 PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + (PAGE_SIZE * 1));
292 add_mem_list_pages(list, offset, 1);
293 print_mem_list(list);
/* Pages 21..23 -- immediately adjacent to the page just added. */
295 offset = PAGE_SIZE * 21;
296 PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + (PAGE_SIZE * 3));
297 add_mem_list_pages(list, offset, 3);
298 print_mem_list(list);
/* Pages 10..39 -- overlaps everything added so far. */
301 offset = PAGE_SIZE * 10;
302 PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + (PAGE_SIZE * 30));
303 add_mem_list_pages(list, offset, 30);
304 print_mem_list(list);
/* Single page at 5 -- abuts the low end of the first range. */
307 offset = PAGE_SIZE * 5;
308 PrintDebug("Adding 0x%x - 0x%x\n", offset, offset + (PAGE_SIZE * 1));
309 add_mem_list_pages(list, offset, 1);
310 print_mem_list(list);
/*
 * mem_layout_add_test_1 - exercise add_guest_mem_range on a freshly
 * initialized layout with several ranges, printing the layout after each
 * insertion.  The start/end values for each step, and the final result
 * check, fall on elided lines.
 */
318 int mem_layout_add_test_1(vmm_mem_layout_t * layout) {
324 PrintDebug("\n\nTesting Memory Layout\n");
326 init_mem_layout(layout);
/* Step 1: add a range and show the resulting layout. */
330 PrintDebug("Adding 0x%x - 0x%x\n", start, end);
331 add_guest_mem_range(layout, start, end);
332 print_mem_layout(layout);
/* Step 2. */
337 PrintDebug("Adding 0x%x - 0x%x\n", start, end);
338 add_guest_mem_range(layout, start, end);
339 print_mem_layout(layout);
/* Step 3. */
343 PrintDebug("Adding 0x%x - 0x%x\n", start, end);
344 add_guest_mem_range(layout, start, end);
345 print_mem_layout(layout);
/* Step 4. */
349 PrintDebug("Adding 0x%x - 0x%x\n", start, end);
350 add_guest_mem_range(layout, start, end);
351 print_mem_layout(layout);
/* Step 5. */
356 PrintDebug("Adding 0x%x - 0x%x\n", start, end);
357 add_guest_mem_range(layout, start, end);
358 print_mem_layout(layout);
/*
 * main - test driver: point the os_hooks table at the libc-backed stubs
 * above, then run the mem-list and mem-layout test cases and report their
 * results.  (The vmm_mem_list_t declaration falls on an elided line.)
 */
368 int main(int argc, char ** argv) {
369 struct vmm_os_hooks dummy_hooks;
370 os_hooks = &dummy_hooks;
372 vmm_mem_layout_t layout;
/* Wire the hook table to the local test stubs. */
375 os_hooks->malloc = &TestMalloc;
376 os_hooks->free = &free;
377 os_hooks->print_debug = &TestPrint;
378 os_hooks->allocate_pages = &TestAllocatePages;
382 printf("mem_list_add_test_1: %d\n", mem_list_add_test_1(&list));
383 printf("layout_add_test_1: %d\n", mem_layout_add_test_1(&layout));