Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Minor cleanup in memory tracking
[palacios.git] / palacios / src / palacios / vmm_mem_track.c
1 /*
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National
4  * Science Foundation and the Department of Energy.
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2014, The V3VEE Project <http://www.v3vee.org>
11  * All rights reserved.
12  *
13  * Author: Peter Dinda <pdinda@northwestern.edu>
14  *
15  * This is free software.  You are permitted to use,
16  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
17  */
18
19 #include <palacios/vmm.h>
20 #include <palacios/vm_guest.h>
21 #include <palacios/vm_guest_mem.h>
22 #include <palacios/vmm_mem_track.h>
23 #include <palacios/vmm_shadow_paging.h>
24 #include <palacios/vmm_direct_paging.h>
25 #include <palacios/vmm_time.h>
26
27
/* Compile out verbose debug output unless memory-tracking debugging is
 * enabled in the build configuration. */
#ifndef V3_CONFIG_DEBUG_MEM_TRACK
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

/* Integer ceiling of x/y.
 * NOTE: both arguments are evaluated twice - do not pass expressions
 * with side effects. */
#define CEIL_DIV(x,y) (((x)/(y)) + !!((x)%(y)))
34
35
36 // This should be identical across cores, but this
37 // implementation surely is not
38 static uint64_t host_time()
39 {
40     return v3_get_host_time(NULL);
41 }
42
43 static uint8_t *alloc_bitmap(struct v3_vm_info *vm)
44 {
45     uint8_t *b;
46     
47     if (!(b =  V3_Malloc(CEIL_DIV(CEIL_DIV(vm->mem_size,PAGE_SIZE_4KB),8)))) {
48         return NULL;
49     }
50
51     return b;
52 }
53
54
55 static void free_bitmap(uint8_t *b)
56 {
57     if (b) { 
58         V3_Free(b);
59     }
60
61 }
62
63 int v3_mem_track_deinit(struct v3_vm_info *vm)
64 {
65     int i;
66
67     for (i=0;i<vm->num_cores;i++) {
68         free_bitmap(vm->cores[i].memtrack_state.access_bitmap);
69         memset(&(vm->cores[i].memtrack_state),0,sizeof(struct v3_core_mem_track));
70     }
71
72     PrintDebug(vm,VCORE_NONE,"Memory tracking deinitialized\n");
73
74     return 0;
75 }
76
77 int v3_mem_track_init(struct v3_vm_info *vm)
78 {
79     int i;
80
81
82     memset(&(vm->memtrack_state),0,sizeof(struct v3_vm_mem_track));
83
84     for (i=0;i<vm->num_cores;i++) {
85         memset(&(vm->cores[i].memtrack_state),0,sizeof(struct v3_core_mem_track));
86         vm->cores[i].memtrack_state.num_pages=CEIL_DIV(vm->mem_size,PAGE_SIZE_4KB);
87         if (!(vm->cores[i].memtrack_state.access_bitmap = alloc_bitmap(vm))) {
88             PrintError(vm,VCORE_NONE,"Unable to allocate for memory tracking\n");
89             v3_mem_track_deinit(vm);
90             return -1;
91         }
92     }
93
94     PrintDebug(vm,VCORE_NONE,"Memory tracking initialized\n");
95
96     return 0;
97 }
98
99
100
101 //
102 // Note use of old-style callbacks here
103 //
104 static int shadow_paging_callback(struct guest_info *core, 
105                                   struct v3_shdw_pg_event *event,
106                                   void      *priv_data)
107 {
108     
109
110     if (event->event_type==SHADOW_PAGEFAULT &&
111         event->event_order==SHADOW_POSTIMPL) {
112
113         addr_t gpa;
114
115         PrintDebug(core->vm_info,core,"Memory tracking: shadow callback gva=%p\n",(void*)event->gva);
116
117         if (!v3_gva_to_gpa(core,event->gva,&gpa)) {
118             // note the assumption here that it is for a 4KB page... 
119             PrintDebug(core->vm_info,core,"Memory tracking: shadow callback corresponding gpa=%p\n",(void*)gpa);
120             SET_BIT(core->memtrack_state.access_bitmap,gpa/PAGE_SIZE_4KB);
121         } else {
122             // no worries, this isn't physical memory
123         }
124     } else {
125         // we don't care about other events
126     }
127     
128     return 0;
129 }
130
131
132 static int passthrough_paging_callback(struct guest_info *core, 
133                                        struct v3_passthrough_pg_event *event,
134                                        void      *priv_data)
135 {
136     uint64_t page_start, page_end, page;
137     
138
139     if (event->event_type==PASSTHROUGH_PAGEFAULT &&
140         event->event_order==PASSTHROUGH_POSTIMPL) {
141
142         PrintDebug(core->vm_info,core,"Memory tracking: passthrough callback gpa=%p..%p\n",(void*)event->gpa_start,(void*)event->gpa_end);
143
144         page_start = event->gpa_start/PAGE_SIZE_4KB;
145         page_end = event->gpa_end/PAGE_SIZE_4KB;
146         
147         for (page=page_start; page<=page_end;page++) { 
148             SET_BIT(core->memtrack_state.access_bitmap,page);
149         }
150     } else {
151         // we don't care about other events
152     }
153     
154     return 0;
155 }
156
157 static int nested_paging_callback(struct guest_info *core, 
158                                   struct v3_nested_pg_event *event,
159                                   void      *priv_data)
160 {
161     uint64_t page_start, page_end, page;
162
163     
164     if (event->event_type==NESTED_PAGEFAULT &&
165         event->event_order==NESTED_POSTIMPL) {
166
167         PrintDebug(core->vm_info,core,"Memory tracking: nested callback gpa=%p..%p\n",(void*)event->gpa_start,(void*)event->gpa_end);
168
169         page_start = event->gpa_start/PAGE_SIZE_4KB;
170         page_end = event->gpa_end/PAGE_SIZE_4KB;
171         
172         for (page=page_start; page<=page_end;page++) { 
173             SET_BIT(core->memtrack_state.access_bitmap,page);
174         }
175     } else {
176         // we don't care about other events
177     }
178     
179     return 0;
180 }
181
182
183
184
185 static void restart(struct guest_info *core)
186 {
187
188     core->memtrack_state.start_time=host_time();
189
190     PrintDebug(core->vm_info,core,"memtrack: restart at %llu\n",core->memtrack_state.start_time);
191
192     memset(core->memtrack_state.access_bitmap,0,CEIL_DIV(core->memtrack_state.num_pages,8));
193
194     if (core->shdw_pg_mode==SHADOW_PAGING) { 
195         v3_invalidate_shadow_pts(core);
196         v3_invalidate_passthrough_addr_range(core,0,core->vm_info->mem_size,NULL,NULL);
197     } else if (core->shdw_pg_mode==NESTED_PAGING) { 
198         v3_invalidate_nested_addr_range(core,0,core->vm_info->mem_size,NULL,NULL);
199     }
200     
201     PrintDebug(core->vm_info,core,"memtrack: restart complete at %llu\n",host_time());
202 }
203
204 int v3_mem_track_start(struct v3_vm_info *vm, v3_mem_track_access_t access, v3_mem_track_reset_t reset, uint64_t period)
205 {
206     int i;
207     int unwind=0;
208
209
210     PrintDebug(vm,VCORE_NONE,"Memory tracking: start access=0x%x, reset=0x%x, period=%llu\n",
211                access,reset,period);
212
213     if (vm->memtrack_state.started) { 
214         PrintError(vm,VCORE_NONE,"Memory tracking already started!\n");
215         return -1;
216     }
217
218     if (access != V3_MEM_TRACK_ACCESS) { 
219         PrintError(vm,VCORE_NONE,"Unsupported access mode\n");
220         return -1;
221     }
222
223     vm->memtrack_state.access_type=access;
224     vm->memtrack_state.reset_type=reset;
225     vm->memtrack_state.period=period;
226
227     vm->memtrack_state.started=1;
228
229     for (i=0;i<vm->num_cores;i++) {
230         if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) { 
231             if (v3_register_shadow_paging_event_callback(vm,shadow_paging_callback,NULL)) { 
232                 PrintError(vm,VCORE_NONE,"Mem track cannot register for shadow paging event\n");
233                 unwind=i+1;
234                 goto fail;
235             }
236
237             if (v3_register_passthrough_paging_event_callback(vm,passthrough_paging_callback,NULL)) { 
238                 PrintError(vm,VCORE_NONE,"Mem track cannot register for passthrough paging event\n");
239                 unwind=i+1;
240                 goto fail;
241             }
242         } else if (vm->cores[i].shdw_pg_mode==NESTED_PAGING) { 
243             if (v3_register_nested_paging_event_callback(vm,nested_paging_callback,NULL)) { 
244                 PrintError(vm,VCORE_NONE,"Mem track cannot register for nested paging event\n");
245                 unwind=i+1;
246                 goto fail;
247             }
248         }
249         restart(&vm->cores[i]);
250     }
251     
252     return 0;
253
254  fail:
255
256     for (i=0;i<unwind;i++) {
257         if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) { 
258             v3_unregister_shadow_paging_event_callback(vm,shadow_paging_callback,NULL);
259             v3_unregister_passthrough_paging_event_callback(vm,passthrough_paging_callback,NULL);
260         } else if (vm->cores[0].shdw_pg_mode==NESTED_PAGING) { 
261             v3_unregister_nested_paging_event_callback(vm,nested_paging_callback,NULL);
262         }
263     }
264     
265     return -1;
266
267 }
268
269 int v3_mem_track_stop(struct v3_vm_info *vm)
270 {
271     int i;
272
273     PrintDebug(vm,VCORE_NONE,"Memory tracking: stop\n");
274
275     if (!vm->memtrack_state.started) { 
276         PrintError(vm, VCORE_NONE, "Memory tracking was not started!\n");
277         return -1;
278     }
279
280     vm->memtrack_state.started=0;
281
282     for (i=0;i<vm->num_cores;i++) {
283         if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) { 
284             v3_unregister_shadow_paging_event_callback(vm,shadow_paging_callback,NULL);
285             v3_unregister_passthrough_paging_event_callback(vm,passthrough_paging_callback,NULL);
286         } else if (vm->cores[0].shdw_pg_mode==NESTED_PAGING) { 
287             v3_unregister_nested_paging_event_callback(vm,nested_paging_callback,NULL);
288         }
289     }
290     
291     return 0;
292
293 }
294
295 void v3_mem_track_free_snapshot(v3_mem_track_snapshot *s)
296 {
297     int i;
298
299     PrintDebug(VM_NONE,VCORE_NONE,"Memory tracking: free snapshot %p\n",s);
300
301     if (s) {
302         for (i=0;i<s->num_cores;i++) {
303             free_bitmap(s->core[i].access_bitmap);
304         }
305         V3_Free(s);
306     }
307 }
308
309 v3_mem_track_snapshot *v3_mem_track_take_snapshot(struct v3_vm_info *vm)
310 {
311     int i;
312     v3_mem_track_snapshot *s;
313
314     PrintDebug(vm,VCORE_NONE,"Memory tracking: take snapshot\n");
315
316     s = V3_Malloc(sizeof(v3_mem_track_snapshot) + sizeof(struct v3_core_mem_track) * vm->num_cores);
317     
318     if (!s) { 
319         PrintError(vm,VCORE_NONE,"Cannot allocate memory for memory tracking snapshot\n");
320         return NULL;
321     }
322
323     memset(s,0,sizeof(v3_mem_track_snapshot) + sizeof(struct v3_core_mem_track) * vm->num_cores);
324     
325     for (i=0;i<vm->num_cores;i++) {
326         if (!(s->core[i].access_bitmap = alloc_bitmap(vm))) { 
327             PrintError(vm,VCORE_NONE,"Unable to allocate for memory tracking snapshot\n");
328             v3_mem_track_free_snapshot(s);
329             return NULL;
330         }
331     }
332
333     s->access_type=vm->memtrack_state.access_type;
334     s->reset_type=vm->memtrack_state.reset_type;
335     s->period=vm->memtrack_state.period;
336     s->num_cores=vm->num_cores;
337     
338     for (i=0;i<vm->num_cores;i++) { 
339         s->core[i].start_time=vm->cores[i].memtrack_state.start_time;
340         s->core[i].end_time=host_time(); // now - note, should not race...
341         s->core[i].num_pages=vm->cores[i].memtrack_state.num_pages;
342         memcpy(s->core[i].access_bitmap,vm->cores[i].memtrack_state.access_bitmap,CEIL_DIV(vm->cores[i].memtrack_state.num_pages,8));
343         PrintDebug(vm,VCORE_NONE,"memtrack: copied %llu bytes\n",CEIL_DIV(vm->cores[i].memtrack_state.num_pages,8));
344 #ifdef V3_CONFIG_DEBUG_MEM_TRACK
345         uint64_t j, sum;
346         sum=0;
347         for (j=0;j<CEIL_DIV(vm->cores[i].memtrack_state.num_pages,8);j++) {
348             sum+=!!vm->cores[i].memtrack_state.access_bitmap[j];
349         }
350         PrintDebug(vm,VCORE_NONE,"memtrack: have %llu nonzero bytes\n",sum);
351 #endif
352     }
353     
354     return s;
355 }
356
357
358 int v3_mem_track_get_sizes(struct v3_vm_info *vm, uint64_t *num_cores, uint64_t *num_pages)
359 {
360     *num_cores = vm->num_cores;
361     *num_pages = vm->mem_size / PAGE_SIZE_4KB;
362     
363     return 0;
364 }
365     
366
367 // Called only in the core thread context
368 void v3_mem_track_entry(struct guest_info *core)
369 {
370     struct v3_vm_info *vm = core->vm_info;
371     uint64_t ht = host_time();
372
373     if (vm->memtrack_state.started) { 
374         if ((ht - core->memtrack_state.start_time) >= vm->memtrack_state.period) { 
375             // drive periodic if needed
376             PrintDebug(core->vm_info, core, "memtrack: start_time=%llu, period=%llu,  host_time=%llu, diff=%llu\n",
377                        core->memtrack_state.start_time, vm->memtrack_state.period, ht, ht-core->memtrack_state.start_time);
378
379             if (vm->memtrack_state.reset_type==V3_MEM_TRACK_PERIODIC) { 
380                 restart(core);
381             } else {
382                 v3_mem_track_stop(core->vm_info);
383             }
384         }
385     }
386
387 }
388
// Called only in the core thread context
//
// Exit-path hook, paired with v3_mem_track_entry.
void v3_mem_track_exit(struct guest_info *core)
{
    // nothing yet - placeholder for future per-exit tracking work
}