2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2014, The V3VEE Project <http://www.v3vee.org>
11 * All rights reserved.
13 * Author: Peter Dinda <pdinda@northwestern.edu>
15 * This is free software. You are permitted to use,
16 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
19 #include <palacios/vmm.h>
20 #include <palacios/vm_guest.h>
21 #include <palacios/vm_guest_mem.h>
22 #include <palacios/vmm_mem_track.h>
23 #include <palacios/vmm_shadow_paging.h>
24 #include <palacios/vmm_direct_paging.h>
25 #include <palacios/vmm_time.h>
// When mem-track debugging is disabled, compile this file's PrintDebug
// calls away to nothing (PrintError calls remain active).
#ifndef V3_CONFIG_DEBUG_MEM_TRACK
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
33 #define CEIL_DIV(x,y) (((x)/(y)) + !!((x)%(y)))
36 // This should be identical across cores, but this
37 // implementation surely is not
38 static uint64_t host_time()
40 return v3_get_host_time(NULL);
43 static uint8_t *alloc_bitmap(struct v3_vm_info *vm)
47 if (!(b = V3_Malloc(CEIL_DIV(CEIL_DIV(vm->mem_size,PAGE_SIZE_4KB),8)))) {
55 static void free_bitmap(uint8_t *b)
63 int v3_mem_track_deinit(struct v3_vm_info *vm)
67 for (i=0;i<vm->num_cores;i++) {
68 free_bitmap(vm->cores[i].memtrack_state.access_bitmap);
69 memset(&(vm->cores[i].memtrack_state),0,sizeof(struct v3_core_mem_track));
72 PrintDebug(vm,VCORE_NONE,"Memory tracking deinitialized\n");
77 int v3_mem_track_init(struct v3_vm_info *vm)
82 memset(&(vm->memtrack_state),0,sizeof(struct v3_vm_mem_track));
84 for (i=0;i<vm->num_cores;i++) {
85 memset(&(vm->cores[i].memtrack_state),0,sizeof(struct v3_core_mem_track));
86 vm->cores[i].memtrack_state.num_pages=CEIL_DIV(vm->mem_size,PAGE_SIZE_4KB);
87 if (!(vm->cores[i].memtrack_state.access_bitmap = alloc_bitmap(vm))) {
88 PrintError(vm,VCORE_NONE,"Unable to allocate for memory tracking\n");
89 v3_mem_track_deinit(vm);
94 PrintDebug(vm,VCORE_NONE,"Memory tracking initialized\n");
102 // Note use of old-style callbacks here
104 static int shadow_paging_callback(struct guest_info *core,
105 struct v3_shdw_pg_event *event,
110 if (event->event_type==SHADOW_PAGEFAULT &&
111 event->event_order==SHADOW_POSTIMPL) {
115 PrintDebug(core->vm_info,core,"Memory tracking: shadow callback gva=%p\n",(void*)event->gva);
117 if (!v3_gva_to_gpa(core,event->gva,&gpa)) {
118 // note the assumption here that it is for a 4KB page...
119 PrintDebug(core->vm_info,core,"Memory tracking: shadow callback corresponding gpa=%p\n",(void*)gpa);
120 SET_BIT(core->memtrack_state.access_bitmap,gpa/PAGE_SIZE_4KB);
122 // no worries, this isn't physical memory
125 // we don't care about other events
132 static int passthrough_paging_callback(struct guest_info *core,
133 struct v3_passthrough_pg_event *event,
136 uint64_t page_start, page_end, page;
139 if (event->event_type==PASSTHROUGH_PAGEFAULT &&
140 event->event_order==PASSTHROUGH_POSTIMPL) {
142 PrintDebug(core->vm_info,core,"Memory tracking: passthrough callback gpa=%p..%p\n",(void*)event->gpa_start,(void*)event->gpa_end);
144 page_start = event->gpa_start/PAGE_SIZE_4KB;
145 page_end = event->gpa_end/PAGE_SIZE_4KB;
147 for (page=page_start; page<=page_end;page++) {
148 SET_BIT(core->memtrack_state.access_bitmap,page);
151 // we don't care about other events
157 static int nested_paging_callback(struct guest_info *core,
158 struct v3_nested_pg_event *event,
161 uint64_t page_start, page_end, page;
164 if (event->event_type==NESTED_PAGEFAULT &&
165 event->event_order==NESTED_POSTIMPL) {
167 PrintDebug(core->vm_info,core,"Memory tracking: nested callback gpa=%p..%p\n",(void*)event->gpa_start,(void*)event->gpa_end);
169 page_start = event->gpa_start/PAGE_SIZE_4KB;
170 page_end = event->gpa_end/PAGE_SIZE_4KB;
172 for (page=page_start; page<=page_end;page++) {
173 SET_BIT(core->memtrack_state.access_bitmap,page);
176 // we don't care about other events
185 static void restart(struct guest_info *core)
188 core->memtrack_state.start_time=host_time();
190 PrintDebug(core->vm_info,core,"memtrack: restart at %llu\n",core->memtrack_state.start_time);
192 memset(core->memtrack_state.access_bitmap,0,CEIL_DIV(core->memtrack_state.num_pages,8));
194 if (core->shdw_pg_mode==SHADOW_PAGING) {
195 v3_invalidate_shadow_pts(core);
196 v3_invalidate_passthrough_addr_range(core,0,core->vm_info->mem_size,NULL,NULL);
197 } else if (core->shdw_pg_mode==NESTED_PAGING) {
198 v3_invalidate_nested_addr_range(core,0,core->vm_info->mem_size,NULL,NULL);
201 PrintDebug(core->vm_info,core,"memtrack: restart complete at %llu\n",host_time());
204 int v3_mem_track_start(struct v3_vm_info *vm, v3_mem_track_access_t access, v3_mem_track_reset_t reset, uint64_t period)
210 PrintDebug(vm,VCORE_NONE,"Memory tracking: start access=0x%x, reset=0x%x, period=%llu\n",
211 access,reset,period);
213 if (vm->memtrack_state.started) {
214 PrintError(vm,VCORE_NONE,"Memory tracking already started!\n");
218 if (access != V3_MEM_TRACK_ACCESS) {
219 PrintError(vm,VCORE_NONE,"Unsupported access mode\n");
223 vm->memtrack_state.access_type=access;
224 vm->memtrack_state.reset_type=reset;
225 vm->memtrack_state.period=period;
227 vm->memtrack_state.started=1;
229 for (i=0;i<vm->num_cores;i++) {
230 if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) {
231 if (v3_register_shadow_paging_event_callback(vm,shadow_paging_callback,NULL)) {
232 PrintError(vm,VCORE_NONE,"Mem track cannot register for shadow paging event\n");
237 if (v3_register_passthrough_paging_event_callback(vm,passthrough_paging_callback,NULL)) {
238 PrintError(vm,VCORE_NONE,"Mem track cannot register for passthrough paging event\n");
242 } else if (vm->cores[i].shdw_pg_mode==NESTED_PAGING) {
243 if (v3_register_nested_paging_event_callback(vm,nested_paging_callback,NULL)) {
244 PrintError(vm,VCORE_NONE,"Mem track cannot register for nested paging event\n");
249 restart(&vm->cores[i]);
256 for (i=0;i<unwind;i++) {
257 if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) {
258 v3_unregister_shadow_paging_event_callback(vm,shadow_paging_callback,NULL);
259 v3_unregister_passthrough_paging_event_callback(vm,passthrough_paging_callback,NULL);
260 } else if (vm->cores[0].shdw_pg_mode==NESTED_PAGING) {
261 v3_unregister_nested_paging_event_callback(vm,nested_paging_callback,NULL);
269 int v3_mem_track_stop(struct v3_vm_info *vm)
273 PrintDebug(vm,VCORE_NONE,"Memory tracking: stop\n");
275 if (!vm->memtrack_state.started) {
276 PrintError(vm, VCORE_NONE, "Memory tracking was not started!\n");
280 vm->memtrack_state.started=0;
282 for (i=0;i<vm->num_cores;i++) {
283 if (vm->cores[i].shdw_pg_mode==SHADOW_PAGING) {
284 v3_unregister_shadow_paging_event_callback(vm,shadow_paging_callback,NULL);
285 v3_unregister_passthrough_paging_event_callback(vm,passthrough_paging_callback,NULL);
286 } else if (vm->cores[0].shdw_pg_mode==NESTED_PAGING) {
287 v3_unregister_nested_paging_event_callback(vm,nested_paging_callback,NULL);
295 void v3_mem_track_free_snapshot(v3_mem_track_snapshot *s)
299 PrintDebug(VM_NONE,VCORE_NONE,"Memory tracking: free snapshot %p\n",s);
302 for (i=0;i<s->num_cores;i++) {
303 free_bitmap(s->core[i].access_bitmap);
309 v3_mem_track_snapshot *v3_mem_track_take_snapshot(struct v3_vm_info *vm)
312 v3_mem_track_snapshot *s;
314 PrintDebug(vm,VCORE_NONE,"Memory tracking: take snapshot\n");
316 s = V3_Malloc(sizeof(v3_mem_track_snapshot) + sizeof(struct v3_core_mem_track) * vm->num_cores);
319 PrintError(vm,VCORE_NONE,"Cannot allocate memory for memory tracking snapshot\n");
323 memset(s,0,sizeof(v3_mem_track_snapshot) + sizeof(struct v3_core_mem_track) * vm->num_cores);
325 for (i=0;i<vm->num_cores;i++) {
326 if (!(s->core[i].access_bitmap = alloc_bitmap(vm))) {
327 PrintError(vm,VCORE_NONE,"Unable to allocate for memory tracking snapshot\n");
328 v3_mem_track_free_snapshot(s);
333 s->access_type=vm->memtrack_state.access_type;
334 s->reset_type=vm->memtrack_state.reset_type;
335 s->period=vm->memtrack_state.period;
336 s->num_cores=vm->num_cores;
338 for (i=0;i<vm->num_cores;i++) {
339 s->core[i].start_time=vm->cores[i].memtrack_state.start_time;
340 s->core[i].end_time=host_time(); // now - note, should not race...
341 s->core[i].num_pages=vm->cores[i].memtrack_state.num_pages;
342 memcpy(s->core[i].access_bitmap,vm->cores[i].memtrack_state.access_bitmap,CEIL_DIV(vm->cores[i].memtrack_state.num_pages,8));
343 PrintDebug(vm,VCORE_NONE,"memtrack: copied %llu bytes\n",CEIL_DIV(vm->cores[i].memtrack_state.num_pages,8));
344 #ifdef V3_CONFIG_DEBUG_MEM_TRACK
347 for (j=0;j<CEIL_DIV(vm->cores[i].memtrack_state.num_pages,8);j++) {
348 sum+=!!vm->cores[i].memtrack_state.access_bitmap[j];
350 PrintDebug(vm,VCORE_NONE,"memtrack: have %llu nonzero bytes\n",sum);
358 int v3_mem_track_get_sizes(struct v3_vm_info *vm, uint64_t *num_cores, uint64_t *num_pages)
360 *num_cores = vm->num_cores;
361 *num_pages = vm->mem_size / PAGE_SIZE_4KB;
367 // Called only in the core thread context
368 void v3_mem_track_entry(struct guest_info *core)
370 struct v3_vm_info *vm = core->vm_info;
371 uint64_t ht = host_time();
373 if (vm->memtrack_state.started) {
374 if ((ht - core->memtrack_state.start_time) >= vm->memtrack_state.period) {
375 // drive periodic if needed
376 PrintDebug(core->vm_info, core, "memtrack: start_time=%llu, period=%llu, host_time=%llu, diff=%llu\n",
377 core->memtrack_state.start_time, vm->memtrack_state.period, ht, ht-core->memtrack_state.start_time);
379 if (vm->memtrack_state.reset_type==V3_MEM_TRACK_PERIODIC) {
382 v3_mem_track_stop(core->vm_info);
389 // Called only in the core thread context
390 void v3_mem_track_exit(struct guest_info *core)