From: Peter Dinda
Date: Fri, 27 Jun 2014 00:52:56 +0000 (-0500)
Subject: Add memory tracking functionality to Palacios
X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?p=palacios.git;a=commitdiff_plain;h=ce0f119828348c3c57a00c4aa268a8a223ccd7f8

Add memory tracking functionality to Palacios

This creates an abstraction for tracking memory references at the
granularity of pages, regardless of the paging mechanism being used.
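For context, a host-side consumer of the new interface would drive it roughly as
follows. This is a minimal sketch, not part of the commit; the caller context
(for example a host OS request handler) and error handling are assumed, and it
uses only the functions and types declared in palacios/vmm_mem_track.h below.

    #include <palacios/vmm_mem_track.h>

    /* Hypothetical caller (illustration only, not in this commit) */
    static void sample_guest_memory(struct v3_vm_info *vm)
    {
        v3_mem_track_snapshot *snap;
        uint64_t num_cores, num_pages;

        /* track all access types, resetting the per-core bitmaps every
           ~1 billion cycles (example period) */
        if (v3_mem_track_start(vm, V3_MEM_TRACK_ACCESS,
                               V3_MEM_TRACK_PERIODIC, 1000000000ULL)) {
            return;
        }

        /* ... let the guest run for a while ... */

        /* learn how large the snapshot will be, then capture it */
        v3_mem_track_get_sizes(vm, &num_cores, &num_pages);

        if ((snap = v3_mem_track_take_snapshot(vm))) {
            /* inspect snap->core[i].access_bitmap here (1 bit per 4K page) */
            v3_mem_track_free_snapshot(snap);
        }

        v3_mem_track_stop(vm);
    }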
---

diff --git a/Kconfig b/Kconfig
index 1813178..853c1e7 100644
--- a/Kconfig
+++ b/Kconfig
@@ -338,6 +338,21 @@ config DEBUG_SWAPPING
 	depends on SWAPPING
 	help
 	  Provides debugging output from the swapping system
+
+config MEM_TRACK
+	bool "Enable memory access tracking"
+	default n
+	depends on SHADOW_PAGING || NESTED_PAGING
+	help
+	  Allows tracking of memory accesses at a page granularity
+
+config DEBUG_MEM_TRACK
+	bool "Enable memory access tracking debugging"
+	default n
+	depends on MEM_TRACK
+	help
+	  Provides debugging output for memory access tracking
+
 endmenu

 menu "Symbiotic Functions"
diff --git a/palacios/include/palacios/vm_guest.h b/palacios/include/palacios/vm_guest.h
index bc12edc..a4fca43 100644
--- a/palacios/include/palacios/vm_guest.h
+++ b/palacios/include/palacios/vm_guest.h
@@ -65,6 +65,10 @@ struct v3_sym_core_state;
 #endif

+#ifdef V3_CONFIG_MEM_TRACK
+#include <palacios/vmm_mem_track.h>
+#endif
+
 #include

@@ -147,6 +151,9 @@ struct guest_info {
     struct v3_core_pwrstat_telemetry pwrstat_telem;
 #endif

+#ifdef V3_CONFIG_MEM_TRACK
+    struct v3_core_mem_track memtrack_state;
+#endif
     /* struct v3_core_dev_mgr core_dev_mgr; */

     void * decoder_state;
@@ -240,6 +247,10 @@ struct v3_vm_info {
     struct v3_telemetry_state telemetry;
 #endif

+#ifdef V3_CONFIG_MEM_TRACK
+    struct v3_vm_mem_track memtrack_state;
+#endif
+
     uint64_t yield_cycle_period;

diff --git a/palacios/include/palacios/vmm_direct_paging.h b/palacios/include/palacios/vmm_direct_paging.h
index 925a299..ee41816 100644
--- a/palacios/include/palacios/vmm_direct_paging.h
+++ b/palacios/include/palacios/vmm_direct_paging.h
@@ -91,7 +91,7 @@ int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
 int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                                     int (*callback)(struct guest_info *core,
-                                                                    struct v3_passthrough_pg_event,
+                                                                    struct v3_passthrough_pg_event *,
                                                                     void *priv_data),
                                                     void *priv_data);

@@ -155,7 +155,7 @@ int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
 int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
                                                int (*callback)(struct guest_info *core,
-                                                               struct v3_nested_pg_event,
+                                                               struct v3_nested_pg_event *,
                                                                void *priv_data),
                                                void *priv_data);

diff --git a/palacios/include/palacios/vmm_mem_track.h b/palacios/include/palacios/vmm_mem_track.h
new file mode 100644
index 0000000..5b0af70
--- /dev/null
+++ b/palacios/include/palacios/vmm_mem_track.h
@@ -0,0 +1,91 @@
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2014, The V3VEE Project
+ * All rights reserved.
+ *
+ * Author: Peter Dinda
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#ifndef __VMM_MEM_TRACK_H__
+#define __VMM_MEM_TRACK_H__
+
+struct guest_info;
+struct v3_vm_info;
+
+
+// Currently, only V3_MEM_TRACK_ACCESS is supported.
+#define V3_MEM_TRACK_NONE   0
+#define V3_MEM_TRACK_READ   1
+#define V3_MEM_TRACK_WRITE  2
+#define V3_MEM_TRACK_EXEC   4
+#define V3_MEM_TRACK_ACCESS (V3_MEM_TRACK_READ | V3_MEM_TRACK_WRITE | V3_MEM_TRACK_EXEC)
+
+typedef uint32_t v3_mem_track_access_t;
+typedef enum { V3_MEM_TRACK_ONESHOT, V3_MEM_TRACK_PERIODIC } v3_mem_track_reset_t;
+
+
+// each VM contains this
+struct v3_vm_mem_track {
+    int                   started;
+    v3_mem_track_access_t access_type;
+    v3_mem_track_reset_t  reset_type;
+
+    uint64_t              period;  // reset period, or the interval for oneshot (in cycles; 0 = continuous)
+};
+
+// each core contains this
+struct v3_core_mem_track {
+    uint64_t start_time;   // cycle count when we started
+    uint64_t end_time;     // ... and when we ended
+
+    uint64_t num_pages;    // guest physical address space size in 4K pages (= size of the bitmap in bits)
+
+#define SET_BIT(x,pos) do { (x)[(pos)/8] |= 1 << ((pos)%8); } while (0)
+#define CLR_BIT(x,pos) do { (x)[(pos)/8] &= ~(1 << ((pos)%8)); } while (0)
+#define GET_BIT(x,pos) (((x)[(pos)/8] >> ((pos)%8)) & 0x1)
+
+    uint8_t *access_bitmap; // the only bitmap so far (1 bit per page)
+};
+
+
+// self-contained info
+typedef struct mem_track_snapshot {
+    v3_mem_track_access_t access_type;
+    v3_mem_track_reset_t  reset_type;
+    uint64_t              period;
+
+    uint32_t                 num_cores;  // how many cores
+    struct v3_core_mem_track core[0];    // one per core
+} v3_mem_track_snapshot;
+
+
+
+int v3_mem_track_init(struct v3_vm_info *vm);
+int v3_mem_track_deinit(struct v3_vm_info *vm);
+
+int v3_mem_track_start(struct v3_vm_info *vm, v3_mem_track_access_t access, v3_mem_track_reset_t reset, uint64_t period);
+int v3_mem_track_stop(struct v3_vm_info *vm);
+
+int v3_mem_track_get_sizes(struct v3_vm_info *vm, uint64_t *num_cores, uint64_t *num_pages);
+
+
+v3_mem_track_snapshot *v3_mem_track_take_snapshot(struct v3_vm_info *vm);
+void v3_mem_track_free_snapshot(v3_mem_track_snapshot *snapshot);
+
+// call with interrupts on...
+void v3_mem_track_entry(struct guest_info *core);
+void v3_mem_track_exit(struct guest_info *core);
+
+
+
+#endif
diff --git a/palacios/src/palacios/Makefile b/palacios/src/palacios/Makefile
index 2e2571a..dee8c0d 100644
--- a/palacios/src/palacios/Makefile
+++ b/palacios/src/palacios/Makefile
@@ -91,4 +91,6 @@ obj-$(V3_CONFIG_SYMCALL) += vmm_symcall.o
 obj-$(V3_CONFIG_SYMMOD) += vmm_symmod.o

+obj-$(V3_CONFIG_MEM_TRACK) += vmm_mem_track.o
+
 obj-y += mmu/
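The per-core bitmap declared in vmm_mem_track.h above uses one bit per 4KB
guest physical page, so its size and indexing follow directly from the guest's
memory size. The arithmetic is illustrated below with hypothetical numbers
(PAGE_SIZE_4KB and CEIL_DIV are redefined locally only so the sketch is
self-contained; in Palacios they come from its own headers):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE_4KB 4096ULL
    #define CEIL_DIV(x,y) (((x)/(y)) + !!((x)%(y)))

    int main(void)
    {
        uint64_t mem_size     = 1536ULL * 1024 * 1024;               /* example: a 1.5 GB guest */
        uint64_t num_pages    = CEIL_DIV(mem_size, PAGE_SIZE_4KB);   /* bits in the bitmap */
        uint64_t bitmap_bytes = CEIL_DIV(num_pages, 8);              /* what alloc_bitmap() allocates */

        uint64_t gpa  = 0x12345678ULL;           /* a touched guest physical address */
        uint64_t page = gpa / PAGE_SIZE_4KB;     /* bit index used with SET_BIT/GET_BIT */

        printf("pages=%llu, bitmap=%llu bytes; gpa %#llx -> bit %llu (byte %llu, bit %llu)\n",
               (unsigned long long)num_pages, (unsigned long long)bitmap_bytes,
               (unsigned long long)gpa, (unsigned long long)page,
               (unsigned long long)(page / 8), (unsigned long long)(page % 8));
        return 0;
    }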
diff --git a/palacios/src/palacios/svm.c b/palacios/src/palacios/svm.c
index d1125ee..ed0cfb9 100644
--- a/palacios/src/palacios/svm.c
+++ b/palacios/src/palacios/svm.c
@@ -53,6 +53,10 @@
 #include

+#ifdef V3_CONFIG_MEM_TRACK
+#include <palacios/vmm_mem_track.h>
+#endif
+
 #ifdef V3_CONFIG_TM_FUNC
 #include
 #endif
@@ -633,12 +637,17 @@ int v3_svm_enter(struct guest_info * info) {
     // Conditionally yield the CPU if the timeslice has expired
     v3_schedule(info);

+#ifdef V3_CONFIG_MEM_TRACK
+    v3_mem_track_entry(info);
+#endif
+
     // Update timer devices after being in the VM before doing
     // IRQ updates, so that any interrupts they raise get seen
     // immediately.
     v3_advance_time(info, NULL);
     v3_update_timers(info);

+
     // disable global interrupts for vm state transition
     v3_clgi();

@@ -799,6 +808,7 @@ int v3_svm_enter(struct guest_info * info) {
     // reenable global interrupts after vm exit
     v3_stgi();

+
     // Conditionally yield the CPU if the timeslice has expired
     v3_schedule(info);

@@ -808,6 +818,7 @@ int v3_svm_enter(struct guest_info * info) {
     v3_advance_time(info, NULL);
     v3_update_timers(info);

+
     {
 	int ret = v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2);

@@ -823,6 +834,10 @@ int v3_svm_enter(struct guest_info * info) {
 	v3_handle_timeouts(info, guest_cycles);
     }

+#ifdef V3_CONFIG_MEM_TRACK
+    v3_mem_track_exit(info);
+#endif
+
     return 0;
 }
diff --git a/palacios/src/palacios/vm_guest.c b/palacios/src/palacios/vm_guest.c
index 9f7afe8..569fa0b 100644
--- a/palacios/src/palacios/vm_guest.c
+++ b/palacios/src/palacios/vm_guest.c
@@ -33,6 +33,9 @@
 #include
 #include

+#ifdef V3_CONFIG_MEM_TRACK
+#include <palacios/vmm_mem_track.h>
+#endif

 v3_cpu_mode_t v3_get_vm_cpu_mode(struct guest_info * info) {
     struct cr0_32 * cr0;
@@ -260,6 +263,10 @@ int v3_init_vm(struct v3_vm_info * vm) {
 	return -1;
     }

+#ifdef V3_CONFIG_MEM_TRACK
+    v3_mem_track_init(vm);
+#endif
+
     v3_init_time_vm(vm);

     v3_init_vm_debugging(vm);

@@ -371,6 +378,10 @@ int v3_free_vm_internal(struct v3_vm_info * vm) {

     v3_deinit_events(vm);

+#ifdef V3_CONFIG_MEM_TRACK
+    v3_mem_track_deinit(vm);
+#endif
+
     v3_fw_cfg_deinit(vm);
diff --git a/palacios/src/palacios/vmm_direct_paging.c b/palacios/src/palacios/vmm_direct_paging.c
index a736571..e2fad69 100644
--- a/palacios/src/palacios/vmm_direct_paging.c
+++ b/palacios/src/palacios/vmm_direct_paging.c
@@ -401,6 +401,55 @@ int v3_deinit_passthrough_paging_core(struct guest_info *core)
 }


+int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
+                                                  int (*callback)(struct guest_info *core,
+                                                                  struct v3_passthrough_pg_event *,
+                                                                  void *priv_data),
+                                                  void *priv_data)
+{
+    struct passthrough_event_callback *ec = V3_Malloc(sizeof(struct passthrough_event_callback));
+
+    if (!ec) {
+	PrintError(vm, VCORE_NONE, "Unable to allocate for a passthrough paging event callback\n");
+	return -1;
+    }
+
+    ec->callback = callback;
+    ec->priv_data = priv_data;
+
+    list_add(&(ec->node), &(vm->passthrough_impl.event_callback_list));
+
+    return 0;
+
+}
+
+
+
+int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
+                                                    int (*callback)(struct guest_info *core,
+                                                                    struct v3_passthrough_pg_event *,
+                                                                    void *priv_data),
+                                                    void *priv_data)
+{
+    struct passthrough_event_callback *cb, *temp;
+
+    list_for_each_entry_safe(cb,
+			     temp,
+			     &(vm->passthrough_impl.event_callback_list),
+			     node) {
+	if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
+	    list_del(&(cb->node));
+	    V3_Free(cb);
+	    return 0;
+	}
+    }
+
+    PrintError(vm, VCORE_NONE, "No callback found!\n");
+
+    return -1;
+}
+
+
 // inline nested paging support for Intel and AMD
 #include "svm_npt.h"
 #include "vmx_npt.h"

@@ -550,3 +599,52 @@ int v3_deinit_nested_paging_core(struct guest_info *core)

     return 0;
 }
+
+
+int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
+                                             int (*callback)(struct guest_info *core,
+                                                             struct v3_nested_pg_event *,
+                                                             void *priv_data),
+                                             void *priv_data)
+{
+    struct nested_event_callback *ec = V3_Malloc(sizeof(struct nested_event_callback));
+
+    if (!ec) {
+	PrintError(vm, VCORE_NONE, "Unable to allocate for a nested paging event callback\n");
+	return -1;
+    }
+
+    ec->callback = callback;
+    ec->priv_data = priv_data;
+
+    list_add(&(ec->node), &(vm->nested_impl.event_callback_list));
+
+    return 0;
+
+}
+
+
+
+int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
+                                               int (*callback)(struct guest_info *core,
+                                                               struct v3_nested_pg_event *,
+                                                               void *priv_data),
+                                               void *priv_data)
+{
+    struct nested_event_callback *cb, *temp;
+
+    list_for_each_entry_safe(cb,
+			     temp,
+			     &(vm->nested_impl.event_callback_list),
+			     node) {
+	if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
+	    list_del(&(cb->node));
+	    V3_Free(cb);
+	    return 0;
+	}
+    }
+
+    PrintError(vm, VCORE_NONE, "No callback found!\n");
+
+    return -1;
+}
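The tracking code in vmm_mem_track.c (below) consumes these registration
functions; an independent consumer would follow the same pattern. A minimal
sketch, with a hypothetical callback and priv_data that are not part of this
commit, assuming nested paging and the signatures declared in
vmm_direct_paging.h:

    #include <palacios/vmm_direct_paging.h>

    /* Hypothetical callback: count post-implementation nested page faults */
    static int count_faults(struct guest_info *core,
                            struct v3_nested_pg_event *event,
                            void *priv_data)
    {
        uint64_t *count = (uint64_t *)priv_data;

        if (event->event_type == NESTED_PAGEFAULT &&
            event->event_order == NESTED_POSTIMPL) {
            (*count)++;
        }

        return 0;   /* 0 on success, matching the callbacks in vmm_mem_track.c */
    }

    static uint64_t fault_count = 0;

    static int attach(struct v3_vm_info *vm)
    {
        /* the (callback, priv_data) pair identifies the registration,
           so the same pair must be passed to unregister later */
        return v3_register_nested_paging_event_callback(vm, count_faults, &fault_count);
    }

    static int detach(struct v3_vm_info *vm)
    {
        return v3_unregister_nested_paging_event_callback(vm, count_faults, &fault_count);
    }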
diff --git a/palacios/src/palacios/vmm_mem_track.c b/palacios/src/palacios/vmm_mem_track.c
new file mode 100644
index 0000000..864f74d
--- /dev/null
+++ b/palacios/src/palacios/vmm_mem_track.c
@@ -0,0 +1,391 @@
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2014, The V3VEE Project
+ * All rights reserved.
+ *
+ * Author: Peter Dinda
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+#ifndef V3_CONFIG_DEBUG_MEM_TRACK
+#undef PrintDebug
+#define PrintDebug(fmt, args...)
+#endif
+
+#define CEIL_DIV(x,y) (((x)/(y)) + !!((x)%(y)))
+
+
+// This should be identical across cores, but this
+// implementation surely is not
+static uint64_t host_time(void)
+{
+    return v3_get_host_time(NULL);
+}
+
+static uint8_t *alloc_bitmap(struct v3_vm_info *vm)
+{
+    uint8_t *b;
+
+    if (!(b = V3_Malloc(CEIL_DIV(CEIL_DIV(vm->mem_size, PAGE_SIZE_4KB), 8)))) {
+	return NULL;
+    }
+
+    return b;
+}
+
+
+static void free_bitmap(uint8_t *b)
+{
+    if (b) {
+	V3_Free(b);
+    }
+
+}
+
+
+int v3_mem_track_deinit(struct v3_vm_info *vm)
+{
+    int i;
+
+    for (i = 0; i < vm->num_cores; i++) {
+	free_bitmap(vm->cores[i].memtrack_state.access_bitmap);
+	memset(&(vm->cores[i].memtrack_state), 0, sizeof(struct v3_core_mem_track));
+    }
+
+    PrintDebug(vm, VCORE_NONE, "Memory tracking deinitialized\n");
+
+    return 0;
+}
+
+int v3_mem_track_init(struct v3_vm_info *vm)
+{
+    int i;
+
+
+    memset(&(vm->memtrack_state), 0, sizeof(struct v3_vm_mem_track));
+
+    for (i = 0; i < vm->num_cores; i++) {
+	memset(&(vm->cores[i].memtrack_state), 0, sizeof(struct v3_core_mem_track));
+	vm->cores[i].memtrack_state.num_pages = CEIL_DIV(vm->mem_size, PAGE_SIZE_4KB);
+	if (!(vm->cores[i].memtrack_state.access_bitmap = alloc_bitmap(vm))) {
+	    PrintError(vm, VCORE_NONE, "Unable to allocate for memory tracking\n");
+	    v3_mem_track_deinit(vm);
+	    return -1;
+	}
+    }
+
+    PrintDebug(vm, VCORE_NONE, "Memory tracking initialized\n");
+
+    return 0;
+}
+
+
+
+//
+// Note use of old-style callbacks here
+//
+static int shadow_paging_callback(struct guest_info *core,
+				  struct v3_shdw_pg_event *event,
+				  void *priv_data)
+{
+
+
+    if (event->event_type == SHADOW_PAGEFAULT &&
+	event->event_order == SHADOW_POSTIMPL) {
+
+	addr_t gpa;
+
+	PrintDebug(core->vm_info, core, "Memory tracking: shadow callback gva=%p\n", (void*)event->gva);
+
+	if (!v3_gva_to_gpa(core, event->gva, &gpa)) {
+	    // note the assumption here that it is for a 4KB page...
+	    PrintDebug(core->vm_info, core, "Memory tracking: shadow callback corresponding gpa=%p\n", (void*)gpa);
+	    SET_BIT(core->memtrack_state.access_bitmap, gpa / PAGE_SIZE_4KB);
+	} else {
+	    // no worries, this isn't physical memory
+	}
+    } else {
+	// we don't care about other events
+    }
+
+    return 0;
+}
+
+
+static int passthrough_paging_callback(struct guest_info *core,
+				       struct v3_passthrough_pg_event *event,
+				       void *priv_data)
+{
+    uint64_t page_start, page_end, page;
+
+
+    if (event->event_type == PASSTHROUGH_PAGEFAULT &&
+	event->event_order == PASSTHROUGH_POSTIMPL) {
+
+	PrintDebug(core->vm_info, core, "Memory tracking: passthrough callback gpa=%p..%p\n", (void*)event->gpa_start, (void*)event->gpa_end);
+
+	page_start = event->gpa_start / PAGE_SIZE_4KB;
+	page_end = event->gpa_end / PAGE_SIZE_4KB;
+
+	for (page = page_start; page <= page_end; page++) {
+	    SET_BIT(core->memtrack_state.access_bitmap, page);
+	}
+    } else {
+	// we don't care about other events
+    }
+
+    return 0;
+}
+
+static int nested_paging_callback(struct guest_info *core,
+				  struct v3_nested_pg_event *event,
+				  void *priv_data)
+{
+    uint64_t page_start, page_end, page;
+
+
+    if (event->event_type == NESTED_PAGEFAULT &&
+	event->event_order == NESTED_POSTIMPL) {
+
+	PrintDebug(core->vm_info, core, "Memory tracking: nested callback gpa=%p..%p\n", (void*)event->gpa_start, (void*)event->gpa_end);
+
+	page_start = event->gpa_start / PAGE_SIZE_4KB;
+	page_end = event->gpa_end / PAGE_SIZE_4KB;
+
+	for (page = page_start; page <= page_end; page++) {
+	    SET_BIT(core->memtrack_state.access_bitmap, page);
+	}
+    } else {
+	// we don't care about other events
+    }
+
+    return 0;
+}
+
+
+
+
+static void restart(struct guest_info *core)
+{
+
+    core->memtrack_state.start_time = host_time();
+
+    PrintDebug(core->vm_info, core, "memtrack: restart at %llu\n", core->memtrack_state.start_time);
+
+    memset(core->memtrack_state.access_bitmap, 0, CEIL_DIV(core->memtrack_state.num_pages, 8));
+
+    if (core->shdw_pg_mode == SHADOW_PAGING) {
+	v3_invalidate_shadow_pts(core);
+	v3_invalidate_passthrough_addr_range(core, 0, core->vm_info->mem_size, NULL, NULL);
+    } else if (core->shdw_pg_mode == NESTED_PAGING) {
+	v3_invalidate_nested_addr_range(core, 0, core->vm_info->mem_size, NULL, NULL);
+    }
+
+    PrintDebug(core->vm_info, core, "memtrack: restart complete at %llu\n", host_time());
+}
+
+int v3_mem_track_start(struct v3_vm_info *vm, v3_mem_track_access_t access, v3_mem_track_reset_t reset, uint64_t period)
+{
+    int i;
+    int unwind = 0;
+
+
+    PrintDebug(vm, VCORE_NONE, "Memory tracking: start access=0x%x, reset=0x%x, period=%llu\n",
+	       access, reset, period);
+
+    if (vm->memtrack_state.started) {
+	PrintError(vm, VCORE_NONE, "Memory tracking already started!\n");
+	return -1;
+    }
+
+    if (access != V3_MEM_TRACK_ACCESS) {
+	PrintError(vm, VCORE_NONE, "Unsupported access mode\n");
+	return -1;
+    }
+
+    vm->memtrack_state.access_type = access;
+    vm->memtrack_state.reset_type = reset;
+    vm->memtrack_state.period = period;
+
+    vm->memtrack_state.started = 1;
+
+    for (i = 0; i < vm->num_cores; i++) {
+	if (vm->cores[i].shdw_pg_mode == SHADOW_PAGING) {
+	    if (v3_register_shadow_paging_event_callback(vm, shadow_paging_callback, NULL)) {
+		PrintError(vm, VCORE_NONE, "Mem track cannot register for shadow paging event\n");
+		unwind = i + 1;
+		goto fail;
+	    }
+
+	    if (v3_register_passthrough_paging_event_callback(vm, passthrough_paging_callback, NULL)) {
+		PrintError(vm, VCORE_NONE, "Mem track cannot register for passthrough paging event\n");
+		unwind = i + 1;
+		goto fail;
+	    }
+	} else if (vm->cores[i].shdw_pg_mode == NESTED_PAGING) {
+	    if (v3_register_nested_paging_event_callback(vm, nested_paging_callback, NULL)) {
+		PrintError(vm, VCORE_NONE, "Mem track cannot register for nested paging event\n");
+		unwind = i + 1;
+		goto fail;
+	    }
+	}
+	restart(&vm->cores[i]);
+    }
+
+    return 0;
+
+ fail:
+
+    for (i = 0; i < unwind; i++) {
+	if (vm->cores[i].shdw_pg_mode == SHADOW_PAGING) {
+	    v3_unregister_shadow_paging_event_callback(vm, shadow_paging_callback, NULL);
+	    v3_unregister_passthrough_paging_event_callback(vm, passthrough_paging_callback, NULL);
+	} else if (vm->cores[i].shdw_pg_mode == NESTED_PAGING) {
+	    v3_unregister_nested_paging_event_callback(vm, nested_paging_callback, NULL);
+	}
+    }
+
+    return -1;
+
+}
+
+int v3_mem_track_stop(struct v3_vm_info *vm)
+{
+    int i;
+
+    PrintDebug(vm, VCORE_NONE, "Memory tracking: stop\n");
+
+    if (!vm->memtrack_state.started) {
+	PrintError(vm, VCORE_NONE, "Memory tracking was not started!\n");
+	return -1;
+    }
+
+    vm->memtrack_state.started = 0;
+
+    for (i = 0; i < vm->num_cores; i++) {
+	if (vm->cores[i].shdw_pg_mode == SHADOW_PAGING) {
+	    v3_unregister_shadow_paging_event_callback(vm, shadow_paging_callback, NULL);
+	    v3_unregister_passthrough_paging_event_callback(vm, passthrough_paging_callback, NULL);
+	} else if (vm->cores[i].shdw_pg_mode == NESTED_PAGING) {
+	    v3_unregister_nested_paging_event_callback(vm, nested_paging_callback, NULL);
+	}
+    }
+
+    return 0;
+
+}
+
+void v3_mem_track_free_snapshot(v3_mem_track_snapshot *s)
+{
+    int i;
+
+    PrintDebug(VM_NONE, VCORE_NONE, "Memory tracking: free snapshot %p\n", s);
+
+    if (s) {
+	for (i = 0; i < s->num_cores; i++) {
+	    free_bitmap(s->core[i].access_bitmap);
+	}
+	V3_Free(s);
+    }
+}
+
+v3_mem_track_snapshot *v3_mem_track_take_snapshot(struct v3_vm_info *vm)
+{
+    int i;
+    v3_mem_track_snapshot *s;
+
+    PrintDebug(vm, VCORE_NONE, "Memory tracking: take snapshot\n");
+
+    s = V3_Malloc(sizeof(v3_mem_track_snapshot) + sizeof(struct v3_core_mem_track) * vm->num_cores);
+
+    if (!s) {
+	PrintError(vm, VCORE_NONE, "Cannot allocate memory for memory tracking snapshot\n");
+	return NULL;
+    }
+
+    memset(s, 0, sizeof(v3_mem_track_snapshot) + sizeof(struct v3_core_mem_track) * vm->num_cores);
+
+    for (i = 0; i < vm->num_cores; i++) {
+	if (!(s->core[i].access_bitmap = alloc_bitmap(vm))) {
+	    PrintError(vm, VCORE_NONE, "Unable to allocate for memory tracking snapshot\n");
+	    v3_mem_track_free_snapshot(s);
+	    return NULL;
+	}
+    }
+
+    s->access_type = vm->memtrack_state.access_type;
+    s->reset_type = vm->memtrack_state.reset_type;
+    s->period = vm->memtrack_state.period;
+    s->num_cores = vm->num_cores;
+
+    for (i = 0; i < vm->num_cores; i++) {
+	s->core[i].start_time = vm->cores[i].memtrack_state.start_time;
+	s->core[i].end_time = host_time();  // now - note, should not race...
+	s->core[i].num_pages = vm->cores[i].memtrack_state.num_pages;
+	memcpy(s->core[i].access_bitmap, vm->cores[i].memtrack_state.access_bitmap, CEIL_DIV(vm->cores[i].memtrack_state.num_pages, 8));
+	PrintDebug(vm, VCORE_NONE, "memtrack: copied %llu bytes\n", CEIL_DIV(vm->cores[i].memtrack_state.num_pages, 8));
+	uint64_t j, sum;
+	sum = 0;
+	for (j = 0; j < CEIL_DIV(vm->cores[i].memtrack_state.num_pages, 8); j++) {
+	    sum += !!vm->cores[i].memtrack_state.access_bitmap[j];
+	}
+	PrintDebug(vm, VCORE_NONE, "memtrack: have %llu nonzero bytes\n", sum);
+    }
+
+    return s;
+}
+
+
+int v3_mem_track_get_sizes(struct v3_vm_info *vm, uint64_t *num_cores, uint64_t *num_pages)
+{
+    *num_cores = vm->num_cores;
+    *num_pages = vm->mem_size / PAGE_SIZE_4KB;
+
+    return 0;
+}
+
+
+// Called only in the core thread context
+void v3_mem_track_entry(struct guest_info *core)
+{
+    struct v3_vm_info *vm = core->vm_info;
+    uint64_t ht = host_time();
+
+    if (vm->memtrack_state.started) {
+	if ((ht - core->memtrack_state.start_time) >= vm->memtrack_state.period) {
+	    // drive periodic reset if needed
+	    PrintDebug(core->vm_info, core, "memtrack: start_time=%llu, period=%llu, host_time=%llu, diff=%llu\n",
+		       core->memtrack_state.start_time, vm->memtrack_state.period, ht, ht - core->memtrack_state.start_time);
+
+	    if (vm->memtrack_state.reset_type == V3_MEM_TRACK_PERIODIC) {
+		restart(core);
+	    } else {
+		v3_mem_track_stop(core->vm_info);
+	    }
+	}
+    }
+
+}
+
+// Called only in the core thread context
+void v3_mem_track_exit(struct guest_info *core)
+{
+    // nothing yet
+}
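To make the snapshot layout concrete, the sketch below walks a per-core bitmap
and maps set bits back to guest physical page ranges. It is a hypothetical
consumer (not part of this commit) and relies only on GET_BIT and the fields
declared in vmm_mem_track.h; the integer types are assumed to come in through
the Palacios headers.

    #include <palacios/vmm_mem_track.h>

    /* Hypothetical helper: estimate the working set of one core, in 4K pages.
       Bit N of the bitmap covers the guest physical range [N*4KB, (N+1)*4KB),
       accumulated between snap->core[which].start_time and end_time. */
    static uint64_t count_touched_pages(v3_mem_track_snapshot *snap, uint32_t which)
    {
        uint64_t page, count = 0;

        for (page = 0; page < snap->core[which].num_pages; page++) {
            if (GET_BIT(snap->core[which].access_bitmap, page)) {
                count++;
            }
        }

        return count;
    }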
diff --git a/palacios/src/palacios/vmx.c b/palacios/src/palacios/vmx.c
index 7665fd3..741a636 100644
--- a/palacios/src/palacios/vmx.c
+++ b/palacios/src/palacios/vmx.c
@@ -44,6 +44,10 @@
 #include
 #include

+#ifdef V3_CONFIG_MEM_TRACK
+#include <palacios/vmm_mem_track.h>
+#endif
+
 #ifndef V3_CONFIG_DEBUG_VMX
 #undef PrintDebug
 #define PrintDebug(fmt, args...)
@@ -978,6 +982,10 @@ int v3_vmx_enter(struct guest_info * info) {
     // Conditionally yield the CPU if the timeslice has expired
     v3_schedule(info);

+#ifdef V3_CONFIG_MEM_TRACK
+    v3_mem_track_entry(info);
+#endif
+
     // Update timer devices late after being in the VM so that as much
     // of the time in the VM is accounted for as possible. Also do it before
     // updating IRQ entry state so that any interrupts the timers raise get
@@ -1178,6 +1186,10 @@ int v3_vmx_enter(struct guest_info * info) {
 	v3_handle_timeouts(info, guest_cycles);
     }

+#ifdef V3_CONFIG_MEM_TRACK
+    v3_mem_track_exit(info);
+#endif
+
     return 0;
 }