7 #include "v3_guest_mem.h"
11 #define GUEST_FILE "/proc/v3vee/v3-guests-details"
12 //#define GUEST_FILE "/v-test/numa/palacios-devel/test.proc"
// Parse the Palacios proc file (GUEST_FILE) to build a host-side map of the
// guest's physical memory regions for the VM device named by vmdev.
// Returns a heap-allocated v3_guest_mem_map (header plus trailing block
// array); caller owns it.  NOTE(review): this view of the file is elided —
// verify fclose/free paths and the return statements in the full source.
15 struct v3_guest_mem_map * v3_guest_mem_get_map(char *vmdev)
22 uint64_t start, end, num;
27 if (!(f=fopen(GUEST_FILE,"r"))) {
28 fprintf(stderr,"Cannot open %s - is Palacios active?\n",GUEST_FILE);
// Scan line-by-line until a "Device: <name>" line matches vmdev.
33 if (!fgets(buf,MAXLINE,f)) {
34 fprintf(stderr,"Could not find info for %s\n",vmdev);
37 if (sscanf(buf,"Device: %s",dev)==1) {
38 if (!strcmp(dev,vmdev)) {
45 // Now we need the number of regions
47 if (!fgets(buf,MAXLINE,f)) {
48 fprintf(stderr,"Could not find number of regions for %s\n",vmdev);
51 if (sscanf(buf,"Regions: %llu",&num_regions)==1) {
// One allocation holds the header and all num_regions block entries.
56 struct v3_guest_mem_map *m =
57 (struct v3_guest_mem_map *) malloc(sizeof(struct v3_guest_mem_map)+num_regions*sizeof(struct v3_guest_mem_block));
59 fprintf(stderr, "Cannot allocate space\n");
64 memset(m,0,sizeof(struct v3_guest_mem_map)+num_regions*sizeof(struct v3_guest_mem_block));
66 m->numblocks=num_regions;
68 // Now collect the region info
70 while (i<num_regions) {
71 if (!fgets(buf,MAXLINE,f)) {
72 fprintf(stderr,"Did not find all regions...\n");
// BUG(review): num is declared uint64_t above but scanned with %d, and
// start/end (uint64_t) use %llx — mismatched conversion specifiers are
// undefined behavior; use SCNu64/SCNx64 from <inttypes.h> (or make the
// variables unsigned long long).
76 if (sscanf(buf," region %d has HPAs %llx-%llx",&num,&start,&end)==3) {
// Guest-physical addresses are handed out consecutively from guest_cur;
// regions are assumed contiguous in GPA space (see v3_gpa_end below).
77 m->block[i].gpa = (void*)guest_cur;
78 m->block[i].hpa = (void*)start;
// Page count rounds up: whole 4 KB pages plus one for a partial tail.
79 m->block[i].numpages = (end-start) / 4096 + !!((end-start) % 4096);
80 if ((end-start)%4096) {
// BUG(review): format string contains %d but no matching argument is
// passed (and it lacks a trailing \n) — should pass the region index.
81 fprintf(stderr,"Odd, region %d is a non-integral number of pages");
// cumgpa records this block's last valid GPA (inclusive end).
84 m->block[i].cumgpa=(void*)(guest_cur-1);
// Map every block of the guest's memory into this process by mmap()ing
// /dev/mem at each block's host-physical address.  Requires root (or
// equivalent) to open /dev/mem.  On any block failure, all mappings made
// so far are torn down via v3_unmap_guest_mem.
95 int v3_map_guest_mem(struct v3_guest_mem_map *map)
100 fprintf(stderr, "Memory appears to already be mapped\n")
104 map->fd = open("/dev/mem", O_RDWR | O_SYNC);
107 fprintf(stderr, "Cannot open /dev/mem - are you root?\n");
112 for (i=0; i<map->numblocks; i++) {
113 //fprintf(stderr,"Mapping %llu bytes of /dev/mem offset 0x%llx\n",
114 // map->block[i].numpages*4096, (off_t)(map->block[i].hpa));
// The /dev/mem file offset is the block's host-physical address; the
// mmap flags/fd arguments are on elided lines — presumably MAP_SHARED
// with map->fd; confirm against the full source.
115 map->block[i].uva = mmap(NULL,
116 map->block[i].numpages*4096,
117 PROT_READ | PROT_WRITE,
120 (off_t) (map->block[i].hpa));
122 if (map->block[i].uva == MAP_FAILED) {
// NOTE(review): %llu expects unsigned long long; i's declaration is on
// an elided line — confirm its type matches the specifier.
123 fprintf(stderr, "Failed to map block %llu\n",i);
// Unwind the partial mapping before reporting failure.
125 v3_unmap_guest_mem(map);
// Unmap every previously mmap()ed guest memory block (skipping blocks
// whose uva was never set).  NOTE(review): closing map->fd presumably
// happens on an elided line — confirm in the full source.
134 int v3_unmap_guest_mem(struct v3_guest_mem_map *map)
138 for (i=0; i<map->numblocks; i++) {
139 if (map->block[i].uva) {
140 munmap(map->block[i].uva, map->block[i].numpages*4096);
// First guest-physical address covered by the map (always 0 for now).
152 void *v3_gpa_start(struct v3_guest_mem_map *map)
154 return 0; // all guests start at zero for now
// Last valid guest-physical address (inclusive) covered by the map.
// Relies on regions being consecutive in GPA space, so the end of the
// final block is the end of guest memory.
157 void *v3_gpa_end(struct v3_guest_mem_map *map)
159 struct v3_guest_mem_block *l = &(map->block[map->numblocks-1]);
161 // currently, the regions are consecutive, so we just need the last block
162 return l->gpa+l->numpages*4096-1;
// Apply func to each mapped chunk covering [gpa, gpa+num_bytes): each
// iteration translates the current GPA to a user virtual address, clamps
// the chunk to the bytes still remaining, invokes func(uva, bytes, priv),
// and advances.  Fails up front if the range is not fully inside the map.
166 int v3_guest_mem_apply(void (*func)(void *data, uint64_t num_bytes, void *priv),
167 struct v3_guest_mem_map *map, void *gpa, uint64_t num_bytes, void *priv)
172 uint64_t block_bytes;
// Whole request must lie within [v3_gpa_start, v3_gpa_end].
178 if (gpa < v3_gpa_start(map) || gpa+num_bytes-1 > v3_gpa_end(map)) {
183 left_bytes = num_bytes;
// v3_gpa_to_uva presumably sets block_bytes to the bytes available from
// cur_gpa to the end of its containing block — declared elsewhere.
186 cur_uva = v3_gpa_to_uva(map, cur_gpa, &block_bytes);
190 if (block_bytes>left_bytes) {
191 block_bytes = left_bytes;
193 func(cur_uva,block_bytes,priv);
194 left_bytes-=block_bytes;
195 cur_gpa+=block_bytes;
// v3_guest_mem_apply callback: copy a chunk from guest memory (uva) into
// the caller's buffer; curoff points at a moving destination pointer that
// is advanced after each chunk.  NOTE(review): arithmetic on void* is a
// GNU extension, not standard C.
203 static void copy_out(void *uva, uint64_t num_bytes, void *curoff)
205 memcpy(*((void**)(curoff)), uva, num_bytes);
206 *(void**)curoff += num_bytes;
// v3_guest_mem_apply callback: copy a chunk from the caller's buffer into
// guest memory (uva); curoff points at a moving source pointer that is
// advanced after each chunk.  NOTE(review): void* arithmetic is a GNU
// extension, not standard C.
209 static void copy_in(void *uva, uint64_t num_bytes, void *curoff)
211 memcpy(uva, *((void**)(curoff)), num_bytes);
212 *(void**)curoff += num_bytes;
// v3_guest_mem_apply callback: fold each byte of the chunk into a simple
// additive hash accumulated in *priv (a uint64_t).
215 static void do_hash(void *uva, uint64_t num_bytes, void *priv)
218 uint64_t *curhash = (uint64_t *)priv;
220 for (i=0;i<num_bytes;i++) {
221 *curhash += ((uint8_t*)uva)[i];
// Read num_bytes of guest physical memory starting at gpa into data,
// by applying copy_out across the range (cpy_ptr tracks the destination).
225 int v3_guest_mem_read(struct v3_guest_mem_map *map, void *gpa, uint64_t num_bytes, char *data)
229 return v3_guest_mem_apply(copy_out,map,gpa,num_bytes,&cpy_ptr);
// Write num_bytes from data into guest physical memory starting at gpa,
// by applying copy_in across the range (cpy_ptr tracks the source).
232 int v3_guest_mem_write(struct v3_guest_mem_map *map, void *gpa, uint64_t num_bytes, char *data)
236 return v3_guest_mem_apply(copy_in,map,gpa,num_bytes,&cpy_ptr);
// Hash num_bytes of guest physical memory starting at gpa into *hash
// using the additive byte hash in do_hash.
239 int v3_guest_mem_hash(struct v3_guest_mem_map *map, void *gpa, uint64_t num_bytes, uint64_t *hash)
243 return v3_guest_mem_apply(do_hash,map,gpa,num_bytes,hash);
// Ask Palacios (via ioctl on the VM device) to start memory-access
// tracking with the given access type, reset policy, and period.
// Returns the ioctl result (0 on success).
248 int v3_guest_mem_track_start(char *vmdev,
249 v3_mem_track_access_t access,
250 v3_mem_track_reset_t reset,
253 struct v3_mem_track_cmd cmd;
255 cmd.request=V3_MEM_TRACK_START;
256 cmd.config.access_type=access;
257 cmd.config.reset_type=reset;
258 cmd.config.period=period;
260 return v3_vm_ioctl(vmdev,V3_VM_MEM_TRACK_CMD,&cmd);
// Ask Palacios (via ioctl on the VM device) to stop memory-access
// tracking.  Returns the ioctl result (0 on success).
264 int v3_guest_mem_track_stop(char *vmdev)
266 struct v3_mem_track_cmd cmd;
268 cmd.request=V3_MEM_TRACK_STOP;
270 return v3_vm_ioctl(vmdev,V3_VM_MEM_TRACK_CMD,&cmd);
// Integer ceiling division: number of y-sized units needed to hold x.
275 #define CEIL_DIV(x,y) (((x)/(y)) + !!((x)%(y)))
// Allocate a zeroed bitmap with one bit per page (size rounded up to
// whole bytes).  Returns NULL on allocation failure (error path elided).
277 static uint8_t *alloc_bitmap(uint64_t num_pages)
281 if (!(b = malloc(CEIL_DIV(num_pages,8)))) {
285 memset(b,0,CEIL_DIV(num_pages,8));
// Release a bitmap allocated by alloc_bitmap (body elided in this view).
291 static void free_bitmap(uint8_t *b)
// Free a snapshot: release each per-core access bitmap, then (presumably
// on an elided line) the snapshot struct itself.
300 void v3_guest_mem_track_free_snapshot(v3_mem_track_snapshot *s)
305 for (i=0;i<s->num_cores;i++) {
306 free_bitmap(s->core[i].access_bitmap);
// Allocate a zeroed snapshot with num_cores trailing core entries, giving
// each core a num_pages access bitmap.  On bitmap allocation failure the
// partially built snapshot is freed; the corresponding return statement
// is on an elided line — presumably NULL.
313 static v3_mem_track_snapshot *alloc_snapshot(uint64_t num_cores, uint64_t num_pages)
316 v3_mem_track_snapshot *s;
// One allocation for the header plus all per-core entries.
318 s = malloc(sizeof(v3_mem_track_snapshot) + sizeof(struct v3_core_mem_track) * num_cores);
324 memset(s,0,sizeof(v3_mem_track_snapshot) + sizeof(struct v3_core_mem_track) * num_cores);
326 s->num_cores=num_cores;
328 for (i=0;i<num_cores;i++) {
329 if (!(s->core[i].access_bitmap = alloc_bitmap(num_pages))) {
330 v3_guest_mem_track_free_snapshot(s);
333 s->core[i].num_pages=num_pages;
// Query the tracking dimensions (cores, pages) from Palacios, allocate a
// matching snapshot, then fill it via the SNAP ioctl.  The snapshot is
// freed on ioctl failure; the function continues past this view.
340 v3_mem_track_snapshot *v3_guest_mem_track_snapshot(char *vmdev)
342 struct v3_mem_track_sizes size;
343 v3_mem_track_snapshot *s;
346 rc = v3_vm_ioctl(vmdev,V3_VM_MEM_TRACK_SIZE,&size);
352 //printf("returned size num_cores=%u, num_pages=%llu",size.num_cores,size.num_pages);
354 // allocate a snapshot;
355 if (!(s=alloc_snapshot(size.num_cores,size.num_pages))) {
// Kernel side fills the snapshot (bitmaps included) in place.
361 if (v3_vm_ioctl(vmdev,V3_VM_MEM_TRACK_SNAP,s)) {
362 v3_guest_mem_track_free_snapshot(s);