7 #include "v3_guest_mem.h"
11 #define GUEST_FILE "/proc/v3vee/v3-guests-details"
12 //#define GUEST_FILE "/v-test/numa/palacios-devel/test.proc"
15 struct v3_guest_mem_map * v3_guest_mem_get_map(char *vmdev)
22 uint64_t start, end, num;
27 if (!(f=fopen(GUEST_FILE,"r"))) {
28 fprintf(stderr,"Cannot open %s - is Palacios active?\n",GUEST_FILE);
33 if (!fgets(buf,MAXLINE,f)) {
34 fprintf(stderr,"Could not find info for %s\n",vmdev);
37 if (sscanf(buf,"Device: %s",dev)==1) {
38 if (!strcmp(dev,vmdev)) {
45 // Now we need the number of regions
47 if (!fgets(buf,MAXLINE,f)) {
48 fprintf(stderr,"Could not find number of regions for %s\n",vmdev);
51 if (sscanf(buf,"Regions: %llu",&num_regions)==1) {
56 struct v3_guest_mem_map *m =
57 (struct v3_guest_mem_map *) malloc(sizeof(struct v3_guest_mem_map)+num_regions*sizeof(struct v3_guest_mem_block));
59 fprintf(stderr, "Cannot allocate space\n");
64 memset(m,0,sizeof(struct v3_guest_mem_map)+num_regions*sizeof(struct v3_guest_mem_block));
66 m->numblocks=num_regions;
68 // Now collect the region info
71 while (i<num_regions) {
72 if (!fgets(buf,MAXLINE,f)) {
73 fprintf(stderr,"Did not find all regions...\n");
77 if (sscanf(buf," region %d has HPAs %llx-%llx",&num,&start,&end)==3) {
78 m->block[i].gpa = (void*)guest_cur;
79 m->block[i].hpa = (void*)start;
80 m->block[i].numpages = (end-start) / 4096 + !!((end-start) % 4096);
81 if ((end-start)%4096) {
82 fprintf(stderr,"Odd, region %d is a non-integral number of pages",i);
85 m->block[i].cumgpa=(void*)(guest_cur-1);
96 int v3_map_guest_mem(struct v3_guest_mem_map *map)
101 fprintf(stderr, "Memory appears to already be mapped\n");
105 map->fd = open("/dev/mem", O_RDWR | O_SYNC);
108 fprintf(stderr, "Cannot open /dev/mem - are you root?\n");
113 for (i=0; i<map->numblocks; i++) {
114 //fprintf(stderr,"Mapping %llu bytes of /dev/mem offset 0x%llx\n",
115 // map->block[i].numpages*4096, (off_t)(map->block[i].hpa));
116 map->block[i].uva = mmap(NULL,
117 map->block[i].numpages*4096,
118 PROT_READ | PROT_WRITE,
121 (off_t) (map->block[i].hpa));
123 if (map->block[i].uva == MAP_FAILED) {
124 fprintf(stderr, "Failed to map block %llu\n",i);
126 v3_unmap_guest_mem(map);
135 int v3_unmap_guest_mem(struct v3_guest_mem_map *map)
139 for (i=0; i<map->numblocks; i++) {
140 if (map->block[i].uva) {
141 munmap(map->block[i].uva, map->block[i].numpages*4096);
/*
  First guest physical address of the mapped range.
  Every guest currently starts at GPA zero.
*/
void *v3_gpa_start(struct v3_guest_mem_map *map)
{
    (void)map;   // unused for now - all guests begin at zero

    return 0;
}
158 void *v3_gpa_end(struct v3_guest_mem_map *map)
160 struct v3_guest_mem_block *l = &(map->block[map->numblocks-1]);
162 // currently, the regions are consecutive, so we just need the last block
163 return l->gpa+l->numpages*4096-1;
/*
  Apply func to the guest physical range [gpa, gpa+num_bytes), walking
  it block by block through the user-space mappings. func receives the
  UVA of each chunk, the chunk length, and the opaque priv pointer.

  Returns 0 on success, -1 if the range is outside the guest's address
  space or a GPA fails to translate.
*/
int v3_guest_mem_apply(void (*func)(void *data, uint64_t num_bytes, void *priv),
		       struct v3_guest_mem_map *map, void *gpa, uint64_t num_bytes, void *priv)
{
    void     *cur_gpa;
    void     *cur_uva;
    uint64_t  left_bytes;
    uint64_t  block_bytes;

    // nothing to do; also avoids gpa+num_bytes-1 underflowing below
    if (num_bytes == 0) {
        return 0;
    }

    // reject ranges that fall outside the guest's physical address space
    if (gpa < v3_gpa_start(map) || gpa+num_bytes-1 > v3_gpa_end(map)) {
        return -1;
    }

    cur_gpa = gpa;
    left_bytes = num_bytes;

    while (left_bytes) {
        // translate; block_bytes receives the bytes left in this block
        cur_uva = v3_gpa_to_uva(map, cur_gpa, &block_bytes);

        if (!cur_uva) {
            return -1;
        }

        // never hand func more than the caller asked for
        if (block_bytes > left_bytes) {
            block_bytes = left_bytes;
        }

        func(cur_uva, block_bytes, priv);

        left_bytes -= block_bytes;
        cur_gpa += block_bytes;
    }

    return 0;
}
/*
  v3_guest_mem_apply() callback for reads: copy a chunk of guest memory
  (uva) out to the caller's buffer and advance the buffer cursor.
  curoff points at the moving destination pointer.
*/
static void copy_out(void *uva, uint64_t num_bytes, void *curoff)
{
    // use uint8_t* for the pointer bump - void* arithmetic is a GNU
    // extension, not standard C
    uint8_t **cur = (uint8_t **)curoff;

    memcpy(*cur, uva, num_bytes);
    *cur += num_bytes;
}
/*
  v3_guest_mem_apply() callback for writes: copy a chunk from the
  caller's buffer into guest memory (uva) and advance the buffer cursor.
  curoff points at the moving source pointer.
*/
static void copy_in(void *uva, uint64_t num_bytes, void *curoff)
{
    // use uint8_t* for the pointer bump - void* arithmetic is a GNU
    // extension, not standard C
    uint8_t **cur = (uint8_t **)curoff;

    memcpy(uva, *cur, num_bytes);
    *cur += num_bytes;
}
/*
  v3_guest_mem_apply() callback for hashing: fold each byte of the
  chunk into the caller's running sum. priv points at the accumulator.
*/
static void do_hash(void *uva, uint64_t num_bytes, void *priv)
{
    uint64_t *acc = (uint64_t *)priv;
    const uint8_t *bytes = (const uint8_t *)uva;
    uint64_t i;

    for (i = 0; i < num_bytes; i++) {
        *acc += bytes[i];
    }
}
226 int v3_guest_mem_read(struct v3_guest_mem_map *map, void *gpa, uint64_t num_bytes, char *data)
230 return v3_guest_mem_apply(copy_out,map,gpa,num_bytes,&cpy_ptr);
233 int v3_guest_mem_write(struct v3_guest_mem_map *map, void *gpa, uint64_t num_bytes, char *data)
237 return v3_guest_mem_apply(copy_in,map,gpa,num_bytes,&cpy_ptr);
240 int v3_guest_mem_hash(struct v3_guest_mem_map *map, void *gpa, uint64_t num_bytes, uint64_t *hash)
244 return v3_guest_mem_apply(do_hash,map,gpa,num_bytes,hash);
249 int v3_guest_mem_track_start(char *vmdev,
250 v3_mem_track_access_t access,
251 v3_mem_track_reset_t reset,
254 struct v3_mem_track_cmd cmd;
256 cmd.request=V3_MEM_TRACK_START;
257 cmd.config.access_type=access;
258 cmd.config.reset_type=reset;
259 cmd.config.period=period;
261 return v3_vm_ioctl(vmdev,V3_VM_MEM_TRACK_CMD,&cmd);
265 int v3_guest_mem_track_stop(char *vmdev)
267 struct v3_mem_track_cmd cmd;
269 cmd.request=V3_MEM_TRACK_STOP;
271 return v3_vm_ioctl(vmdev,V3_VM_MEM_TRACK_CMD,&cmd);
// ceiling division: how many y-sized units are needed to hold x
#define CEIL_DIV(x,y) (((x)/(y)) + !!((x)%(y)))

/*
  Allocate a zeroed bitmap with one bit per page (rounded up to whole
  bytes). Returns NULL on allocation failure; the caller releases it
  with free_bitmap().
*/
static uint8_t *alloc_bitmap(uint64_t num_pages)
{
    uint8_t *b;

    // calloc zeroes the memory for us and checks the size product
    // for overflow, replacing the original malloc+memset pair
    if (!(b = calloc(CEIL_DIV(num_pages,8), 1))) {
        return NULL;
    }

    return b;
}
292 static void free_bitmap(uint8_t *b)
301 void v3_guest_mem_track_free_snapshot(v3_mem_track_snapshot *s)
306 for (i=0;i<s->num_cores;i++) {
307 free_bitmap(s->core[i].access_bitmap);
314 static v3_mem_track_snapshot *alloc_snapshot(uint64_t num_cores, uint64_t num_pages)
317 v3_mem_track_snapshot *s;
319 s = malloc(sizeof(v3_mem_track_snapshot) + sizeof(struct v3_core_mem_track) * num_cores);
325 memset(s,0,sizeof(v3_mem_track_snapshot) + sizeof(struct v3_core_mem_track) * num_cores);
327 s->num_cores=num_cores;
329 for (i=0;i<num_cores;i++) {
330 if (!(s->core[i].access_bitmap = alloc_bitmap(num_pages))) {
331 v3_guest_mem_track_free_snapshot(s);
334 s->core[i].num_pages=num_pages;
341 v3_mem_track_snapshot *v3_guest_mem_track_snapshot(char *vmdev)
343 struct v3_mem_track_sizes size;
344 v3_mem_track_snapshot *s;
347 rc = v3_vm_ioctl(vmdev,V3_VM_MEM_TRACK_SIZE,&size);
353 //printf("returned size num_cores=%u, num_pages=%llu",size.num_cores,size.num_pages);
355 // allocate a snapshot;
356 if (!(s=alloc_snapshot(size.num_cores,size.num_pages))) {
362 if (v3_vm_ioctl(vmdev,V3_VM_MEM_TRACK_SNAP,s)) {
363 v3_guest_mem_track_free_snapshot(s);