Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are handled the same way; an example follows.
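For example, to list the available remote branches and then track one of the release branches (the release branch name below is only illustrative; use the names reported by git branch -r):

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2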


diff --git a/palacios/src/palacios/mmu/vmm_shdw_pg_cache.c b/palacios/src/palacios/mmu/vmm_shdw_pg_cache.c
index 3f1f0ca..ae0db99 100644
--- a/palacios/src/palacios/mmu/vmm_shdw_pg_cache.c
+++ b/palacios/src/palacios/mmu/vmm_shdw_pg_cache.c
@@ -31,7 +31,7 @@
 
 #define V3_CACHED_PG 0x1
 
-#ifndef CONFIG_DEBUG_SHDW_PG_CACHE
+#ifndef V3_CONFIG_DEBUG_SHDW_PG_CACHE
 #undef PrintDebug
 #define PrintDebug(fmt, ...)
 #endif
@@ -100,37 +100,37 @@ static  inline int evict_pt(void * pt, addr_t va, page_type_t pt_type) {
     switch (pt_type) {
        case PAGE_PD32: {
            pde32_t * pde = pt;
-           pde[PDE32_INDEX(va)].writable = 1;
+           pde[PDE32_INDEX(va)].present = 0;
            break;
        }
        case PAGE_4MB: {
            pde32_4MB_t * pde = pt;
-           pde[PDE32_INDEX(va)].writable = 1;
+           pde[PDE32_INDEX(va)].present = 0;
            break;
        }
        case PAGE_PT32: {
            pte32_t * pte = pt;
-           pte[PTE32_INDEX(va)].writable = 1;
+           pte[PTE32_INDEX(va)].present = 0;
            break;
        }
        case PAGE_PML464: {
            pml4e64_t * pml = pt;
-           pml[PML4E64_INDEX(va)].writable = 1;
+           pml[PML4E64_INDEX(va)].present = 0;
            break;
        }
        case PAGE_PDP64: {
            pdpe64_t * pdp = pt;
-           pdp[PDPE64_INDEX(va)].writable = 1;
+           pdp[PDPE64_INDEX(va)].present = 0;
            break;
        }
        case PAGE_PD64: {
            pde64_t * pde = pt;
-           pde[PDE64_INDEX(va)].writable = 1;
+           pde[PDE64_INDEX(va)].present = 0;
            break;
        }
        case PAGE_PT64: {
            pte64_t * pte = pt;
-           pte[PTE64_INDEX(va)].writable = 1;
+           pte[PTE64_INDEX(va)].present = 0;
            break;
        }
        default:
@@ -220,6 +220,12 @@ static int add_rmap(struct v3_vm_info * vm, struct shdw_pg_data * pg_data, addr_
 
     if (rmap_list == NULL) {
        rmap_list = V3_Malloc(sizeof(struct list_head));
+
+       if (!rmap_list) {
+           PrintError("Cannot allocate\n");
+           return -1;
+       }
+
        INIT_LIST_HEAD(rmap_list);
 
        v3_htable_insert(cache_state->reverse_map, gpa, (addr_t)rmap_list);
@@ -227,6 +233,11 @@ static int add_rmap(struct v3_vm_info * vm, struct shdw_pg_data * pg_data, addr_
     
     entry = V3_Malloc(sizeof(struct rmap_entry));
 
+    if (!entry) {
+       PrintError("Cannot allocate\n");
+       return -1;
+    }
+
     entry->gva = gva;
     entry->gpa = pg_data->tuple.gpa;
     entry->pt_type = pg_data->tuple.pt_type;
@@ -261,7 +272,7 @@ static int update_rmap_entries(struct v3_vm_info * vm, addr_t gpa) {
        pg_data = (struct shdw_pg_data *)v3_htable_search(cache_state->page_htable, (addr_t)&tuple);
 
        if (!pg_data) {
-           PrintError("Invalid PTE reference...\n");
+           PrintError("Invalid PTE reference... Should Delete rmap entry\n");
            continue;
        }
 
@@ -282,6 +293,12 @@ static int update_rmap_entries(struct v3_vm_info * vm, addr_t gpa) {
 
 static int link_shdw_pg(struct shdw_pg_data * child_pg, struct shdw_pg_data * parent_pg, addr_t gva) {
     struct shdw_back_ptr * back_ptr = V3_Malloc(sizeof(struct shdw_back_ptr));
+
+    if (!back_ptr) {
+       PrintError("Cannot allocate\n");
+       return -1;
+    }
+
     memset(back_ptr, 0, sizeof(struct shdw_back_ptr));
 
     back_ptr->pg_data = parent_pg;
@@ -341,6 +358,8 @@ static struct shdw_pg_data * pop_queue_pg(struct v3_vm_info * vm,
                                          struct cache_vm_state * cache_state) {
     struct shdw_pg_data * pg_data = NULL;
 
+    PrintError("popping page from queue\n");
+
     pg_data = list_tail_entry(&(cache_state->pg_queue), struct shdw_pg_data, pg_queue_node);
 
 
@@ -367,10 +386,23 @@ static struct shdw_pg_data * create_shdw_pt(struct v3_vm_info * vm, addr_t gpa,
     if (cache_state->pgs_in_cache < cache_state->max_cache_pgs) {
        pg_data = V3_Malloc(sizeof(struct shdw_pg_data));
 
+       if (!pg_data) {
+           PrintError("Cannot allocate\n");
+           return NULL;
+       }
+
        pg_data->hpa = (addr_t)V3_AllocPages(1);
+
+       if (!pg_data->hpa) {
+           PrintError("Cannot allocate page for shadow page table\n");
+           return NULL;
+       }
+
        pg_data->hva = (void *)V3_VAddr((void *)pg_data->hpa);
 
     } else if (cache_state->pgs_in_free_list) {
+
+       PrintError("pulling page from free list\n");
        // pull from free list
        pg_data = list_tail_entry(&(cache_state->free_list), struct shdw_pg_data, pg_queue_node);
        
@@ -445,6 +477,12 @@ static int cache_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
     V3_Print("Shadow Page Cache initialization\n");
 
     cache_state = V3_Malloc(sizeof(struct cache_vm_state));
+
+    if (!cache_state) {
+       PrintError("Cannot allocate\n");
+       return -1;
+    }
+
     memset(cache_state, 0, sizeof(struct cache_vm_state));
 
     cache_state->page_htable = v3_create_htable(0, cache_hash_fn, cache_eq_fn);
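
The functional change in the hunks above is that evict_pt() now clears an entry's present bit instead of setting its writable bit, so the next guest access to the evicted mapping takes a page fault and re-enters the shadow paging code; the remaining hunks add NULL checks after V3_Malloc()/V3_AllocPages() so allocation failures are reported instead of dereferenced. Below is a minimal standalone sketch of the eviction idea, not Palacios code: the pte32_sketch_t layout, PTE32_INDEX_SKETCH macro, and evict_pte32() helper are simplified stand-ins for the real definitions in the tree.

/* Minimal sketch, not Palacios code: evict a cached 32-bit page-table entry
 * by clearing its present bit. With the entry non-present, the next guest
 * access to that virtual address faults and can be handled by the shadow
 * paging code. The types and names below are simplified stand-ins. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t present  : 1;
    uint32_t writable : 1;
    uint32_t user     : 1;
    uint32_t flags    : 9;
    uint32_t pfn      : 20;   /* page frame number */
} pte32_sketch_t;

/* bits 12..21 of a virtual address index a 32-bit page table */
#define PTE32_INDEX_SKETCH(va) (((va) >> 12) & 0x3ff)

static void evict_pte32(pte32_sketch_t * pt, uint32_t va) {
    /* Clear present rather than touching writable: the mapping disappears,
     * so the next access traps instead of reusing a stale cached entry. */
    pt[PTE32_INDEX_SKETCH(va)].present = 0;
}

int main(void) {
    pte32_sketch_t pt[1024] = {{0}};
    uint32_t va = 0x00401000;

    pt[PTE32_INDEX_SKETCH(va)].present = 1;
    pt[PTE32_INDEX_SKETCH(va)].pfn     = 0x1234;

    evict_pte32(pt, va);
    printf("present after evict: %u\n", (unsigned) pt[PTE32_INDEX_SKETCH(va)].present);
    return 0;
}

The allocation-check hunks all follow the same pattern: test the pointer returned by V3_Malloc() or V3_AllocPages(), log the failure with PrintError(), and return an error before the pointer is used.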