static int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
-int init_shadow_page_state(struct guest_info * info) {
+int v3_init_shadow_page_state(struct guest_info * info) {
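+// Renamed with the v3_ prefix, presumably to namespace the exported shadow-paging interface (the same rename pattern repeats below).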
struct shadow_page_state * state = &(info->shdw_pg_state);
- state->guest_mode = PDE32;
- state->shadow_mode = PDE32;
state->guest_cr3 = 0;
state->shadow_cr3 = 0;
return 0;
}
*/
-int cache_page_tables32(struct guest_info * info, addr_t pde) {
+int v3_cache_page_tables32(struct guest_info * info, addr_t pde) {
struct shadow_page_state * state = &(info->shdw_pg_state);
addr_t pde_host_addr;
pde32_t * tmp_pde;
pde32_t * shadow_pde = (pde32_t *)&(shadow_pd[PDE32_INDEX(location)]);
if (shadow_pde->large_page == 0) {
- pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde));
+ pte32_t * shadow_pt = (pte32_t *)(addr_t)PDE32_T_ADDR((*shadow_pde));
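+// PDE32_T_ADDR() expands to an integer expression; the intermediate (addr_t) cast presumably silences int-to-pointer width warnings on 64-bit hosts (same pattern below).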
pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(location)]);
//if (shadow_pte->present == 1) {
-int handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
+int v3_handle_shadow_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
if (info->mem_mode == PHYSICAL_MEM) {
// If paging is not turned on, we need to handle the special cases
}
}
-addr_t create_new_shadow_pt32() {
+addr_t v3_create_new_shadow_pt32() {
void * host_pde = 0;
- host_pde = V3_AllocPages(1);
+ host_pde = V3_VAddr(V3_AllocPages(1));
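+// V3_AllocPages() appears to return a host physical address; V3_VAddr() maps it to a virtual address that the memset() below can safely write.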
memset(host_pde, 0, PAGE_SIZE);
return (addr_t)host_pde;
static int handle_shadow_pagefault32(struct guest_info * info, addr_t fault_addr, pf_error_t error_code) {
pde32_t * guest_pd = NULL;
pde32_t * shadow_pd = (pde32_t *)CR3_TO_PDE32(info->shdw_pg_state.shadow_cr3);
- addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);
+ addr_t guest_cr3 = (addr_t)CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);
pt_access_status_t guest_pde_access;
pt_access_status_t shadow_pde_access;
pde32_t * guest_pde = NULL;
if (shadow_pde_access == PT_ENTRY_NOT_PRESENT) {
- pte32_t * shadow_pt = (pte32_t *)create_new_shadow_pt32();
+ pte32_t * shadow_pt = (pte32_t *)v3_create_new_shadow_pt32();
shadow_pde->present = 1;
shadow_pde->user_page = guest_pde->user_page;
guest_pde->accessed = 1;
- shadow_pde->pt_base_addr = PD32_BASE_ADDR(shadow_pt);
+ shadow_pde->pt_base_addr = PD32_BASE_ADDR((addr_t)V3_PAddr(shadow_pt));
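+// The hardware page walker needs a physical address here, so the shadow_pt virtual pointer is presumably translated back with V3_PAddr() before being packed into the pde.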
if (guest_pde->large_page == 0) {
shadow_pde->writable = guest_pde->writable;
} else {
+ // ?? What if the guest pde is dirty at this point?
((pde32_4MB_t *)guest_pde)->dirty = 0;
shadow_pde->writable = 0;
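+// Clearing writable presumably forces a fault on the guest's first write so the dirty bit can be set in the guest pde at that point.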
}
//
// PTE fault
//
- pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde));
+ pte32_t * shadow_pt = (pte32_t *)(addr_t)PDE32_T_ADDR((*shadow_pde));
if (guest_pde->large_page == 0) {
pte32_t * guest_pt = NULL;
state->cached_cr3 = 0;
}
- } else if ((guest_pte->dirty = 0) && (error_code.write == 0)) {
+ } else if ((guest_pte->dirty == 0) && (error_code.write == 0)) { // was =
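+// With '=', the condition always evaluated false and silently cleared the guest dirty bit.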
shadow_pte->writable = 0;
}
/* Currently Does not work with Segmentation!!! */
-int handle_shadow_invlpg(struct guest_info * info) {
+int v3_handle_shadow_invlpg(struct guest_info * info) {
if (info->mem_mode != VIRTUAL_MEM) {
// Paging must be turned on...
// should be handled with some sort of fault, I think
if (info->cpu_mode == PROTECTED) {
- char instr[15];
+ uchar_t instr[15];
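+// instr[] holds raw opcode bytes, so uchar_t presumably avoids sign-extension issues during decode.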
int ret;
int index = 0;
addr_t first_operand;
addr_t second_operand;
- operand_type_t addr_type;
- addr_t guest_cr3 = CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);
+ v3_operand_type_t addr_type;
+ addr_t guest_cr3 = (addr_t)CR3_TO_PDE32(info->shdw_pg_state.guest_cr3);
pde32_t * guest_pd = NULL;
} else {
if (shadow_pde->present == 1) {
- pte32_t * shadow_pt = (pte32_t *)PDE32_T_ADDR((*shadow_pde));
+ pte32_t * shadow_pt = (pte32_t *)(addr_t)PDE32_T_ADDR((*shadow_pde));
pte32_t * shadow_pte = (pte32_t *)&shadow_pt[PTE32_INDEX(first_operand)];
#ifdef DEBUG_SHADOW_PAGING