X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Finclude%2Fpalacios%2Fvmm_direct_paging.h;h=925a29966a878a8f78db1c0a182cb4ddbeea7e26;hb=a4fd5bcc79e7cdf9a3bd879294566bff0666ced7;hp=6404e6f3ed0314d383a3747a59395dfa3034951c;hpb=d3d6d09894dba66f4e2361bb2f903139a2d83684;p=palacios.git

diff --git a/palacios/include/palacios/vmm_direct_paging.h b/palacios/include/palacios/vmm_direct_paging.h
index 6404e6f..925a299 100644
--- a/palacios/include/palacios/vmm_direct_paging.h
+++ b/palacios/include/palacios/vmm_direct_paging.h
@@ -25,26 +25,140 @@
 #include 
 #include 
+#include 
+
+
+/**********************************
+   PASSTHROUGH PAGING - CORE FUNC
+ **********************************/
+
+
+struct v3_passthrough_impl_state {
+    // currently there is only a single implementation
+    // that internally includes SVM and VMX support
+    // The externally visible state is just the callbacks
+    struct list_head event_callback_list;
+};
+
+
+int v3_init_passthrough_paging(struct v3_vm_info *vm);
+int v3_init_passthrough_paging_core(struct guest_info *core);
+int v3_deinit_passthrough_paging(struct v3_vm_info *vm);
+int v3_deinit_passthrough_paging_core(struct guest_info *core);
 
 int v3_init_passthrough_pts(struct guest_info * guest_info);
 int v3_free_passthrough_pts(struct guest_info * core);
 
 int v3_reset_passthrough_pts(struct guest_info * guest_info);
 
-int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
-int v3_handle_nested_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code);
+// actual_start/end may be null if you don't want this info
+// If non-null, these return the actual affected GPA range
+int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
+                                    addr_t *actual_start, addr_t *actual_end);
 
 int v3_activate_passthrough_pt(struct guest_info * info);
 
-int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr);
+int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr,
+                                   addr_t *actual_start, addr_t *actual_end);
+
 // The range invalidated is minimally [start, end]
 int v3_invalidate_passthrough_addr_range(struct guest_info * info, 
-                                         addr_t inv_addr_start, addr_t inv_addr_end);
+                                         addr_t inv_addr_start, addr_t inv_addr_end,
+                                         addr_t *actual_start, addr_t *actual_end);
+
+/**********************************
+   PASSTHROUGH PAGING - EVENTS
+ **********************************/
+
+struct v3_passthrough_pg_event {
+    enum {PASSTHROUGH_PAGEFAULT,PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_ACTIVATE} event_type;
+    enum {PASSTHROUGH_PREIMPL, PASSTHROUGH_POSTIMPL} event_order;
+    addr_t     gpa;        // for pf
+    pf_error_t error_code; // for pf
+    addr_t     gpa_start;  // for invalidation of range or page fault
+    addr_t     gpa_end;    // for invalidation of range or page fault (range is [start,end] )
+                           // PREIMPL: start/end is the requested range
+                           // POSTIMPL: start/end is the actual range invalidated
+};
+
+
+
+int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
+                                                   int (*callback)(struct guest_info *core,
+                                                                   struct v3_passthrough_pg_event *,
+                                                                   void *priv_data),
+                                                   void *priv_data);
+
+int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
+                                                     int (*callback)(struct guest_info *core,
+                                                                     struct v3_passthrough_pg_event *,
+                                                                     void *priv_data),
+                                                     void *priv_data);
+
+
+
+/*****************************
+   NESTED PAGING - CORE FUNC
+ *****************************/
+
+
+struct v3_nested_impl_state {
+    // currently there is only a single implementation
+    // that internally includes SVM and VMX support
+    // The externally visible state is just the callbacks
+    struct list_head event_callback_list;
+};
+
+int v3_init_nested_paging(struct v3_vm_info *vm);
+int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo);
+int v3_deinit_nested_paging(struct v3_vm_info *vm);
+int v3_deinit_nested_paging_core(struct guest_info *core);
+
+
+// actual_start/end may be null if you don't want this info
+// If non-null, these return the actual affected GPA range
+int v3_handle_nested_pagefault(struct guest_info * info, addr_t fault_addr, void *pfinfo,
+                               addr_t *actual_start, addr_t *actual_end);
+
+int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr,
+                              addr_t *actual_start, addr_t *actual_end);
 
-int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr);
 // The range invalidated is minimally [start, end]
 int v3_invalidate_nested_addr_range(struct guest_info * info, 
-                                    addr_t inv_addr_start, addr_t inv_addr_end);
+                                    addr_t inv_addr_start, addr_t inv_addr_end,
+                                    addr_t *actual_start, addr_t *actual_end);
+
+
+
+/*****************************
+   NESTED PAGING - EVENTS
+ *****************************/
+
+struct v3_nested_pg_event {
+    enum {NESTED_PAGEFAULT,NESTED_INVALIDATE_RANGE} event_type;
+    enum {NESTED_PREIMPL, NESTED_POSTIMPL} event_order;
+    addr_t     gpa;        // for pf
+    pf_error_t error_code; // for pf
+    addr_t     gpa_start;  // for invalidation of range or page fault
+    addr_t     gpa_end;    // for invalidation of range or page fault (range is [start,end] )
+                           // PREIMPL: start/end is the requested range
+                           // POSTIMPL: start/end is the actual range invalidated
+};
+
+
+
+int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
+                                              int (*callback)(struct guest_info *core,
+                                                              struct v3_nested_pg_event *,
+                                                              void *priv_data),
+                                              void *priv_data);
+
+int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
+                                                int (*callback)(struct guest_info *core,
+                                                                struct v3_nested_pg_event *,
+                                                                void *priv_data),
+                                                void *priv_data);
+
 
 #endif // ! __V3VEE__
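
The new actual_start/actual_end out-parameters follow one pattern throughout the header: pass NULL to ignore them, or pass addresses to learn which guest-physical range was really touched (the header only promises that at least [start, end] is invalidated, so the reported range may be larger than what was requested). Below is a minimal consumer-side sketch, not part of the patch; flush_gpa_range is a hypothetical helper, and the 0 / -1 return convention is assumed rather than stated by the header.

#include <palacios/vmm_direct_paging.h>

/* Hypothetical helper: invalidate [gpa_start, gpa_end] in a core's
 * passthrough page tables and note how much was actually flushed. */
static int flush_gpa_range(struct guest_info *core, addr_t gpa_start, addr_t gpa_end) {
    addr_t actual_start = 0;
    addr_t actual_end   = 0;

    if (v3_invalidate_passthrough_addr_range(core, gpa_start, gpa_end,
                                             &actual_start, &actual_end) == -1) {
        return -1;
    }

    /* [actual_start, actual_end] covers at least the requested range;
       callers that do not need this information can pass NULL, NULL. */

    return 0;
}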
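The event-callback interface is easiest to see from the consumer side. The sketch below registers a passthrough paging event callback that counts page-fault events before (PREIMPL) and after (POSTIMPL) the implementation handles them. It is an illustration only: pf_stats, count_pf_event, and attach_pf_counter are hypothetical names, struct v3_vm_info and struct guest_info are assumed to come from the usual Palacios headers, and a 0 return from the callback is assumed to mean success.

#include <palacios/vmm_direct_paging.h>

/* Hypothetical per-VM counters handed to the callback via priv_data. */
struct pf_stats {
    unsigned long long requested_faults;   /* PREIMPL page-fault events  */
    unsigned long long completed_faults;   /* POSTIMPL page-fault events */
};

/* Invoked once before and once after each passthrough paging event. */
static int count_pf_event(struct guest_info *core,
                          struct v3_passthrough_pg_event *event,
                          void *priv_data) {
    struct pf_stats *stats = (struct pf_stats *)priv_data;

    if (event->event_type == PASSTHROUGH_PAGEFAULT) {
        if (event->event_order == PASSTHROUGH_PREIMPL) {
            /* gpa and error_code describe the faulting access;
               gpa_start/gpa_end are the requested range */
            stats->requested_faults++;
        } else {
            /* POSTIMPL: gpa_start/gpa_end are the range actually mapped */
            stats->completed_faults++;
        }
    }

    return 0;
}

static int attach_pf_counter(struct v3_vm_info *vm, struct pf_stats *stats) {
    return v3_register_passthrough_paging_event_callback(vm, count_pf_event, stats);
}

The nested-paging variants follow the same pattern with struct v3_nested_pg_event, and unregistration takes the same callback/priv_data pair that was registered.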