2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
11 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
12 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
13 * All rights reserved.
15 * Author: Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
17 * This is free software. You are permitted to use,
18 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
21 #ifndef __VMM_DIRECT_PAGING_H__
22 #define __VMM_DIRECT_PAGING_H__
26 #include <palacios/vmm_mem.h>
27 #include <palacios/vmm_paging.h>
28 #include <palacios/vmm_list.h>
31 /**********************************
32 PASSTHROUGH PAGING - CORE FUNC
33 **********************************/
// Per-VM state for the passthrough paging implementation.
struct v3_passthrough_impl_state {
    // currently there is only a single implementation
    // that internally includes SVM and VMX support
    // The externally visible state is just the callbacks
    // callbacks registered via v3_register_passthrough_paging_event_callback()
    struct list_head event_callback_list;
// Setup/teardown of VM-wide and per-core passthrough paging state.
// NOTE(review): return conventions are not visible in this header -
// presumably 0 on success, negative on error; confirm in the implementation.
int v3_init_passthrough_paging(struct v3_vm_info *vm);
int v3_init_passthrough_paging_core(struct guest_info *core);
int v3_deinit_passthrough_paging(struct v3_vm_info *vm);
int v3_deinit_passthrough_paging_core(struct guest_info *core);
// Construct, free, or rebuild the passthrough page tables for a core.
int v3_init_passthrough_pts(struct guest_info * guest_info);
int v3_free_passthrough_pts(struct guest_info * core);
// Reset = presumably free + rebuild of the core's passthrough page tables;
// NOTE(review): confirm exact semantics against the implementation.
int v3_reset_passthrough_pts(struct guest_info * guest_info);
// actual_start/end may be null if you don't want this info
// If non-null, these return the actual affected GPA range

// Handle a page fault taken while passthrough paging is active on the core.
// fault_addr is the faulting address; error_code carries the page-fault
// error information (pf_error_t).
int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
				    addr_t *actual_start, addr_t *actual_end);

// Make the core's passthrough page tables the active ones.
int v3_activate_passthrough_pt(struct guest_info * info);
// Invalidate the passthrough mapping for a single address.
// actual_start/end (if non-null) report the GPA range actually invalidated.
int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr,
				   addr_t *actual_start, addr_t *actual_end);

// The range invalidated is minimally [start, end]
int v3_invalidate_passthrough_addr_range(struct guest_info * info,
					 addr_t inv_addr_start, addr_t inv_addr_end,
					 addr_t *actual_start, addr_t *actual_end);
69 /**********************************
70 PASSTHROUGH PAGING - EVENTS
71 **********************************/
// Event record handed to registered passthrough paging callbacks.
struct v3_passthrough_pg_event {
    // what happened
    enum {PASSTHROUGH_PAGEFAULT,PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_ACTIVATE} event_type;
    // whether the callback fires before (PREIMPL) or after (POSTIMPL)
    // the implementation handles the event
    enum {PASSTHROUGH_PREIMPL, PASSTHROUGH_POSTIMPL} event_order;
    pf_error_t error_code; // for pf
    addr_t gpa_start; // for invalidation of range or page fault
    addr_t gpa_end; // for invalidation of range or page fault (range is [start,end] )
    // PREIMPL: start/end is the requested range
    // POSTIMPL: start/end is the actual range invalidated
// Register a callback to be invoked on passthrough paging events
// (see struct v3_passthrough_pg_event for what is reported and when).
int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
						  int (*callback)(struct guest_info *core,
								  struct v3_passthrough_pg_event *,
// Remove a previously registered passthrough paging event callback.
int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
						    int (*callback)(struct guest_info *core,
								    struct v3_passthrough_pg_event *,
100 /*****************************
101 NESTED PAGING - CORE FUNC
102 *****************************/
// Per-VM state for the nested (hardware-assisted) paging implementation.
struct v3_nested_impl_state {
    // currently there is only a single implementation
    // that internally includes SVM and VMX support
    // The externally visible state is just the callbacks
    // callbacks registered via v3_register_nested_paging_event_callback()
    struct list_head event_callback_list;
// Setup/teardown of VM-wide and per-core nested paging state.
// hwinfo is an opaque pointer passed through to the per-core init;
// NOTE(review): its contents are defined by the SVM/VMX backends - confirm.
int v3_init_nested_paging(struct v3_vm_info *vm);
int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo);
int v3_deinit_nested_paging(struct v3_vm_info *vm);
int v3_deinit_nested_paging_core(struct guest_info *core);
// actual_start/end may be null if you don't want this info
// If non-null, these return the actual affected GPA range

// Handle a nested page fault; pfinfo is opaque, hardware-provided fault
// information (NOTE(review): format set by the SVM/VMX backend - confirm).
int v3_handle_nested_pagefault(struct guest_info * info, addr_t fault_addr, void *pfinfo,
			       addr_t *actual_start, addr_t *actual_end);

// Invalidate the nested mapping for a single address.
int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr,
			      addr_t *actual_start, addr_t *actual_end);
// The range invalidated is minimally [start, end]
// actual_start/end (if non-null) report the GPA range actually invalidated.
int v3_invalidate_nested_addr_range(struct guest_info * info,
				    addr_t inv_addr_start, addr_t inv_addr_end,
				    addr_t *actual_start, addr_t *actual_end);
133 /*****************************
134 NESTED PAGING - EVENTS
135 *****************************/
// Event record handed to registered nested paging callbacks.
struct v3_nested_pg_event {
    // what happened (note: no ACTIVATE event here, unlike passthrough paging)
    enum {NESTED_PAGEFAULT,NESTED_INVALIDATE_RANGE} event_type;
    // whether the callback fires before (PREIMPL) or after (POSTIMPL)
    // the implementation handles the event
    enum {NESTED_PREIMPL, NESTED_POSTIMPL} event_order;
    addr_t gpa; // for pf
    pf_error_t error_code; // for pf
    addr_t gpa_start; // for invalidation of range or page fault
    addr_t gpa_end; // for invalidation of range or page fault (range is [start,end] )
    // PREIMPL: start/end is the requested range
    // POSTIMPL: start/end is the actual range invalidated
// Register a callback to be invoked on nested paging events
// (see struct v3_nested_pg_event for what is reported and when).
int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
					     int (*callback)(struct guest_info *core,
							     struct v3_nested_pg_event *,
// Remove a previously registered nested paging event callback.
int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
					       int (*callback)(struct guest_info *core,
							       struct v3_nested_pg_event *,
163 #endif // ! __V3VEE__