Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Correct VM deallocation on failure before nested paging init
[palacios.git] / palacios / include / palacios / vmm_direct_paging.h
1 /*
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Steven Jaconette <stevenjaconette2007@u.northwestern.edu> 
11  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
12  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
16  *
17  * This is free software.  You are permitted to use,
18  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
19  */
20
21 #ifndef __VMM_DIRECT_PAGING_H__
22 #define __VMM_DIRECT_PAGING_H__
23
24 #ifdef __V3VEE__
25
26 #include <palacios/vmm_mem.h>
27 #include <palacios/vmm_paging.h>
28 #include <palacios/vmm_list.h>
29 #include <palacios/vmm_lock.h>
30  
31
32 /**********************************
33    PASSTHROUGH PAGING - CORE FUNC
34  **********************************/
35
36
37 struct v3_passthrough_impl_state {
38     // currently there is only a single implementation
39     // that internally includes SVM and VMX support
40     // The externally visible state is just the callbacks
41     v3_rw_lock_t     event_callback_lock;
42     struct list_head event_callback_list;
43     int              inited; 
44 };
45
46
47 int v3_init_passthrough_paging(struct v3_vm_info *vm);
48 int v3_init_passthrough_paging_core(struct guest_info *core);
49 int v3_deinit_passthrough_paging(struct v3_vm_info *vm);
50 int v3_deinit_passthrough_paging_core(struct guest_info *core);
51
52 int v3_init_passthrough_pts(struct guest_info * guest_info);
53 int v3_free_passthrough_pts(struct guest_info * core);
54
55 int v3_reset_passthrough_pts(struct guest_info * guest_info);
56
57 // actual_start/end may be null if you don't want this info
58 // If non-null, these return the actual affected GPA range
59 int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
60                                     addr_t *actual_start, addr_t *actual_end);
61
62 int v3_activate_passthrough_pt(struct guest_info * info);
63
64 int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr,
65                                    addr_t *actual_start, addr_t *actual_end);
66
67 // The range invalidated is minimally [start, end]
68 int v3_invalidate_passthrough_addr_range(struct guest_info * info, 
69                                          addr_t inv_addr_start, addr_t inv_addr_end,
70                                          addr_t *actual_start, addr_t *actual_end);
71
72 /**********************************
73    PASSTHROUGH PAGING - EVENTS
74  **********************************/
75
76 struct v3_passthrough_pg_event {
77     enum {PASSTHROUGH_PAGEFAULT,PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_ACTIVATE} event_type;
78     enum {PASSTHROUGH_PREIMPL, PASSTHROUGH_POSTIMPL} event_order;
79     addr_t     gpa;        // for pf 
80     pf_error_t error_code; // for pf
81     addr_t     gpa_start;  // for invalidation of range or page fault
82     addr_t     gpa_end;    // for invalidation of range or page fault (range is [start,end] )
83                            // PREIMPL: start/end is the requested range
84                            // POSTIMPL: start/end is the actual range invalidated
85 };
86
87
88
/*
 * Register / unregister a callback to be invoked on passthrough paging
 * events.  priv_data is passed back to the callback verbatim; the
 * (callback, priv_data) pair presumably identifies the registration for
 * unregister -- confirm against the implementation.
 */
int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                                  int (*callback)(struct guest_info *core, 
                                                                  struct v3_passthrough_pg_event *,
                                                                  void      *priv_data),
                                                  void *priv_data);

int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                                    int (*callback)(struct guest_info *core, 
                                                                    struct v3_passthrough_pg_event *,
                                                                    void      *priv_data),
                                                    void *priv_data);
100
101
102
103 /*****************************
104    NESTED PAGING - CORE FUNC
105  *****************************/
106
107
108 struct v3_nested_impl_state {
109     // currently there is only a single implementation
110     // that internally includes SVM and VMX support
111     // The externally visible state is just the callbacks
112     v3_rw_lock_t     event_callback_lock;
113     struct list_head event_callback_list;
114     int              inited; 
115 };
116
117 int v3_init_nested_paging(struct v3_vm_info *vm);
118 int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo);
119 int v3_deinit_nested_paging(struct v3_vm_info *vm);
120 int v3_deinit_nested_paging_core(struct guest_info *core);
121
122
123 // actual_start/end may be null if you don't want this info
124 // If non-null, these return the actual affected GPA range
125 int v3_handle_nested_pagefault(struct guest_info * info, addr_t fault_addr, void *pfinfo,
126                                addr_t *actual_start, addr_t *actual_end);
127
128 int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr,
129                               addr_t *actual_start, addr_t *actual_end);
130
131 // The range invalidated is minimally [start, end]
132 int v3_invalidate_nested_addr_range(struct guest_info * info, 
133                                     addr_t inv_addr_start, addr_t inv_addr_end,
134                                     addr_t *actual_start, addr_t *actual_end);
135
136
137
138 /*****************************
139    NESTED PAGING - EVENTS
140  *****************************/
141
142 struct v3_nested_pg_event {
143     enum {NESTED_PAGEFAULT,NESTED_INVALIDATE_RANGE} event_type;
144     enum {NESTED_PREIMPL, NESTED_POSTIMPL} event_order;
145     addr_t     gpa;        // for pf 
146     pf_error_t error_code; // for pf
147     addr_t     gpa_start;  // for invalidation of range or page fault
148     addr_t     gpa_end;    // for invalidation of range or page fault (range is [start,end] )
149                            // PREIMPL: start/end is the requested range
150                            // POSTIMPL: start/end is the actual range invalidated
151 };
152
153
154
/*
 * Register / unregister a callback to be invoked on nested paging
 * events.  priv_data is passed back to the callback verbatim; the
 * (callback, priv_data) pair presumably identifies the registration for
 * unregister -- confirm against the implementation.
 */
int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
                                             int (*callback)(struct guest_info *core, 
                                                             struct v3_nested_pg_event *,
                                                             void      *priv_data),
                                             void *priv_data);

int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
                                               int (*callback)(struct guest_info *core, 
                                                               struct v3_nested_pg_event *,
                                                               void      *priv_data),
                                               void *priv_data);
166
167
168 #endif // ! __V3VEE__
169
170 #endif