Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


More init checks to allow graceful fail out when VM cannot be created
[palacios.git] / palacios / include / palacios / vmm_direct_paging.h
1 /*
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2008, Steven Jaconette <stevenjaconette2007@u.northwestern.edu> 
11  * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu> 
12  * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
16  *
17  * This is free software.  You are permitted to use,
18  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
19  */
20
21 #ifndef __VMM_DIRECT_PAGING_H__
22 #define __VMM_DIRECT_PAGING_H__
23
24 #ifdef __V3VEE__
25
26 #include <palacios/vmm_mem.h>
27 #include <palacios/vmm_paging.h>
28 #include <palacios/vmm_list.h>
29 #include <palacios/vmm_lock.h>
30  
31
32 /**********************************
33    PASSTHROUGH PAGING - CORE FUNC
34  **********************************/
35
36
/* Per-VM state for the passthrough (shadow-free, identity-style) paging
 * implementation.  Currently there is only a single implementation that
 * internally includes SVM and VMX support.  The externally visible state
 * is just the event callbacks registered against this VM. */
struct v3_passthrough_impl_state {
    v3_rw_lock_t     event_callback_lock;  // protects event_callback_list (readers = event dispatch, writers = (un)register)
    struct list_head event_callback_list;  // list of registered passthrough paging event callbacks
    int              inited;               // nonzero once init has completed; presumably used for graceful
                                           // fail-out when the VM cannot be created -- confirm in vmm_direct_paging.c
};
45
46
/* VM-wide setup/teardown of the passthrough paging implementation.
 * Return convention throughout this header appears to be 0 on success,
 * negative on error -- confirm against the implementation. */
int v3_init_passthrough_paging(struct v3_vm_info *vm);
int v3_init_passthrough_paging_core(struct guest_info *core);   // per-core counterpart of the VM-wide init
int v3_deinit_passthrough_paging(struct v3_vm_info *vm);
int v3_deinit_passthrough_paging_core(struct guest_info *core);

/* Build / free the passthrough page tables for a single core. */
int v3_init_passthrough_pts(struct guest_info * guest_info);
int v3_free_passthrough_pts(struct guest_info * core);

/* Discard and rebuild the core's passthrough page tables (e.g. after a
 * memory-map change) -- semantics inferred from the name; confirm. */
int v3_reset_passthrough_pts(struct guest_info * guest_info);

// Handle a fault taken while running on the passthrough page tables.
// actual_start/end may be null if you don't want this info
// If non-null, these return the actual affected GPA range
int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                    addr_t *actual_start, addr_t *actual_end);

// Make the passthrough page tables the active tables for this core.
int v3_activate_passthrough_pt(struct guest_info * info);

// Invalidate the passthrough mapping covering a single GPA.
// actual_start/end (may be NULL) return the GPA range actually invalidated.
int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr,
                                   addr_t *actual_start, addr_t *actual_end);

// The range invalidated is minimally [start, end] (inclusive); the actual
// range may be larger (e.g. rounded to large-page boundaries) and is
// reported via actual_start/end when non-NULL.
int v3_invalidate_passthrough_addr_range(struct guest_info * info, 
                                         addr_t inv_addr_start, addr_t inv_addr_end,
                                         addr_t *actual_start, addr_t *actual_end);
71
72 /**********************************
73    PASSTHROUGH PAGING - EVENTS
74  **********************************/
75
/* Event record delivered to registered passthrough paging callbacks.
 * Each event fires twice: once before the implementation acts
 * (PASSTHROUGH_PREIMPL) and once after (PASSTHROUGH_POSTIMPL). */
struct v3_passthrough_pg_event {
    enum {PASSTHROUGH_PAGEFAULT,PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_ACTIVATE} event_type;
    enum {PASSTHROUGH_PREIMPL, PASSTHROUGH_POSTIMPL} event_order;
    addr_t     gpa;        // for pf 
    pf_error_t error_code; // for pf
    addr_t     gpa_start;  // for invalidation of range or page fault
    addr_t     gpa_end;    // for invalidation of range or page fault (range is [start,end] )
                           // PREIMPL: start/end is the requested range
                           // POSTIMPL: start/end is the actual range invalidated
};
86
87
88
/* Register a callback to be invoked on passthrough paging events for this VM.
 * priv_data is passed back verbatim on each invocation.  Unregistration
 * matches on the (callback, priv_data) pair -- confirm in the implementation. */
int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                                  int (*callback)(struct guest_info *core, 
                                                                  struct v3_passthrough_pg_event *,
                                                                  void      *priv_data),
                                                  void *priv_data);

int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                                    int (*callback)(struct guest_info *core, 
                                                                    struct v3_passthrough_pg_event *,
                                                                    void      *priv_data),
                                                    void *priv_data);
100
101
102
103 /*****************************
104    NESTED PAGING - CORE FUNC
105  *****************************/
106
107
/* Per-VM state for the nested (hardware-assisted: NPT/EPT) paging
 * implementation.  Currently there is only a single implementation that
 * internally includes SVM and VMX support.  The externally visible state
 * is just the event callbacks.
 * NOTE(review): unlike v3_passthrough_impl_state this has no `inited`
 * flag -- verify whether nested init can fail partially and needs one. */
struct v3_nested_impl_state {
    v3_rw_lock_t     event_callback_lock;  // protects event_callback_list
    struct list_head event_callback_list;  // list of registered nested paging event callbacks
};
115
/* VM-wide and per-core setup/teardown of nested paging.
 * hwinfo carries platform-specific data (opaque here; SVM vs VMX). */
int v3_init_nested_paging(struct v3_vm_info *vm);
int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo);
int v3_deinit_nested_paging(struct v3_vm_info *vm);
int v3_deinit_nested_paging_core(struct guest_info *core);


// Handle a nested (second-level) page fault.  pfinfo is opaque
// platform-specific fault information -- see the SVM/VMX exit handlers.
// actual_start/end may be null if you don't want this info
// If non-null, these return the actual affected GPA range
int v3_handle_nested_pagefault(struct guest_info * info, addr_t fault_addr, void *pfinfo,
                               addr_t *actual_start, addr_t *actual_end);

// Invalidate the nested mapping covering a single GPA.
// actual_start/end (may be NULL) return the GPA range actually invalidated.
int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr,
                              addr_t *actual_start, addr_t *actual_end);

// The range invalidated is minimally [start, end] (inclusive); the actual
// range may be larger and is reported via actual_start/end when non-NULL.
int v3_invalidate_nested_addr_range(struct guest_info * info, 
                                    addr_t inv_addr_start, addr_t inv_addr_end,
                                    addr_t *actual_start, addr_t *actual_end);
134
135
136
137 /*****************************
138    NESTED PAGING - EVENTS
139  *****************************/
140
/* Event record delivered to registered nested paging callbacks.
 * Each event fires twice: before the implementation acts (NESTED_PREIMPL)
 * and after (NESTED_POSTIMPL).  Note there is no ACTIVATE event here,
 * unlike the passthrough variant. */
struct v3_nested_pg_event {
    enum {NESTED_PAGEFAULT,NESTED_INVALIDATE_RANGE} event_type;
    enum {NESTED_PREIMPL, NESTED_POSTIMPL} event_order;
    addr_t     gpa;        // for pf 
    pf_error_t error_code; // for pf
    addr_t     gpa_start;  // for invalidation of range or page fault
    addr_t     gpa_end;    // for invalidation of range or page fault (range is [start,end] )
                           // PREIMPL: start/end is the requested range
                           // POSTIMPL: start/end is the actual range invalidated
};
151
152
153
/* Register a callback to be invoked on nested paging events for this VM.
 * priv_data is passed back verbatim on each invocation.  Unregistration
 * matches on the (callback, priv_data) pair -- confirm in the implementation. */
int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
                                            int (*callback)(struct guest_info *core, 
                                                            struct v3_nested_pg_event *,
                                                            void      *priv_data),
                                            void *priv_data);

int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
                                              int (*callback)(struct guest_info *core, 
                                                              struct v3_nested_pg_event *,
                                                              void      *priv_data),
                                              void *priv_data);
165
166
167 #endif // ! __V3VEE__
168
169 #endif