Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way.
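
For example, to track a release branch (the branch name Release-1.3 below is only illustrative; run git branch -r inside the clone to see the branches that actually exist), execute

  git checkout --track -b Release-1.3 origin/Release-1.3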


[palacios.git] / palacios / src / palacios / vmm_direct_paging.c (commit 7c3e72fb2531f80cb7c0bcdb40cceed6d42936b9)

/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
 *         Peter Dinda <pdinda@northwestern.edu> (refactor + events)
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_paging.h>
#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm_ctrl_regs.h>


#if !defined(V3_CONFIG_DEBUG_NESTED_PAGING) && !defined(V3_CONFIG_DEBUG_SHADOW_PAGING)
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif


/*

  "Direct Paging" combines these three functionalities:

   1. Passthrough paging for SVM and VMX

      Passthrough paging is used for shadow paging when
      the guest does not have paging turned on, for example
      when it is running in real mode or protected mode
      early in a typical boot process.  Passthrough page
      tables are shadow page tables that are built assuming
      the guest virtual to guest physical mapping is the identity.
      Thus, what they implement is the GPA->HPA mapping.

      Passthrough page tables are built using 32PAE paging.


   2. Nested paging on SVM

      The SVM nested page tables have the same format as
      regular page tables.  For this reason, we can reuse
      much of the passthrough implementation.  A nested page
      table mapping is a GPA->HPA mapping, creating a very
      similar model to passthrough paging, except that it is
      always active, whether the guest has paging on or not.


   3. Nested paging on VMX

      The VMX nested page tables have a different format
      than regular page tables.  For this reason, we have
      implemented them in the vmx_npt.h file.  The code
      here then is a wrapper, allowing us to make nested
      paging functionality appear uniform across VMX and SVM
      elsewhere in the codebase.

*/
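
/*
   A conceptual sketch of what the passthrough fault path below does,
   assuming v3_gpa_to_hpa() from vm_guest_mem.h is the GPA->HPA lookup
   (treat that exact helper and its signature as an assumption):

     addr_t gpa = PAGE_ADDR(fault_addr);   // GVA == GPA under passthrough
     addr_t hpa = 0;

     if (v3_gpa_to_hpa(info, gpa, &hpa) == 0) {
         // install a 4KB 32PAE leaf entry mapping gpa -> hpa;
         // handle_passthrough_pagefault_32pae() performs this work
     }
*/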



static inline int is_vmx_nested()
{
    extern v3_cpu_arch_t v3_mach_type;

    return (v3_mach_type==V3_VMX_EPT_CPU || v3_mach_type==V3_VMX_EPT_UG_CPU);
}

static inline int is_svm_nested()
{
    extern v3_cpu_arch_t v3_mach_type;

    return (v3_mach_type==V3_SVM_REV3_CPU);
}


struct passthrough_event_callback {
    int (*callback)(struct guest_info *core, struct v3_passthrough_pg_event *event, void *priv_data);
    void *priv_data;

    struct list_head node;
};


static int have_passthrough_callbacks(struct guest_info *core)
{
    // lock acquisition unnecessary
    // caller will acquire the lock before *iterating* through the list
    // so any race will be resolved then
    return !list_empty(&(core->vm_info->passthrough_impl.event_callback_list));
}

static void dispatch_passthrough_event(struct guest_info *core, struct v3_passthrough_pg_event *event)
{
    struct passthrough_event_callback *cb,*temp;

    v3_read_lock(&(core->vm_info->passthrough_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(core->vm_info->passthrough_impl.event_callback_list),
                             node) {
        cb->callback(core,event,cb->priv_data);
    }

    v3_read_unlock(&(core->vm_info->passthrough_impl.event_callback_lock));

}

struct nested_event_callback {
    int (*callback)(struct guest_info *core, struct v3_nested_pg_event *event, void *priv_data);
    void *priv_data;

    struct list_head node;
};


static int have_nested_callbacks(struct guest_info *core)
{
    // lock acquisition unnecessary
    // caller will acquire the lock before *iterating* through the list
    // so any race will be resolved then
    return !list_empty(&(core->vm_info->nested_impl.event_callback_list));
}

static void dispatch_nested_event(struct guest_info *core, struct v3_nested_pg_event *event)
{
    struct nested_event_callback *cb,*temp;

    v3_read_lock(&(core->vm_info->nested_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(core->vm_info->nested_impl.event_callback_list),
                             node) {
        cb->callback(core,event,cb->priv_data);
    }

    v3_read_unlock(&(core->vm_info->nested_impl.event_callback_lock));
}



static addr_t create_generic_pt_page(struct guest_info *core) {
    void * page = 0;
    void *temp;

    temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB,
                                 core->resource_control.pg_node_id,
                                 core->resource_control.pg_filter_func,
                                 core->resource_control.pg_filter_state);

    if (!temp) {
        PrintError(VM_NONE, VCORE_NONE,"Cannot allocate page\n");
        return 0;
    }

    page = V3_VAddr(temp);
    memset(page, 0, PAGE_SIZE);

    return (addr_t)page;
}

// Inline handler functions for each cpu mode
#include "vmm_direct_paging_32.h"
#include "vmm_direct_paging_32pae.h"
#include "vmm_direct_paging_64.h"



int v3_init_passthrough_pts(struct guest_info * info) {
    if (info->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
        // skip - ept_init will do this allocation
        return 0;
    }
    info->direct_map_pt = (addr_t)V3_PAddr((void *)create_generic_pt_page(info));
    return 0;
}


int v3_free_passthrough_pts(struct guest_info * core) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);

    if (core->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
        // there are no passthrough page tables, but
        // the EPT implementation is using direct_map_pt to store
        // the EPT root table pointer...  and the EPT tables
        // are not compatible with regular x86 tables, so we
        // must not attempt to free them here...
        return 0;
    }

    // we are either in shadow or in SVM nested
    // in either case, we can nuke the PTs

    // Delete the old direct map page tables
    switch(mode) {
        case REAL:
        case PROTECTED:
          // Intentional fallthrough here
          // There are *only* PAE tables
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            if (core->direct_map_pt) {
                delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
            }
            break;
        default:
            PrintError(core->vm_info, core, "Unknown CPU Mode\n");
            return -1;
            break;
    }

    return 0;
}


int v3_reset_passthrough_pts(struct guest_info * core) {

    v3_free_passthrough_pts(core);

    // create new direct map page table
    v3_init_passthrough_pts(core);

    return 0;
}



int v3_activate_passthrough_pt(struct guest_info * info) {
    // For now... But we need to change this....
    // As soon as shadow paging becomes active the passthrough tables are hosed
    // So this will cause chaos if it is called at that time

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_ACTIVATE,PASSTHROUGH_PREIMPL,0,{0,0,0,0,0,0},0,0};
        dispatch_passthrough_event(info,&event);
    }

    struct cr3_32_PAE * shadow_cr3 = (struct cr3_32_PAE *) &(info->ctrl_regs.cr3);
    struct cr4_32 * shadow_cr4 = (struct cr4_32 *) &(info->ctrl_regs.cr4);
    addr_t shadow_pt_addr = *(addr_t*)&(info->direct_map_pt);
    // Passthrough PTs will only be PAE page tables.
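    // In PAE mode, CR3 holds the physical address of the 32-byte-aligned,
    // 4-entry page directory pointer table in bits 31:5, so the table
    // address is stored shifted right by 5 below.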
    shadow_cr3->pdpt_base_addr = shadow_pt_addr >> 5;
    shadow_cr4->pae = 1;
    PrintDebug(info->vm_info, info, "Activated Passthrough Page tables\n");

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_ACTIVATE,PASSTHROUGH_POSTIMPL,0,{0,0,0,0,0,0},0,0};
        dispatch_passthrough_event(info,&event);
    }

    return 0;
}



int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                    addr_t *actual_start, addr_t *actual_end) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
    addr_t start, end;
    int rc;

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_PAGEFAULT,PASSTHROUGH_PREIMPL,fault_addr,error_code,fault_addr,fault_addr};
        dispatch_passthrough_event(info,&event);
    }

    if (!actual_start) { actual_start=&start; }
    if (!actual_end) { actual_end=&end; }


    rc=-1;

    switch(mode) {
        case REAL:
        case PROTECTED:
          // Note intentional fallthrough here
          // There are only PAE page tables now
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            rc=handle_passthrough_pagefault_32pae(info, fault_addr, error_code, actual_start, actual_end);
            break;
        default:
            PrintError(info->vm_info, info, "Unknown CPU Mode\n");
            break;
    }

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_PAGEFAULT,PASSTHROUGH_POSTIMPL,fault_addr,error_code,*actual_start,*actual_end};
        dispatch_passthrough_event(info,&event);
    }

    return rc;
}



int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr,
                                   addr_t *actual_start, addr_t *actual_end) {

    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
    addr_t start, end;
    int rc;

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr),PAGE_ADDR(inv_addr)+PAGE_SIZE-1};
        dispatch_passthrough_event(info,&event);
    }

    if (!actual_start) { actual_start=&start;}
    if (!actual_end) { actual_end=&end;}



    rc=-1;

    switch(mode) {
        case REAL:
        case PROTECTED:
          // Intentional fallthrough - there
          // are only PAE page tables now
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            rc=invalidate_addr_32pae(info, inv_addr, actual_start, actual_end);
            break;
        default:
            PrintError(info->vm_info, info, "Unknown CPU Mode\n");
            break;
    }

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_POSTIMPL,0,{0,0,0,0,0,0},*actual_start,*actual_end};
        dispatch_passthrough_event(info,&event);
    }


    return rc;
}


int v3_invalidate_passthrough_addr_range(struct guest_info * info,
                                         addr_t inv_addr_start, addr_t inv_addr_end,
                                         addr_t *actual_start, addr_t *actual_end) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
    addr_t start, end;
    int rc;

    if (!actual_start) { actual_start=&start;}
    if (!actual_end) { actual_end=&end;}

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr_start),PAGE_ADDR(inv_addr_end-1)+PAGE_SIZE-1};
        dispatch_passthrough_event(info,&event);
    }

    rc=-1;

    switch(mode) {
        case REAL:
        case PROTECTED:
          // Intentional fallthrough
          // There are only PAE PTs now
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            rc=invalidate_addr_32pae_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
            break;
        default:
            PrintError(info->vm_info, info, "Unknown CPU Mode\n");
            break;
    }

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_POSTIMPL,0,{0,0,0,0,0,0},*actual_start,*actual_end};
        dispatch_passthrough_event(info,&event);
    }

    return rc;
}


int v3_init_passthrough_paging(struct v3_vm_info *vm)
{
  INIT_LIST_HEAD(&(vm->passthrough_impl.event_callback_list));
  v3_rw_lock_init(&(vm->passthrough_impl.event_callback_lock));
  return 0;
}

int v3_deinit_passthrough_paging(struct v3_vm_info *vm)
{
  struct passthrough_event_callback *cb,*temp;
  addr_t flags;

  flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));

  list_for_each_entry_safe(cb,
                           temp,
                           &(vm->passthrough_impl.event_callback_list),
                           node) {
    list_del(&(cb->node));
    V3_Free(cb);
  }

  v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);

  v3_rw_lock_deinit(&(vm->passthrough_impl.event_callback_lock));

  return 0;
}

int v3_init_passthrough_paging_core(struct guest_info *core)
{
  // currently nothing to init
  return 0;
}

int v3_deinit_passthrough_paging_core(struct guest_info *core)
{
  // currently nothing to deinit
  return 0;
}


int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                                  int (*callback)(struct guest_info *core,
                                                                  struct v3_passthrough_pg_event *,
                                                                  void      *priv_data),
                                                  void *priv_data)
{
    struct passthrough_event_callback *ec = V3_Malloc(sizeof(struct passthrough_event_callback));
    addr_t flags;

    if (!ec) {
        PrintError(vm, VCORE_NONE, "Unable to allocate for a passthrough paging event callback\n");
        return -1;
    }

    ec->callback = callback;
    ec->priv_data = priv_data;

    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
    list_add(&(ec->node),&(vm->passthrough_impl.event_callback_list));
    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);

    return 0;

}
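
/*
   A sketch of how a client might hook these events, using only the
   registration interface defined above.  The callback and its private
   data are hypothetical placeholders:

     static int my_passthrough_event(struct guest_info *core,
                                     struct v3_passthrough_pg_event *event,
                                     void *priv_data) {
         // examine the event (type, fault address, affected range) here
         return 0;
     }

     // during VM setup:
     //     v3_register_passthrough_paging_event_callback(vm, my_passthrough_event, NULL);
     // during VM teardown:
     //     v3_unregister_passthrough_paging_event_callback(vm, my_passthrough_event, NULL);
*/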



int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                                    int (*callback)(struct guest_info *core,
                                                                    struct v3_passthrough_pg_event *,
                                                                    void      *priv_data),
                                                    void *priv_data)
{
    struct passthrough_event_callback *cb,*temp;
    addr_t flags;

    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(vm->passthrough_impl.event_callback_list),
                             node) {
        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
            list_del(&(cb->node));
            V3_Free(cb);
            v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
            return 0;
        }
    }

    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);

    PrintError(vm, VCORE_NONE, "No callback found!\n");

    return -1;
}


// inline nested paging support for Intel and AMD
#include "svm_npt.h"
#include "vmx_npt.h"


inline void convert_to_pf_error(void *pfinfo, pf_error_t *out)
{
  if (is_vmx_nested()) {
#ifdef V3_CONFIG_VMX
    ept_exit_qual_to_pf_error((struct ept_exit_qual *)pfinfo, out);
#endif
  } else {
    *out = *(pf_error_t *)pfinfo;
  }
}

int v3_handle_nested_pagefault(struct guest_info * info, addr_t fault_addr, void *pfinfo, addr_t *actual_start, addr_t *actual_end)
{
  int rc;
  pf_error_t err;
  addr_t start, end;

  if (!actual_start) { actual_start=&start; }
  if (!actual_end) { actual_end=&end; }

  convert_to_pf_error(pfinfo,&err);

  if (have_nested_callbacks(info)) {
      struct v3_nested_pg_event event={NESTED_PAGEFAULT,NESTED_PREIMPL,fault_addr,err,fault_addr,fault_addr};
      dispatch_nested_event(info,&event);
  }


  if (is_vmx_nested()) {
    rc = handle_vmx_nested_pagefault(info,fault_addr,pfinfo,actual_start,actual_end);
  } else {
    rc = handle_svm_nested_pagefault(info,fault_addr,pfinfo,actual_start,actual_end);
  }

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_PAGEFAULT,NESTED_POSTIMPL,fault_addr,err,*actual_start,*actual_end};
    dispatch_nested_event(info,&event);
  }

  return rc;
}



int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr,
                              addr_t *actual_start, addr_t *actual_end)
{
  int rc;

  addr_t start, end;

  if (!actual_start) { actual_start=&start; }
  if (!actual_end) { actual_end=&end; }


  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr),PAGE_ADDR(inv_addr)+PAGE_SIZE-1};
    dispatch_nested_event(info,&event);
  }

  if (is_vmx_nested()) {
    rc = handle_vmx_invalidate_nested_addr(info, inv_addr, actual_start, actual_end);
  } else {
    rc = handle_svm_invalidate_nested_addr(info, inv_addr, actual_start, actual_end);
  }

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_POSTIMPL,0,{0,0,0,0,0,0},*actual_start, *actual_end};
    dispatch_nested_event(info,&event);
  }
  return rc;
}


int v3_invalidate_nested_addr_range(struct guest_info * info,
                                    addr_t inv_addr_start, addr_t inv_addr_end,
                                    addr_t *actual_start, addr_t *actual_end)
{
  int rc;

  addr_t start, end;

  if (!actual_start) { actual_start=&start; }
  if (!actual_end) { actual_end=&end; }

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr_start),PAGE_ADDR(inv_addr_end-1)+PAGE_SIZE-1};
    dispatch_nested_event(info,&event);
  }

  if (is_vmx_nested()) {
    rc = handle_vmx_invalidate_nested_addr_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
  } else {
    rc = handle_svm_invalidate_nested_addr_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
  }


  if (have_nested_callbacks(info)) {
    // post-implementation event (the pre-implementation event was dispatched above)
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_POSTIMPL,0,{0,0,0,0,0,0},*actual_start, *actual_end};
    dispatch_nested_event(info,&event);
  }

  return rc;

}


int v3_init_nested_paging(struct v3_vm_info *vm)
{
  INIT_LIST_HEAD(&(vm->nested_impl.event_callback_list));
  v3_rw_lock_init(&(vm->nested_impl.event_callback_lock));
  return 0;
}

int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo)
{
  if (is_vmx_nested()) {
    return init_ept(core, (struct vmx_hw_info *) hwinfo);
  } else {
    // no initialization for SVM
    // the direct map page tables are used since the
    // nested pt format is identical to the main pt format
    return 0;
  }
}

int v3_deinit_nested_paging(struct v3_vm_info *vm)
{
  struct nested_event_callback *cb,*temp;
  addr_t flags;

  flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));

  list_for_each_entry_safe(cb,
                           temp,
                           &(vm->nested_impl.event_callback_list),
                           node) {
    list_del(&(cb->node));
    V3_Free(cb);
  }

  v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);

  v3_rw_lock_deinit(&(vm->nested_impl.event_callback_lock));

  return 0;
}

int v3_deinit_nested_paging_core(struct guest_info *core)
{
  if (core->shdw_pg_mode == NESTED_PAGING) {
    if (is_vmx_nested()) {
      return deinit_ept(core);
    } else {
      // SVM nested deinit is handled by the passthrough paging teardown
      return 0;
    }
  } else {
    // not relevant
    return 0;
  }
}


int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
                                            int (*callback)(struct guest_info *core,
                                                            struct v3_nested_pg_event *,
                                                            void      *priv_data),
                                            void *priv_data)
{
    struct nested_event_callback *ec = V3_Malloc(sizeof(struct nested_event_callback));
    addr_t flags;

    if (!ec) {
        PrintError(vm, VCORE_NONE, "Unable to allocate for a nested paging event callback\n");
        return -1;
    }

    ec->callback = callback;
    ec->priv_data = priv_data;

    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
    list_add(&(ec->node),&(vm->nested_impl.event_callback_list));
    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);

    return 0;

}



int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
                                              int (*callback)(struct guest_info *core,
                                                              struct v3_nested_pg_event *,
                                                              void      *priv_data),
                                              void *priv_data)
{
    struct nested_event_callback *cb,*temp;
    addr_t flags;

    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(vm->nested_impl.event_callback_list),
                             node) {
        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
            list_del(&(cb->node));
            V3_Free(cb);
            v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
            return 0;
        }
    }

    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);

    PrintError(vm, VCORE_NONE, "No callback found!\n");

    return -1;
}