Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way.
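For example, to see which branches are available and then track one of them, you can run the following (the release branch name shown here is only an illustration; substitute a branch that git actually lists):

  git branch -r
  git checkout --track -b Release-1.3 origin/Release-1.3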


palacios/src/palacios/vmm_direct_paging.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
 *         Peter Dinda <pdinda@northwestern.edu> (refactor + events)
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_paging.h>
#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm_ctrl_regs.h>


#if !defined(V3_CONFIG_DEBUG_NESTED_PAGING) && !defined(V3_CONFIG_DEBUG_SHADOW_PAGING)
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif


/*

  "Direct Paging" combines these three functionalities:

   1. Passthrough paging for SVM and VMX

      Passthrough paging is used for shadow paging when
      the guest does not have paging turned on, for example
      when it is running in real mode or protected mode
      early in a typical boot process.  Passthrough page
      tables are shadow page tables that are built assuming
      the guest virtual to guest physical mapping is the identity.
      Thus, what they implement is the GPA->HPA mapping.

      Passthrough page tables are built using 32PAE paging.


   2. Nested paging on SVM

      The SVM nested page tables have the same format as
      regular page tables.  For this reason, we can reuse
      much of the passthrough implementation.  A nested page
      table mapping is a GPA->HPA mapping, creating a model
      very similar to passthrough paging, except that it is
      always active, whether the guest has paging on or not.


   3. Nested paging on VMX

      The VMX nested page tables have a different format
      than regular page tables.  For this reason, we have
      implemented them in the vmx_npt.h file.  The code
      here is then a wrapper, allowing us to make nested
      paging functionality appear uniform across VMX and SVM
      elsewhere in the codebase (see the sketch just below
      this comment).

*/
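/*
   A rough illustration of point 3 above (a sketch only, not code from
   this file): an architecture-specific exit handler elsewhere in
   Palacios can treat nested paging uniformly by calling the wrappers
   defined below, for example:

       addr_t start, end;
       // fault_addr and fault_info are hypothetical names for the
       // values extracted from the SVM or VMX exit information
       if (v3_handle_nested_pagefault(core, fault_addr, fault_info,
                                      &start, &end) < 0) {
           // handle the failure
       }

   The wrapper selects the SVM or VMX implementation internally, so the
   caller does not need to know which one is active.
*/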


static inline int is_vmx_nested()
{
    extern v3_cpu_arch_t v3_mach_type;

    return (v3_mach_type==V3_VMX_EPT_CPU || v3_mach_type==V3_VMX_EPT_UG_CPU);
}

static inline int is_svm_nested()
{
    extern v3_cpu_arch_t v3_mach_type;

    return (v3_mach_type==V3_SVM_REV3_CPU);
}


struct passthrough_event_callback {
    int (*callback)(struct guest_info *core, struct v3_passthrough_pg_event *event, void *priv_data);
    void *priv_data;

    struct list_head node;
};


static int have_passthrough_callbacks(struct guest_info *core)
{
    // Lock acquisition is unnecessary here: the caller will acquire the
    // lock before *iterating* through the list, so any race will be
    // resolved then.
    return !list_empty(&(core->vm_info->passthrough_impl.event_callback_list));
}

static void dispatch_passthrough_event(struct guest_info *core, struct v3_passthrough_pg_event *event)
{
    struct passthrough_event_callback *cb,*temp;

    v3_read_lock(&(core->vm_info->passthrough_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(core->vm_info->passthrough_impl.event_callback_list),
                             node) {
        cb->callback(core,event,cb->priv_data);
    }

    v3_read_unlock(&(core->vm_info->passthrough_impl.event_callback_lock));

}

struct nested_event_callback {
    int (*callback)(struct guest_info *core, struct v3_nested_pg_event *event, void *priv_data);
    void *priv_data;

    struct list_head node;
};


static int have_nested_callbacks(struct guest_info *core)
{
    // Lock acquisition is unnecessary here: the caller will acquire the
    // lock before *iterating* through the list, so any race will be
    // resolved then.
    return !list_empty(&(core->vm_info->nested_impl.event_callback_list));
}

static void dispatch_nested_event(struct guest_info *core, struct v3_nested_pg_event *event)
{
    struct nested_event_callback *cb,*temp;

    v3_read_lock(&(core->vm_info->nested_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(core->vm_info->nested_impl.event_callback_list),
                             node) {
        cb->callback(core,event,cb->priv_data);
    }

    v3_read_unlock(&(core->vm_info->nested_impl.event_callback_lock));
}




static addr_t create_generic_pt_page(struct guest_info *core) {
    void * page = 0;
    void *temp;

    temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1, 0, 0); // no constraints

    if (!temp) {
        PrintError(VM_NONE, VCORE_NONE,"Cannot allocate page\n");
        return 0;
    }

    page = V3_VAddr(temp);
    memset(page, 0, PAGE_SIZE);

    return (addr_t)page;
}

// Inline handler functions for each cpu mode
#include "vmm_direct_paging_32.h"
#include "vmm_direct_paging_32pae.h"
#include "vmm_direct_paging_64.h"


int v3_init_passthrough_pts(struct guest_info * info) {
    if (info->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
        // skip - ept_init will do this allocation
        return 0;
    }
    info->direct_map_pt = (addr_t)V3_PAddr((void *)create_generic_pt_page(info));
    return 0;
}


int v3_free_passthrough_pts(struct guest_info * core) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);

    if (core->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
        // there are no passthrough page tables, but
        // the EPT implementation is using direct_map_pt to store
        // the EPT root table pointer...  and the EPT tables
        // are not compatible with regular x86 tables, so we
        // must not attempt to free them here...
        return 0;
    }

    // we are either in shadow or in SVM nested
    // in either case, we can nuke the PTs

    // Delete the old direct map page tables
    switch(mode) {
        case REAL:
        case PROTECTED:
          // Intentional fallthrough here
          // There are *only* PAE tables
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            if (core->direct_map_pt) {
                delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
            }
            break;
        default:
            PrintError(core->vm_info, core, "Unknown CPU Mode\n");
            return -1;
            break;
    }

    return 0;
}


int v3_reset_passthrough_pts(struct guest_info * core) {

    v3_free_passthrough_pts(core);

    // create new direct map page table
    v3_init_passthrough_pts(core);

    return 0;
}



int v3_activate_passthrough_pt(struct guest_info * info) {
    // For now...  but we need to change this:
    // as soon as shadow paging becomes active, the passthrough tables are hosed,
    // so this will cause chaos if it is called at that time.

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_ACTIVATE,PASSTHROUGH_PREIMPL,0,{0,0,0,0,0,0},0,0};
        dispatch_passthrough_event(info,&event);
    }

    struct cr3_32_PAE * shadow_cr3 = (struct cr3_32_PAE *) &(info->ctrl_regs.cr3);
    struct cr4_32 * shadow_cr4 = (struct cr4_32 *) &(info->ctrl_regs.cr4);
    addr_t shadow_pt_addr = *(addr_t*)&(info->direct_map_pt);
    // Passthrough PTs will only be PAE page tables.
    shadow_cr3->pdpt_base_addr = shadow_pt_addr >> 5;
    shadow_cr4->pae = 1;
    PrintDebug(info->vm_info, info, "Activated Passthrough Page tables\n");

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_ACTIVATE,PASSTHROUGH_POSTIMPL,0,{0,0,0,0,0,0},0,0};
        dispatch_passthrough_event(info,&event);
    }

    return 0;
}



int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                    addr_t *actual_start, addr_t *actual_end) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
    addr_t start, end;
    int rc;

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_PAGEFAULT,PASSTHROUGH_PREIMPL,fault_addr,error_code,fault_addr,fault_addr};
        dispatch_passthrough_event(info,&event);
    }

    if (!actual_start) { actual_start=&start; }
    if (!actual_end) { actual_end=&end; }


    rc=-1;

    switch(mode) {
        case REAL:
        case PROTECTED:
          // Note intentional fallthrough here
          // There are only PAE page tables now
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            rc=handle_passthrough_pagefault_32pae(info, fault_addr, error_code, actual_start, actual_end);
            break;
        default:
            PrintError(info->vm_info, info, "Unknown CPU Mode\n");
            break;
    }

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_PAGEFAULT,PASSTHROUGH_POSTIMPL,fault_addr,error_code,*actual_start,*actual_end};
        dispatch_passthrough_event(info,&event);
    }

    return rc;
}



int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr,
                                   addr_t *actual_start, addr_t *actual_end) {

    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
    addr_t start, end;
    int rc;

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr),PAGE_ADDR(inv_addr)+PAGE_SIZE-1};
        dispatch_passthrough_event(info,&event);
    }

    if (!actual_start) { actual_start=&start;}
    if (!actual_end) { actual_end=&end;}


    rc=-1;

    switch(mode) {
        case REAL:
        case PROTECTED:
          // Intentional fallthrough - there
          // are only PAE page tables now
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            rc=invalidate_addr_32pae(info, inv_addr, actual_start, actual_end);
            break;
        default:
            PrintError(info->vm_info, info, "Unknown CPU Mode\n");
            break;
    }

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_POSTIMPL,0,{0,0,0,0,0,0},*actual_start,*actual_end};
        dispatch_passthrough_event(info,&event);
    }


    return rc;
}


int v3_invalidate_passthrough_addr_range(struct guest_info * info,
                                         addr_t inv_addr_start, addr_t inv_addr_end,
                                         addr_t *actual_start, addr_t *actual_end) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
    addr_t start, end;
    int rc;

    if (!actual_start) { actual_start=&start;}
    if (!actual_end) { actual_end=&end;}

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr_start),PAGE_ADDR(inv_addr_end-1)+PAGE_SIZE-1};
        dispatch_passthrough_event(info,&event);
    }

    rc=-1;

    switch(mode) {
        case REAL:
        case PROTECTED:
          // Intentional fallthrough
          // There are only PAE PTs now
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            rc=invalidate_addr_32pae_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
            break;
        default:
            PrintError(info->vm_info, info, "Unknown CPU Mode\n");
            break;
    }

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_POSTIMPL,0,{0,0,0,0,0,0},*actual_start,*actual_end};
        dispatch_passthrough_event(info,&event);
    }

    return rc;
}


int v3_init_passthrough_paging(struct v3_vm_info *vm)
{
  INIT_LIST_HEAD(&(vm->passthrough_impl.event_callback_list));
  v3_rw_lock_init(&(vm->passthrough_impl.event_callback_lock));
  return 0;
}

int v3_deinit_passthrough_paging(struct v3_vm_info *vm)
{
  struct passthrough_event_callback *cb,*temp;
  addr_t flags;

  flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));

  list_for_each_entry_safe(cb,
                           temp,
                           &(vm->passthrough_impl.event_callback_list),
                           node) {
    list_del(&(cb->node));
    V3_Free(cb);
  }

  v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);

  v3_rw_lock_deinit(&(vm->passthrough_impl.event_callback_lock));

  return 0;
}

int v3_init_passthrough_paging_core(struct guest_info *core)
{
  // currently nothing to init
  return 0;
}

int v3_deinit_passthrough_paging_core(struct guest_info *core)
{
  // currently nothing to deinit
  return 0;
}


int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                                  int (*callback)(struct guest_info *core,
                                                                  struct v3_passthrough_pg_event *,
                                                                  void      *priv_data),
                                                  void *priv_data)
{
    struct passthrough_event_callback *ec = V3_Malloc(sizeof(struct passthrough_event_callback));
    addr_t flags;

    if (!ec) {
        PrintError(vm, VCORE_NONE, "Unable to allocate for a passthrough paging event callback\n");
        return -1;
    }

    ec->callback = callback;
    ec->priv_data = priv_data;

    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
    list_add(&(ec->node),&(vm->passthrough_impl.event_callback_list));
    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);

    return 0;

}



int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                                    int (*callback)(struct guest_info *core,
                                                                    struct v3_passthrough_pg_event *,
                                                                    void      *priv_data),
                                                    void *priv_data)
{
    struct passthrough_event_callback *cb,*temp;
    addr_t flags;

    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(vm->passthrough_impl.event_callback_list),
                             node) {
        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
            list_del(&(cb->node));
            V3_Free(cb);
            v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
            return 0;
        }
    }

    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);

    PrintError(vm, VCORE_NONE, "No callback found!\n");

    return -1;
}
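
/*
   Example usage (a sketch only, not part of the original file): a
   hypothetical client that wants to observe passthrough paging events
   could register and later unregister a callback roughly like this:

       static int my_pt_event(struct guest_info *core,
                              struct v3_passthrough_pg_event *event,
                              void *priv_data) {
           // examine *event here (event type, fault address, range, ...)
           return 0;
       }

       // during setup, with vm pointing to the relevant v3_vm_info
       v3_register_passthrough_paging_event_callback(vm, my_pt_event, NULL);

       // during teardown, passing the same function pointer and private data
       v3_unregister_passthrough_paging_event_callback(vm, my_pt_event, NULL);
*/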


// inline nested paging support for Intel and AMD
#include "svm_npt.h"
#include "vmx_npt.h"


inline void convert_to_pf_error(void *pfinfo, pf_error_t *out)
{
  if (is_vmx_nested()) {
#ifdef V3_CONFIG_VMX
    ept_exit_qual_to_pf_error((struct ept_exit_qual *)pfinfo, out);
#endif
  } else {
    *out = *(pf_error_t *)pfinfo;
  }
}

int v3_handle_nested_pagefault(struct guest_info * info, addr_t fault_addr, void *pfinfo, addr_t *actual_start, addr_t *actual_end)
{
  int rc;
  pf_error_t err;
  addr_t start, end;

  if (!actual_start) { actual_start=&start; }
  if (!actual_end) { actual_end=&end; }

  convert_to_pf_error(pfinfo,&err);

  if (have_nested_callbacks(info)) {
      struct v3_nested_pg_event event={NESTED_PAGEFAULT,NESTED_PREIMPL,fault_addr,err,fault_addr,fault_addr};
      dispatch_nested_event(info,&event);
  }


  if (is_vmx_nested()) {
    rc = handle_vmx_nested_pagefault(info,fault_addr,pfinfo,actual_start,actual_end);
  } else {
    rc = handle_svm_nested_pagefault(info,fault_addr,pfinfo,actual_start,actual_end);
  }

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_PAGEFAULT,NESTED_POSTIMPL,fault_addr,err,*actual_start,*actual_end};
    dispatch_nested_event(info,&event);
  }

  return rc;
}



int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr,
                              addr_t *actual_start, addr_t *actual_end)
{
  int rc;

  addr_t start, end;

  if (!actual_start) { actual_start=&start; }
  if (!actual_end) { actual_end=&end; }


  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr),PAGE_ADDR(inv_addr)+PAGE_SIZE-1};
    dispatch_nested_event(info,&event);
  }

  if (is_vmx_nested()) {
    rc = handle_vmx_invalidate_nested_addr(info, inv_addr, actual_start, actual_end);
  } else {
    rc = handle_svm_invalidate_nested_addr(info, inv_addr, actual_start, actual_end);
  }

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_POSTIMPL,0,{0,0,0,0,0,0},*actual_start, *actual_end};
    dispatch_nested_event(info,&event);
  }
  return rc;
}


int v3_invalidate_nested_addr_range(struct guest_info * info,
                                    addr_t inv_addr_start, addr_t inv_addr_end,
                                    addr_t *actual_start, addr_t *actual_end)
{
  int rc;

  addr_t start, end;

  if (!actual_start) { actual_start=&start; }
  if (!actual_end) { actual_end=&end; }

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr_start),PAGE_ADDR(inv_addr_end-1)+PAGE_SIZE-1};
    dispatch_nested_event(info,&event);
  }

  if (is_vmx_nested()) {
    rc = handle_vmx_invalidate_nested_addr_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
  } else {
    rc = handle_svm_invalidate_nested_addr_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
  }


  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_POSTIMPL,0,{0,0,0,0,0,0},*actual_start, *actual_end};
    dispatch_nested_event(info,&event);
  }

  return rc;

}


int v3_init_nested_paging(struct v3_vm_info *vm)
{
  INIT_LIST_HEAD(&(vm->nested_impl.event_callback_list));
  v3_rw_lock_init(&(vm->nested_impl.event_callback_lock));
  return 0;
}

int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo)
{
  if (is_vmx_nested()) {
    return init_ept(core, (struct vmx_hw_info *) hwinfo);
  } else {
    // no initialization for SVM
    // the direct map page tables are used since the
    // nested pt format is identical to the main pt format
    return 0;
  }
}

int v3_deinit_nested_paging(struct v3_vm_info *vm)
{
  struct nested_event_callback *cb,*temp;
  addr_t flags;

  flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));

  list_for_each_entry_safe(cb,
                           temp,
                           &(vm->nested_impl.event_callback_list),
                           node) {
    list_del(&(cb->node));
    V3_Free(cb);
  }

  v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);

  v3_rw_lock_deinit(&(vm->nested_impl.event_callback_lock));

  return 0;
}

int v3_deinit_nested_paging_core(struct guest_info *core)
{
  if (core->shdw_pg_mode == NESTED_PAGING) {
    if (is_vmx_nested()) {
      return deinit_ept(core);
    } else {
      // SVM nested deinit is handled by the passthrough paging teardown
      return 0;
    }
  } else {
    // not relevant
    return 0;
  }
}


int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
                                             int (*callback)(struct guest_info *core,
                                                             struct v3_nested_pg_event *,
                                                             void      *priv_data),
                                             void *priv_data)
{
    struct nested_event_callback *ec = V3_Malloc(sizeof(struct nested_event_callback));
    addr_t flags;

    if (!ec) {
        PrintError(vm, VCORE_NONE, "Unable to allocate for a nested paging event callback\n");
        return -1;
    }

    ec->callback = callback;
    ec->priv_data = priv_data;

    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
    list_add(&(ec->node),&(vm->nested_impl.event_callback_list));
    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);

    return 0;

}



int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
                                               int (*callback)(struct guest_info *core,
                                                               struct v3_nested_pg_event *,
                                                               void      *priv_data),
                                               void *priv_data)
{
    struct nested_event_callback *cb,*temp;
    addr_t flags;

    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(vm->nested_impl.event_callback_list),
                             node) {
        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
            list_del(&(cb->node));
            V3_Free(cb);
            v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
            return 0;
        }
    }

    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);

    PrintError(vm, VCORE_NONE, "No callback found!\n");

    return -1;
}