Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way.
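For example, to track a release branch instead (the branch name below is only illustrative; run git branch -r to see which branches actually exist), execute

  git branch -r
  git checkout --track -b Release-1.3 origin/Release-1.3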


Commit: Assorted bug fixes and defensive programming
File:   [palacios.git] palacios/src/palacios/vmm_direct_paging.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
 *         Peter Dinda <pdinda@northwestern.edu> (refactor + events)
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_paging.h>
#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm_ctrl_regs.h>


#if !defined(V3_CONFIG_DEBUG_NESTED_PAGING) && !defined(V3_CONFIG_DEBUG_SHADOW_PAGING)
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif



/*

  "Direct Paging" combines these three functionalities:

   1. Passthrough paging for SVM and VMX

      Passthrough paging is used for shadow paging when
      the guest does not have paging turned on, for example
      when it is running in real mode or protected mode
      early in a typical boot process.  Passthrough page
      tables are shadow page tables that are built assuming
      the guest virtual to guest physical mapping is the identity.
      Thus, what they implement is the GPA->HPA mapping.

      Passthrough page tables are built using 32PAE paging.


   2. Nested paging on SVM

      The SVM nested page tables have the same format as
      regular page tables.  For this reason, we can reuse
      much of the passthrough implementation.  A nested page
      table mapping is a GPA->HPA mapping, creating a very
      similar model as with passthrough paging, just that it's
      always active, whether the guest has paging on or not.


   3. Nested paging on VMX

      The VMX nested page tables have a different format
      than regular page tables.  For this reason, we have
      implemented them in the vmx_npt.h file.  The code
      here then is a wrapper, allowing us to make nested
      paging functionality appear uniform across VMX and SVM
      elsewhere in the codebase.

*/
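
/*
  A rough sketch (not part of the original file) of how the passthrough
  path is typically driven, using only the functions defined below; the
  exact call sites in the host/VMM glue may differ:

     v3_init_passthrough_paging(vm);      // per-VM: callback list + lock
     v3_init_passthrough_pts(core);       // per-core: allocate the root 32PAE table
     v3_activate_passthrough_pt(core);    // point shadow CR3/CR4 at it

     // later, on a guest page fault while passthrough is active:
     v3_handle_passthrough_pagefault(core, fault_addr, error_code, NULL, NULL);
 */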



static inline int is_vmx_nested()
{
    extern v3_cpu_arch_t v3_mach_type;

    return (v3_mach_type==V3_VMX_EPT_CPU || v3_mach_type==V3_VMX_EPT_UG_CPU);
}

static inline int is_svm_nested()
{
    extern v3_cpu_arch_t v3_mach_type;

    return (v3_mach_type==V3_SVM_REV3_CPU);
}


struct passthrough_event_callback {
    int (*callback)(struct guest_info *core, struct v3_passthrough_pg_event *event, void *priv_data);
    void *priv_data;

    struct list_head node;
};


static int have_passthrough_callbacks(struct guest_info *core)
{
    // lock acquisition unnecessary
    // caller will acquire the lock before *iterating* through the list
    // so any race will be resolved then
    return !list_empty(&(core->vm_info->passthrough_impl.event_callback_list));
}

static void dispatch_passthrough_event(struct guest_info *core, struct v3_passthrough_pg_event *event)
{
    struct passthrough_event_callback *cb,*temp;

    v3_read_lock(&(core->vm_info->passthrough_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(core->vm_info->passthrough_impl.event_callback_list),
                             node) {
        cb->callback(core,event,cb->priv_data);
    }

    v3_read_unlock(&(core->vm_info->passthrough_impl.event_callback_lock));

}

struct nested_event_callback {
    int (*callback)(struct guest_info *core, struct v3_nested_pg_event *event, void *priv_data);
    void *priv_data;

    struct list_head node;
};


static int have_nested_callbacks(struct guest_info *core)
{
    // lock acquisition unnecessary
    // caller will acquire the lock before *iterating* through the list
    // so any race will be resolved then
    return !list_empty(&(core->vm_info->nested_impl.event_callback_list));
}

static void dispatch_nested_event(struct guest_info *core, struct v3_nested_pg_event *event)
{
    struct nested_event_callback *cb,*temp;

    v3_read_lock(&(core->vm_info->nested_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(core->vm_info->nested_impl.event_callback_list),
                             node) {
        cb->callback(core,event,cb->priv_data);
    }

    v3_read_unlock(&(core->vm_info->nested_impl.event_callback_lock));
}




static addr_t create_generic_pt_page(struct guest_info *core) {
    void * page = 0;
    void *temp;

    temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1, 0); // no constraints

    if (!temp) {
        PrintError(VM_NONE, VCORE_NONE,"Cannot allocate page\n");
        return 0;
    }

    page = V3_VAddr(temp);
    memset(page, 0, PAGE_SIZE);

    return (addr_t)page;
}

// Inline handler functions for each cpu mode
#include "vmm_direct_paging_32.h"
#include "vmm_direct_paging_32pae.h"
#include "vmm_direct_paging_64.h"



int v3_init_passthrough_pts(struct guest_info * info) {
    info->direct_map_pt = (addr_t)V3_PAddr((void *)create_generic_pt_page(info));
    return 0;
}


int v3_free_passthrough_pts(struct guest_info * core) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);

    // Delete the old direct map page tables
    switch(mode) {
        case REAL:
        case PROTECTED:
          // Intentional fallthrough here
          // There are *only* PAE tables
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            if (core->direct_map_pt) {
                delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
            }
            break;
        default:
            PrintError(core->vm_info, core, "Unknown CPU Mode\n");
            return -1;
            break;
    }

    return 0;
}


int v3_reset_passthrough_pts(struct guest_info * core) {

    v3_free_passthrough_pts(core);

    // create new direct map page table
    v3_init_passthrough_pts(core);

    return 0;
}



int v3_activate_passthrough_pt(struct guest_info * info) {
    // For now... But we need to change this....
    // As soon as shadow paging becomes active the passthrough tables are hosed
    // So this will cause chaos if it is called at that time

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_ACTIVATE,PASSTHROUGH_PREIMPL,0,{0,0,0,0,0,0},0,0};
        dispatch_passthrough_event(info,&event);
    }

    struct cr3_32_PAE * shadow_cr3 = (struct cr3_32_PAE *) &(info->ctrl_regs.cr3);
    struct cr4_32 * shadow_cr4 = (struct cr4_32 *) &(info->ctrl_regs.cr4);
    addr_t shadow_pt_addr = *(addr_t*)&(info->direct_map_pt);
    // Passthrough PTs will only be PAE page tables.
    shadow_cr3->pdpt_base_addr = shadow_pt_addr >> 5;
    shadow_cr4->pae = 1;
    PrintDebug(info->vm_info, info, "Activated Passthrough Page tables\n");

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_ACTIVATE,PASSTHROUGH_POSTIMPL,0,{0,0,0,0,0,0},0,0};
        dispatch_passthrough_event(info,&event);
    }

    return 0;
}



int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                    addr_t *actual_start, addr_t *actual_end) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
    addr_t start, end;
    int rc;

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_PAGEFAULT,PASSTHROUGH_PREIMPL,fault_addr,error_code,fault_addr,fault_addr};
        dispatch_passthrough_event(info,&event);
    }

    if (!actual_start) { actual_start=&start; }
    if (!actual_end) { actual_end=&end; }


    rc=-1;

    switch(mode) {
        case REAL:
        case PROTECTED:
          // Note intentional fallthrough here
          // There are only PAE page tables now
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            rc=handle_passthrough_pagefault_32pae(info, fault_addr, error_code, actual_start, actual_end);
            break;
        default:
            PrintError(info->vm_info, info, "Unknown CPU Mode\n");
            break;
    }

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_PAGEFAULT,PASSTHROUGH_POSTIMPL,fault_addr,error_code,*actual_start,*actual_end};
        dispatch_passthrough_event(info,&event);
    }

    return rc;
}



int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr,
                                   addr_t *actual_start, addr_t *actual_end) {

    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
    addr_t start, end;
    int rc;

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr),PAGE_ADDR(inv_addr)+PAGE_SIZE-1};
        dispatch_passthrough_event(info,&event);
    }

    if (!actual_start) { actual_start=&start;}
    if (!actual_end) { actual_end=&end;}



    rc=-1;

    switch(mode) {
        case REAL:
        case PROTECTED:
          // Intentional fallthrough - there
          // are only PAE page tables now
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            rc=invalidate_addr_32pae(info, inv_addr, actual_start, actual_end);
            break;
        default:
            PrintError(info->vm_info, info, "Unknown CPU Mode\n");
            break;
    }

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_POSTIMPL,0,{0,0,0,0,0,0},*actual_start,*actual_end};
        dispatch_passthrough_event(info,&event);
    }


    return rc;
}


int v3_invalidate_passthrough_addr_range(struct guest_info * info,
                                         addr_t inv_addr_start, addr_t inv_addr_end,
                                         addr_t *actual_start, addr_t *actual_end) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
    addr_t start, end;
    int rc;

    if (!actual_start) { actual_start=&start;}
    if (!actual_end) { actual_end=&end;}

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr_start),PAGE_ADDR(inv_addr_end-1)+PAGE_SIZE-1};
        dispatch_passthrough_event(info,&event);
    }

    rc=-1;

    switch(mode) {
        case REAL:
        case PROTECTED:
          // Intentional fallthrough
          // There are only PAE PTs now
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
          rc=invalidate_addr_32pae_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
          break;
        default:
            PrintError(info->vm_info, info, "Unknown CPU Mode\n");
            break;
    }

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_POSTIMPL,0,{0,0,0,0,0,0},*actual_start,*actual_end};
        dispatch_passthrough_event(info,&event);
    }

    return rc;
}


int v3_init_passthrough_paging(struct v3_vm_info *vm)
{
  INIT_LIST_HEAD(&(vm->passthrough_impl.event_callback_list));
  v3_rw_lock_init(&(vm->passthrough_impl.event_callback_lock));
  return 0;
}

int v3_deinit_passthrough_paging(struct v3_vm_info *vm)
{
  struct passthrough_event_callback *cb,*temp;
  addr_t flags;

  flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));

  list_for_each_entry_safe(cb,
                           temp,
                           &(vm->passthrough_impl.event_callback_list),
                           node) {
    list_del(&(cb->node));
    V3_Free(cb);
  }

  v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);

  v3_rw_lock_deinit(&(vm->passthrough_impl.event_callback_lock));

  return 0;
}

int v3_init_passthrough_paging_core(struct guest_info *core)
{
  // currently nothing to init
  return 0;
}

int v3_deinit_passthrough_paging_core(struct guest_info *core)
{
  // currently nothing to deinit
  return 0;
}


int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                                  int (*callback)(struct guest_info *core,
                                                                  struct v3_passthrough_pg_event *,
                                                                  void      *priv_data),
                                                  void *priv_data)
{
    struct passthrough_event_callback *ec = V3_Malloc(sizeof(struct passthrough_event_callback));
    addr_t flags;

    if (!ec) {
        PrintError(vm, VCORE_NONE, "Unable to allocate for a passthrough paging event callback\n");
        return -1;
    }

    ec->callback = callback;
    ec->priv_data = priv_data;

    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
    list_add(&(ec->node),&(vm->passthrough_impl.event_callback_list));
    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);

    return 0;

}



int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                                    int (*callback)(struct guest_info *core,
                                                                    struct v3_passthrough_pg_event *,
                                                                    void      *priv_data),
                                                    void *priv_data)
{
    struct passthrough_event_callback *cb,*temp;
    addr_t flags;

    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(vm->passthrough_impl.event_callback_list),
                             node) {
        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
            list_del(&(cb->node));
            V3_Free(cb);
            v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
            return 0;
        }
    }

    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);

    PrintError(vm, VCORE_NONE, "No callback found!\n");

    return -1;
}
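
/*
  Example (illustrative only, not part of the original file): registering a
  passthrough paging event callback.  The event struct fields referred to
  here are the ones initialized throughout this file (event type, PRE/POST
  phase, guest address, error code, and the affected start/end range).

     static int my_pt_cb(struct guest_info *core,
                         struct v3_passthrough_pg_event *event,
                         void *priv_data) {
         // invoked before (PASSTHROUGH_PREIMPL) and after (PASSTHROUGH_POSTIMPL)
         // each passthrough operation, e.g. PASSTHROUGH_PAGEFAULT
         return 0;
     }

     v3_register_passthrough_paging_event_callback(vm, my_pt_cb, NULL);
     ...
     v3_unregister_passthrough_paging_event_callback(vm, my_pt_cb, NULL);
 */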


// inline nested paging support for Intel and AMD
#include "svm_npt.h"
#include "vmx_npt.h"


inline void convert_to_pf_error(void *pfinfo, pf_error_t *out)
{
  if (is_vmx_nested()) {
#ifdef V3_CONFIG_VMX
    ept_exit_qual_to_pf_error((struct ept_exit_qual *)pfinfo, out);
#endif
  } else {
    *out = *(pf_error_t *)pfinfo;
  }
}

int v3_handle_nested_pagefault(struct guest_info * info, addr_t fault_addr, void *pfinfo, addr_t *actual_start, addr_t *actual_end)
{
  int rc;
  pf_error_t err;
  addr_t start, end;

  if (!actual_start) { actual_start=&start; }
  if (!actual_end) { actual_end=&end; }

  convert_to_pf_error(pfinfo,&err);

  if (have_nested_callbacks(info)) {
      struct v3_nested_pg_event event={NESTED_PAGEFAULT,NESTED_PREIMPL,fault_addr,err,fault_addr,fault_addr};
      dispatch_nested_event(info,&event);
  }


  if (is_vmx_nested()) {
    rc = handle_vmx_nested_pagefault(info,fault_addr,pfinfo,actual_start,actual_end);
  } else {
    rc = handle_svm_nested_pagefault(info,fault_addr,pfinfo,actual_start,actual_end);
  }

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_PAGEFAULT,NESTED_POSTIMPL,fault_addr,err,*actual_start,*actual_end};
    dispatch_nested_event(info,&event);
  }

  return rc;
}



int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr,
                              addr_t *actual_start, addr_t *actual_end)
{
  int rc;

  addr_t start, end;

  if (!actual_start) { actual_start=&start; }
  if (!actual_end) { actual_end=&end; }


  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr),PAGE_ADDR(inv_addr)+PAGE_SIZE-1};
    dispatch_nested_event(info,&event);
  }

  if (is_vmx_nested()) {
    rc = handle_vmx_invalidate_nested_addr(info, inv_addr, actual_start, actual_end);
  } else {
    rc = handle_svm_invalidate_nested_addr(info, inv_addr, actual_start, actual_end);
  }

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_POSTIMPL,0,{0,0,0,0,0,0},*actual_start, *actual_end};
    dispatch_nested_event(info,&event);
  }
  return rc;
}


int v3_invalidate_nested_addr_range(struct guest_info * info,
                                    addr_t inv_addr_start, addr_t inv_addr_end,
                                    addr_t *actual_start, addr_t *actual_end)
{
  int rc;

  addr_t start, end;

  if (!actual_start) { actual_start=&start; }
  if (!actual_end) { actual_end=&end; }

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr_start),PAGE_ADDR(inv_addr_end-1)+PAGE_SIZE-1};
    dispatch_nested_event(info,&event);
  }

  if (is_vmx_nested()) {
    rc = handle_vmx_invalidate_nested_addr_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
  } else {
    rc = handle_svm_invalidate_nested_addr_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
  }


  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_POSTIMPL,0,{0,0,0,0,0,0},*actual_start, *actual_end};
    dispatch_nested_event(info,&event);
  }

  return rc;

}


int v3_init_nested_paging(struct v3_vm_info *vm)
{
  INIT_LIST_HEAD(&(vm->nested_impl.event_callback_list));
  v3_rw_lock_init(&(vm->nested_impl.event_callback_lock));
  return 0;
}

int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo)
{
  if (is_vmx_nested()) {
    return init_ept(core, (struct vmx_hw_info *) hwinfo);
  } else {
    // no initialization for SVM
    return 0;
  }
}

int v3_deinit_nested_paging(struct v3_vm_info *vm)
{
  struct nested_event_callback *cb,*temp;
  addr_t flags;

  flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));

  list_for_each_entry_safe(cb,
                           temp,
                           &(vm->nested_impl.event_callback_list),
                           node) {
    list_del(&(cb->node));
    V3_Free(cb);
  }

  v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);

  v3_rw_lock_deinit(&(vm->nested_impl.event_callback_lock));

  return 0;
}

int v3_deinit_nested_paging_core(struct guest_info *core)
{
  // nothing to do..  probably dealloc?  FIXME PAD

  return 0;
}


int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
                                            int (*callback)(struct guest_info *core,
                                                            struct v3_nested_pg_event *,
                                                            void      *priv_data),
                                            void *priv_data)
{
    struct nested_event_callback *ec = V3_Malloc(sizeof(struct nested_event_callback));
    addr_t flags;

    if (!ec) {
        PrintError(vm, VCORE_NONE, "Unable to allocate for a nested paging event callback\n");
        return -1;
    }

    ec->callback = callback;
    ec->priv_data = priv_data;

    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
    list_add(&(ec->node),&(vm->nested_impl.event_callback_list));
    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);

    return 0;

}



int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
                                              int (*callback)(struct guest_info *core,
                                                              struct v3_nested_pg_event *,
                                                              void      *priv_data),
                                              void *priv_data)
{
    struct nested_event_callback *cb,*temp;
    addr_t flags;

    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(vm->nested_impl.event_callback_list),
                             node) {
        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
            list_del(&(cb->node));
            V3_Free(cb);
            v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
            return 0;
        }
    }

    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);

    PrintError(vm, VCORE_NONE, "No callback found!\n");

    return -1;
}
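
/*
  A minimal sketch (not part of the original file) of the nested-paging
  entry points above as a host might call them; the actual wiring lives in
  the SVM/VMX code elsewhere in the codebase:

     v3_init_nested_paging(vm);                  // per-VM: callback list + lock
     v3_init_nested_paging_core(core, hwinfo);   // per-core: EPT setup on VMX, no-op on SVM

     // on a nested page fault exit (hardware-specific fault info in pfinfo):
     v3_handle_nested_pagefault(core, fault_addr, pfinfo, NULL, NULL);

     // when a GPA->HPA mapping changes:
     v3_invalidate_nested_addr(core, gpa, NULL, NULL);
 */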