Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are checked out the same way.
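For example, to work on another remote branch, substitute its name for devel (the name below is just a placeholder):

  git checkout --track -b <branchname> origin/<branchname>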


Better support for dynamic changes to page event callbacks
[palacios.git] / palacios / src / palacios / vmm_direct_paging.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
 *         Peter Dinda <pdinda@northwestern.edu> (refactor + events)
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_paging.h>
#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm_ctrl_regs.h>


#if !defined(V3_CONFIG_DEBUG_NESTED_PAGING) && !defined(V3_CONFIG_DEBUG_SHADOW_PAGING)
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif



/*

  "Direct Paging" combines these three functionalities:

   1. Passthrough paging for SVM and VMX

      Passthrough paging is used for shadow paging when
      the guest does not have paging turned on, for example
      when it is running in real mode or protected mode
      early in a typical boot process.  Passthrough page
      tables are shadow page tables that are built assuming
      the guest virtual to guest physical mapping is the identity.
      Thus, what they implement is the GPA->HPA mapping.

      Passthrough page tables are built using 32PAE paging.


   2. Nested paging on SVM

      The SVM nested page tables have the same format as
      regular page tables.  For this reason, we can reuse
      much of the passthrough implementation.  A nested page
      table mapping is a GPA->HPA mapping, creating a model
      very similar to passthrough paging, except that it is
      always active, whether the guest has paging on or not.


   3. Nested paging on VMX

      The VMX nested page tables have a different format
      than regular page tables.  For this reason, we have
      implemented them in the vmx_npt.h file.  The code
      here then is a wrapper, allowing us to make nested
      paging functionality appear uniform across VMX and SVM
      elsewhere in the codebase.

*/
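
/*
   Illustrative sketch (not part of the original source): because this file
   presents one interface over passthrough, SVM nested, and VMX nested paging,
   a caller elsewhere in the codebase that remaps a guest-physical page can
   flush the stale GPA->HPA translation without caring which scheme is active.
   The address below is hypothetical, and the shdw_pg_mode test is assumed to
   follow the pattern used by callers elsewhere in Palacios.

       addr_t gpa = 0x100000;   // hypothetical guest-physical page that was remapped

       if (core->shdw_pg_mode == SHADOW_PAGING) {
           v3_invalidate_passthrough_addr(core, gpa, NULL, NULL);  // passthrough tables (guest paging off)
       } else {
           v3_invalidate_nested_addr(core, gpa, NULL, NULL);       // nested tables, SVM or VMX alike
       }
*/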



static inline int is_vmx_nested()
{
    extern v3_cpu_arch_t v3_mach_type;

    return (v3_mach_type==V3_VMX_EPT_CPU || v3_mach_type==V3_VMX_EPT_UG_CPU);
}

static inline int is_svm_nested()
{
    extern v3_cpu_arch_t v3_mach_type;

    return (v3_mach_type==V3_SVM_REV3_CPU);
}


struct passthrough_event_callback {
    int (*callback)(struct guest_info *core, struct v3_passthrough_pg_event *event, void *priv_data);
    void *priv_data;

    struct list_head node;
};


static int have_passthrough_callbacks(struct guest_info *core)
{
    // lock acquisition unnecessary
    // caller will acquire the lock before *iterating* through the list
    // so any race will be resolved then
    return !list_empty(&(core->vm_info->passthrough_impl.event_callback_list));
}

static void dispatch_passthrough_event(struct guest_info *core, struct v3_passthrough_pg_event *event)
{
    struct passthrough_event_callback *cb,*temp;

    v3_read_lock(&(core->vm_info->passthrough_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(core->vm_info->passthrough_impl.event_callback_list),
                             node) {
        cb->callback(core,event,cb->priv_data);
    }

    v3_read_unlock(&(core->vm_info->passthrough_impl.event_callback_lock));

}

struct nested_event_callback {
    int (*callback)(struct guest_info *core, struct v3_nested_pg_event *event, void *priv_data);
    void *priv_data;

    struct list_head node;
};


static int have_nested_callbacks(struct guest_info *core)
{
    // lock acquisition unnecessary
    // caller will acquire the lock before *iterating* through the list
    // so any race will be resolved then
    return !list_empty(&(core->vm_info->nested_impl.event_callback_list));
}

static void dispatch_nested_event(struct guest_info *core, struct v3_nested_pg_event *event)
{
    struct nested_event_callback *cb,*temp;

    v3_read_lock(&(core->vm_info->nested_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(core->vm_info->nested_impl.event_callback_list),
                             node) {
        cb->callback(core,event,cb->priv_data);
    }

    v3_read_unlock(&(core->vm_info->nested_impl.event_callback_lock));
}




static addr_t create_generic_pt_page(struct guest_info *core) {
    void * page = 0;
    void *temp;

    temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB, -1, 0); // no constraints

    if (!temp) {
        PrintError(VM_NONE, VCORE_NONE,"Cannot allocate page\n");
        return 0;
    }

    page = V3_VAddr(temp);
    memset(page, 0, PAGE_SIZE);

    return (addr_t)page;
}

// Inline handler functions for each cpu mode
#include "vmm_direct_paging_32.h"
#include "vmm_direct_paging_32pae.h"
#include "vmm_direct_paging_64.h"



int v3_init_passthrough_pts(struct guest_info * info) {
    info->direct_map_pt = (addr_t)V3_PAddr((void *)create_generic_pt_page(info));
    return 0;
}


int v3_free_passthrough_pts(struct guest_info * core) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);

    // Delete the old direct map page tables
    switch(mode) {
        case REAL:
        case PROTECTED:
          // Intentional fallthrough here
          // There are *only* PAE tables
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
            break;
        default:
            PrintError(core->vm_info, core, "Unknown CPU Mode\n");
            return -1;
            break;
    }

    return 0;
}


int v3_reset_passthrough_pts(struct guest_info * core) {

    v3_free_passthrough_pts(core);

    // create new direct map page table
    v3_init_passthrough_pts(core);

    return 0;
}



int v3_activate_passthrough_pt(struct guest_info * info) {
    // For now... But we need to change this....
    // As soon as shadow paging becomes active, the passthrough tables are hosed,
    // so this will cause chaos if it is called at that time.

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_ACTIVATE,PASSTHROUGH_PREIMPL,0,{0,0,0,0,0,0},0,0};
        dispatch_passthrough_event(info,&event);
    }

    struct cr3_32_PAE * shadow_cr3 = (struct cr3_32_PAE *) &(info->ctrl_regs.cr3);
    struct cr4_32 * shadow_cr4 = (struct cr4_32 *) &(info->ctrl_regs.cr4);
    addr_t shadow_pt_addr = *(addr_t*)&(info->direct_map_pt);
    // Passthrough PTs will only be PAE page tables.
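    // CR3 in PAE mode holds the 32-byte-aligned PDPT base in bits 31:5, so the
    // physical address is stored into pdpt_base_addr shifted right by 5.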
    shadow_cr3->pdpt_base_addr = shadow_pt_addr >> 5;
    shadow_cr4->pae = 1;
    PrintDebug(info->vm_info, info, "Activated Passthrough Page tables\n");

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_ACTIVATE,PASSTHROUGH_POSTIMPL,0,{0,0,0,0,0,0},0,0};
        dispatch_passthrough_event(info,&event);
    }

    return 0;
}



int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                    addr_t *actual_start, addr_t *actual_end) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
    addr_t start, end;
    int rc;

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_PAGEFAULT,PASSTHROUGH_PREIMPL,fault_addr,error_code,fault_addr,fault_addr};
        dispatch_passthrough_event(info,&event);
    }

    if (!actual_start) { actual_start=&start; }
    if (!actual_end) { actual_end=&end; }


    rc=-1;

    switch(mode) {
        case REAL:
        case PROTECTED:
          // Note intentional fallthrough here
          // There are only PAE page tables now
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            rc=handle_passthrough_pagefault_32pae(info, fault_addr, error_code, actual_start, actual_end);
            break;
        default:
            PrintError(info->vm_info, info, "Unknown CPU Mode\n");
            break;
    }

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_PAGEFAULT,PASSTHROUGH_POSTIMPL,fault_addr,error_code,*actual_start,*actual_end};
        dispatch_passthrough_event(info,&event);
    }

    return rc;
}



int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr,
                                   addr_t *actual_start, addr_t *actual_end) {

    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
    addr_t start, end;
    int rc;

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr),PAGE_ADDR(inv_addr)+PAGE_SIZE-1};
        dispatch_passthrough_event(info,&event);
    }

    if (!actual_start) { actual_start=&start;}
    if (!actual_end) { actual_end=&end;}



    rc=-1;

    switch(mode) {
        case REAL:
        case PROTECTED:
          // Intentional fallthrough - there
          // are only PAE page tables now
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            rc=invalidate_addr_32pae(info, inv_addr, actual_start, actual_end);
            break;
        default:
            PrintError(info->vm_info, info, "Unknown CPU Mode\n");
            break;
    }

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_POSTIMPL,0,{0,0,0,0,0,0},*actual_start,*actual_end};
        dispatch_passthrough_event(info,&event);
    }


    return rc;
}


int v3_invalidate_passthrough_addr_range(struct guest_info * info,
                                         addr_t inv_addr_start, addr_t inv_addr_end,
                                         addr_t *actual_start, addr_t *actual_end) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
    addr_t start, end;
    int rc;

    if (!actual_start) { actual_start=&start;}
    if (!actual_end) { actual_end=&end;}

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr_start),PAGE_ADDR(inv_addr_end-1)+PAGE_SIZE-1};
        dispatch_passthrough_event(info,&event);
    }

    rc=-1;

    switch(mode) {
        case REAL:
        case PROTECTED:
          // Intentional fallthrough
          // There are only PAE PTs now
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            rc=invalidate_addr_32pae_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
            break;
        default:
            PrintError(info->vm_info, info, "Unknown CPU Mode\n");
            break;
    }

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_POSTIMPL,0,{0,0,0,0,0,0},*actual_start,*actual_end};
        dispatch_passthrough_event(info,&event);
    }

    return rc;
}


int v3_init_passthrough_paging(struct v3_vm_info *vm)
{
  INIT_LIST_HEAD(&(vm->passthrough_impl.event_callback_list));
  v3_rw_lock_init(&(vm->passthrough_impl.event_callback_lock));
  return 0;
}

int v3_deinit_passthrough_paging(struct v3_vm_info *vm)
{
  struct passthrough_event_callback *cb,*temp;
  addr_t flags;

  flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));

  list_for_each_entry_safe(cb,
                           temp,
                           &(vm->passthrough_impl.event_callback_list),
                           node) {
    list_del(&(cb->node));
    V3_Free(cb);
  }

  v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);

  v3_rw_lock_deinit(&(vm->passthrough_impl.event_callback_lock));

  return 0;
}

int v3_init_passthrough_paging_core(struct guest_info *core)
{
  // currently nothing to init
  return 0;
}

int v3_deinit_passthrough_paging_core(struct guest_info *core)
{
  // currently nothing to deinit
  return 0;
}


int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                                  int (*callback)(struct guest_info *core,
                                                                  struct v3_passthrough_pg_event *,
                                                                  void      *priv_data),
                                                  void *priv_data)
{
    struct passthrough_event_callback *ec = V3_Malloc(sizeof(struct passthrough_event_callback));
    addr_t flags;

    if (!ec) {
        PrintError(vm, VCORE_NONE, "Unable to allocate for a passthrough paging event callback\n");
        return -1;
    }

    ec->callback = callback;
    ec->priv_data = priv_data;

    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
    list_add(&(ec->node),&(vm->passthrough_impl.event_callback_list));
    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);

    return 0;

}



int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                                    int (*callback)(struct guest_info *core,
                                                                    struct v3_passthrough_pg_event *,
                                                                    void      *priv_data),
                                                    void *priv_data)
{
    struct passthrough_event_callback *cb,*temp;
    addr_t flags;

    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(vm->passthrough_impl.event_callback_list),
                             node) {
        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
            list_del(&(cb->node));
            V3_Free(cb);
            v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
            return 0;
        }
    }

    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);

    PrintError(vm, VCORE_NONE, "No callback found!\n");

    return -1;
}
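
/*
   Illustrative sketch (not part of the original source): how a client might
   use the registration interface above to observe passthrough paging events.
   The callback, the counter, and the registration site are hypothetical.

       static uint64_t pt_event_count = 0;

       static int count_pt_events(struct guest_info *core,
                                  struct v3_passthrough_pg_event *event,
                                  void *priv_data) {
           uint64_t *counter = (uint64_t *)priv_data;
           (*counter)++;   // each operation is dispatched twice (PREIMPL and POSTIMPL)
           return 0;
       }

       // at VM setup:
       //     v3_register_passthrough_paging_event_callback(vm, count_pt_events, &pt_event_count);
       // at teardown:
       //     v3_unregister_passthrough_paging_event_callback(vm, count_pt_events, &pt_event_count);
*/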


// inline nested paging support for Intel and AMD
#include "svm_npt.h"
#include "vmx_npt.h"


inline void convert_to_pf_error(void *pfinfo, pf_error_t *out)
{
  if (is_vmx_nested()) {
#ifdef V3_CONFIG_VMX
    ept_exit_qual_to_pf_error((struct ept_exit_qual *)pfinfo, out);
#endif
  } else {
    *out = *(pf_error_t *)pfinfo;
  }
}

int v3_handle_nested_pagefault(struct guest_info * info, addr_t fault_addr, void *pfinfo, addr_t *actual_start, addr_t *actual_end)
{
  int rc;
  pf_error_t err;
  addr_t start, end;

  if (!actual_start) { actual_start=&start; }
  if (!actual_end) { actual_end=&end; }

  convert_to_pf_error(pfinfo,&err);

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_PAGEFAULT,NESTED_PREIMPL,fault_addr,err,fault_addr,fault_addr};
    dispatch_nested_event(info,&event);
  }


  if (is_vmx_nested()) {
    rc = handle_vmx_nested_pagefault(info,fault_addr,pfinfo,actual_start,actual_end);
  } else {
    rc = handle_svm_nested_pagefault(info,fault_addr,pfinfo,actual_start,actual_end);
  }

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_PAGEFAULT,NESTED_POSTIMPL,fault_addr,err,*actual_start,*actual_end};
    dispatch_nested_event(info,&event);
  }

  return rc;
}



int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr,
                              addr_t *actual_start, addr_t *actual_end)
{
  int rc;

  addr_t start, end;

  if (!actual_start) { actual_start=&start; }
  if (!actual_end) { actual_end=&end; }


  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr),PAGE_ADDR(inv_addr)+PAGE_SIZE-1};
    dispatch_nested_event(info,&event);
  }

  if (is_vmx_nested()) {
    rc = handle_vmx_invalidate_nested_addr(info, inv_addr, actual_start, actual_end);
  } else {
    rc = handle_svm_invalidate_nested_addr(info, inv_addr, actual_start, actual_end);
  }

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_POSTIMPL,0,{0,0,0,0,0,0},*actual_start, *actual_end};
    dispatch_nested_event(info,&event);
  }
  return rc;
}


int v3_invalidate_nested_addr_range(struct guest_info * info,
                                    addr_t inv_addr_start, addr_t inv_addr_end,
                                    addr_t *actual_start, addr_t *actual_end)
{
  int rc;

  addr_t start, end;

  if (!actual_start) { actual_start=&start; }
  if (!actual_end) { actual_end=&end; }

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr_start),PAGE_ADDR(inv_addr_end-1)+PAGE_SIZE-1};
    dispatch_nested_event(info,&event);
  }

  if (is_vmx_nested()) {
    rc = handle_vmx_invalidate_nested_addr_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
  } else {
    rc = handle_svm_invalidate_nested_addr_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
  }


  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_POSTIMPL,0,{0,0,0,0,0,0},*actual_start, *actual_end};
    dispatch_nested_event(info,&event);
  }

  return rc;

}


int v3_init_nested_paging(struct v3_vm_info *vm)
{
  INIT_LIST_HEAD(&(vm->nested_impl.event_callback_list));
  v3_rw_lock_init(&(vm->nested_impl.event_callback_lock));
  return 0;
}

int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo)
{
  if (is_vmx_nested()) {
    return init_ept(core, (struct vmx_hw_info *) hwinfo);
  } else {
    // no initialization for SVM
    return 0;
  }
}

int v3_deinit_nested_paging(struct v3_vm_info *vm)
{
  struct nested_event_callback *cb,*temp;
  addr_t flags;

  flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));

  list_for_each_entry_safe(cb,
                           temp,
                           &(vm->nested_impl.event_callback_list),
                           node) {
    list_del(&(cb->node));
    V3_Free(cb);
  }

  v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);

  v3_rw_lock_deinit(&(vm->nested_impl.event_callback_lock));

  return 0;
}

int v3_deinit_nested_paging_core(struct guest_info *core)
{
  // nothing to do..  probably dealloc?  FIXME PAD

  return 0;
}


int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
                                             int (*callback)(struct guest_info *core,
                                                             struct v3_nested_pg_event *,
                                                             void      *priv_data),
                                             void *priv_data)
{
    struct nested_event_callback *ec = V3_Malloc(sizeof(struct nested_event_callback));
    addr_t flags;

    if (!ec) {
        PrintError(vm, VCORE_NONE, "Unable to allocate for a nested paging event callback\n");
        return -1;
    }

    ec->callback = callback;
    ec->priv_data = priv_data;

    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
    list_add(&(ec->node),&(vm->nested_impl.event_callback_list));
    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);

    return 0;

}



int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
                                               int (*callback)(struct guest_info *core,
                                                               struct v3_nested_pg_event *,
                                                               void      *priv_data),
                                               void *priv_data)
{
    struct nested_event_callback *cb,*temp;
    addr_t flags;

    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(vm->nested_impl.event_callback_list),
                             node) {
        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
            list_del(&(cb->node));
            V3_Free(cb);
            v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
            return 0;
        }
    }

    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);

    PrintError(vm, VCORE_NONE, "No callback found!\n");

    return -1;
}