Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are handled the same way; see the example below.
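For example, you can list the remote branches and then track one of the release branches in the same manner (the branch name Release-1.3 below is only illustrative; use whatever "git branch -r" actually lists):

  git branch -r
  git checkout --track -b Release-1.3 origin/Release-1.3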


Commit: More init checks to allow graceful fail out when VM cannot be created

File: palacios/src/palacios/vmm_direct_paging.c (in palacios.git)
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2008, Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Steven Jaconette <stevenjaconette2007@u.northwestern.edu>
 *         Peter Dinda <pdinda@northwestern.edu> (refactor + events)
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_paging.h>
#include <palacios/vmm.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vm_guest.h>
#include <palacios/vmm_ctrl_regs.h>


#if !defined(V3_CONFIG_DEBUG_NESTED_PAGING) && !defined(V3_CONFIG_DEBUG_SHADOW_PAGING)
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif


/*

  "Direct Paging" combines these three functionalities:

   1. Passthrough paging for SVM and VMX

      Passthrough paging is used for shadow paging when
      the guest does not have paging turned on, for example
      when it is running in real mode or protected mode
      early in a typical boot process.  Passthrough page
      tables are shadow page tables that are built assuming
      the guest virtual to guest physical mapping is the identity.
      Thus, what they implement is the GPA->HPA mapping.

      Passthrough page tables are built using 32PAE paging.


   2. Nested paging on SVM

      The SVM nested page tables have the same format as
      regular page tables.  For this reason, we can reuse
      much of the passthrough implementation.  A nested page
      table mapping is a GPA->HPA mapping, creating a model
      very similar to passthrough paging, except that it is
      always active, whether the guest has paging on or not.


   3. Nested paging on VMX

      The VMX nested page tables have a different format
      than regular page tables.  For this reason, we have
      implemented them in the vmx_npt.h file.  The code
      here is therefore a wrapper, allowing us to make nested
      paging functionality appear uniform across VMX and SVM
      elsewhere in the codebase.

*/
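
/*
  For example, when passthrough paging is active and the guest touches a
  guest physical address that is not yet mapped, the resulting fault is
  handled by v3_handle_passthrough_pagefault() below, which fills in the
  needed 32PAE entries via handle_passthrough_pagefault_32pae() from the
  per-mode vmm_direct_paging_*.h headers included later in this file.
*/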


static inline int is_vmx_nested()
{
    extern v3_cpu_arch_t v3_mach_type;

    return (v3_mach_type==V3_VMX_EPT_CPU || v3_mach_type==V3_VMX_EPT_UG_CPU);
}

static inline int is_svm_nested()
{
    extern v3_cpu_arch_t v3_mach_type;

    return (v3_mach_type==V3_SVM_REV3_CPU);
}


struct passthrough_event_callback {
    int (*callback)(struct guest_info *core, struct v3_passthrough_pg_event *event, void *priv_data);
    void *priv_data;

    struct list_head node;
};


static int have_passthrough_callbacks(struct guest_info *core)
{
    // lock acquisition is unnecessary here:
    // the caller will acquire the lock before *iterating* through the list,
    // so any race will be resolved then
    return !list_empty(&(core->vm_info->passthrough_impl.event_callback_list));
}

static void dispatch_passthrough_event(struct guest_info *core, struct v3_passthrough_pg_event *event)
{
    struct passthrough_event_callback *cb,*temp;

    v3_read_lock(&(core->vm_info->passthrough_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(core->vm_info->passthrough_impl.event_callback_list),
                             node) {
        cb->callback(core,event,cb->priv_data);
    }

    v3_read_unlock(&(core->vm_info->passthrough_impl.event_callback_lock));

}

struct nested_event_callback {
    int (*callback)(struct guest_info *core, struct v3_nested_pg_event *event, void *priv_data);
    void *priv_data;

    struct list_head node;
};


static int have_nested_callbacks(struct guest_info *core)
{
    // lock acquisition is unnecessary here:
    // the caller will acquire the lock before *iterating* through the list,
    // so any race will be resolved then
    return !list_empty(&(core->vm_info->nested_impl.event_callback_list));
}

static void dispatch_nested_event(struct guest_info *core, struct v3_nested_pg_event *event)
{
    struct nested_event_callback *cb,*temp;

    v3_read_lock(&(core->vm_info->nested_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(core->vm_info->nested_impl.event_callback_list),
                             node) {
        cb->callback(core,event,cb->priv_data);
    }

    v3_read_unlock(&(core->vm_info->nested_impl.event_callback_lock));
}



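/* Allocate and zero a single 4 KB page to be used as a page table node.
 * The allocation respects the core's resource_control constraints
 * (allocation node id and page filter); returns the host virtual address
 * of the page, or 0 on allocation failure. */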
static addr_t create_generic_pt_page(struct guest_info *core) {
    void * page = 0;
    void *temp;

    temp = V3_AllocPagesExtended(1, PAGE_SIZE_4KB,
                                 core->resource_control.pg_node_id,
                                 core->resource_control.pg_filter_func,
                                 core->resource_control.pg_filter_state);

    if (!temp) {
        PrintError(VM_NONE, VCORE_NONE,"Cannot allocate page\n");
        return 0;
    }

    page = V3_VAddr(temp);
    memset(page, 0, PAGE_SIZE);

    return (addr_t)page;
}

// Inline handler functions for each cpu mode
#include "vmm_direct_paging_32.h"
#include "vmm_direct_paging_32pae.h"
#include "vmm_direct_paging_64.h"



int v3_init_passthrough_pts(struct guest_info * info) {
    if (info->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
        // skip - ept_init will do this allocation
        return 0;
    }
    info->direct_map_pt = (addr_t)V3_PAddr((void *)create_generic_pt_page(info));
    return 0;
}


int v3_free_passthrough_pts(struct guest_info * core) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(core);

    if (core->shdw_pg_mode == NESTED_PAGING && is_vmx_nested()) {
        // there are no passthrough page tables, but
        // the EPT implementation is using direct_map_pt to store
        // the EPT root table pointer...  and the EPT tables
        // are not compatible with regular x86 tables, so we
        // must not attempt to free them here...
        return 0;
    }

    // we are either in shadow or in SVM nested
    // in either case, we can nuke the PTs

    // Delete the old direct map page tables
    switch(mode) {
        case REAL:
        case PROTECTED:
          // Intentional fallthrough here
          // There are *only* PAE tables
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            if (core->direct_map_pt) {
                delete_page_tables_32pae((pdpe32pae_t *)V3_VAddr((void *)(core->direct_map_pt)));
            }
            break;
        default:
            PrintError(core->vm_info, core, "Unknown CPU Mode\n");
            return -1;
            break;
    }

    return 0;
}


int v3_reset_passthrough_pts(struct guest_info * core) {

    v3_free_passthrough_pts(core);

    // create new direct map page table
    v3_init_passthrough_pts(core);

    return 0;
}



int v3_activate_passthrough_pt(struct guest_info * info) {
    // For now... But we need to change this....
    // As soon as shadow paging becomes active the passthrough tables are hosed
    // So this will cause chaos if it is called at that time

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_ACTIVATE,PASSTHROUGH_PREIMPL,0,{0,0,0,0,0,0},0,0};
        dispatch_passthrough_event(info,&event);
    }

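    /* In PAE mode, CR3 bits 31:5 hold the physical address of the
     * 32-byte-aligned page directory pointer table, which is why the
     * base address is shifted right by 5 before being stored in the
     * pdpt_base_addr bitfield below. */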
    struct cr3_32_PAE * shadow_cr3 = (struct cr3_32_PAE *) &(info->ctrl_regs.cr3);
    struct cr4_32 * shadow_cr4 = (struct cr4_32 *) &(info->ctrl_regs.cr4);
    addr_t shadow_pt_addr = *(addr_t*)&(info->direct_map_pt);
    // Passthrough PTs will only be PAE page tables.
    shadow_cr3->pdpt_base_addr = shadow_pt_addr >> 5;
    shadow_cr4->pae = 1;
    PrintDebug(info->vm_info, info, "Activated Passthrough Page tables\n");

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_ACTIVATE,PASSTHROUGH_POSTIMPL,0,{0,0,0,0,0,0},0,0};
        dispatch_passthrough_event(info,&event);
    }

    return 0;
}



int v3_handle_passthrough_pagefault(struct guest_info * info, addr_t fault_addr, pf_error_t error_code,
                                    addr_t *actual_start, addr_t *actual_end) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
    addr_t start, end;
    int rc;

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_PAGEFAULT,PASSTHROUGH_PREIMPL,fault_addr,error_code,fault_addr,fault_addr};
        dispatch_passthrough_event(info,&event);
    }

    if (!actual_start) { actual_start=&start; }
    if (!actual_end) { actual_end=&end; }


    rc=-1;

    switch(mode) {
        case REAL:
        case PROTECTED:
          // Note intentional fallthrough here
          // There are only PAE page tables now
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            rc=handle_passthrough_pagefault_32pae(info, fault_addr, error_code, actual_start, actual_end);
            break;
        default:
            PrintError(info->vm_info, info, "Unknown CPU Mode\n");
            break;
    }

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_PAGEFAULT,PASSTHROUGH_POSTIMPL,fault_addr,error_code,*actual_start,*actual_end};
        dispatch_passthrough_event(info,&event);
    }

    return rc;
}



int v3_invalidate_passthrough_addr(struct guest_info * info, addr_t inv_addr,
                                   addr_t *actual_start, addr_t *actual_end) {

    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
    addr_t start, end;
    int rc;

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr),PAGE_ADDR(inv_addr)+PAGE_SIZE-1};
        dispatch_passthrough_event(info,&event);
    }

    if (!actual_start) { actual_start=&start;}
    if (!actual_end) { actual_end=&end;}



    rc=-1;

    switch(mode) {
        case REAL:
        case PROTECTED:
          // Intentional fallthrough - there
          // are only PAE page tables now
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
            rc=invalidate_addr_32pae(info, inv_addr, actual_start, actual_end);
            break;
        default:
            PrintError(info->vm_info, info, "Unknown CPU Mode\n");
            break;
    }

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_POSTIMPL,0,{0,0,0,0,0,0},*actual_start,*actual_end};
        dispatch_passthrough_event(info,&event);
    }


    return rc;
}


int v3_invalidate_passthrough_addr_range(struct guest_info * info,
                                         addr_t inv_addr_start, addr_t inv_addr_end,
                                         addr_t *actual_start, addr_t *actual_end) {
    v3_cpu_mode_t mode = v3_get_vm_cpu_mode(info);
    addr_t start, end;
    int rc;

    if (!actual_start) { actual_start=&start;}
    if (!actual_end) { actual_end=&end;}

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr_start),PAGE_ADDR(inv_addr_end-1)+PAGE_SIZE-1};
        dispatch_passthrough_event(info,&event);
    }

    rc=-1;

    switch(mode) {
        case REAL:
        case PROTECTED:
          // Intentional fallthrough
          // There are only PAE PTs now
        case PROTECTED_PAE:
        case LONG:
        case LONG_32_COMPAT:
            // Long mode will only use 32PAE page tables...
          rc=invalidate_addr_32pae_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
          break;
        default:
            PrintError(info->vm_info, info, "Unknown CPU Mode\n");
            break;
    }

    if (have_passthrough_callbacks(info)) {
        struct v3_passthrough_pg_event event={PASSTHROUGH_INVALIDATE_RANGE,PASSTHROUGH_POSTIMPL,0,{0,0,0,0,0,0},*actual_start,*actual_end};
        dispatch_passthrough_event(info,&event);
    }

    return rc;
}


int v3_init_passthrough_paging(struct v3_vm_info *vm)
{
  INIT_LIST_HEAD(&(vm->passthrough_impl.event_callback_list));
  v3_rw_lock_init(&(vm->passthrough_impl.event_callback_lock));
  vm->passthrough_impl.inited=1;
  return 0;
}

int v3_deinit_passthrough_paging(struct v3_vm_info *vm)
{
  struct passthrough_event_callback *cb,*temp;
  addr_t flags;

  if (!vm->passthrough_impl.inited) {
      return 0;
  }

  flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));

  list_for_each_entry_safe(cb,
                           temp,
                           &(vm->passthrough_impl.event_callback_list),
                           node) {
    list_del(&(cb->node));
    V3_Free(cb);
  }

  v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);

  v3_rw_lock_deinit(&(vm->passthrough_impl.event_callback_lock));

  return 0;
}

int v3_init_passthrough_paging_core(struct guest_info *core)
{
  // currently nothing to init
  return 0;
}

int v3_deinit_passthrough_paging_core(struct guest_info *core)
{
  // currently nothing to deinit
  return 0;
}


int v3_register_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                                  int (*callback)(struct guest_info *core,
                                                                  struct v3_passthrough_pg_event *,
                                                                  void      *priv_data),
                                                  void *priv_data)
{
    struct passthrough_event_callback *ec = V3_Malloc(sizeof(struct passthrough_event_callback));
    addr_t flags;

    if (!ec) {
        PrintError(vm, VCORE_NONE, "Unable to allocate for a passthrough paging event callback\n");
        return -1;
    }

    ec->callback = callback;
    ec->priv_data = priv_data;

    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));
    list_add(&(ec->node),&(vm->passthrough_impl.event_callback_list));
    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);

    return 0;

}



int v3_unregister_passthrough_paging_event_callback(struct v3_vm_info *vm,
                                                    int (*callback)(struct guest_info *core,
                                                                    struct v3_passthrough_pg_event *,
                                                                    void      *priv_data),
                                                    void *priv_data)
{
    struct passthrough_event_callback *cb,*temp;
    addr_t flags;

    flags=v3_write_lock_irqsave(&(vm->passthrough_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(vm->passthrough_impl.event_callback_list),
                             node) {
        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
            list_del(&(cb->node));
            V3_Free(cb);
            v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);
            return 0;
        }
    }

    v3_write_unlock_irqrestore(&(vm->passthrough_impl.event_callback_lock),flags);

    PrintError(vm, VCORE_NONE, "No callback found!\n");

    return -1;
}
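
/* Hypothetical usage sketch (not part of the original sources): a consumer
 * that wants to observe passthrough paging activity could register a
 * callback such as
 *
 *   static int my_pt_observer(struct guest_info *core,
 *                             struct v3_passthrough_pg_event *event,
 *                             void *priv_data) {
 *       // examine the event (activation, page fault, invalidation range, ...)
 *       return 0;
 *   }
 *
 *   v3_register_passthrough_paging_event_callback(vm, my_pt_observer, NULL);
 *   ...
 *   v3_unregister_passthrough_paging_event_callback(vm, my_pt_observer, NULL);
 *
 * Note that dispatch_passthrough_event() invokes callbacks while holding a
 * read lock on the callback list, so registering or unregistering from
 * within a callback would contend with that lock. */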


// inline nested paging support for Intel and AMD
#include "svm_npt.h"
#include "vmx_npt.h"


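/* Normalize the hardware-specific fault information: on VMX the argument is
 * an EPT exit qualification (struct ept_exit_qual), while on SVM it is
 * already an ordinary pf_error_t. */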
inline void convert_to_pf_error(void *pfinfo, pf_error_t *out)
{
  if (is_vmx_nested()) {
#ifdef V3_CONFIG_VMX
    ept_exit_qual_to_pf_error((struct ept_exit_qual *)pfinfo, out);
#endif
  } else {
    *out = *(pf_error_t *)pfinfo;
  }
}

int v3_handle_nested_pagefault(struct guest_info * info, addr_t fault_addr, void *pfinfo, addr_t *actual_start, addr_t *actual_end)
{
  int rc;
  pf_error_t err;
  addr_t start, end;

  if (!actual_start) { actual_start=&start; }
  if (!actual_end) { actual_end=&end; }

  convert_to_pf_error(pfinfo,&err);

  if (have_nested_callbacks(info)) {
      struct v3_nested_pg_event event={NESTED_PAGEFAULT,NESTED_PREIMPL,fault_addr,err,fault_addr,fault_addr};
      dispatch_nested_event(info,&event);
  }


  if (is_vmx_nested()) {
    rc = handle_vmx_nested_pagefault(info,fault_addr,pfinfo,actual_start,actual_end);
  } else {
    rc = handle_svm_nested_pagefault(info,fault_addr,pfinfo,actual_start,actual_end);
  }

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_PAGEFAULT,NESTED_POSTIMPL,fault_addr,err,*actual_start,*actual_end};
    dispatch_nested_event(info,&event);
  }

  return rc;
}



int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr,
                              addr_t *actual_start, addr_t *actual_end)
{
  int rc;

  addr_t start, end;

  if (!actual_start) { actual_start=&start; }
  if (!actual_end) { actual_end=&end; }


  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr),PAGE_ADDR(inv_addr)+PAGE_SIZE-1};
    dispatch_nested_event(info,&event);
  }

  if (is_vmx_nested()) {
    rc = handle_vmx_invalidate_nested_addr(info, inv_addr, actual_start, actual_end);
  } else {
    rc = handle_svm_invalidate_nested_addr(info, inv_addr, actual_start, actual_end);
  }

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_POSTIMPL,0,{0,0,0,0,0,0},*actual_start, *actual_end};
    dispatch_nested_event(info,&event);
  }
  return rc;
}


int v3_invalidate_nested_addr_range(struct guest_info * info,
                                    addr_t inv_addr_start, addr_t inv_addr_end,
                                    addr_t *actual_start, addr_t *actual_end)
{
  int rc;

  addr_t start, end;

  if (!actual_start) { actual_start=&start; }
  if (!actual_end) { actual_end=&end; }

  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_PREIMPL,0,{0,0,0,0,0,0},PAGE_ADDR(inv_addr_start),PAGE_ADDR(inv_addr_end-1)+PAGE_SIZE-1};
    dispatch_nested_event(info,&event);
  }

  if (is_vmx_nested()) {
    rc = handle_vmx_invalidate_nested_addr_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
  } else {
    rc = handle_svm_invalidate_nested_addr_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
  }


  if (have_nested_callbacks(info)) {
    struct v3_nested_pg_event event={NESTED_INVALIDATE_RANGE,NESTED_POSTIMPL,0,{0,0,0,0,0,0},*actual_start, *actual_end};
    dispatch_nested_event(info,&event);
  }

  return rc;

}


int v3_init_nested_paging(struct v3_vm_info *vm)
{
  INIT_LIST_HEAD(&(vm->nested_impl.event_callback_list));
  v3_rw_lock_init(&(vm->nested_impl.event_callback_lock));
  return 0;
}

int v3_init_nested_paging_core(struct guest_info *core, void *hwinfo)
{
  if (is_vmx_nested()) {
    return init_ept(core, (struct vmx_hw_info *) hwinfo);
  } else {
    // no initialization for SVM
    // the direct map page tables are used since the
    // nested pt format is identical to the main pt format
    return 0;
  }
}

int v3_deinit_nested_paging(struct v3_vm_info *vm)
{
  struct nested_event_callback *cb,*temp;
  addr_t flags;

  flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));

  list_for_each_entry_safe(cb,
                           temp,
                           &(vm->nested_impl.event_callback_list),
                           node) {
    list_del(&(cb->node));
    V3_Free(cb);
  }

  v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);

  v3_rw_lock_deinit(&(vm->nested_impl.event_callback_lock));

  return 0;
}

int v3_deinit_nested_paging_core(struct guest_info *core)
{
  if (core->shdw_pg_mode == NESTED_PAGING) {
    if (is_vmx_nested()) {
      return deinit_ept(core);
    } else {
      // SVM nested deinit is handled by the passthrough paging teardown
      return 0;
    }
  } else {
    // not relevant
    return 0;
  }
}


int v3_register_nested_paging_event_callback(struct v3_vm_info *vm,
                                            int (*callback)(struct guest_info *core,
                                                            struct v3_nested_pg_event *,
                                                            void      *priv_data),
                                            void *priv_data)
{
    struct nested_event_callback *ec = V3_Malloc(sizeof(struct nested_event_callback));
    addr_t flags;

    if (!ec) {
        PrintError(vm, VCORE_NONE, "Unable to allocate for a nested paging event callback\n");
        return -1;
    }

    ec->callback = callback;
    ec->priv_data = priv_data;

    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));
    list_add(&(ec->node),&(vm->nested_impl.event_callback_list));
    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);

    return 0;

}



int v3_unregister_nested_paging_event_callback(struct v3_vm_info *vm,
                                              int (*callback)(struct guest_info *core,
                                                              struct v3_nested_pg_event *,
                                                              void      *priv_data),
                                              void *priv_data)
{
    struct nested_event_callback *cb,*temp;
    addr_t flags;

    flags=v3_write_lock_irqsave(&(vm->nested_impl.event_callback_lock));

    list_for_each_entry_safe(cb,
                             temp,
                             &(vm->nested_impl.event_callback_list),
                             node) {
        if ((callback == cb->callback) && (priv_data == cb->priv_data)) {
            list_del(&(cb->node));
            V3_Free(cb);
            v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);
            return 0;
        }
    }

    v3_write_unlock_irqrestore(&(vm->nested_impl.event_callback_lock),flags);

    PrintError(vm, VCORE_NONE, "No callback found!\n");

    return -1;
}