/* Device-manager lifecycle API.
 * NOTE(review): this span contained unresolved diff markers; the removed
 * prototypes (v3_dev_mgr_deinit, v3_init_devices) are superseded by the
 * renamed declarations below — confirm against the full header. */
int v3_init_dev_mgr(struct v3_vm_info * vm);
int v3_deinit_dev_mgr(struct v3_vm_info * vm);
int v3_free_vm_devices(struct v3_vm_info * vm);


/* Global (once-per-host) registration of all compiled-in device types. */
int V3_init_devices();
struct v3_device_ops {
#ifdef CONFIG_SVM
case V3_SVM_CPU:
case V3_SVM_REV3_CPU:
- v3_init_svm_io_map(vm);
- v3_init_svm_msr_map(vm);
+ v3_deinit_svm_io_map(vm);
+ v3_deinit_svm_msr_map(vm);
break;
#endif
#ifdef CONFIG_VMX
case V3_VMX_CPU:
case V3_VMX_EPT_CPU:
- v3_init_vmx_io_map(vm);
- v3_init_vmx_msr_map(vm);
+ v3_deinit_vmx_io_map(vm);
+ v3_deinit_vmx_msr_map(vm);
break;
#endif
default:
return -1;
}
+ v3_deinit_dev_mgr(vm);
+
return 0;
}
}
// Register all the possible device types
- v3_init_devices();
+ V3_init_devices();
// Register all shadow paging handlers
V3_init_shdw_paging();
int i = 0;
// deinitialize guest (free memory, etc...)
- v3_dev_mgr_deinit(vm);
+ v3_free_vm_devices(vm);
for (i = 0; i < vm->num_cores; i++) {
// free cores
}
-int v3_init_devices() {
+int V3_init_devices() {
extern struct v3_device_info __start__v3_devices[];
extern struct v3_device_info __stop__v3_devices[];
struct v3_device_info * tmp_dev = __start__v3_devices;
}
-int v3_dev_mgr_deinit(struct v3_vm_info * vm) {
- struct vm_device * dev;
+int v3_free_vm_devices(struct v3_vm_info * vm) {
struct vmm_dev_mgr * mgr = &(vm->dev_mgr);
+ struct vm_device * dev;
struct vm_device * tmp;
list_for_each_entry_safe(dev, tmp, &(mgr->dev_list), dev_link) {
v3_remove_device(dev);
}
+ return 0;
+}
+
+int v3_deinit_dev_mgr(struct v3_vm_info * vm) {
+ struct vmm_dev_mgr * mgr = &(vm->dev_mgr);
+
v3_free_htable(mgr->blk_table, 0, 0);
v3_free_htable(mgr->net_table, 0, 0);
v3_free_htable(mgr->char_table, 0, 0);