struct {
int num_ports;
uint16_t default_base_port;
- int (*io_read)(ushort_t port, void * dst, uint_t length, void * private_data);
- int (*io_write)(ushort_t port, void * src, uint_t length, void * private_data);
+ int (*io_read)(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * private_data);
+ int (*io_write)(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data);
};
struct {
} __attribute__((packed));
-int v3_init_svm_io_map(struct guest_info * info);
+int v3_init_svm_io_map(struct v3_vm_info * vm);
int v3_handle_svm_io_in(struct guest_info * info, struct svm_io_info * io_info);
int v3_handle_svm_io_ins(struct guest_info * info, struct svm_io_info * io_info);
#include <palacios/vmm.h>
-int v3_init_svm_msr_map(struct guest_info * info);
+int v3_init_svm_msr_map(struct v3_vm_info * vm);
#endif // ! __V3VEE__
struct v3_sym_state;
#endif
+
+// Per-virtual-core execution state. VM-global state (memory map, io/msr/cpuid
+// maps, device manager, host event hooks, etc.) is moved out of this struct
+// into struct v3_vm_info as part of the single-core -> multicore refactor.
struct guest_info {
uint64_t rip;
uint_t cpl;
- addr_t mem_size; // In bytes for now
- v3_shdw_map_t mem_map;
-
- struct v3_config * cfg_data;
- v3_vm_class_t vm_class;
-
struct vm_time time_state;
-
v3_paging_mode_t shdw_pg_mode;
struct shadow_page_state shdw_pg_state;
addr_t direct_map_pt;
// This structure is how we get interrupts for the guest
- struct v3_intr_state intr_state;
+ // per-core delivery state only; VM-wide routing now lives in v3_vm_info.intr_routers
+ struct v3_intr_core_state intr_core_state;
// This structure is how we get exceptions for the guest
struct v3_excp_state excp_state;
- struct v3_io_map io_map;
-
- struct v3_msr_map msr_map;
-
- struct v3_cpuid_map cpuid_map;
-
-#ifdef CONFIG_SYMBIOTIC
- // Symbiotic state
- struct v3_sym_state sym_state;
-
-#ifdef CONFIG_SYMBIOTIC_SWAP
- struct v3_sym_swap_state swap_state;
-#endif
-#endif
-
- v3_hypercall_map_t hcall_map;
-
- // device_map
- struct vmm_dev_mgr dev_mgr;
-
- struct v3_host_events host_event_hooks;
v3_cpu_mode_t cpu_mode;
v3_mem_mode_t mem_mode;
uint_t addr_width;
-
struct v3_gprs vm_regs;
struct v3_ctrl_regs ctrl_regs;
struct v3_dbg_regs dbg_regs;
struct v3_segments segments;
- v3_vm_operating_mode_t run_state;
+
void * vmm_data;
- uint64_t yield_cycle_period;
uint64_t yield_start_cycle;
uint64_t num_exits;
#ifdef CONFIG_TELEMETRY
- uint_t enable_telemetry;
struct v3_telemetry_state telemetry;
#endif
-
+ // struct v3_core_dev_mgr core_dev_mgr;
void * decoder_state;
+
+ // back-pointer to the VM-wide state this core belongs to
+ struct v3_vm_info * vm_info;
// the logical cpu this guest context is executing on
int cpu_id;
};
+
+
+// VM-wide state shared by all cores, split out of the old monolithic
+// struct guest_info. Allocated with a trailing variable-length array of
+// per-core guest_info structs (see `cores` below).
+struct v3_vm_info {
+ v3_vm_class_t vm_class;
+
+ addr_t mem_size; // In bytes for now
+ struct v3_mem_map mem_map;
+
+
+ // VM-global hook maps: shared across cores, updated via the *_map APIs
+ struct v3_io_map io_map;
+ struct v3_msr_map msr_map;
+ struct v3_cpuid_map cpuid_map;
+
+ v3_hypercall_map_t hcall_map;
+
+
+ // VM-wide interrupt routing (per-core delivery stays in guest_info)
+ struct v3_intr_routers intr_routers;
+ // device_map
+ struct vmm_dev_mgr dev_mgr;
+
+ struct v3_host_events host_event_hooks;
+
+ struct v3_config * cfg_data;
+
+ v3_vm_operating_mode_t run_state;
+
+#ifdef CONFIG_SYMBIOTIC
+ // Symbiotic state
+ struct v3_sym_state sym_state;
+#ifdef CONFIG_SYMBIOTIC_SWAP
+ struct v3_sym_swap_state swap_state;
+#endif
+#endif
+
+
+
+#ifdef CONFIG_TELEMETRY
+ uint_t enable_telemetry;
+#endif
+
+ uint64_t yield_cycle_period;
+
+ int num_cores;
+ // GNU zero-length array trick: the VM is allocated with num_cores
+ // guest_info structs appended (C99 would spell this `cores[]`)
+ struct guest_info cores[0];
+
+};
+
+
+
+
+
uint_t v3_get_addr_width(struct guest_info * info);
v3_cpu_mode_t v3_get_vm_cpu_mode(struct guest_info * info);
v3_mem_mode_t v3_get_vm_mem_mode(struct guest_info * info);
} while (0)
-#define V3_Hook_Interrupt(irq, opaque) ({ \
+#define V3_Hook_Interrupt(vm, irq) ({ \
int ret = 0; \
extern struct v3_os_hooks * os_hooks; \
if ((os_hooks) && (os_hooks)->hook_interrupt) { \
- ret = (os_hooks)->hook_interrupt(irq, opaque); \
+ ret = (os_hooks)->hook_interrupt(vm, irq); \
} \
ret; \
}) \
void v3_print_cond(const char * fmt, ...);
-void v3_interrupt_cpu(struct guest_info * vm, int logical_cpu);
+void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu);
unsigned int v3_get_cpu_id();
-struct guest_info;
+struct v3_vm_info;
/* This will contain function pointers that provide OS services */
struct v3_os_hooks {
void *(*paddr_to_vaddr)(void *addr);
void *(*vaddr_to_paddr)(void *addr);
- int (*hook_interrupt)(struct guest_info * vm, unsigned int irq);
+ int (*hook_interrupt)(struct v3_vm_info * vm, unsigned int irq);
int (*ack_irq)(int irq);
void (*mutex_unlock)(void * mutex);
unsigned int (*get_cpu)(void);
- void (*interrupt_cpu)(struct guest_info * vm, int logical_cpu);
+ void (*interrupt_cpu)(struct v3_vm_info * vm, int logical_cpu);
void (*call_on_cpu)(int logical_cpu, void (*fn)(void * arg), void * arg);
void (*start_thread_on_cpu)(int logical_cpu, int (*fn)(void * arg), void * arg, char * thread_name);
};
void Init_V3(struct v3_os_hooks * hooks, int num_cpus);
-int v3_start_vm(struct guest_info * info, unsigned int cpu_mask);
-struct guest_info * v3_create_vm(void * cfg);
+int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask);
+struct v3_vm_info * v3_create_vm(void * cfg);
-int v3_deliver_irq(struct guest_info * vm, struct v3_interrupt * intr);
+int v3_deliver_irq(struct v3_vm_info * vm, struct v3_interrupt * intr);
//#include <palacios/svm.h>
-struct guest_info;
+struct v3_vm_info;
-int v3_config_guest(struct guest_info * info, void * cfg_blob);
+struct v3_vm_info * v3_config_guest( void * cfg_blob);
struct v3_cfg_file {
void * data;
};
-struct v3_cfg_file * v3_cfg_get_file(struct guest_info * info, char * tag);
+struct v3_cfg_file * v3_cfg_get_file(struct v3_vm_info * info, char * tag);
char * v3_cfg_val(v3_cfg_tree_t * tree, char * tag);
v3_cfg_tree_t * v3_cfg_subtree(v3_cfg_tree_t * tree, char * tag);
struct guest_info;
-void v3_init_cpuid_map(struct guest_info * info);
+void v3_init_cpuid_map(struct v3_vm_info * vm);
struct v3_cpuid_hook {
struct rb_root map;
};
-void v3_print_cpuid_map(struct guest_info * info);
+void v3_print_cpuid_map(struct v3_vm_info * vm);
-int v3_hook_cpuid(struct guest_info * info, uint32_t cpuid,
+int v3_hook_cpuid(struct v3_vm_info * vm, uint32_t cpuid,
int (*hook_fn)(struct guest_info * info, uint32_t cpuid, \
uint32_t * eax, uint32_t * ebx, \
uint32_t * ecx, uint32_t * edx, \
void * private_data),
void * private_data);
-int v3_unhook_cpuid(struct guest_info * info, uint32_t cpuid);
+int v3_unhook_cpuid(struct v3_vm_info * vm, uint32_t cpuid);
int v3_handle_cpuid(struct guest_info * info);
#include <palacios/vmm_config.h>
-struct guest_info;
+struct v3_vm_info;
struct v3_device_ops;
struct v3_device_ops * ops;
- struct guest_info * vm;
+ struct v3_vm_info * vm;
struct list_head dev_link;
};
+int v3_create_device(struct v3_vm_info * vm, const char * dev_name, v3_cfg_tree_t * cfg);
-
-int v3_create_device(struct guest_info * info, const char * dev_name, v3_cfg_tree_t * cfg);
void v3_free_device(struct vm_device * dev);
-struct vm_device * v3_find_dev(struct guest_info * info, const char * dev_name);
+struct vm_device * v3_find_dev(struct v3_vm_info * info, const char * dev_name);
// Registration of devices
-int v3_init_dev_mgr(struct guest_info * info);
-int v3_dev_mgr_deinit(struct guest_info * info);
+int v3_init_dev_mgr(struct v3_vm_info * vm);
+int v3_dev_mgr_deinit(struct v3_vm_info * vm);
+
+
int v3_dev_hook_io(struct vm_device *dev,
ushort_t port,
- int (*read)(ushort_t port, void * dst, uint_t length, struct vm_device * dev),
- int (*write)(ushort_t port, void * src, uint_t length, struct vm_device * dev));
+ int (*read)(struct guest_info * core, ushort_t port, void * dst, uint_t length, struct vm_device * dev),
+ int (*write)(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev));
int v3_dev_unhook_io(struct vm_device *dev,
ushort_t port);
-int v3_attach_device(struct guest_info * vm, struct vm_device * dev);
+int v3_attach_device(struct v3_vm_info * vm, struct vm_device * dev);
int v3_detach_device(struct vm_device * dev);
struct vm_device * v3_allocate_device(char * name, struct v3_device_ops * ops, void * private_data);
struct v3_device_info {
char * name;
- int (*init)(struct guest_info * info, v3_cfg_tree_t * cfg);
+ int (*init)(struct v3_vm_info * info, v3_cfg_tree_t * cfg);
};
-void v3_print_dev_mgr(struct guest_info * info);
+void v3_print_dev_mgr(struct v3_vm_info * vm);
struct v3_dev_blk_ops {
};
-int v3_dev_add_blk_frontend(struct guest_info * info,
+int v3_dev_add_blk_frontend(struct v3_vm_info * vm,
char * name,
- int (*connect)(struct guest_info * info,
+ int (*connect)(struct v3_vm_info * vm,
void * frontend_data,
struct v3_dev_blk_ops * ops,
v3_cfg_tree_t * cfg,
void * private_data),
void * priv_data);
-int v3_dev_connect_blk(struct guest_info * info,
+int v3_dev_connect_blk(struct v3_vm_info * vm,
char * frontend_name,
struct v3_dev_blk_ops * ops,
v3_cfg_tree_t * cfg,
void * private_data);
-int v3_dev_add_net_frontend(struct guest_info * info,
+int v3_dev_add_net_frontend(struct v3_vm_info * vm,
char * name,
- int (*connect)(struct guest_info * info,
+ int (*connect)(struct v3_vm_info * vm,
void * frontend_data,
struct v3_dev_net_ops * ops,
v3_cfg_tree_t * cfg,
void * private_data),
void * priv_data);
-int v3_dev_connect_net(struct guest_info * info,
+int v3_dev_connect_net(struct v3_vm_info * vm,
char * frontend_name,
struct v3_dev_net_ops * ops,
v3_cfg_tree_t * cfg,
#include <palacios/vmm_list.h>
+struct v3_vm_info;
typedef enum {HOST_KEYBOARD_EVT,
HOST_MOUSE_EVT,
union v3_host_event_handler {
- int (*keyboard_handler)(struct guest_info * info, struct v3_keyboard_event * evt, void * priv_data);
- int (*mouse_handler)(struct guest_info * info, struct v3_mouse_event * evt, void * priv_data);
- int (*timer_handler)(struct guest_info * info, struct v3_timer_event * evt, void * priv_data);
+ int (*keyboard_handler)(struct v3_vm_info * vm, struct v3_keyboard_event * evt, void * priv_data);
+ int (*mouse_handler)(struct v3_vm_info * vm, struct v3_mouse_event * evt, void * priv_data);
+ int (*timer_handler)(struct v3_vm_info * vm, struct v3_timer_event * evt, void * priv_data);
};
-int v3_init_host_events(struct guest_info * info);
+int v3_init_host_events(struct v3_vm_info * vm);
#define V3_HOST_EVENT_HANDLER(cb) ((union v3_host_event_handler)cb)
-int v3_hook_host_event(struct guest_info * info,
+int v3_hook_host_event(struct v3_vm_info * vm,
v3_host_evt_type_t event_type,
union v3_host_event_handler cb,
void * private_data);
-int v3_deliver_keyboard_event(struct guest_info * info, struct v3_keyboard_event * evt);
-int v3_deliver_mouse_event(struct guest_info * info, struct v3_mouse_event * evt);
-int v3_deliver_timer_event(struct guest_info * info, struct v3_timer_event * evt);
+int v3_deliver_keyboard_event(struct v3_vm_info * vm, struct v3_keyboard_event * evt);
+int v3_deliver_mouse_event(struct v3_vm_info * vm, struct v3_mouse_event * evt);
+int v3_deliver_timer_event(struct v3_vm_info * vm, struct v3_timer_event * evt);
typedef struct rb_root v3_hypercall_map_t;
struct guest_info;
+struct v3_vm_info;
-void v3_init_hypercall_map(struct guest_info * info);
+void v3_init_hypercall_map(struct v3_vm_info * vm);
-int v3_register_hypercall(struct guest_info * info, uint_t hypercall_id,
- int (*hypercall)(struct guest_info * info, uint_t hcall_id, void * priv_data),
+int v3_register_hypercall(struct v3_vm_info * vm, uint_t hypercall_id,
+ int (*hypercall)(struct guest_info * info , uint_t hcall_id, void * priv_data),
void * priv_data);
typedef enum {V3_INVALID_INTR, V3_EXTERNAL_IRQ, V3_VIRTUAL_IRQ, V3_NMI, V3_SOFTWARE_INTR} v3_intr_type_t;
struct guest_info;
+struct v3_vm_info;
struct v3_interrupt;
struct v3_irq_hook {
- int (*handler)(struct guest_info * info, struct v3_interrupt * intr, void * priv_data);
+ int (*handler)(struct v3_vm_info * vm, struct v3_interrupt * intr, void * priv_data);
void * priv_data;
};
#define MAX_IRQ 256
+// VM-wide interrupt routing state: replaces the old per-guest v3_intr_state.
+// Routers (e.g. the PIC's router half) and IRQ hooks are shared by all cores.
+struct v3_intr_routers {
+ struct list_head router_list;
+ v3_lock_t irq_lock;
-struct v3_intr_state {
-
- struct list_head controller_list;
+ /* some way to get the [A]PIC intr */
+ struct v3_irq_hook * hooks[256];
+};
+// Per-core interrupt delivery state: pending/started vector bookkeeping and
+// the list of interrupt controllers attached to this core.
+struct v3_intr_core_state {
uint_t irq_pending;
uint_t irq_started;
uint_t irq_vector;
v3_lock_t irq_lock;
- /* some way to get the [A]PIC intr */
- struct v3_irq_hook * hooks[256];
-
+ struct list_head controller_list;
};
-void v3_init_interrupt_state(struct guest_info * info);
-
+void v3_init_intr_controllers(struct guest_info * info);
+void v3_init_intr_routers(struct v3_vm_info * vm);
int v3_raise_virq(struct guest_info * info, int irq);
int v3_lower_virq(struct guest_info * info, int irq);
-int v3_raise_irq(struct guest_info * info, int irq);
-int v3_lower_irq(struct guest_info * info, int irq);
+int v3_raise_irq(struct v3_vm_info * vm, int irq);
+int v3_lower_irq(struct v3_vm_info * vm, int irq);
+// Per-core controller ops: query/ack of pending interrupts on one core.
+// raise/lower moved out to the VM-wide router ops below.
struct intr_ctrl_ops {
int (*intr_pending)(struct guest_info * info, void * private_data);
int (*get_intr_number)(struct guest_info * info, void * private_data);
- int (*raise_intr)(struct guest_info * info, void * private_data, int irq);
- int (*lower_intr)(struct guest_info * info, void * private_data, int irq);
int (*begin_irq)(struct guest_info * info, void * private_data, int irq);
};
+// VM-wide router ops: raising/lowering an IRQ line is a whole-VM operation
+// that the router then steers to the appropriate core's controller.
+struct intr_router_ops {
+ int (*raise_intr)(struct v3_vm_info * vm, void * private_data, int irq);
+ int (*lower_intr)(struct v3_vm_info * vm, void * private_data, int irq);
+};
-void v3_register_intr_controller(struct guest_info * info, struct intr_ctrl_ops * ops, void * state);
+
+int v3_register_intr_controller(struct guest_info * info, struct intr_ctrl_ops * ops, void * priv_data);
+int v3_register_intr_router(struct v3_vm_info * vm, struct intr_router_ops * ops, void * priv_data);
v3_intr_type_t v3_intr_pending(struct guest_info * info);
uint32_t v3_get_intr(struct guest_info * info);
+int v3_injecting_intr(struct guest_info * info, uint_t intr_num, v3_intr_type_t type);
-//intr_type_t v3_get_intr_type(struct guest_info * info);
-int v3_injecting_intr(struct guest_info * info, uint_t intr_num, v3_intr_type_t type);
/*
int start_irq(struct vm_intr * intr);
-int v3_hook_irq(struct guest_info * info,
+int v3_hook_irq(struct v3_vm_info * vm,
uint_t irq,
- int (*handler)(struct guest_info * info, struct v3_interrupt * intr, void * priv_data),
+ int (*handler)(struct v3_vm_info * vm, struct v3_interrupt * intr, void * priv_data),
void * priv_data);
-int v3_hook_passthrough_irq(struct guest_info *info, uint_t irq);
+int v3_hook_passthrough_irq(struct v3_vm_info * vm, uint_t irq);
+struct v3_vm_info;
struct guest_info;
-void v3_init_io_map(struct guest_info * info);
+void v3_init_io_map(struct v3_vm_info * vm);
/* External API */
-int v3_hook_io_port(struct guest_info * info, uint16_t port,
- int (*read)(uint16_t port, void * dst, uint_t length, void * priv_data),
- int (*write)(uint16_t port, void * src, uint_t length, void * priv_data),
+int v3_hook_io_port(struct v3_vm_info * vm, uint16_t port,
+ int (*read)(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * priv_data),
+ int (*write)(struct guest_info * core, uint16_t port, void * src, uint_t length, void * priv_data),
void * priv_data);
-int v3_unhook_io_port(struct guest_info * info, uint16_t port);
+int v3_unhook_io_port(struct v3_vm_info * vm, uint16_t port);
uint16_t port;
// Reads data into the IO port (IN, INS)
- int (*read)(uint16_t port, void * dst, uint_t length, void * priv_data);
+ int (*read)(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * priv_data);
// Writes data from the IO port (OUT, OUTS)
- int (*write)(uint16_t port, void * src, uint_t length, void * priv_data);
-
-
+ int (*write)(struct guest_info * core, uint16_t port, void * src, uint_t length, void * priv_data);
void * priv_data;
struct rb_node tree_node;
-
};
struct v3_io_map {
struct rb_root map;
- int (*update_map)(struct guest_info * info, uint16_t port, int hook_read, int hook_write);
+ int (*update_map)(struct v3_vm_info * vm, uint16_t port, int hook_read, int hook_write);
void * arch_data;
};
-struct v3_io_hook * v3_get_io_hook(struct guest_info * info, uint16_t port);
+struct v3_io_hook * v3_get_io_hook(struct v3_vm_info * vm, uint16_t port);
-void v3_print_io_map(struct guest_info * info);
+void v3_print_io_map(struct v3_vm_info * vm);
-void v3_refresh_io_map(struct guest_info * info);
+void v3_refresh_io_map(struct v3_vm_info * vm);
void v3_outb(uint16_t port, uint8_t value);
#include <palacios/vmm_paging.h>
#include <palacios/vmm_rbtree.h>
-
+#include <palacios/vmm_list.h>
struct guest_info;
+struct v3_vm_info;
addr_t host_addr; // This either points to a host address mapping
-
// Called when data is read from a memory page
int (*read_hook)(addr_t guest_addr, void * dst, uint_t length, void * priv_data);
// Called when data is written to a memory page
void * priv_data;
- struct rb_node tree_node;
+ int core_id;
+
+ struct rb_node tree_node; // This for memory regions mapped to the global map
};
-typedef struct v3_shdw_map {
+struct v3_mem_map {
struct v3_shadow_region base_region;
-
- addr_t hook_hva;
-
struct rb_root shdw_regions;
-} v3_shdw_map_t;
+
+ void * hook_hvas; // this is an array of pages, equal to the number of cores
+};
+int v3_init_mem_map(struct guest_info * info);
+void v3_delete_mem_map(struct guest_info * info);
-int v3_init_shadow_map(struct guest_info * info);
-void v3_delete_shadow_map(struct guest_info * info);
-int v3_add_shadow_mem(struct guest_info * guest_info,
+int v3_add_shadow_mem(struct v3_vm_info * vm,
addr_t guest_addr_start,
addr_t guest_addr_end,
addr_t host_addr);
-int v3_hook_full_mem(struct guest_info * info, addr_t guest_addr_start, addr_t guest_addr_end,
+int v3_hook_full_mem(struct v3_vm_info * vm, addr_t guest_addr_start, addr_t guest_addr_end,
int (*read)(addr_t guest_addr, void * dst, uint_t length, void * priv_data),
int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
void * priv_data);
-int v3_hook_write_mem(struct guest_info * info, addr_t guest_addr_start, addr_t guest_addr_end,
+int v3_hook_write_mem(struct v3_vm_info * vm, addr_t guest_addr_start, addr_t guest_addr_end,
addr_t host_addr,
int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
void * priv_data);
+int v3_unhook_mem(struct v3_vm_info * vm, addr_t guest_addr_start);
+
+
-int v3_unhook_mem(struct guest_info * info, addr_t guest_addr_start);
-void v3_delete_shadow_region(struct guest_info * info, struct v3_shadow_region * reg);
+void v3_delete_shadow_region(struct v3_vm_info * vm, struct v3_shadow_region * reg);
-struct v3_shadow_region * v3_get_shadow_region(struct guest_info * info, addr_t guest_addr);
+struct v3_shadow_region * v3_get_shadow_region(struct v3_vm_info * vm, addr_t guest_addr /*, int core_id */);
addr_t v3_get_shadow_addr(struct v3_shadow_region * reg, addr_t guest_addr);
-void v3_print_mem_map(struct guest_info * info);
+void v3_print_mem_map(struct v3_vm_info * vm);
#include <palacios/vmm_list.h>
struct guest_info;
-
+struct v3_vm_info;
struct v3_msr {
uint_t num_hooks;
struct list_head hook_list;
- int (*update_map)(struct guest_info * info, uint_t msr, int hook_read, int hook_write);
+ int (*update_map)(struct v3_vm_info * vm, uint_t msr, int hook_read, int hook_write);
void * arch_data;
};
-void v3_init_msr_map(struct guest_info * info);
+void v3_init_msr_map(struct v3_vm_info * vm);
-int v3_unhook_msr(struct guest_info * info, uint_t msr);
+int v3_unhook_msr(struct v3_vm_info * vm, uint_t msr);
-int v3_hook_msr(struct guest_info * info, uint_t msr,
+int v3_hook_msr(struct v3_vm_info * vm, uint_t msr,
int (*read)(uint_t msr, struct v3_msr * dst, void * priv_data),
int (*write)(uint_t msr, struct v3_msr src, void * priv_data),
void * priv_data);
-struct v3_msr_hook * v3_get_msr_hook(struct guest_info * info, uint_t msr);
+struct v3_msr_hook * v3_get_msr_hook(struct v3_vm_info * vm, uint_t msr);
-void v3_refresh_msr_map(struct guest_info * info);
+void v3_refresh_msr_map(struct v3_vm_info * vm);
-void v3_print_msr_map(struct guest_info * info);
+void v3_print_msr_map(struct v3_vm_info * vm);
int v3_handle_msr_write(struct guest_info * info);
#include <palacios/vm_guest.h>
+// Saved per-core register context for a symbiotic upcall (renamed from
+// v3_sym_context to make the per-core scope explicit).
-struct v3_sym_context {
+struct v3_sym_core_context {
struct v3_gprs vm_regs;
struct v3_segment cs;
struct v3_segment ss;
uint8_t cpl;
};
-
-struct v3_sym_state {
-
- struct v3_sym_interface * sym_page;
- addr_t sym_page_pa;
-
- uint64_t guest_pg_addr;
-
+// Per-core symcall bookkeeping, split out of the old monolithic v3_sym_state.
+struct v3_symcall_state{
struct {
uint_t active : 1; // activated when symbiotic page MSR is written
uint_t sym_call_active : 1;
uint_t sym_call_error : 1;
} __attribute__((packed));
- struct v3_sym_context old_ctx;
+ struct v3_sym_core_context old_ctx;
int sym_call_errno;
uint64_t sym_call_fs;
};
+// VM-wide symbiotic state; `symcalls` points to an array of per-core
+// symcall states (presumably one per core — confirm against allocator).
+struct v3_sym_state {
+
+ struct v3_sym_interface * sym_page;
+ addr_t sym_page_pa;
+
+ uint64_t guest_pg_addr;
+
+ struct v3_symcall_state * symcalls;
+};
+
+
-int v3_init_sym_iface(struct guest_info * info);
+int v3_init_sym_iface(struct v3_vm_info * vm);
typedef uint64_t sym_arg_t;
-int v3_sym_map_pci_passthrough(struct guest_info * info, uint_t bus, uint_t dev, uint_t fn);
-int v3_sym_unmap_pci_passthrough(struct guest_info * info, uint_t bus, uint_t dev, uint_t fn);
+int v3_sym_map_pci_passthrough(struct v3_vm_info * vm, uint_t bus, uint_t dev, uint_t fn);
+int v3_sym_unmap_pci_passthrough(struct v3_vm_info * vm, uint_t bus, uint_t dev, uint_t fn);
/* Symcall numbers */
-int v3_init_sym_swap(struct guest_info * info);
+int v3_init_sym_swap(struct v3_vm_info * vm);
-int v3_register_swap_disk(struct guest_info * info, int dev_index,
+int v3_register_swap_disk(struct v3_vm_info * vm, int dev_index,
struct v3_swap_ops * ops, void * private_data);
-int v3_swap_in_notify(struct guest_info * info, int pg_index, int dev_index);
+int v3_swap_in_notify(struct v3_vm_info * vm, int pg_index, int dev_index);
int v3_get_vaddr_perms(struct guest_info * info, addr_t vaddr, pte32_t * guest_pte, pf_error_t * page_perms);
-addr_t v3_get_swapped_pg_addr(struct guest_info * info, pte32_t * guest_pte);
-addr_t v3_map_swp_page(struct guest_info * info, pte32_t * shadow_pte, pte32_t * guest_pte, void * swp_page_ptr);
+addr_t v3_get_swapped_pg_addr(struct v3_vm_info * vm, pte32_t * guest_pte);
+addr_t v3_map_swp_page(struct v3_vm_info * vm, pte32_t * shadow_pte, pte32_t * guest_pte, void * swp_page_ptr);
-int v3_swap_flush(struct guest_info * info);
+int v3_swap_flush(struct v3_vm_info * vm);
#endif
struct vm_timer_ops {
- void (*update_time)(ullong_t cpu_cycles, ullong_t cpu_freq, void * priv_data);
-
+ void (*update_time)(struct guest_info * info, ullong_t cpu_cycles, ullong_t cpu_freq, void * priv_data);
+ void (*advance_timer)(struct guest_info * info);
};
struct vm_timer {
int v3_add_timer(struct guest_info * info, struct vm_timer_ops * ops, void * private_data);
int v3_remove_timer(struct guest_info * info, struct vm_timer * timer);
+void v3_advance_time(struct guest_info * info);
void v3_update_time(struct guest_info * info, ullong_t cycles);
struct vmx_exit_info;
struct guest_info;
+struct v3_vm_info;
-int v3_init_vmx_io_map(struct guest_info * info);
+int v3_init_vmx_io_map(struct v3_vm_info * vm);
int v3_handle_vmx_io_in(struct guest_info * info, struct vmx_exit_info * exit_info);
int v3_handle_vmx_io_ins(struct guest_info * info, struct vmx_exit_info * exit_info);
#include <palacios/vm_guest.h>
-int v3_init_vmx_msr_map(struct guest_info * info);
+int v3_init_vmx_msr_map(struct v3_vm_info * vm);
#endif
#endif
}
+#include <palacios/vm_guest.h>
-static void pit_update_time(ullong_t cpu_cycles, ullong_t cpu_freq, void * private_data) {
+static void pit_update_time(struct guest_info * info, ullong_t cpu_cycles, ullong_t cpu_freq, void * private_data) {
struct vm_device * dev = (struct vm_device *)private_data;
struct pit * state = (struct pit *)dev->private_data;
// ullong_t tmp_ctr = state->pit_counter;
if (handle_crystal_tics(dev, &(state->ch_0), oscillations) == 1) {
// raise interrupt
PrintDebug("8254 PIT: Injecting Timer interrupt to guest\n");
- v3_raise_irq(dev->vm, 0);
+ v3_raise_irq(info->vm_info, 0);
}
//handle_crystal_tics(dev, &(state->ch_1), oscillations);
-static int pit_read_channel(ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
+static int pit_read_channel(struct guest_info * core, ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
struct pit * state = (struct pit *)dev->private_data;
char * val = (char *)dst;
-static int pit_write_channel(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int pit_write_channel(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct pit * state = (struct pit *)dev->private_data;
char val = *(char *)src;
-static int pit_write_command(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int pit_write_command(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct pit * state = (struct pit *)dev->private_data;
struct pit_cmd_word * cmd = (struct pit_cmd_word *)src;
};
+#include <palacios/vm_guest.h>
-static int pit_init(struct guest_info * info, v3_cfg_tree_t * cfg) {
+static int pit_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct pit * pit_state = NULL;
struct vm_device * dev = NULL;
char * name = v3_cfg_val(cfg, "name");
+
+ // PIT is only usable in non-multicore environments
+ // just hardcode the core context
+ struct guest_info * info = &(vm->cores[0]);
uint_t cpu_khz = V3_CPU_KHZ();
ullong_t reload_val = (ullong_t)cpu_khz * 1000;
dev = v3_allocate_device(name, &dev_ops, pit_state);
- if (v3_attach_device(info, dev) == -1) {
+ if (v3_attach_device(vm, dev) == -1) {
PrintError("Could not attach device %s\n", name);
return -1;
}
PrintDebug("\n");
#endif
+
v3_add_timer(info, &timer_ops, dev);
// Get cpu frequency and calculate the global pit oscilattor counter/cycle
}
+// Router-level raise: updates PIC request state and kicks core 0 so it
+// notices the pending interrupt. NOTE(review): hunk elides interior lines;
+// the irq==2 (cascade) handling context is not fully visible here.
-static int pic_raise_intr(struct guest_info * info, void * private_data, int irq) {
+static int pic_raise_intr(struct v3_vm_info * vm, void * private_data, int irq) {
struct pic_internal * state = (struct pic_internal*)private_data;
if (irq == 2) {
return -1;
}
- v3_interrupt_cpu(info, 0);
+ v3_interrupt_cpu(vm, 0);
return 0;
}
-static int pic_lower_intr(struct guest_info * info, void * private_data, int irq) {
+static int pic_lower_intr(struct v3_vm_info * vm, void * private_data, int irq) {
struct pic_internal * state = (struct pic_internal*)private_data;
PrintDebug("[pic_lower_intr] IRQ line %d now low\n", irq);
+// Per-core controller half of the PIC: pending/ack callbacks only.
static struct intr_ctrl_ops intr_ops = {
.intr_pending = pic_intr_pending,
.get_intr_number = pic_get_intr_number,
- .raise_intr = pic_raise_intr,
- .begin_irq = pic_begin_irq,
- .lower_intr = pic_lower_intr,
-
+ .begin_irq = pic_begin_irq
};
+// VM-wide router half of the PIC: IRQ raise/lower entry points.
+static struct intr_router_ops router_ops = {
+ .raise_intr = pic_raise_intr,
+ .lower_intr = pic_lower_intr
+};
-
-static int read_master_port1(ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
+static int read_master_port1(struct guest_info * core, ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
struct pic_internal * state = (struct pic_internal*)dev->private_data;
if (length != 1) {
return 1;
}
-static int read_master_port2(ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
+static int read_master_port2(struct guest_info * core, ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
struct pic_internal * state = (struct pic_internal*)dev->private_data;
if (length != 1) {
}
-static int read_slave_port1(ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
+static int read_slave_port1(struct guest_info * core, ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
struct pic_internal * state = (struct pic_internal*)dev->private_data;
if (length != 1) {
return 1;
}
-static int read_slave_port2(ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
+static int read_slave_port2(struct guest_info * core, ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
struct pic_internal * state = (struct pic_internal*)dev->private_data;
if (length != 1) {
}
-static int write_master_port1(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int write_master_port1(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct pic_internal * state = (struct pic_internal*)dev->private_data;
uchar_t cw = *(uchar_t *)src;
return 1;
}
-static int write_master_port2(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int write_master_port2(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct pic_internal * state = (struct pic_internal*)dev->private_data;
uchar_t cw = *(uchar_t *)src;
return 1;
}
-static int write_slave_port1(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int write_slave_port1(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct pic_internal * state = (struct pic_internal*)dev->private_data;
uchar_t cw = *(uchar_t *)src;
return 1;
}
-static int write_slave_port2(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int write_slave_port2(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct pic_internal * state = (struct pic_internal*)dev->private_data;
uchar_t cw = *(uchar_t *)src;
-static int read_elcr_port(ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
+static int read_elcr_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
struct pic_internal * state = (struct pic_internal*)dev->private_data;
if (length != 1) {
}
-static int write_elcr_port(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int write_elcr_port(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct pic_internal * state = (struct pic_internal*)dev->private_data;
if (length != 1) {
-static int pic_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+#include <palacios/vm_guest.h>
+
+static int pic_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct pic_internal * state = NULL;
state = (struct pic_internal *)V3_Malloc(sizeof(struct pic_internal));
char * name = v3_cfg_val(cfg, "name");
+ // PIC is only usable in non-multicore environments
+ // just hardcode the core context
+ struct guest_info * core = &(vm->cores[0]);
+
V3_ASSERT(state != NULL);
struct vm_device * dev = v3_allocate_device(name, &dev_ops, state);
}
- v3_register_intr_controller(vm, &intr_ops, state);
+ v3_register_intr_controller(core, &intr_ops, state);
+ v3_register_intr_router(vm, &router_ops, state);
state->master_irr = 0;
state->master_isr = 0;
uint32_t eoi;
+ struct guest_info * core;
};
static int apic_read(addr_t guest_addr, void * dst, uint_t length, void * priv_data);
/* Timer Functions */
-static void apic_update_time(ullong_t cpu_cycles, ullong_t cpu_freq, void * priv_data) {
+static void apic_update_time(struct guest_info * info, ullong_t cpu_cycles, ullong_t cpu_freq, void * priv_data) {
struct vm_device * dev = (struct vm_device *)priv_data;
struct apic_state * apic = (struct apic_state *)dev->private_data;
// The 32 bit GCC runtime is a pile of shit
PrintDebug("Raising APIC Timer interrupt (periodic=%d) (icnt=%d) (div=%d)\n",
apic->tmr_vec_tbl.tmr_mode, apic->tmr_init_cnt, shift_num);
- if (apic_intr_pending(dev->vm, priv_data)) {
+ if (apic_intr_pending(info, priv_data)) {
PrintDebug("Overriding pending IRQ %d\n", apic_get_intr_number(dev->vm, priv_data));
}
static int apic_free(struct vm_device * dev) {
- struct guest_info * info = dev->vm;
+ // struct apic_state * apic = (struct apic_state *)dev->private_data;
- v3_unhook_msr(info, BASE_ADDR_MSR);
+ v3_unhook_msr(dev->vm, BASE_ADDR_MSR);
return 0;
}
-static int apic_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+static int apic_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
PrintDebug("Creating APIC\n");
char * name = v3_cfg_val(cfg, "name");
return 0;
}
-static int atapi_read10(struct vm_device * dev, struct ide_channel * channel) {
+static int atapi_read10(struct guest_info * core,
+ struct vm_device * dev,
+ struct ide_channel * channel) {
struct ide_drive * drive = get_selected_drive(channel);
struct atapi_read10_cmd * cmd = (struct atapi_read10_cmd *)(drive->data_buf);
uint32_t lba = be_to_le_32(cmd->lba);
if (channel->features.dma) {
if (channel->dma_status.active == 1) {
- if (dma_read(dev, channel) == -1) {
+ if (dma_read(core, dev, channel) == -1) {
PrintError("Error in DMA read for CD Read10 command\n");
return -1;
}
}
-static int atapi_handle_packet(struct vm_device * dev, struct ide_channel * channel) {
+static int atapi_handle_packet(struct guest_info * core, struct vm_device * dev, struct ide_channel * channel) {
struct ide_drive * drive = get_selected_drive(channel);
uint8_t cmd = drive->data_buf[0];
break;
case 0x28: // read(10)
- if (atapi_read10(dev, channel) == -1) {
+ if (atapi_read10(core, dev, channel) == -1) {
PrintError("IDE: Error in ATAPI read (%x)\n", cmd);
return -1;
}
uint_t cons_offset;
};
-static int handle_info_write(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int handle_info_write(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct debug_state * state = (struct debug_state *)dev->private_data;
state->info_buf[state->info_offset++] = *(char*)src;
}
-static int handle_debug_write(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int handle_debug_write(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct debug_state * state = (struct debug_state *)dev->private_data;
state->debug_buf[state->debug_offset++] = *(char*)src;
}
-static int handle_console_write(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int handle_console_write(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct debug_state * state = (struct debug_state *)dev->private_data;
state->cons_buf[state->cons_offset++] = *(char*)src;
}
-static int handle_gen_write(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int handle_gen_write(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
switch (length) {
case 1:
-static int debug_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+static int debug_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct debug_state * state = NULL;
char * name = v3_cfg_val(cfg, "name");
};
-static int connect_fn(struct guest_info * info,
+static int connect_fn(struct v3_vm_info * vm,
void * frontend_data,
struct v3_dev_blk_ops * ops,
v3_cfg_tree_t * cfg,
model->seek_usecs = seek_time;
model->private_data = private_data;
- if (v3_dev_connect_blk(info, v3_cfg_val(frontend_cfg, "tag"),
+ if (v3_dev_connect_blk(vm, v3_cfg_val(frontend_cfg, "tag"),
&blk_ops, frontend_cfg, model) == -1) {
PrintError("Could not connect to frontend %s\n",
v3_cfg_val(frontend_cfg, "tag"));
return 0;
}
-static int model_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+static int model_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
char * name = v3_cfg_val(cfg, "name");
-static int generic_write_port_passthrough(uint16_t port, void * src,
+static int generic_write_port_passthrough(struct guest_info * core, uint16_t port, void * src,
uint_t length, struct vm_device * dev) {
uint_t i;
return length;
}
-static int generic_read_port_passthrough(uint16_t port, void * src,
+static int generic_read_port_passthrough(struct guest_info * core, uint16_t port, void * src,
uint_t length, struct vm_device * dev) {
uint_t i;
return length;
}
-static int generic_write_port_ignore(uint16_t port, void * src,
+static int generic_write_port_ignore(struct guest_info * core, uint16_t port, void * src,
uint_t length, struct vm_device * dev) {
uint_t i;
return length;
}
-static int generic_read_port_ignore(uint16_t port, void * src,
+static int generic_read_port_ignore(struct guest_info * core, uint16_t port, void * src,
uint_t length, struct vm_device * dev) {
PrintDebug("generic: reading 0x%x bytes from port 0x%x ...", length, port);
-static int generic_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+static int generic_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct generic_internal * state = (struct generic_internal *)V3_Malloc(sizeof(struct generic_internal));
char * name = v3_cfg_val(cfg, "name");
};
-static int io_read(ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
+static int io_read(struct guest_info * core, ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
PrintError("Unhandled read on port %x\n", port);
return -1;
}
-static int io_write(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int io_write(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
PrintError("Unhandled write on port %x\n", port);
return -1;
}
-static int i440_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+static int i440_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct pci_device * pci_dev = NULL;
struct v3_pci_bar bars[6];
int i;
}
-static int dma_read(struct vm_device * dev, struct ide_channel * channel);
-static int dma_write(struct vm_device * dev, struct ide_channel * channel);
+static int dma_read(struct guest_info * core, struct vm_device * dev, struct ide_channel * channel);
+static int dma_write(struct guest_info * core, struct vm_device * dev, struct ide_channel * channel);
/* ATAPI functions */
#endif
/* IO Operations */
-static int dma_read(struct vm_device * dev, struct ide_channel * channel) {
+static int dma_read(struct guest_info * core, struct vm_device * dev, struct ide_channel * channel) {
struct ide_drive * drive = get_selected_drive(channel);
// This is at top level scope to do the EOT test at the end
struct ide_dma_prd prd_entry;
PrintDebug("PRD table address = %x\n", channel->dma_prd_addr);
- ret = read_guest_pa_memory(dev->vm, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
+ ret = read_guest_pa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
if (ret != sizeof(struct ide_dma_prd)) {
PrintError("Could not read PRD\n");
drive->current_lba++;
- ret = write_guest_pa_memory(dev->vm, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
+ ret = write_guest_pa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
if (ret != bytes_to_write) {
PrintError("Failed to copy data into guest memory... (ret=%d)\n", ret);
}
-static int dma_write(struct vm_device * dev, struct ide_channel * channel) {
+static int dma_write(struct guest_info * core, struct vm_device * dev, struct ide_channel * channel) {
struct ide_drive * drive = get_selected_drive(channel);
// This is at top level scope to do the EOT test at the end
struct ide_dma_prd prd_entry;
PrintDebug("PRD Table address = %x\n", channel->dma_prd_addr);
- ret = read_guest_pa_memory(dev->vm, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
+ ret = read_guest_pa_memory(core, prd_entry_addr, sizeof(struct ide_dma_prd), (void *)&prd_entry);
if (ret != sizeof(struct ide_dma_prd)) {
PrintError("Could not read PRD\n");
bytes_to_write = (prd_bytes_left > HD_SECTOR_SIZE) ? HD_SECTOR_SIZE : prd_bytes_left;
- ret = read_guest_pa_memory(dev->vm, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
+ ret = read_guest_pa_memory(core, prd_entry.base_addr + prd_offset, bytes_to_write, drive->data_buf);
if (ret != bytes_to_write) {
PrintError("Faild to copy data from guest memory... (ret=%d)\n", ret);
#define DMA_CHANNEL_FLAG 0x08
-static int write_dma_port(ushort_t port, void * src, uint_t length, void * private_data) {
+static int write_dma_port(struct guest_info * core, ushort_t port, void * src, uint_t length, void * private_data) {
struct vm_device * dev = (struct vm_device *)private_data;
struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
if (channel->dma_cmd.read == 1) {
// DMA Read
- if (dma_read(dev, channel) == -1) {
+ if (dma_read(core, dev, channel) == -1) {
PrintError("Failed DMA Read\n");
return -1;
}
} else {
// DMA write
- if (dma_write(dev, channel) == -1) {
+ if (dma_write(core, dev, channel) == -1) {
PrintError("Failed DMA Write\n");
return -1;
}
}
-static int read_dma_port(ushort_t port, void * dst, uint_t length, void * private_data) {
+static int read_dma_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, void * private_data) {
struct vm_device * dev = (struct vm_device *)private_data;
struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
uint16_t port_offset = port & (DMA_CHANNEL_FLAG - 1);
-static int write_cmd_port(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int write_cmd_port(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
struct ide_channel * channel = get_selected_channel(ide, port);
struct ide_drive * drive = get_selected_drive(channel);
if (channel->dma_status.active == 1) {
// DMA Read
- if (dma_read(dev, channel) == -1) {
+ if (dma_read(core, dev, channel) == -1) {
PrintError("Failed DMA Read\n");
return -1;
}
if (channel->dma_status.active == 1) {
// DMA Write
- if (dma_write(dev, channel) == -1) {
+ if (dma_write(core, dev, channel) == -1) {
PrintError("Failed DMA Write\n");
return -1;
}
}
-static int write_data_port(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int write_data_port(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
struct ide_channel * channel = get_selected_channel(ide, port);
struct ide_drive * drive = get_selected_drive(channel);
return -1;
case 0xa0: // ATAPI packet command
- if (atapi_handle_packet(dev, channel) == -1) {
+ if (atapi_handle_packet(core, dev, channel) == -1) {
PrintError("Error handling ATAPI packet\n");
return -1;
}
}
-static int read_drive_id(uint8_t * dst, uint_t length, struct vm_device * dev, struct ide_channel * channel) {
+static int read_drive_id( uint8_t * dst, uint_t length, struct vm_device * dev, struct ide_channel * channel) {
struct ide_drive * drive = get_selected_drive(channel);
channel->status.busy = 0;
}
-static int ide_read_data_port(ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
+static int ide_read_data_port(struct guest_info * core, ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
struct ide_channel * channel = get_selected_channel(ide, port);
struct ide_drive * drive = get_selected_drive(channel);
return length;
}
-static int write_port_std(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int write_port_std(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
struct ide_channel * channel = get_selected_channel(ide, port);
struct ide_drive * drive = get_selected_drive(channel);
}
-static int read_port_std(ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
+static int read_port_std(struct guest_info * core, ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
struct ide_internal * ide = (struct ide_internal *)(dev->private_data);
struct ide_channel * channel = get_selected_channel(ide, port);
struct ide_drive * drive = get_selected_drive(channel);
-static int connect_fn(struct guest_info * info,
+static int connect_fn(struct v3_vm_info * vm,
void * frontend_data,
struct v3_dev_blk_ops * ops,
v3_cfg_tree_t * cfg,
return -1;
}
-
drive->ops = ops;
if (ide->ide_pci) {
-static int ide_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+static int ide_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct ide_internal * ide = (struct ide_internal *)V3_Malloc(sizeof(struct ide_internal));
char * name = v3_cfg_val(cfg, "name");
#include <palacios/vmm_telemetry.h>
-static int key_event_handler(struct guest_info * info,
+static int key_event_handler(struct v3_vm_info * vm,
struct v3_keyboard_event * evt,
void * private_data) {
struct vm_device * dev = (struct vm_device *)private_data;
PrintDebug("keyboard: injected status 0x%x, and scancode 0x%x\n", evt->status, evt->scan_code);
if (evt->scan_code == 0x44) { // F10 debug dump
- v3_print_guest_state(info);
+ int i = 0;
+ for (i = 0; i < vm->num_cores; i++) {
+ v3_print_guest_state(&(vm->cores[i]));
+ }
// PrintGuestPageTables(info, info->shdw_pg_state.guest_cr3);
}
#ifdef CONFIG_SYMBIOTIC
sym_arg_t a2 = 0x3333;
sym_arg_t a3 = 0x4444;
sym_arg_t a4 = 0x5555;
+ uint64_t call_start = 0;
+ uint64_t call_end = 0;
+
+ V3_Print("Exits before symcall: %d\n", (uint32_t)info->num_exits);
+ rdtscll(call_start);
v3_sym_call5(info, SYMCALL_TEST, &a0, &a1, &a2, &a3, &a4);
+ rdtscll(call_end);
+
+ V3_Print("Symcall latency = %d cycles (%d exits)\n", (uint32_t)(call_end - call_start), (uint32_t)info->num_exits);
V3_Print("Symcall Test Returned arg0=%x, arg1=%x, arg2=%x, arg3=%x, arg4=%x\n",
(uint32_t)a0, (uint32_t)a1, (uint32_t)a2, (uint32_t)a3, (uint32_t)a4);
PrintDebug("Toggling Debugging\n");
v3_dbg_enable ^= 1;
} else if (evt->scan_code == 0x41) { // F7 telemetry dump
+#ifdef CONFIG_TELEMETRY
v3_print_telemetry(info);
-
+#endif
}
}
-static int mouse_event_handler(struct guest_info * info,
+static int mouse_event_handler(struct v3_vm_info * vm,
struct v3_mouse_event * evt,
void * private_data) {
struct vm_device * dev = (struct vm_device *)private_data;
}
}
-static int keyboard_read_delay(ushort_t port, void * dest, uint_t length, struct vm_device * dev) {
+static int keyboard_read_delay(struct guest_info * core, ushort_t port, void * dest, uint_t length, struct vm_device * dev) {
if (length == 1) {
*(uint8_t *)dest = v3_inb(port);
-static int keyboard_write_command(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int keyboard_write_command(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct keyboard_internal * state = (struct keyboard_internal *)(dev->private_data);
uint8_t cmd = *(uint8_t *)src;
return length;
}
-static int keyboard_read_status(ushort_t port, void * dest, uint_t length, struct vm_device * dev) {
+static int keyboard_read_status(struct guest_info * core, ushort_t port, void * dest, uint_t length, struct vm_device * dev) {
struct keyboard_internal *state = (struct keyboard_internal *)(dev->private_data);
if (length != 1) {
return length;
}
-static int keyboard_write_output(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int keyboard_write_output(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct keyboard_internal *state = (struct keyboard_internal *)(dev->private_data);
int ret = length;
return ret;
}
-static int keyboard_read_input(ushort_t port, void * dest, uint_t length, struct vm_device * dev) {
+static int keyboard_read_input(struct guest_info * core, ushort_t port, void * dest, uint_t length, struct vm_device * dev) {
struct keyboard_internal * state = (struct keyboard_internal *)(dev->private_data);
if (length != 1) {
-static int keyboard_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+static int keyboard_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct keyboard_internal * keyboard_state = NULL;
char * name = v3_cfg_val(cfg, "name");
}
-static int handle_kick(struct vm_device * dev) {
+static int handle_kick(struct guest_info * core, struct vm_device * dev) {
struct virtio_balloon_state * virtio = (struct virtio_balloon_state *)dev->private_data;
struct virtio_queue * q = virtio->cur_queue;
tmp_desc->flags, tmp_desc->next);
- if (guest_pa_to_host_va(dev->vm, tmp_desc->addr_gpa, (addr_t *)&(page_addr)) == -1) {
+ if (guest_pa_to_host_va(core, tmp_desc->addr_gpa, (addr_t *)&(page_addr)) == -1) {
PrintError("Could not translate block header address\n");
return -1;
}
return 0;
}
-static int virtio_io_write(uint16_t port, void * src, uint_t length, void * private_data) {
+static int virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
struct vm_device * dev = (struct vm_device *)private_data;
struct virtio_balloon_state * virtio = (struct virtio_balloon_state *)dev->private_data;
int port_idx = port % virtio->io_range_size;
// round up to next page boundary.
virtio->cur_queue->ring_used_addr = (virtio->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
- if (guest_pa_to_host_va(dev->vm, virtio->cur_queue->ring_desc_addr, (addr_t *)&(virtio->cur_queue->desc)) == -1) {
+ if (guest_pa_to_host_va(core, virtio->cur_queue->ring_desc_addr, (addr_t *)&(virtio->cur_queue->desc)) == -1) {
PrintError("Could not translate ring descriptor address\n");
return -1;
}
- if (guest_pa_to_host_va(dev->vm, virtio->cur_queue->ring_avail_addr, (addr_t *)&(virtio->cur_queue->avail)) == -1) {
+ if (guest_pa_to_host_va(core, virtio->cur_queue->ring_avail_addr, (addr_t *)&(virtio->cur_queue->avail)) == -1) {
PrintError("Could not translate ring available address\n");
return -1;
}
- if (guest_pa_to_host_va(dev->vm, virtio->cur_queue->ring_used_addr, (addr_t *)&(virtio->cur_queue->used)) == -1) {
+ if (guest_pa_to_host_va(core, virtio->cur_queue->ring_used_addr, (addr_t *)&(virtio->cur_queue->used)) == -1) {
PrintError("Could not translate ring used address\n");
return -1;
}
break;
case VRING_Q_NOTIFY_PORT:
PrintDebug("Handling Kick\n");
- if (handle_kick(dev) == -1) {
+ if (handle_kick(core, dev) == -1) {
PrintError("Could not handle Balloon Notification\n");
return -1;
}
}
-static int virtio_io_read(uint16_t port, void * dst, uint_t length, void * private_data) {
+static int virtio_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
struct vm_device * dev = (struct vm_device *)private_data;
struct virtio_balloon_state * virtio = (struct virtio_balloon_state *)dev->private_data;
int port_idx = port % virtio->io_range_size;
-static int virtio_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+static int virtio_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
struct virtio_balloon_state * virtio_state = NULL;
struct pci_device * pci_dev = NULL;
struct virtio_dev_state {
struct vm_device * pci_bus;
struct list_head dev_list;
- struct guest_info * vm;
};
struct virtio_blk_state {
// multiple block operations need to increment the sector
-static int handle_block_op(struct virtio_blk_state * blk_state, struct blk_op_hdr * hdr,
+static int handle_block_op(struct guest_info * core, struct virtio_blk_state * blk_state, struct blk_op_hdr * hdr,
struct vring_desc * buf_desc, uint8_t * status) {
uint8_t * buf = NULL;
PrintDebug("Handling Block op\n");
- if (guest_pa_to_host_va(blk_state->virtio_dev->vm, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
+ if (guest_pa_to_host_va(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
PrintError("Could not translate buffer address\n");
return -1;
}
-static int handle_kick(struct virtio_blk_state * blk_state) {
+static int handle_kick(struct guest_info * core, struct virtio_blk_state * blk_state) {
struct virtio_queue * q = &(blk_state->queue);
PrintDebug("VIRTIO KICK: cur_index=%d (mod=%d), avail_index=%d\n",
PrintDebug("Header Descriptor (ptr=%p) gpa=%p, len=%d, flags=%x, next=%d\n", hdr_desc,
(void *)(hdr_desc->addr_gpa), hdr_desc->length, hdr_desc->flags, hdr_desc->next);
- if (guest_pa_to_host_va(blk_state->virtio_dev->vm, hdr_desc->addr_gpa, &(hdr_addr)) == -1) {
+ if (guest_pa_to_host_va(core, hdr_desc->addr_gpa, &(hdr_addr)) == -1) {
PrintError("Could not translate block header address\n");
return -1;
}
PrintDebug("Buffer Descriptor (ptr=%p) gpa=%p, len=%d, flags=%x, next=%d\n", buf_desc,
(void *)(buf_desc->addr_gpa), buf_desc->length, buf_desc->flags, buf_desc->next);
- if (handle_block_op(blk_state, &hdr, buf_desc, &tmp_status) == -1) {
+ if (handle_block_op(core, blk_state, &hdr, buf_desc, &tmp_status) == -1) {
PrintError("Error handling block operation\n");
return -1;
}
PrintDebug("Status Descriptor (ptr=%p) gpa=%p, len=%d, flags=%x, next=%d\n", status_desc,
(void *)(status_desc->addr_gpa), status_desc->length, status_desc->flags, status_desc->next);
- if (guest_pa_to_host_va(blk_state->virtio_dev->vm, status_desc->addr_gpa, (addr_t *)&(status_ptr)) == -1) {
+ if (guest_pa_to_host_va(core, status_desc->addr_gpa, (addr_t *)&(status_ptr)) == -1) {
PrintError("Could not translate status address\n");
return -1;
}
return 0;
}
-static int virtio_io_write(uint16_t port, void * src, uint_t length, void * private_data) {
+static int virtio_io_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * private_data) {
struct virtio_blk_state * blk_state = (struct virtio_blk_state *)private_data;
int port_idx = port % blk_state->io_range_size;
// round up to next page boundary.
blk_state->queue.ring_used_addr = (blk_state->queue.ring_used_addr + 0xfff) & ~0xfff;
- if (guest_pa_to_host_va(blk_state->virtio_dev->vm, blk_state->queue.ring_desc_addr, (addr_t *)&(blk_state->queue.desc)) == -1) {
+ if (guest_pa_to_host_va(core, blk_state->queue.ring_desc_addr, (addr_t *)&(blk_state->queue.desc)) == -1) {
PrintError("Could not translate ring descriptor address\n");
return -1;
}
- if (guest_pa_to_host_va(blk_state->virtio_dev->vm, blk_state->queue.ring_avail_addr, (addr_t *)&(blk_state->queue.avail)) == -1) {
+ if (guest_pa_to_host_va(core, blk_state->queue.ring_avail_addr, (addr_t *)&(blk_state->queue.avail)) == -1) {
PrintError("Could not translate ring available address\n");
return -1;
}
- if (guest_pa_to_host_va(blk_state->virtio_dev->vm, blk_state->queue.ring_used_addr, (addr_t *)&(blk_state->queue.used)) == -1) {
+ if (guest_pa_to_host_va(core, blk_state->queue.ring_used_addr, (addr_t *)&(blk_state->queue.used)) == -1) {
PrintError("Could not translate ring used address\n");
return -1;
}
break;
case VRING_Q_NOTIFY_PORT:
PrintDebug("Handling Kick\n");
- if (handle_kick(blk_state) == -1) {
+ if (handle_kick(core, blk_state) == -1) {
PrintError("Could not handle Block Notification\n");
return -1;
}
}
-static int virtio_io_read(uint16_t port, void * dst, uint_t length, void * private_data) {
+static int virtio_io_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * private_data) {
struct virtio_blk_state * blk_state = (struct virtio_blk_state *)private_data;
int port_idx = port % blk_state->io_range_size;
}
-static int connect_fn(struct guest_info * info,
+static int connect_fn(struct v3_vm_info * vm,
void * frontend_data,
struct v3_dev_blk_ops * ops,
v3_cfg_tree_t * cfg,
}
-static int virtio_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+static int virtio_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct vm_device * pci_bus = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
struct virtio_dev_state * virtio_state = NULL;
char * name = v3_cfg_val(cfg, "name");
INIT_LIST_HEAD(&(virtio_state->dev_list));
virtio_state->pci_bus = pci_bus;
- virtio_state->vm = vm;
+
struct vm_device * dev = v3_allocate_device(name, &dev_ops, virtio_state);
if (v3_attach_device(vm, dev) == -1) {
}
-static int handle_timer_event(struct guest_info * info,
+static int handle_timer_event(struct v3_vm_info * vm,
struct v3_timer_event * evt,
void * priv_data) {
}
}
-static int init_nvram_state(struct vm_device * dev) {
- struct guest_info * info = dev->vm;
+static int init_nvram_state(struct v3_vm_info * vm, struct vm_device * dev) {
+
struct nvram_internal * nvram = (struct nvram_internal *)dev->private_data;
memset(nvram->mem_state, 0, NVRAM_REG_MAX);
nvram->us = 0;
nvram->pus = 0;
- set_memory_size(nvram, info->mem_size);
+ set_memory_size(nvram, vm->mem_size);
init_harddrives(nvram);
nvram->dev_state = NVRAM_READY;
-static int nvram_write_reg_port(ushort_t port,
- void * src,
- uint_t length,
- struct vm_device * dev) {
+static int nvram_write_reg_port(struct guest_info * core, ushort_t port,
+ void * src, uint_t length, struct vm_device * dev) {
struct nvram_internal * data = (struct nvram_internal *)dev->private_data;
return 1;
}
-static int nvram_read_data_port(ushort_t port,
- void * dst,
- uint_t length,
- struct vm_device * dev) {
+static int nvram_read_data_port(struct guest_info * core, ushort_t port,
+ void * dst, uint_t length, struct vm_device * dev) {
struct nvram_internal * data = (struct nvram_internal *)dev->private_data;
}
-static int nvram_write_data_port(ushort_t port,
- void * src,
- uint_t length,
- struct vm_device * dev) {
+static int nvram_write_data_port(struct guest_info * core, ushort_t port,
+ void * src, uint_t length, struct vm_device * dev) {
struct nvram_internal * data = (struct nvram_internal *)dev->private_data;
-static int nvram_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+static int nvram_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct nvram_internal * nvram_state = NULL;
struct vm_device * ide = v3_find_dev(vm, v3_cfg_val(cfg, "storage"));
char * name = v3_cfg_val(cfg, "name");
return -1;
}
- init_nvram_state(dev);
+ init_nvram_state(vm, dev);
// hook ports
v3_dev_hook_io(dev, NVRAM_REG_PORT, NULL, &nvram_write_reg_port);
};
-static int handle_gen_write(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int handle_gen_write(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct debug_state * state = (struct debug_state *)dev->private_data;
state->debug_buf[state->debug_offset++] = *(char*)src;
-static int debug_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+static int debug_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct debug_state * state = NULL;
char * name = v3_cfg_val(cfg, "name");
#include <devices/pci.h>
#include <devices/pci_types.h>
+#include <palacios/vm_guest.h>
+
#ifndef CONFIG_DEBUG_PCI
-static int addr_port_read(ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
+static int addr_port_read(struct guest_info * core, ushort_t port, void * dst, uint_t length, struct vm_device * dev) {
struct pci_internal * pci_state = (struct pci_internal *)dev->private_data;
int reg_offset = port & 0x3;
uint8_t * reg_addr = ((uint8_t *)&(pci_state->addr_reg.val)) + reg_offset;
}
-static int addr_port_write(ushort_t port, void * src, uint_t length, struct vm_device * dev) {
+static int addr_port_write(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * dev) {
struct pci_internal * pci_state = (struct pci_internal *)dev->private_data;
int reg_offset = port & 0x3;
uint8_t * reg_addr = ((uint8_t *)&(pci_state->addr_reg.val)) + reg_offset;
}
-static int data_port_read(ushort_t port, void * dst, uint_t length, struct vm_device * vmdev) {
+static int data_port_read(struct guest_info * core, ushort_t port, void * dst, uint_t length, struct vm_device * vmdev) {
struct pci_internal * pci_state = (struct pci_internal *)(vmdev->private_data);
struct pci_device * pci_dev = NULL;
uint_t reg_num = (pci_state->addr_reg.reg_num << 2) + (port & 0x3);
}
}
-
static int bar_update(struct guest_info * info, struct pci_device * pci, int bar_num, uint32_t new_val) {
struct v3_pci_bar * bar = &(pci->bar[bar_num]);
PrintDebug("Rehooking PCI IO port (old port=%u) (new port=%u)\n",
PCI_IO_BASE(bar->val) + i, PCI_IO_BASE(new_val) + i);
- v3_unhook_io_port(info, PCI_IO_BASE(bar->val) + i);
+ v3_unhook_io_port(info->vm_info, PCI_IO_BASE(bar->val) + i);
- if (v3_hook_io_port(info, PCI_IO_BASE(new_val) + i,
+ if (v3_hook_io_port(info->vm_info, PCI_IO_BASE(new_val) + i,
bar->io_read, bar->io_write,
bar->private_data) == -1) {
PrintError("Could not hook PCI IO port (old port=%u) (new port=%u)\n",
PCI_IO_BASE(bar->val) + i, PCI_IO_BASE(new_val) + i);
- v3_print_io_map(info);
+ v3_print_io_map(info->vm_info);
return -1;
}
}
break;
}
case PCI_BAR_MEM32: {
- v3_unhook_mem(info, (addr_t)(bar->val));
+ v3_unhook_mem(info->vm_info, (addr_t)(bar->val));
if (bar->mem_read) {
- v3_hook_full_mem(info, PCI_MEM32_BASE(new_val),
+ v3_hook_full_mem(info->vm_info, PCI_MEM32_BASE(new_val),
PCI_MEM32_BASE(new_val) + (bar->num_pages * PAGE_SIZE_4KB),
bar->mem_read, bar->mem_write, pci->priv_data);
} else {
}
-static int data_port_write(ushort_t port, void * src, uint_t length, struct vm_device * vmdev) {
+static int data_port_write(struct guest_info * core, ushort_t port, void * src, uint_t length, struct vm_device * vmdev) {
struct pci_internal * pci_state = (struct pci_internal *)vmdev->private_data;
struct pci_device * pci_dev = NULL;
uint_t reg_num = (pci_state->addr_reg.reg_num << 2) + (port & 0x3);
// check special flags....
// bar_update
- if (bar_update(vmdev->vm, pci_dev, i, *(uint32_t *)(pci_dev->config_space + bar_offset)) == -1) {
+ if (bar_update(core, pci_dev, i, *(uint32_t *)(pci_dev->config_space + bar_offset)) == -1) {
PrintError("PCI Device %s: Bar update Error Bar=%d\n", pci_dev->name, i);
return -1;
}
-static int pci_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+static int pci_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct pci_internal * pci_state = V3_Malloc(sizeof(struct pci_internal));
int i = 0;
char * name = v3_cfg_val(cfg, "name");
device_register("PCI", pci_init)
-static inline int init_bars(struct guest_info * info, struct pci_device * pci_dev) {
+static inline int init_bars(struct v3_vm_info * vm, struct pci_device * pci_dev) {
int i = 0;
for (i = 0; i < 6; i++) {
for (j = 0; j < pci_dev->bar[i].num_ports; j++) {
// hook IO
if (pci_dev->bar[i].default_base_port != 0xffff) {
- if (v3_hook_io_port(info, pci_dev->bar[i].default_base_port + j,
+ if (v3_hook_io_port(vm, pci_dev->bar[i].default_base_port + j,
pci_dev->bar[i].io_read, pci_dev->bar[i].io_write,
pci_dev->bar[i].private_data) == -1) {
PrintError("Could not hook default io port %x\n", pci_dev->bar[i].default_base_port + j);
// hook memory
if (pci_dev->bar[i].mem_read) {
// full hook
- v3_hook_full_mem(info, pci_dev->bar[i].default_base_addr,
+ v3_hook_full_mem(vm, pci_dev->bar[i].default_base_addr,
pci_dev->bar[i].default_base_addr + (pci_dev->bar[i].num_pages * PAGE_SIZE_4KB),
pci_dev->bar[i].mem_read, pci_dev->bar[i].mem_write, pci_dev->priv_data);
} else if (pci_dev->bar[i].mem_write) {
-static int passthrough_init(struct guest_info * info, v3_cfg_tree_t * cfg) {
+static int passthrough_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct pt_dev_state * state = V3_Malloc(sizeof(struct pt_dev_state));
struct vm_device * dev = NULL;
- struct vm_device * pci = v3_find_dev(info, v3_cfg_val(cfg, "bus"));
+ struct vm_device * pci = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
char * name = v3_cfg_val(cfg, "name");
memset(state, 0, sizeof(struct pt_dev_state));
dev = v3_allocate_device(name, &dev_ops, state);
- if (v3_attach_device(info, dev) == -1) {
+ if (v3_attach_device(vm, dev) == -1) {
PrintError("Could not attach device %s\n", name);
return -1;
}
return 0;
}
- setup_virt_pci_dev(info, dev);
+ setup_virt_pci_dev(vm, dev);
- v3_hook_irq(info, atoi(v3_cfg_val(cfg, "irq")), irq_handler, dev);
+ v3_hook_irq(vm, atoi(v3_cfg_val(cfg, "irq")), irq_handler, dev);
// v3_hook_irq(info, 64, irq_handler, dev);
return 0;
return 0;
}
-static int piix3_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+static int piix3_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct v3_southbridge * piix3 = (struct v3_southbridge *)V3_Malloc(sizeof(struct v3_southbridge));
struct vm_device * dev = NULL;
struct vm_device * pci = v3_find_dev(vm, v3_cfg_val(cfg, "bus"));
-static int disk_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+static int disk_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct disk_state * disk = NULL;
struct v3_cfg_file * file = NULL;
char * name = v3_cfg_val(cfg, "name");
-static int blk_init(struct guest_info * vm, v3_cfg_tree_t * cfg) {
+static int blk_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
struct blk_state * blk = NULL;
v3_cfg_tree_t * frontend_cfg = v3_cfg_subtree(cfg, "frontend");
char * name = v3_cfg_val(cfg, "name");
-static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * vm_info) {
+static void Init_VMCB_BIOS(vmcb_t * vmcb, struct guest_info * core) {
vmcb_ctrl_t * ctrl_area = GET_VMCB_CTRL_AREA(vmcb);
vmcb_saved_state_t * guest_state = GET_VMCB_SAVE_STATE_AREA(vmcb);
uint_t i;
/* Setup Guest Machine state */
- vm_info->vm_regs.rsp = 0x00;
- vm_info->rip = 0xfff0;
+ core->vm_regs.rsp = 0x00;
+ core->rip = 0xfff0;
- vm_info->vm_regs.rdx = 0x00000f00;
+ core->vm_regs.rdx = 0x00000f00;
- vm_info->cpl = 0;
+ core->cpl = 0;
- vm_info->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
- vm_info->ctrl_regs.cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode
- vm_info->ctrl_regs.efer |= EFER_MSR_svm_enable;
+ core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
+ core->ctrl_regs.cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode
+ core->ctrl_regs.efer |= EFER_MSR_svm_enable;
- vm_info->segments.cs.selector = 0xf000;
- vm_info->segments.cs.limit = 0xffff;
- vm_info->segments.cs.base = 0x0000000f0000LL;
+ core->segments.cs.selector = 0xf000;
+ core->segments.cs.limit = 0xffff;
+ core->segments.cs.base = 0x0000000f0000LL;
// (raw attributes = 0xf3)
- vm_info->segments.cs.type = 0x3;
- vm_info->segments.cs.system = 0x1;
- vm_info->segments.cs.dpl = 0x3;
- vm_info->segments.cs.present = 1;
+ core->segments.cs.type = 0x3;
+ core->segments.cs.system = 0x1;
+ core->segments.cs.dpl = 0x3;
+ core->segments.cs.present = 1;
- struct v3_segment * segregs [] = {&(vm_info->segments.ss), &(vm_info->segments.ds),
- &(vm_info->segments.es), &(vm_info->segments.fs),
- &(vm_info->segments.gs), NULL};
+ struct v3_segment * segregs [] = {&(core->segments.ss), &(core->segments.ds),
+ &(core->segments.es), &(core->segments.fs),
+ &(core->segments.gs), NULL};
for ( i = 0; segregs[i] != NULL; i++) {
struct v3_segment * seg = segregs[i];
seg->present = 1;
}
- vm_info->segments.gdtr.limit = 0x0000ffff;
- vm_info->segments.gdtr.base = 0x0000000000000000LL;
- vm_info->segments.idtr.limit = 0x0000ffff;
- vm_info->segments.idtr.base = 0x0000000000000000LL;
+ core->segments.gdtr.limit = 0x0000ffff;
+ core->segments.gdtr.base = 0x0000000000000000LL;
+ core->segments.idtr.limit = 0x0000ffff;
+ core->segments.idtr.base = 0x0000000000000000LL;
- vm_info->segments.ldtr.selector = 0x0000;
- vm_info->segments.ldtr.limit = 0x0000ffff;
- vm_info->segments.ldtr.base = 0x0000000000000000LL;
- vm_info->segments.tr.selector = 0x0000;
- vm_info->segments.tr.limit = 0x0000ffff;
- vm_info->segments.tr.base = 0x0000000000000000LL;
+ core->segments.ldtr.selector = 0x0000;
+ core->segments.ldtr.limit = 0x0000ffff;
+ core->segments.ldtr.base = 0x0000000000000000LL;
+ core->segments.tr.selector = 0x0000;
+ core->segments.tr.limit = 0x0000ffff;
+ core->segments.tr.base = 0x0000000000000000LL;
- vm_info->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
- vm_info->dbg_regs.dr7 = 0x0000000000000400LL;
+ core->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
+ core->dbg_regs.dr7 = 0x0000000000000400LL;
- v3_init_svm_io_map(vm_info);
- ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(vm_info->io_map.arch_data);
+ /** THESE NEED TO BE MOVED TO GLOBAL LOCATION **/
+ v3_init_svm_io_map(core->vm_info);
+ ctrl_area->IOPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->io_map.arch_data);
ctrl_area->instrs.IOIO_PROT = 1;
-
- v3_init_svm_msr_map(vm_info);
- ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(vm_info->msr_map.arch_data);
+ v3_init_svm_msr_map(core->vm_info);
+ ctrl_area->MSRPM_BASE_PA = (addr_t)V3_PAddr(core->vm_info->msr_map.arch_data);
ctrl_area->instrs.MSR_PROT = 1;
+ /** *** **/
PrintDebug("Exiting on interrupts\n");
ctrl_area->instrs.INTR = 1;
- if (vm_info->shdw_pg_mode == SHADOW_PAGING) {
+ if (core->shdw_pg_mode == SHADOW_PAGING) {
PrintDebug("Creating initial shadow page table\n");
/* JRL: This is a performance killer, and a simplistic solution */
ctrl_area->guest_ASID = 1;
- if (v3_init_passthrough_pts(vm_info) == -1) {
+ if (v3_init_passthrough_pts(core) == -1) {
PrintError("Could not initialize passthrough page tables\n");
return ;
}
- vm_info->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
+ core->shdw_pg_state.guest_cr0 = 0x0000000000000010LL;
PrintDebug("Created\n");
- vm_info->ctrl_regs.cr0 |= 0x80000000;
- vm_info->ctrl_regs.cr3 = vm_info->direct_map_pt;
+ core->ctrl_regs.cr0 |= 0x80000000;
+ core->ctrl_regs.cr3 = core->direct_map_pt;
ctrl_area->cr_reads.cr0 = 1;
ctrl_area->cr_writes.cr0 = 1;
ctrl_area->cr_reads.cr3 = 1;
ctrl_area->cr_writes.cr3 = 1;
- v3_hook_msr(vm_info, EFER_MSR,
+ v3_hook_msr(core->vm_info, EFER_MSR,
&v3_handle_efer_read,
&v3_handle_efer_write,
- vm_info);
+ core);
ctrl_area->instrs.INVLPG = 1;
- } else if (vm_info->shdw_pg_mode == NESTED_PAGING) {
+ } else if (core->shdw_pg_mode == NESTED_PAGING) {
// Flush the TLB on entries/exits
ctrl_area->TLB_CONTROL = 1;
ctrl_area->guest_ASID = 1;
PrintDebug("NP_Enable at 0x%p\n", (void *)&(ctrl_area->NP_ENABLE));
// Set the Nested Page Table pointer
- if (v3_init_passthrough_pts(vm_info) == -1) {
+ if (v3_init_passthrough_pts(core) == -1) {
PrintError("Could not initialize Nested page tables\n");
return ;
}
- ctrl_area->N_CR3 = vm_info->direct_map_pt;
+ ctrl_area->N_CR3 = core->direct_map_pt;
guest_state->g_pat = 0x7040600070406ULL;
}
static int update_irq_exit_state(struct guest_info * info) {
vmcb_ctrl_t * guest_ctrl = GET_VMCB_CTRL_AREA((vmcb_t*)(info->vmm_data));
- if ((info->intr_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) {
+ if ((info->intr_core_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) {
#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("INTAK cycle completed for irq %d\n", info->intr_state.irq_vector);
+ PrintDebug("INTAK cycle completed for irq %d\n", info->intr_core_state.irq_vector);
#endif
- info->intr_state.irq_started = 1;
- info->intr_state.irq_pending = 0;
+ info->intr_core_state.irq_started = 1;
+ info->intr_core_state.irq_pending = 0;
- v3_injecting_intr(info, info->intr_state.irq_vector, V3_EXTERNAL_IRQ);
+ v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
}
- if ((info->intr_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 0)) {
+ if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 0)) {
#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("Interrupt %d taken by guest\n", info->intr_state.irq_vector);
+ PrintDebug("Interrupt %d taken by guest\n", info->intr_core_state.irq_vector);
#endif
// Interrupt was taken fully vectored
- info->intr_state.irq_started = 0;
+ info->intr_core_state.irq_started = 0;
} else {
#ifdef CONFIG_DEBUG_INTERRUPTS
#endif
v3_injecting_excp(info, excp);
- } else if (info->intr_state.irq_started == 1) {
+ } else if (info->intr_core_state.irq_started == 1) {
#ifdef CONFIG_DEBUG_INTERRUPTS
PrintDebug("IRQ pending from previous injection\n");
#endif
guest_ctrl->guest_ctrl.V_IRQ = 1;
- guest_ctrl->guest_ctrl.V_INTR_VECTOR = info->intr_state.irq_vector;
+ guest_ctrl->guest_ctrl.V_INTR_VECTOR = info->intr_core_state.irq_vector;
guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
(void *)(addr_t)info->rip);
#endif
- info->intr_state.irq_pending = 1;
- info->intr_state.irq_vector = irq;
+ info->intr_core_state.irq_pending = 1;
+ info->intr_core_state.irq_vector = irq;
break;
}
guest_state->rsp = info->vm_regs.rsp;
#ifdef CONFIG_SYMBIOTIC
- if (info->sym_state.sym_call_active == 0) {
+ if (info->vm_info->sym_state.symcalls[info->cpu_id].sym_call_active == 0) {
update_irq_entry_state(info);
}
#else
*/
#ifdef CONFIG_SYMBIOTIC
- if (info->sym_state.sym_call_active == 1) {
+ if (info->vm_info->sym_state.symcalls[info->cpu_id].sym_call_active == 1) {
if (guest_ctrl->guest_ctrl.V_IRQ == 1) {
V3_Print("!!! Injecting Interrupt during Sym call !!!\n");
}
#ifdef CONFIG_SYMBIOTIC
- if (info->sym_state.sym_call_active == 0) {
+ if (info->vm_info->sym_state.symcalls[info->cpu_id].sym_call_active == 0) {
update_irq_exit_state(info);
}
#else
PrintDebug("Launching SVM VM (vmcb=%p)\n", (void *)info->vmm_data);
//PrintDebugVMCB((vmcb_t*)(info->vmm_data));
- info->run_state = VM_RUNNING;
+ info->vm_info->run_state = VM_RUNNING;
rdtscll(info->yield_start_cycle);
addr_t host_addr;
addr_t linear_addr = 0;
- info->run_state = VM_ERROR;
+ info->vm_info->run_state = VM_ERROR;
V3_Print("SVM ERROR!!\n");
int v3_handle_svm_exit(struct guest_info * info, addr_t exit_code, addr_t exit_info1, addr_t exit_info2) {
#ifdef CONFIG_TELEMETRY
- if (info->enable_telemetry) {
+ if (info->vm_info->enable_telemetry) {
v3_telemetry_start_exit(info);
}
#endif
// END OF SWITCH (EXIT_CODE)
#ifdef CONFIG_TELEMETRY
- if (info->enable_telemetry) {
+ if (info->vm_info->enable_telemetry) {
v3_telemetry_end_exit(info, exit_code);
}
#endif
* redistribute, and modify it as specified in the file "V3VEE_LICENSE".
*/
+
#include <palacios/svm_io.h>
#include <palacios/vmm_io.h>
#include <palacios/vmm_ctrl_regs.h>
#endif
-static int update_map(struct guest_info * info, uint16_t port, int hook_read, int hook_write) {
- uchar_t * bitmap = (uint8_t *)(info->io_map.arch_data);;
+static int update_map(struct v3_vm_info * vm, uint16_t port, int hook_read, int hook_write) {
+ uchar_t * bitmap = (uint8_t *)(vm->io_map.arch_data);;
int major = port / 8;
int minor = port % 8;
}
-int v3_init_svm_io_map(struct guest_info * info) {
- info->io_map.update_map = update_map;
+int v3_init_svm_io_map(struct v3_vm_info * vm) {
+ vm->io_map.update_map = update_map;
- info->io_map.arch_data = V3_VAddr(V3_AllocPages(3));
- memset(info->io_map.arch_data, 0, PAGE_SIZE_4KB * 3);
+ vm->io_map.arch_data = V3_VAddr(V3_AllocPages(3));
+ memset(vm->io_map.arch_data, 0, PAGE_SIZE_4KB * 3);
- v3_refresh_io_map(info);
+ v3_refresh_io_map(vm);
return 0;
}
// This should package up an IO request and call vmm_handle_io
-int v3_handle_svm_io_in(struct guest_info * info, struct svm_io_info * io_info) {
- struct v3_io_hook * hook = v3_get_io_hook(info, io_info->port);
+int v3_handle_svm_io_in(struct guest_info * core, struct svm_io_info * io_info) {
+ struct v3_io_hook * hook = v3_get_io_hook(core->vm_info, io_info->port);
int read_size = 0;
if (hook == NULL) {
PrintDebug("IN of %d bytes on port %d (0x%x)\n", read_size, io_info->port, io_info->port);
- if (hook->read(io_info->port, &(info->vm_regs.rax), read_size, hook->priv_data) != read_size) {
+ if (hook->read(core, io_info->port, &(core->vm_regs.rax), read_size, hook->priv_data) != read_size) {
// not sure how we handle errors.....
PrintError("Read Failure for in on port 0x%x\n", io_info->port);
return -1;
/* We might not handle wrap around of the RDI register correctly...
* In that if we do wrap around the effect will manifest in the higher bits of the register
*/
-int v3_handle_svm_io_ins(struct guest_info * info, struct svm_io_info * io_info) {
- struct v3_io_hook * hook = v3_get_io_hook(info, io_info->port);
+int v3_handle_svm_io_ins(struct guest_info * core, struct svm_io_info * io_info) {
+ struct v3_io_hook * hook = v3_get_io_hook(core->vm_info, io_info->port);
int read_size = 0;
addr_t dst_addr = 0;
uint_t rep_num = 1;
ullong_t mask = 0;
- struct v3_segment * theseg = &(info->segments.es); // default is ES
+ struct v3_segment * theseg = &(core->segments.es); // default is ES
addr_t inst_ptr;
// direction can equal either 1 or -1
// We will multiply the final added offset by this value to go the correct direction
int direction = 1;
- struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
+ struct rflags * flags = (struct rflags *)&(core->ctrl_regs.rflags);
if (flags->df) {
direction = -1;
- if (guest_va_to_host_va(info, get_addr_linear(info, info->rip, &(info->segments.cs)), &inst_ptr) == -1) {
+ if (guest_va_to_host_va(core, get_addr_linear(core, core->rip, &(core->segments.cs)), &inst_ptr) == -1) {
PrintError("Can't access instruction\n");
return -1;
}
while (is_prefix_byte(*((char *)inst_ptr))) {
switch (*((char *)inst_ptr)) {
case PREFIX_CS_OVERRIDE:
- theseg = &(info->segments.cs);
+ theseg = &(core->segments.cs);
break;
case PREFIX_SS_OVERRIDE:
- theseg = &(info->segments.ss);
+ theseg = &(core->segments.ss);
break;
case PREFIX_DS_OVERRIDE:
- theseg = &(info->segments.ds);
+ theseg = &(core->segments.ds);
break;
case PREFIX_ES_OVERRIDE:
- theseg = &(info->segments.es);
+ theseg = &(core->segments.es);
break;
case PREFIX_FS_OVERRIDE:
- theseg = &(info->segments.fs);
+ theseg = &(core->segments.fs);
break;
case PREFIX_GS_OVERRIDE:
- theseg = &(info->segments.gs);
+ theseg = &(core->segments.gs);
break;
default:
break;
mask = 0xffffffffffffffffLL;
} else {
// This value should be set depending on the host register size...
- mask = get_gpr_mask(info);
+ mask = get_gpr_mask(core);
PrintDebug("INS io_info invalid address size, mask=0x%p, io_info=0x%p\n",
(void *)(addr_t)mask, (void *)(addr_t)(io_info));
}
if (io_info->rep) {
- rep_num = info->vm_regs.rcx & mask;
+ rep_num = core->vm_regs.rcx & mask;
//rep_num = info->vm_regs.rcx;
}
while (rep_num > 0) {
addr_t host_addr;
- dst_addr = get_addr_linear(info, (info->vm_regs.rdi & mask), theseg);
+ dst_addr = get_addr_linear(core, (core->vm_regs.rdi & mask), theseg);
// PrintDebug("Writing 0x%p\n", (void *)dst_addr);
- if (guest_va_to_host_va(info, dst_addr, &host_addr) == -1) {
+ if (guest_va_to_host_va(core, dst_addr, &host_addr) == -1) {
// either page fault or gpf...
PrintError("Could not convert Guest VA to host VA\n");
return -1;
}
- if (hook->read(io_info->port, (char *)host_addr, read_size, hook->priv_data) != read_size) {
+ if (hook->read(core, io_info->port, (char *)host_addr, read_size, hook->priv_data) != read_size) {
// not sure how we handle errors.....
PrintError("Read Failure for ins on port 0x%x\n", io_info->port);
return -1;
}
- info->vm_regs.rdi += (read_size * direction);
+ core->vm_regs.rdi += (read_size * direction);
if (io_info->rep) {
- info->vm_regs.rcx--;
+ core->vm_regs.rcx--;
}
rep_num--;
return 0;
}
-int v3_handle_svm_io_out(struct guest_info * info, struct svm_io_info * io_info) {
- struct v3_io_hook * hook = v3_get_io_hook(info, io_info->port);
+int v3_handle_svm_io_out(struct guest_info * core, struct svm_io_info * io_info) {
+ struct v3_io_hook * hook = v3_get_io_hook(core->vm_info, io_info->port);
int write_size = 0;
if (hook == NULL) {
PrintDebug("OUT of %d bytes on port %d (0x%x)\n", write_size, io_info->port, io_info->port);
- if (hook->write(io_info->port, &(info->vm_regs.rax), write_size, hook->priv_data) != write_size) {
+ if (hook->write(core, io_info->port, &(core->vm_regs.rax), write_size, hook->priv_data) != write_size) {
// not sure how we handle errors.....
PrintError("Write Failure for out on port 0x%x\n", io_info->port);
return -1;
* In that if we do wrap around the effect will manifest in the higher bits of the register
*/
-int v3_handle_svm_io_outs(struct guest_info * info, struct svm_io_info * io_info) {
+int v3_handle_svm_io_outs(struct guest_info * core, struct svm_io_info * io_info) {
- struct v3_io_hook * hook = v3_get_io_hook(info, io_info->port);
+ struct v3_io_hook * hook = v3_get_io_hook(core->vm_info, io_info->port);
int write_size = 0;
addr_t dst_addr = 0;
uint_t rep_num = 1;
ullong_t mask = 0;
addr_t inst_ptr;
- struct v3_segment * theseg = &(info->segments.es); // default is ES
+ struct v3_segment * theseg = &(core->segments.es); // default is ES
// This is kind of hacky...
// direction can equal either 1 or -1
// We will multiply the final added offset by this value to go the correct direction
int direction = 1;
- struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
+ struct rflags * flags = (struct rflags *)&(core->ctrl_regs.rflags);
if (flags->df) {
direction = -1;
mask = 0xffffffffffffffffLL;
} else {
// This value should be set depending on the host register size...
- mask = get_gpr_mask(info);
+ mask = get_gpr_mask(core);
PrintDebug("OUTS io_info invalid address size, mask=0%p, io_info=0x%p\n",
(void *)(addr_t)mask, (void *)(addr_t)io_info);
}
if (io_info->rep) {
- rep_num = info->vm_regs.rcx & mask;
+ rep_num = core->vm_regs.rcx & mask;
}
- if (guest_va_to_host_va(info, get_addr_linear(info, info->rip, &(info->segments.cs)), &inst_ptr) == -1) {
+ if (guest_va_to_host_va(core, get_addr_linear(core, core->rip, &(core->segments.cs)), &inst_ptr) == -1) {
PrintError("Can't access instruction\n");
return -1;
}
while (is_prefix_byte(*((char *)inst_ptr))) {
switch (*((char *)inst_ptr)) {
case PREFIX_CS_OVERRIDE:
- theseg = &(info->segments.cs);
+ theseg = &(core->segments.cs);
break;
case PREFIX_SS_OVERRIDE:
- theseg = &(info->segments.ss);
+ theseg = &(core->segments.ss);
break;
case PREFIX_DS_OVERRIDE:
- theseg = &(info->segments.ds);
+ theseg = &(core->segments.ds);
break;
case PREFIX_ES_OVERRIDE:
- theseg = &(info->segments.es);
+ theseg = &(core->segments.es);
break;
case PREFIX_FS_OVERRIDE:
- theseg = &(info->segments.fs);
+ theseg = &(core->segments.fs);
break;
case PREFIX_GS_OVERRIDE:
- theseg = &(info->segments.gs);
+ theseg = &(core->segments.gs);
break;
default:
break;
while (rep_num > 0) {
addr_t host_addr;
- dst_addr = get_addr_linear(info, (info->vm_regs.rsi & mask), theseg);
+ dst_addr = get_addr_linear(core, (core->vm_regs.rsi & mask), theseg);
- if (guest_va_to_host_va(info, dst_addr, &host_addr) == -1) {
+ if (guest_va_to_host_va(core, dst_addr, &host_addr) == -1) {
// either page fault or gpf...
}
- if (hook->write(io_info->port, (char*)host_addr, write_size, hook->priv_data) != write_size) {
+ if (hook->write(core, io_info->port, (char*)host_addr, write_size, hook->priv_data) != write_size) {
// not sure how we handle errors.....
PrintError("Write Failure for outs on port 0x%x\n", io_info->port);
return -1;
}
- info->vm_regs.rsi += write_size * direction;
+ core->vm_regs.rsi += write_size * direction;
if (io_info->rep) {
- info->vm_regs.rcx--;
+ core->vm_regs.rcx--;
}
rep_num--;
}
-static int update_map(struct guest_info * info, uint_t msr, int hook_reads, int hook_writes) {
+static int update_map(struct v3_vm_info * vm, uint_t msr, int hook_reads, int hook_writes) {
int index = get_bitmap_index(msr);
uint_t major = index / 4;
uint_t minor = (index % 4) * 2;
uchar_t val = 0;
uchar_t mask = 0x3;
- uint8_t * bitmap = (uint8_t *)(info->msr_map.arch_data);
+ uint8_t * bitmap = (uint8_t *)(vm->msr_map.arch_data);
if (hook_reads) {
val |= 0x1;
}
-int v3_init_svm_msr_map(struct guest_info * info) {
- struct v3_msr_map * msr_map = &(info->msr_map);
+int v3_init_svm_msr_map(struct v3_vm_info * vm) {
+ struct v3_msr_map * msr_map = &(vm->msr_map);
msr_map->update_map = update_map;
msr_map->arch_data = V3_VAddr(V3_AllocPages(2));
memset(msr_map->arch_data, 0, PAGE_SIZE_4KB * 2);
- v3_refresh_msr_map(info);
+ v3_refresh_msr_map(vm);
return 0;
}
-int guest_pa_to_host_pa(struct guest_info * guest_info, addr_t guest_pa, addr_t * host_pa) {
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(guest_info, guest_pa);
+int guest_pa_to_host_pa(struct guest_info * info, addr_t guest_pa, addr_t * host_pa) {
+ struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, guest_pa);
if (shdw_reg == NULL) {
PrintError("In GPA->HPA: Could not find address in shadow map (addr=%p) (NULL REGION)\n",
int v3_dbg_enable = 0;
-static struct guest_info * allocate_guest() {
- void * info = V3_Malloc(sizeof(struct guest_info));
- memset(info, 0, sizeof(struct guest_info));
- return info;
-}
-
static void init_cpu(void * arg) {
uint32_t cpu_id = (uint32_t)(addr_t)arg;
}
-struct guest_info * v3_create_vm(void * cfg) {
- struct guest_info * info = allocate_guest();
-
- if (!info) {
- PrintError("Could not allocate Guest\n");
- return NULL;
- }
+struct v3_vm_info * v3_create_vm(void * cfg) {
+ struct v3_vm_info * vm = v3_config_guest(cfg);
- if (v3_config_guest(info, cfg) == -1) {
+ if (vm == NULL) {
PrintError("Could not configure guest\n");
return NULL;
}
- return info;
+ return vm;
}
-int v3_start_vm(struct guest_info * info, unsigned int cpu_mask) {
-
- info->cpu_id = v3_get_cpu_id();
-
+int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask) {
+ int i = 0;
V3_Print("V3 -- Starting VM\n");
- switch (v3_cpu_types[info->cpu_id]) {
+
+ for (i = 0; i < vm->num_cores; i++) {
+ struct guest_info * info = &(vm->cores[i]);
+
+ /* GRUESOME HACK... */
+ // vm->cpu_id = v3_get_cpu_id();
+
+ switch (v3_cpu_types[info->cpu_id]) {
#ifdef CONFIG_SVM
- case V3_SVM_CPU:
- case V3_SVM_REV3_CPU:
- return v3_start_svm_guest(info);
- break;
+ case V3_SVM_CPU:
+ case V3_SVM_REV3_CPU:
+ return v3_start_svm_guest(info);
+ break;
#endif
#if CONFIG_VMX
- case V3_VMX_CPU:
- case V3_VMX_EPT_CPU:
- return v3_start_vmx_guest(info);
- break;
+ case V3_VMX_CPU:
+ case V3_VMX_EPT_CPU:
+ return v3_start_vmx_guest(info);
+ break;
#endif
- default:
- PrintError("Attemping to enter a guest on an invalid CPU\n");
- return -1;
+ default:
+ PrintError("Attemping to enter a guest on an invalid CPU\n");
+ return -1;
+ }
}
return 0;
uint64_t cur_cycle;
rdtscll(cur_cycle);
- if (cur_cycle > (info->yield_start_cycle + info->yield_cycle_period)) {
+ if (cur_cycle > (info->yield_start_cycle + info->vm_info->yield_cycle_period)) {
/*
PrintDebug("Conditional Yield (cur_cyle=%p, start_cycle=%p, period=%p)\n",
-void v3_interrupt_cpu(struct guest_info * info, int logical_cpu) {
+void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu) {
extern struct v3_os_hooks * os_hooks;
if ((os_hooks) && (os_hooks)->interrupt_cpu) {
- (os_hooks)->interrupt_cpu(info, logical_cpu);
+ (os_hooks)->interrupt_cpu(vm, logical_cpu);
}
}
-static int setup_memory_map(struct guest_info * info, struct v3_config * config_ptr);
-static int setup_devices(struct guest_info * info, struct v3_config * config_ptr);
+static int setup_memory_map(struct v3_vm_info * vm, v3_cfg_tree_t * cfg);
+static int setup_devices(struct v3_vm_info * vm, v3_cfg_tree_t * cfg);
+
char * v3_cfg_val(v3_cfg_tree_t * tree, char * tag) {
-struct v3_cfg_file * v3_cfg_get_file(struct guest_info * info, char * tag) {
+struct v3_cfg_file * v3_cfg_get_file(struct v3_vm_info * vm, char * tag) {
struct v3_cfg_file * file = NULL;
- file = (struct v3_cfg_file *)v3_htable_search(info->cfg_data->file_table, (addr_t)tag);
+ file = (struct v3_cfg_file *)v3_htable_search(vm->cfg_data->file_table, (addr_t)tag);
return file;
}
return cfg;
}
-
-static int pre_config_guest(struct guest_info * info, struct v3_config * config_ptr) {
- extern v3_cpu_arch_t v3_cpu_types[];
- char * memory_str = v3_cfg_val(config_ptr->cfg, "memory");
- char * paging = v3_cfg_val(config_ptr->cfg, "paging");
- char * schedule_hz_str = v3_cfg_val(config_ptr->cfg, "schedule_hz");
- char * vm_class = v3_cfg_val(config_ptr->cfg, "class");
+static int pre_config_vm(struct v3_vm_info * vm, v3_cfg_tree_t * vm_cfg) {
+ char * memory_str = v3_cfg_val(vm_cfg, "memory");
+ char * schedule_hz_str = v3_cfg_val(vm_cfg, "schedule_hz");
+ char * vm_class = v3_cfg_val(vm_cfg, "class");
uint32_t sched_hz = 100; // set the schedule frequency to 100 HZ
if (!memory_str) {
PrintDebug("Memory=%s\n", memory_str);
// Amount of ram the Guest will have, always in MB
- info->mem_size = atoi(memory_str) * 1024 * 1024;
+ vm->mem_size = atoi(memory_str) * 1024 * 1024;
if (strcasecmp(vm_class, "PC") == 0) {
- info->vm_class = V3_PC_VM;
+ vm->vm_class = V3_PC_VM;
} else {
PrintError("Invalid VM class\n");
return -1;
}
-
- /*
- * Initialize the subsystem data strutures
- */
#ifdef CONFIG_TELEMETRY
{
- char * telemetry = v3_cfg_val(config_ptr->cfg, "telemetry");
+ char * telemetry = v3_cfg_val(vm_cfg, "telemetry");
// This should go first, because other subsystems will depend on the guest_info flag
if ((telemetry) && (strcasecmp(telemetry, "enable") == 0)) {
- info->enable_telemetry = 1;
- v3_init_telemetry(info);
+ vm->enable_telemetry = 1;
} else {
- info->enable_telemetry = 0;
+ vm->enable_telemetry = 0;
}
}
#endif
- v3_init_hypercall_map(info);
- v3_init_io_map(info);
- v3_init_msr_map(info);
- v3_init_cpuid_map(info);
- v3_init_host_events(info);
+ v3_init_hypercall_map(vm);
+ v3_init_io_map(vm);
+ v3_init_msr_map(vm);
+ v3_init_cpuid_map(vm);
+ v3_init_host_events(vm);
+ v3_init_intr_routers(vm);
// Initialize the memory map
- if (v3_init_shadow_map(info) == -1) {
+ if (v3_init_mem_map(&(vm->cores[0])) == -1) {
PrintError("Could not initialize shadow map\n");
return -1;
}
+
+#ifdef CONFIG_SYMBIOTIC
+ v3_init_sym_iface(vm);
+#endif
+
+ v3_init_dev_mgr(vm);
+
+
+#ifdef CONFIG_SYMBIOTIC_SWAP
+ PrintDebug("initializing symbiotic swap\n");
+ v3_init_sym_swap(vm);
+#endif
+
+ if (schedule_hz_str) {
+ sched_hz = atoi(schedule_hz_str);
+ }
+
+ PrintDebug("CPU_KHZ = %d, schedule_freq=%p\n", V3_CPU_KHZ(),
+ (void *)(addr_t)sched_hz);
+
+ vm->yield_cycle_period = (V3_CPU_KHZ() * 1000) / sched_hz;
+
+ return 0;
+}
+
+static int pre_config_core(struct guest_info * info, v3_cfg_tree_t * core_cfg) {
+ extern v3_cpu_arch_t v3_cpu_types[];
+ char * paging = v3_cfg_val(core_cfg, "paging");
+
+ /*
+ * Initialize the subsystem data structures
+ */
+#ifdef CONFIG_TELEMETRY
+ if (info->vm_info->enable_telemetry) {
+ v3_init_telemetry(info);
+ }
+#endif
+
if ((v3_cpu_types[info->cpu_id] == V3_SVM_REV3_CPU) &&
(paging) && (strcasecmp(paging, "nested") == 0)) {
info->shdw_pg_mode = SHADOW_PAGING;
}
-#ifdef CONFIG_SYMBIOTIC
- v3_init_sym_iface(info);
-#endif
+
v3_init_time(info);
- v3_init_interrupt_state(info);
+ v3_init_intr_controllers(info);
v3_init_exception_state(info);
- v3_init_dev_mgr(info);
+
v3_init_decoder(info);
-#ifdef CONFIG_SYMBIOTIC_SWAP
- PrintDebug("initializing symbiotic swap\n");
- v3_init_sym_swap(info);
-#endif
- if (schedule_hz_str) {
- sched_hz = atoi(schedule_hz_str);
- }
- PrintDebug("CPU_KHZ = %d, schedule_freq=%p\n", V3_CPU_KHZ(),
- (void *)(addr_t)sched_hz);
-
- info->yield_cycle_period = (V3_CPU_KHZ() * 1000) / sched_hz;
-
- if (info->vm_class == V3_PC_VM) {
- if (pre_config_pc(info, config_ptr) == -1) {
+ if (info->vm_info->vm_class == V3_PC_VM) {
+ if (pre_config_pc_core(info, core_cfg) == -1) {
PrintError("PC Post configuration failure\n");
return -1;
}
}
-static int post_config_guest(struct guest_info * info, struct v3_config * config_ptr) {
+
+static int post_config_vm(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
+
+ vm->run_state = VM_STOPPED;
// Configure the memory map for the guest
- if (setup_memory_map(info, config_ptr) == -1) {
+ if (setup_memory_map(vm, cfg) == -1) {
PrintError("Setting up guest memory map failed...\n");
return -1;
}
//v3_hook_io_port(info, 1234, &IO_Read, NULL, info);
- if (setup_devices(info, config_ptr) == -1) {
+ if (setup_devices(vm, cfg) == -1) {
PrintError("Failed to setup devices\n");
return -1;
}
+
// v3_print_io_map(info);
- v3_print_msr_map(info);
+ v3_print_msr_map(vm);
+
+
+ if (vm->vm_class == V3_PC_VM) {
+ if (post_config_pc(vm, cfg) == -1) {
+ PrintError("PC Post configuration failure\n");
+ return -1;
+ }
+ } else {
+ PrintError("Invalid VM Class\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+
+
+static int post_config_core(struct guest_info * info, v3_cfg_tree_t * cfg) {
- info->run_state = VM_STOPPED;
- if (info->vm_class == V3_PC_VM) {
- if (post_config_pc(info, config_ptr) == -1) {
+
+ if (info->vm_info->vm_class == V3_PC_VM) {
+ if (post_config_pc_core(info, cfg) == -1) {
PrintError("PC Post configuration failure\n");
return -1;
}
-int v3_config_guest(struct guest_info * info, void * cfg_blob) {
+static struct v3_vm_info * allocate_guest(int num_cores) {
+ int guest_state_size = sizeof(struct v3_vm_info) + (sizeof(struct guest_info) * num_cores);
+ struct v3_vm_info * vm = V3_Malloc(guest_state_size);
+
+ memset(vm, 0, guest_state_size);
+
+ vm->num_cores = num_cores;
+
+ return vm;
+}
+
+
+
+struct v3_vm_info * v3_config_guest(void * cfg_blob) {
v3_cpu_arch_t cpu_type = v3_get_cpu_type(v3_get_cpu_id());
+ struct v3_config * cfg_data = NULL;
+ struct v3_vm_info * vm = NULL;
+ int num_cores = 0;
+ int i = 0;
+ v3_cfg_tree_t * cores_cfg = NULL;
+ v3_cfg_tree_t * per_core_cfg = NULL;
if (cpu_type == V3_INVALID_CPU) {
PrintError("Configuring guest on invalid CPU\n");
- return -1;
+ return NULL;
}
- info->cfg_data = parse_config(cfg_blob);
+ cfg_data = parse_config(cfg_blob);
- if (!info->cfg_data) {
+ if (!cfg_data) {
PrintError("Could not parse configuration\n");
- return -1;
+ return NULL;
}
+ cores_cfg = v3_cfg_subtree(cfg_data->cfg, "cores");
+
+ num_cores = atoi(v3_cfg_val(cores_cfg, "count"));
+
+ V3_Print("Configuring %d cores\n", num_cores);
+
+ vm = allocate_guest(num_cores);
+
+ if (!vm) {
+ PrintError("Could not allocate %d core guest\n", vm->num_cores);
+ return NULL;
+ }
+
+ vm->cfg_data = cfg_data;
+
V3_Print("Preconfiguration\n");
- if (pre_config_guest(info, info->cfg_data) == -1) {
+ if (pre_config_vm(vm, vm->cfg_data->cfg) == -1) {
PrintError("Error in preconfiguration\n");
- return -1;
+ return NULL;
}
- V3_Print("Arch dependent configuration\n");
- // init SVM/VMX
+ V3_Print("Per core configuration\n");
+ per_core_cfg = v3_cfg_subtree(cores_cfg, "core");
+
+ // per core configuration
+ for (i = 0; i < vm->num_cores; i++) {
+ struct guest_info * info = &(vm->cores[i]);
+
+
+ info->cpu_id = i;
+ info->vm_info = vm;
+
+ pre_config_core(info, per_core_cfg);
+
+ // init SVM/VMX
#ifdef CONFIG_SVM
- if ((cpu_type == V3_SVM_CPU) || (cpu_type == V3_SVM_REV3_CPU)) {
- if (v3_init_svm_vmcb(info, info->vm_class) == -1) {
- PrintError("Error in SVM initialization\n");
- return -1;
+ if ((cpu_type == V3_SVM_CPU) || (cpu_type == V3_SVM_REV3_CPU)) {
+ if (v3_init_svm_vmcb(info, vm->vm_class) == -1) {
+ PrintError("Error in SVM initialization\n");
+ return NULL;
+ }
}
- }
#endif
#ifdef CONFIG_VMX
- else if ((cpu_type == V3_VMX_CPU) || (cpu_type == V3_VMX_EPT_CPU)) {
- if (v3_init_vmx_vmcs(info, info->vm_class) == -1) {
- PrintError("Error in VMX initialization\n");
- return -1;
+ else if ((cpu_type == V3_VMX_CPU) || (cpu_type == V3_VMX_EPT_CPU)) {
+ if (v3_init_vmx_vmcs(info, vm->vm_class) == -1) {
+ PrintError("Error in VMX initialization\n");
+ return NULL;
+ }
}
- }
#endif
- else {
- PrintError("Invalid CPU Type\n");
- return -1;
+ else {
+ PrintError("Invalid CPU Type\n");
+ return NULL;
+ }
+
+ per_core_cfg = v3_cfg_next_branch(per_core_cfg);
}
+
V3_Print("Post Configuration\n");
- if (post_config_guest(info, info->cfg_data) == -1) {
+ if (post_config_vm(vm, vm->cfg_data->cfg) == -1) {
PrintError("Error in postconfiguration\n");
- return -1;
+ return NULL;
+ }
+
+
+ per_core_cfg = v3_cfg_subtree(cores_cfg, "core");
+
+ // per core configuration
+ for (i = 0; i < vm->num_cores; i++) {
+ struct guest_info * info = &(vm->cores[i]);
+
+ post_config_core(info, per_core_cfg);
+
+ per_core_cfg = v3_cfg_next_branch(per_core_cfg);
}
V3_Print("Configuration successfull\n");
- return 0;
+ return vm;
}
-static int setup_memory_map(struct guest_info * info, struct v3_config * config_ptr) {
- v3_cfg_tree_t * mem_region = v3_cfg_subtree(v3_cfg_subtree(config_ptr->cfg, "memmap"), "region");
+static int setup_memory_map(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
+ v3_cfg_tree_t * mem_region = v3_cfg_subtree(v3_cfg_subtree(cfg, "memmap"), "region");
while (mem_region) {
addr_t start_addr = atox(v3_cfg_val(mem_region, "start"));
addr_t host_addr = atox(v3_cfg_val(mem_region, "host_addr"));
- if (v3_add_shadow_mem(info, start_addr, end_addr, host_addr) == -1) {
+ if (v3_add_shadow_mem(vm, start_addr, end_addr, host_addr) == -1) {
PrintError("Could not map memory region: %p-%p => %p\n",
(void *)start_addr, (void *)end_addr, (void *)host_addr);
return -1;
-
-
-
-
-
-static int setup_devices(struct guest_info * info, struct v3_config * config_ptr) {
- v3_cfg_tree_t * device = v3_cfg_subtree(v3_cfg_subtree(config_ptr->cfg, "devices"), "device");
+static int setup_devices(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
+ v3_cfg_tree_t * device = v3_cfg_subtree(v3_cfg_subtree(cfg, "devices"), "device");
while (device) {
V3_Print("configuring device %s\n", id);
- if (v3_create_device(info, id, device) == -1) {
+ if (v3_create_device(vm, id, device) == -1) {
PrintError("Error creating device %s\n", id);
return -1;
}
device = v3_cfg_next_branch(device);
}
-
- v3_print_dev_mgr(info);
+ v3_print_dev_mgr(vm);
return 0;
}
+
+
*/
-static int pre_config_pc(struct guest_info * info, struct v3_config * config_ptr) {
+static int pre_config_pc_core(struct guest_info * info, v3_cfg_tree_t * cfg) {
info->cpu_mode = REAL;
return 0;
}
-static int post_config_pc(struct guest_info * info, struct v3_config * config_ptr) {
+static int post_config_pc_core(struct guest_info * info, v3_cfg_tree_t * cfg) {
+
+ v3_print_mem_map(info->vm_info);
+ return 0;
+}
+
+static int post_config_pc(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
#define VGABIOS_START 0x000c0000
#define ROMBIOS_START 0x000f0000
extern uint8_t v3_vgabios_start[];
extern uint8_t v3_vgabios_end[];
- addr_t vgabios_dst = v3_get_shadow_addr(&(info->mem_map.base_region), VGABIOS_START);
+ addr_t vgabios_dst = v3_get_shadow_addr(&(vm->mem_map.base_region), VGABIOS_START);
memcpy(V3_VAddr((void *)vgabios_dst), v3_vgabios_start, v3_vgabios_end - v3_vgabios_start);
}
extern uint8_t v3_rombios_start[];
extern uint8_t v3_rombios_end[];
- addr_t rombios_dst = v3_get_shadow_addr(&(info->mem_map.base_region), ROMBIOS_START);
+ addr_t rombios_dst = v3_get_shadow_addr(&(vm->mem_map.base_region), ROMBIOS_START);
memcpy(V3_VAddr((void *)rombios_dst), v3_rombios_start, v3_rombios_end - v3_rombios_start);
}
- v3_print_mem_map(info);
-
return 0;
}
#include <palacios/vm_guest.h>
-void v3_init_cpuid_map(struct guest_info * info) {
- info->cpuid_map.map.rb_node = NULL;
+void v3_init_cpuid_map(struct v3_vm_info * vm) {
+ vm->cpuid_map.map.rb_node = NULL;
}
-static inline struct v3_cpuid_hook * __insert_cpuid_hook(struct guest_info * info, struct v3_cpuid_hook * hook) {
- struct rb_node ** p = &(info->cpuid_map.map.rb_node);
+static inline struct v3_cpuid_hook * __insert_cpuid_hook(struct v3_vm_info * vm, struct v3_cpuid_hook * hook) {
+ struct rb_node ** p = &(vm->cpuid_map.map.rb_node);
struct rb_node * parent = NULL;
struct v3_cpuid_hook * tmp_hook = NULL;
}
-static inline struct v3_cpuid_hook * insert_cpuid_hook(struct guest_info * info, struct v3_cpuid_hook * hook) {
+static inline struct v3_cpuid_hook * insert_cpuid_hook(struct v3_vm_info * vm, struct v3_cpuid_hook * hook) {
struct v3_cpuid_hook * ret;
- if ((ret = __insert_cpuid_hook(info, hook))) {
+ if ((ret = __insert_cpuid_hook(vm, hook))) {
return ret;
}
- v3_rb_insert_color(&(hook->tree_node), &(info->cpuid_map.map));
+ v3_rb_insert_color(&(hook->tree_node), &(vm->cpuid_map.map));
return NULL;
}
-static struct v3_cpuid_hook * get_cpuid_hook(struct guest_info * info, uint32_t cpuid) {
- struct rb_node * n = info->cpuid_map.map.rb_node;
+static struct v3_cpuid_hook * get_cpuid_hook(struct v3_vm_info * vm, uint32_t cpuid) {
+ struct rb_node * n = vm->cpuid_map.map.rb_node;
struct v3_cpuid_hook * hook = NULL;
while (n) {
}
-int v3_unhook_cpuid(struct guest_info * info, uint32_t cpuid) {
- struct v3_cpuid_hook * hook = get_cpuid_hook(info, cpuid);
+int v3_unhook_cpuid(struct v3_vm_info * vm, uint32_t cpuid) {
+ struct v3_cpuid_hook * hook = get_cpuid_hook(vm, cpuid);
if (hook == NULL) {
PrintError("Could not find cpuid to unhook (0x%x)\n", cpuid);
return -1;
}
- v3_rb_erase(&(hook->tree_node), &(info->cpuid_map.map));
+ v3_rb_erase(&(hook->tree_node), &(vm->cpuid_map.map));
V3_Free(hook);
return 0;
}
-int v3_hook_cpuid(struct guest_info * info, uint32_t cpuid,
+int v3_hook_cpuid(struct v3_vm_info * vm, uint32_t cpuid,
int (*hook_fn)(struct guest_info * info, uint32_t cpuid, \
uint32_t * eax, uint32_t * ebx, \
uint32_t * ecx, uint32_t * edx, \
hook->private_data = private_data;
hook->hook_fn = hook_fn;
- if (insert_cpuid_hook(info, hook)) {
+ if (insert_cpuid_hook(vm, hook)) {
PrintError("Could not hook cpuid 0x%x (already hooked)\n", cpuid);
V3_Free(hook);
return -1;
int v3_handle_cpuid(struct guest_info * info) {
uint32_t cpuid = info->vm_regs.rax;
- struct v3_cpuid_hook * hook = get_cpuid_hook(info, cpuid);
+ struct v3_cpuid_hook * hook = get_cpuid_hook(info->vm_info, cpuid);
//PrintDebug("CPUID called for 0x%x\n", cpuid);
}
-int v3_init_dev_mgr(struct guest_info * info) {
- struct vmm_dev_mgr * mgr = &(info->dev_mgr);
+int v3_init_dev_mgr(struct v3_vm_info * vm) {
+ struct vmm_dev_mgr * mgr = &(vm->dev_mgr);
INIT_LIST_HEAD(&(mgr->dev_list));
mgr->num_devs = 0;
}
-int v3_dev_mgr_deinit(struct guest_info * info) {
+int v3_dev_mgr_deinit(struct v3_vm_info * vm) {
struct vm_device * dev;
- struct vmm_dev_mgr * mgr = &(info->dev_mgr);
+ struct vmm_dev_mgr * mgr = &(vm->dev_mgr);
struct vm_device * tmp;
list_for_each_entry_safe(dev, tmp, &(mgr->dev_list), dev_link) {
v3_free_device(dev);
}
+
+ /* TODO: Clear hash tables */
+
return 0;
}
+/*
+int v3_init_core_dev_mgr(struct v3_vm_info * vm) {
+ struct v3_core_dev_mgr * mgr = &(vm->core_dev_mgr);
+
+ INIT_LIST_HEAD(&(mgr->dev_list));
+ mgr->dev_table = v3_create_htable(0, dev_hash_fn, dev_eq_fn);
+ return 0;
+}
-int v3_create_device(struct guest_info * info, const char * dev_name, v3_cfg_tree_t * cfg) {
- int (*dev_init)(struct guest_info * info, void * cfg_data);
+int v3_core_dev_mgr_deinit(struct v3_vm_info * vm) {
+ struct vm_device * dev;
+ struct v3_core_dev_mgr * mgr = &(vm->core_dev_mgr);
+ struct vm_device * tmp;
+
+ list_for_each_entry_safe(dev, tmp, &(mgr->dev_list), dev_link) {
+ v3_detach_device(dev);
+ v3_free_device(dev);
+ }
+
+ // TODO: Clear hash tables
+
+}
+*/
+
+
+int v3_create_device(struct v3_vm_info * vm, const char * dev_name, v3_cfg_tree_t * cfg) {
+ int (*dev_init)(struct v3_vm_info * vm, void * cfg_data);
dev_init = (void *)v3_htable_search(master_dev_table, (addr_t)dev_name);
}
- if (dev_init(info, cfg) == -1) {
+ if (dev_init(vm, cfg) == -1) {
PrintError("Could not initialize Device %s\n", dev_name);
return -1;
}
-struct vm_device * v3_find_dev(struct guest_info * info, const char * dev_name) {
- struct vmm_dev_mgr * mgr = &(info->dev_mgr);
+struct vm_device * v3_find_dev(struct v3_vm_info * vm, const char * dev_name) {
+ struct vmm_dev_mgr * mgr = &(vm->dev_mgr);
if (!dev_name) {
return NULL;
/* IO HOOKS */
int v3_dev_hook_io(struct vm_device * dev, uint16_t port,
- int (*read)(uint16_t port, void * dst, uint_t length, struct vm_device * dev),
- int (*write)(uint16_t port, void * src, uint_t length, struct vm_device * dev)) {
+ int (*read)(struct guest_info * core, uint16_t port, void * dst, uint_t length, struct vm_device * dev),
+ int (*write)(struct guest_info * core, uint16_t port, void * src, uint_t length, struct vm_device * dev)) {
return v3_hook_io_port(dev->vm, port,
- (int (*)(ushort_t, void *, uint_t, void *))read,
- (int (*)(ushort_t, void *, uint_t, void *))write,
+ (int (*)(struct guest_info * core, ushort_t, void *, uint_t, void *))read,
+ (int (*)(struct guest_info * core, ushort_t, void *, uint_t, void *))write,
(void *)dev);
}
}
-int v3_attach_device(struct guest_info * vm, struct vm_device * dev ) {
+int v3_attach_device(struct v3_vm_info * vm, struct vm_device * dev ) {
struct vmm_dev_mgr * mgr = &(vm->dev_mgr);
dev->vm = vm;
-void v3_print_dev_mgr(struct guest_info * info) {
- struct vmm_dev_mgr * mgr = &(info->dev_mgr);
+void v3_print_dev_mgr(struct v3_vm_info * vm) {
+ struct vmm_dev_mgr * mgr = &(vm->dev_mgr);
struct vm_device * dev;
V3_Print("%d devices registered with manager\n", mgr->num_devs);
struct blk_frontend {
- int (*connect)(struct guest_info * info,
+ int (*connect)(struct v3_vm_info * vm,
void * frontend_data,
struct v3_dev_blk_ops * ops,
v3_cfg_tree_t * cfg,
-int v3_dev_add_blk_frontend(struct guest_info * info,
+int v3_dev_add_blk_frontend(struct v3_vm_info * vm,
char * name,
- int (*connect)(struct guest_info * info,
+ int (*connect)(struct v3_vm_info * vm,
void * frontend_data,
struct v3_dev_blk_ops * ops,
v3_cfg_tree_t * cfg,
frontend->connect = connect;
frontend->priv_data = priv_data;
- list_add(&(frontend->blk_node), &(info->dev_mgr.blk_list));
- v3_htable_insert(info->dev_mgr.blk_table, (addr_t)(name), (addr_t)frontend);
+ list_add(&(frontend->blk_node), &(vm->dev_mgr.blk_list));
+ v3_htable_insert(vm->dev_mgr.blk_table, (addr_t)(name), (addr_t)frontend);
return 0;
}
-int v3_dev_connect_blk(struct guest_info * info,
+int v3_dev_connect_blk(struct v3_vm_info * vm,
char * frontend_name,
struct v3_dev_blk_ops * ops,
v3_cfg_tree_t * cfg,
struct blk_frontend * frontend = NULL;
- frontend = (struct blk_frontend *)v3_htable_search(info->dev_mgr.blk_table,
+ frontend = (struct blk_frontend *)v3_htable_search(vm->dev_mgr.blk_table,
(addr_t)frontend_name);
if (frontend == NULL) {
return 0;
}
- if (frontend->connect(info, frontend->priv_data, ops, cfg, private_data) == -1) {
+ if (frontend->connect(vm, frontend->priv_data, ops, cfg, private_data) == -1) {
PrintError("Error connecting to block frontend %s\n", frontend_name);
return -1;
}
struct net_frontend {
- int (*connect)(struct guest_info * info,
+ int (*connect)(struct v3_vm_info * vm,
void * frontend_data,
struct v3_dev_net_ops * ops,
v3_cfg_tree_t * cfg,
};
-int v3_dev_add_net_frontend(struct guest_info * info,
+int v3_dev_add_net_frontend(struct v3_vm_info * vm,
char * name,
- int (*connect)(struct guest_info * info,
+ int (*connect)(struct v3_vm_info * vm,
void * frontend_data,
struct v3_dev_net_ops * ops,
v3_cfg_tree_t * cfg,
frontend->connect = connect;
frontend->priv_data = priv_data;
- list_add(&(frontend->net_node), &(info->dev_mgr.net_list));
- v3_htable_insert(info->dev_mgr.net_table, (addr_t)(name), (addr_t)frontend);
+ list_add(&(frontend->net_node), &(vm->dev_mgr.net_list));
+ v3_htable_insert(vm->dev_mgr.net_table, (addr_t)(name), (addr_t)frontend);
return 0;
}
-int v3_dev_connect_net(struct guest_info * info,
+int v3_dev_connect_net(struct v3_vm_info * vm,
char * frontend_name,
struct v3_dev_net_ops * ops,
v3_cfg_tree_t * cfg,
{
struct net_frontend * frontend = NULL;
- frontend = (struct net_frontend *)v3_htable_search(info->dev_mgr.net_table,
+ frontend = (struct net_frontend *)v3_htable_search(vm->dev_mgr.net_table,
(addr_t)frontend_name);
if (frontend == NULL) {
return 0;
}
- if (frontend->connect(info, frontend->priv_data, ops, cfg, private_data) == -1) {
+ if (frontend->connect(vm, frontend->priv_data, ops, cfg, private_data) == -1) {
PrintError("Error connecting to net frontend %s\n", frontend_name);
return -1;
}
int pde_index = PDE32_INDEX(fault_addr);
int pte_index = PTE32_INDEX(fault_addr);
- struct v3_shadow_region * region = v3_get_shadow_region(info, fault_addr);
+ struct v3_shadow_region * region = v3_get_shadow_region(info->vm_info, fault_addr);
if (region == NULL) {
PrintError("Invalid region in passthrough page fault 32, addr=%p\n",
int pde_index = PDE32PAE_INDEX(fault_addr);
int pte_index = PTE32PAE_INDEX(fault_addr);
- struct v3_shadow_region * region = v3_get_shadow_region(info, fault_addr);
+ struct v3_shadow_region * region = v3_get_shadow_region(info->vm_info, fault_addr);
if (region == NULL) {
PrintError("Invalid region in passthrough page fault 32PAE, addr=%p\n",
- struct v3_shadow_region * region = v3_get_shadow_region(info, fault_addr);
+ struct v3_shadow_region * region = v3_get_shadow_region(info->vm_info, fault_addr);
if (region == NULL) {
PrintError("Invalid region in passthrough page fault 64, addr=%p\n",
* interrupt and stall the guest.
*/
if (!v3_intr_pending(info)) {
- v3_raise_irq(info, 0);
+ v3_advance_time(info);
}
#include <palacios/vmm_host_events.h>
#include <palacios/vm_guest.h>
-int v3_init_host_events(struct guest_info * info) {
- struct v3_host_events * host_evts = &(info->host_event_hooks);
+int v3_init_host_events(struct v3_vm_info * vm) {
+ struct v3_host_events * host_evts = &(vm->host_event_hooks);
INIT_LIST_HEAD(&(host_evts->keyboard_events));
INIT_LIST_HEAD(&(host_evts->mouse_events));
}
-int v3_hook_host_event(struct guest_info * info,
+int v3_hook_host_event(struct v3_vm_info * vm,
v3_host_evt_type_t event_type,
union v3_host_event_handler cb,
void * private_data) {
- struct v3_host_events * host_evts = &(info->host_event_hooks);
+ struct v3_host_events * host_evts = &(vm->host_event_hooks);
struct v3_host_event_hook * hook = NULL;
hook = (struct v3_host_event_hook *)V3_Malloc(sizeof(struct v3_host_event_hook));
}
-int v3_deliver_keyboard_event(struct guest_info * info,
+int v3_deliver_keyboard_event(struct v3_vm_info * vm,
struct v3_keyboard_event * evt) {
- struct v3_host_events * host_evts = &(info->host_event_hooks);
+ struct v3_host_events * host_evts = &(vm->host_event_hooks);
struct v3_host_event_hook * hook = NULL;
- if (info->run_state != VM_RUNNING) {
+ if (vm->run_state != VM_RUNNING) {
return -1;
}
list_for_each_entry(hook, &(host_evts->keyboard_events), link) {
- if (hook->cb.keyboard_handler(info, evt, hook->private_data) == -1) {
+ if (hook->cb.keyboard_handler(vm, evt, hook->private_data) == -1) {
return -1;
}
}
}
-int v3_deliver_mouse_event(struct guest_info * info,
+int v3_deliver_mouse_event(struct v3_vm_info * vm,
struct v3_mouse_event * evt) {
- struct v3_host_events * host_evts = &(info->host_event_hooks);
+ struct v3_host_events * host_evts = &(vm->host_event_hooks);
struct v3_host_event_hook * hook = NULL;
- if (info->run_state != VM_RUNNING) {
+ if (vm->run_state != VM_RUNNING) {
return -1;
}
list_for_each_entry(hook, &(host_evts->mouse_events), link) {
- if (hook->cb.mouse_handler(info, evt, hook->private_data) == -1) {
+ if (hook->cb.mouse_handler(vm, evt, hook->private_data) == -1) {
return -1;
}
}
}
-int v3_deliver_timer_event(struct guest_info * info,
+int v3_deliver_timer_event(struct v3_vm_info * vm,
struct v3_timer_event * evt) {
- struct v3_host_events * host_evts = &(info->host_event_hooks);
+ struct v3_host_events * host_evts = &(vm->host_event_hooks);
struct v3_host_event_hook * hook = NULL;
- if (info->run_state != VM_RUNNING) {
+ if (vm->run_state != VM_RUNNING) {
return -1;
}
list_for_each_entry(hook, &(host_evts->timer_events), link) {
- if (hook->cb.timer_handler(info, evt, hook->private_data) == -1) {
+ if (hook->cb.timer_handler(vm, evt, hook->private_data) == -1) {
return -1;
}
}
-void v3_init_hypercall_map(struct guest_info * info) {
- info->hcall_map.rb_node = NULL;
+void v3_init_hypercall_map(struct v3_vm_info * vm) {
+ vm->hcall_map.rb_node = NULL;
- v3_register_hypercall(info, HYPERCALL_TEST_HCALL, hcall_test, NULL);
+ v3_register_hypercall(vm, HYPERCALL_TEST_HCALL, hcall_test, NULL);
}
-static inline struct hypercall * __insert_hypercall(struct guest_info * info, struct hypercall * hcall) {
- struct rb_node ** p = &(info->hcall_map.rb_node);
+static inline struct hypercall * __insert_hypercall(struct v3_vm_info * vm, struct hypercall * hcall) {
+ struct rb_node ** p = &(vm->hcall_map.rb_node);
struct rb_node * parent = NULL;
struct hypercall * tmp_hcall = NULL;
}
-static inline struct hypercall * insert_hypercall(struct guest_info * info, struct hypercall * hcall) {
+static inline struct hypercall * insert_hypercall(struct v3_vm_info * vm, struct hypercall * hcall) {
struct hypercall * ret;
- if ((ret = __insert_hypercall(info, hcall))) {
+ if ((ret = __insert_hypercall(vm, hcall))) {
return ret;
}
- v3_rb_insert_color(&(hcall->tree_node), &(info->hcall_map));
+ v3_rb_insert_color(&(hcall->tree_node), &(vm->hcall_map));
return NULL;
}
-static struct hypercall * get_hypercall(struct guest_info * info, uint_t id) {
- struct rb_node * n = info->hcall_map.rb_node;
+static struct hypercall * get_hypercall(struct v3_vm_info * vm, uint_t id) {
+ struct rb_node * n = vm->hcall_map.rb_node;
struct hypercall * hcall = NULL;
while (n) {
}
-int v3_register_hypercall(struct guest_info * info, uint_t hypercall_id,
+int v3_register_hypercall(struct v3_vm_info * vm, uint_t hypercall_id,
int (*hypercall)(struct guest_info * info, uint_t hcall_id, void * priv_data),
void * priv_data) {
hcall->priv_data = priv_data;
hcall->hcall_fn = hypercall;
- if (insert_hypercall(info, hcall)) {
+ if (insert_hypercall(vm, hcall)) {
V3_Free(hcall);
return -1;
}
int v3_handle_hypercall(struct guest_info * info) {
uint_t hypercall_id = *(uint_t *)&info->vm_regs.rax;
- struct hypercall * hcall = get_hypercall(info, hypercall_id);
+ struct hypercall * hcall = get_hypercall(info->vm_info, hypercall_id);
if (!hcall) {
PrintError("Invalid Hypercall (%d(0x%x) not registered)\n",
};
-void v3_init_interrupt_state(struct guest_info * info) {
+struct intr_router {
+ struct intr_router_ops * router_ops;
- info->intr_state.irq_pending = 0;
- info->intr_state.irq_started = 0;
- info->intr_state.irq_vector = 0;
+ void * priv_data;
+ struct list_head router_node;
+
+};
+
+void v3_init_intr_controllers(struct guest_info * info) {
+ struct v3_intr_core_state * intr_state = &(info->intr_core_state);
- INIT_LIST_HEAD(&(info->intr_state.controller_list));
+ intr_state->irq_pending = 0;
+ intr_state->irq_started = 0;
+ intr_state->irq_vector = 0;
- v3_lock_init(&(info->intr_state.irq_lock));
+ INIT_LIST_HEAD(&(intr_state->controller_list));
+}
+
+void v3_init_intr_routers(struct v3_vm_info * vm) {
+
+ INIT_LIST_HEAD(&(vm->intr_routers.router_list));
+
+ v3_lock_init(&(vm->intr_routers.irq_lock));
- memset((uchar_t *)(info->intr_state.hooks), 0, sizeof(struct v3_irq_hook *) * 256);
+ memset((uchar_t *)(vm->intr_routers.hooks), 0, sizeof(struct v3_irq_hook *) * 256);
}
-void v3_register_intr_controller(struct guest_info * info, struct intr_ctrl_ops * ops, void * state) {
+
+int v3_register_intr_controller(struct guest_info * info, struct intr_ctrl_ops * ops, void * priv_data) {
struct intr_controller * ctrlr = (struct intr_controller *)V3_Malloc(sizeof(struct intr_controller));
- ctrlr->priv_data = state;
+ ctrlr->priv_data = priv_data;
ctrlr->ctrl_ops = ops;
- list_add(&(ctrlr->ctrl_node), &(info->intr_state.controller_list));
-
+ list_add(&(ctrlr->ctrl_node), &(info->intr_core_state.controller_list));
+
+ return 0;
}
+int v3_register_intr_router(struct v3_vm_info * vm, struct intr_router_ops * ops, void * priv_data) {
+ struct intr_router * router = (struct intr_router *)V3_Malloc(sizeof(struct intr_router));
+
+ router->priv_data = priv_data;
+ router->router_ops = ops;
+
+ list_add(&(router->router_node), &(vm->intr_routers.router_list));
+
+ return 0;
+}
-static inline struct v3_irq_hook * get_irq_hook(struct guest_info * info, uint_t irq) {
+static inline struct v3_irq_hook * get_irq_hook(struct v3_vm_info * vm, uint_t irq) {
V3_ASSERT(irq <= 256);
- return info->intr_state.hooks[irq];
+ return vm->intr_routers.hooks[irq];
}
-int v3_hook_irq(struct guest_info * info,
+int v3_hook_irq(struct v3_vm_info * vm,
uint_t irq,
- int (*handler)(struct guest_info * info, struct v3_interrupt * intr, void * priv_data),
+ int (*handler)(struct v3_vm_info * vm, struct v3_interrupt * intr, void * priv_data),
void * priv_data)
{
struct v3_irq_hook * hook = (struct v3_irq_hook *)V3_Malloc(sizeof(struct v3_irq_hook));
return -1;
}
- if (get_irq_hook(info, irq) != NULL) {
+ if (get_irq_hook(vm, irq) != NULL) {
PrintError("IRQ %d already hooked\n", irq);
return -1;
}
hook->handler = handler;
hook->priv_data = priv_data;
- info->intr_state.hooks[irq] = hook;
+ vm->intr_routers.hooks[irq] = hook;
- if (V3_Hook_Interrupt(info, irq)) {
+ if (V3_Hook_Interrupt(vm, irq)) {
PrintError("hook_irq: failed to hook irq %d\n", irq);
return -1;
} else {
-static int passthrough_irq_handler(struct guest_info * info, struct v3_interrupt * intr, void * priv_data) {
+static int passthrough_irq_handler(struct v3_vm_info * vm, struct v3_interrupt * intr, void * priv_data) {
PrintDebug("[passthrough_irq_handler] raise_irq=%d (guest=0x%p)\n",
- intr->irq, (void *)info);
+ intr->irq, (void *)vm);
- return v3_raise_irq(info, intr->irq);
+ return v3_raise_irq(vm, intr->irq);
}
-int v3_hook_passthrough_irq(struct guest_info * info, uint_t irq) {
- int rc = v3_hook_irq(info, irq, passthrough_irq_handler, NULL);
+int v3_hook_passthrough_irq(struct v3_vm_info * vm, uint_t irq) {
+ int rc = v3_hook_irq(vm, irq, passthrough_irq_handler, NULL);
if (rc) {
- PrintError("guest_irq_injection: failed to hook irq 0x%x (guest=0x%p)\n", irq, (void *)info);
+ PrintError("guest_irq_injection: failed to hook irq 0x%x (guest=0x%p)\n", irq, (void *)vm);
return -1;
} else {
- PrintDebug("guest_irq_injection: hooked irq 0x%x (guest=0x%p)\n", irq, (void *)info);
+ PrintDebug("guest_irq_injection: hooked irq 0x%x (guest=0x%p)\n", irq, (void *)vm);
return 0;
}
}
-int v3_deliver_irq(struct guest_info * info, struct v3_interrupt * intr) {
+int v3_deliver_irq(struct v3_vm_info * vm, struct v3_interrupt * intr) {
PrintDebug("v3_deliver_irq: irq=%d state=0x%p, \n", intr->irq, (void *)intr);
- struct v3_irq_hook * hook = get_irq_hook(info, intr->irq);
+ struct v3_irq_hook * hook = get_irq_hook(vm, intr->irq);
if (hook == NULL) {
PrintError("Attempting to deliver interrupt to non registered hook(irq=%d)\n", intr->irq);
return -1;
}
- return hook->handler(info, intr, hook->priv_data);
+ return hook->handler(vm, intr, hook->priv_data);
}
int v3_raise_virq(struct guest_info * info, int irq) {
- struct v3_intr_state * intr_state = &(info->intr_state);
+ struct v3_intr_core_state * intr_state = &(info->intr_core_state);
int major = irq / 8;
int minor = irq % 8;
}
int v3_lower_virq(struct guest_info * info, int irq) {
- struct v3_intr_state * intr_state = &(info->intr_state);
+ struct v3_intr_core_state * intr_state = &(info->intr_core_state);
int major = irq / 8;
int minor = irq % 8;
}
-int v3_lower_irq(struct guest_info * info, int irq) {
- struct intr_controller * ctrl = NULL;
- struct v3_intr_state * intr_state = &(info->intr_state);
+int v3_lower_irq(struct v3_vm_info * vm, int irq) {
+ struct intr_router * router = NULL;
+ struct v3_intr_routers * routers = &(vm->intr_routers);
// PrintDebug("[v3_lower_irq]\n");
- addr_t irq_state = v3_lock_irqsave(intr_state->irq_lock);
+ addr_t irq_state = v3_lock_irqsave(routers->irq_lock);
- list_for_each_entry(ctrl, &(intr_state->controller_list), ctrl_node) {
- ctrl->ctrl_ops->lower_intr(info, ctrl->priv_data, irq);
+ list_for_each_entry(router, &(routers->router_list), router_node) {
+ router->router_ops->lower_intr(vm, router->priv_data, irq);
}
- v3_unlock_irqrestore(intr_state->irq_lock, irq_state);
+ v3_unlock_irqrestore(routers->irq_lock, irq_state);
return 0;
}
-int v3_raise_irq(struct guest_info * info, int irq) {
- struct intr_controller * ctrl = NULL;
- struct v3_intr_state * intr_state = &(info->intr_state);
+int v3_raise_irq(struct v3_vm_info * vm, int irq) {
+ struct intr_router * router = NULL;
+ struct v3_intr_routers * routers = &(vm->intr_routers);
// PrintDebug("[v3_raise_irq (%d)]\n", irq);
- addr_t irq_state = v3_lock_irqsave(intr_state->irq_lock);
+ addr_t irq_state = v3_lock_irqsave(routers->irq_lock);
- list_for_each_entry(ctrl, &(intr_state->controller_list), ctrl_node) {
- ctrl->ctrl_ops->raise_intr(info, ctrl->priv_data, irq);
+ list_for_each_entry(router, &(routers->router_list), router_node) {
+ router->router_ops->raise_intr(vm, router->priv_data, irq);
}
- v3_unlock_irqrestore(intr_state->irq_lock, irq_state);
+ v3_unlock_irqrestore(routers->irq_lock, irq_state);
return 0;
}
v3_intr_type_t v3_intr_pending(struct guest_info * info) {
- struct v3_intr_state * intr_state = &(info->intr_state);
+ struct v3_intr_core_state * intr_state = &(info->intr_core_state);
struct intr_controller * ctrl = NULL;
int ret = V3_INVALID_INTR;
int i = 0;
uint32_t v3_get_intr(struct guest_info * info) {
- struct v3_intr_state * intr_state = &(info->intr_state);
+ struct v3_intr_core_state * intr_state = &(info->intr_core_state);
struct intr_controller * ctrl = NULL;
uint_t ret = 0;
int i = 0;
int v3_injecting_intr(struct guest_info * info, uint_t intr_num, v3_intr_type_t type) {
- struct v3_intr_state * intr_state = &(info->intr_state);
+ struct v3_intr_core_state * intr_state = &(info->intr_core_state);
if (type == V3_EXTERNAL_IRQ) {
struct intr_controller * ctrl = NULL;
#endif
-static int default_write(uint16_t port, void *src, uint_t length, void * priv_data);
-static int default_read(uint16_t port, void * dst, uint_t length, void * priv_data);
+static int default_write(struct guest_info * core, uint16_t port, void *src, uint_t length, void * priv_data);
+static int default_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * priv_data);
-void v3_init_io_map(struct guest_info * info) {
+void v3_init_io_map(struct v3_vm_info * vm) {
- info->io_map.map.rb_node = NULL;
- info->io_map.arch_data = NULL;
- info->io_map.update_map = NULL;
+ vm->io_map.map.rb_node = NULL;
+ vm->io_map.arch_data = NULL;
+ vm->io_map.update_map = NULL;
}
-static inline struct v3_io_hook * __insert_io_hook(struct guest_info * info, struct v3_io_hook * hook) {
- struct rb_node ** p = &(info->io_map.map.rb_node);
+static inline struct v3_io_hook * __insert_io_hook(struct v3_vm_info * vm, struct v3_io_hook * hook) {
+ struct rb_node ** p = &(vm->io_map.map.rb_node);
struct rb_node * parent = NULL;
struct v3_io_hook * tmp_hook = NULL;
}
-static inline struct v3_io_hook * insert_io_hook(struct guest_info * info, struct v3_io_hook * hook) {
+static inline struct v3_io_hook * insert_io_hook(struct v3_vm_info * vm, struct v3_io_hook * hook) {
struct v3_io_hook * ret;
- if ((ret = __insert_io_hook(info, hook))) {
+ if ((ret = __insert_io_hook(vm, hook))) {
return ret;
}
- v3_rb_insert_color(&(hook->tree_node), &(info->io_map.map));
+ v3_rb_insert_color(&(hook->tree_node), &(vm->io_map.map));
return NULL;
}
-struct v3_io_hook * v3_get_io_hook(struct guest_info * info, uint16_t port) {
- struct rb_node * n = info->io_map.map.rb_node;
+struct v3_io_hook * v3_get_io_hook(struct v3_vm_info * vm, uint16_t port) {
+ struct rb_node * n = vm->io_map.map.rb_node;
struct v3_io_hook * hook = NULL;
while (n) {
-int v3_hook_io_port(struct guest_info * info, uint16_t port,
- int (*read)(uint16_t port, void * dst, uint_t length, void * priv_data),
- int (*write)(uint16_t port, void * src, uint_t length, void * priv_data),
+int v3_hook_io_port(struct v3_vm_info * vm, uint16_t port,
+ int (*read)(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * priv_data),
+ int (*write)(struct guest_info * core, uint16_t port, void * src, uint_t length, void * priv_data),
void * priv_data) {
struct v3_io_hook * io_hook = (struct v3_io_hook *)V3_Malloc(sizeof(struct v3_io_hook));
io_hook->priv_data = priv_data;
- if (insert_io_hook(info, io_hook)) {
+ if (insert_io_hook(vm, io_hook)) {
PrintError("Could not insert IO hook for port %u (0x%x)\n", port, port);
V3_Free(io_hook);
return -1;
}
- if (info->io_map.update_map) {
- if (info->io_map.update_map(info, port,
+ if (vm->io_map.update_map) {
+ if (vm->io_map.update_map(vm, port,
((read == NULL) ? 0 : 1),
((write == NULL) ? 0 : 1)) == -1) {
PrintError("Could not update IO map for port %u (0x%x)\n", port, port);
return 0;
}
-int v3_unhook_io_port(struct guest_info * info, uint16_t port) {
- struct v3_io_hook * hook = v3_get_io_hook(info, port);
+int v3_unhook_io_port(struct v3_vm_info * vm, uint16_t port) {
+ struct v3_io_hook * hook = v3_get_io_hook(vm, port);
if (hook == NULL) {
PrintError("Could not find port to unhook %u (0x%x)\n", port, port);
return -1;
}
- v3_rb_erase(&(hook->tree_node), &(info->io_map.map));
+ v3_rb_erase(&(hook->tree_node), &(vm->io_map.map));
- if (info->io_map.update_map) {
+ if (vm->io_map.update_map) {
// set the arch map to default (this should be 1, 1)
- info->io_map.update_map(info, port, 0, 0);
+ vm->io_map.update_map(vm, port, 0, 0);
}
V3_Free(hook);
-void v3_refresh_io_map(struct guest_info * info) {
- struct v3_io_map * io_map = &(info->io_map);
+void v3_refresh_io_map(struct v3_vm_info * vm) {
+ struct v3_io_map * io_map = &(vm->io_map);
struct v3_io_hook * tmp = NULL;
if (io_map->update_map == NULL) {
}
v3_rb_for_each_entry(tmp, &(io_map->map), tree_node) {
- io_map->update_map(info, tmp->port,
+ io_map->update_map(vm, tmp->port,
((tmp->read == NULL) ? 0 : 1),
((tmp->write == NULL) ? 0 : 1));
}
-void v3_print_io_map(struct guest_info * info) {
- struct v3_io_map * io_map = &(info->io_map);
+void v3_print_io_map(struct v3_vm_info * vm) {
+ struct v3_io_map * io_map = &(vm->io_map);
struct v3_io_hook * tmp_hook = NULL;
V3_Print("VMM IO Map\n");
/* FIX ME */
-static int default_write(uint16_t port, void * src, uint_t length, void * priv_data) {
+static int default_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * priv_data) {
if (length == 1) {
v3_outb(port, *(uint8_t *)src);
} else if (length == 2) {
return length;
}
-static int default_read(uint16_t port, void * dst, uint_t length, void * priv_data) {
+static int default_read(struct guest_info * core, uint16_t port, void * dst, uint_t length, void * priv_data) {
if (length == 1) {
*(uint8_t *)dst = v3_inb(port);
} else if (length == 2) {
static inline
-struct v3_shadow_region * insert_shadow_region(struct guest_info * info,
+struct v3_shadow_region * insert_shadow_region(struct v3_vm_info * vm,
struct v3_shadow_region * region);
static int mem_offset_hypercall(struct guest_info * info, uint_t hcall_id, void * private_data) {
PrintDebug("V3Vee: Memory offset hypercall (offset=%p)\n",
- (void *)(info->mem_map.base_region.host_addr));
+ (void *)(info->vm_info->mem_map.base_region.host_addr));
- info->vm_regs.rbx = info->mem_map.base_region.host_addr;
+ info->vm_regs.rbx = info->vm_info->mem_map.base_region.host_addr;
return 0;
}
-int v3_init_shadow_map(struct guest_info * info) {
- v3_shdw_map_t * map = &(info->mem_map);
- addr_t mem_pages = info->mem_size >> 12;
+int v3_init_shadow_map(struct v3_vm_info * vm) {
+ struct v3_mem_map * map = &(vm->mem_map);
+ addr_t mem_pages = vm->mem_size >> 12;
map->shdw_regions.rb_node = NULL;
- map->hook_hva = (addr_t)V3_VAddr(V3_AllocPages(1));
+
+
+ map->hook_hvas = V3_VAddr(V3_AllocPages(vm->num_cores));
+
// There is an underlying region that contains all of the guest memory
// PrintDebug("Mapping %d pages of memory (%u bytes)\n", (int)mem_pages, (uint_t)info->mem_size);
//memset(V3_VAddr((void *)map->base_region.host_addr), 0xffffffff, map->base_region.guest_end);
- v3_register_hypercall(info, MEM_OFFSET_HCALL, mem_offset_hypercall, NULL);
+ v3_register_hypercall(vm, MEM_OFFSET_HCALL, mem_offset_hypercall, NULL);
return 0;
}
-void v3_delete_shadow_map(struct guest_info * info) {
- struct rb_node * node = v3_rb_first(&(info->mem_map.shdw_regions));
+
+static inline addr_t get_hook_hva(struct guest_info * info) {
+ return (addr_t)(info->vm_info->mem_map.hook_hvas + (PAGE_SIZE_4KB * info->cpu_id));
+}
+
+void v3_delete_shadow_map(struct v3_vm_info * vm) {
+ struct rb_node * node = v3_rb_first(&(vm->mem_map.shdw_regions));
struct v3_shadow_region * reg;
struct rb_node * tmp_node = NULL;
tmp_node = node;
node = v3_rb_next(node);
- v3_delete_shadow_region(info, reg);
+ v3_delete_shadow_region(vm, reg);
}
- V3_FreePage((void *)(info->mem_map.base_region.host_addr));
- V3_FreePage(V3_PAddr((void *)(info->mem_map.hook_hva)));
+ V3_FreePage((void *)(vm->mem_map.base_region.host_addr));
+ V3_FreePage(V3_PAddr((void *)(vm->mem_map.hook_hvas)));
}
-int v3_add_shadow_mem( struct guest_info * info,
+int v3_add_shadow_mem( struct v3_vm_info * vm,
addr_t guest_addr_start,
addr_t guest_addr_end,
addr_t host_addr)
entry->read_hook = NULL;
entry->priv_data = NULL;
- if (insert_shadow_region(info, entry)) {
+ if (insert_shadow_region(vm, entry)) {
V3_Free(entry);
return -1;
}
-int v3_hook_write_mem(struct guest_info * info, addr_t guest_addr_start, addr_t guest_addr_end,
+int v3_hook_write_mem(struct v3_vm_info * vm, addr_t guest_addr_start, addr_t guest_addr_end,
addr_t host_addr,
int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
void * priv_data) {
entry->read_hook = NULL;
entry->priv_data = priv_data;
- if (insert_shadow_region(info, entry)) {
+ if (insert_shadow_region(vm, entry)) {
V3_Free(entry);
return -1;
}
return 0;
}
-int v3_hook_full_mem(struct guest_info * info, addr_t guest_addr_start, addr_t guest_addr_end,
+int v3_hook_full_mem(struct v3_vm_info * vm, addr_t guest_addr_start, addr_t guest_addr_end,
int (*read)(addr_t guest_addr, void * dst, uint_t length, void * priv_data),
int (*write)(addr_t guest_addr, void * src, uint_t length, void * priv_data),
void * priv_data) {
entry->read_hook = read;
entry->priv_data = priv_data;
- if (insert_shadow_region(info, entry)) {
+ if (insert_shadow_region(vm, entry)) {
V3_Free(entry);
return -1;
}
// This will unhook the memory hook registered at start address
// We do not support unhooking subregions
-int v3_unhook_mem(struct guest_info * info, addr_t guest_addr_start) {
- struct v3_shadow_region * reg = v3_get_shadow_region(info, guest_addr_start);
+int v3_unhook_mem(struct v3_vm_info * vm, addr_t guest_addr_start) {
+ struct v3_shadow_region * reg = v3_get_shadow_region(vm, guest_addr_start);
if ((reg->host_type != SHDW_REGION_FULL_HOOK) ||
(reg->host_type != SHDW_REGION_WRITE_HOOK)) {
return -1;
}
- v3_delete_shadow_region(info, reg);
+ v3_delete_shadow_region(vm, reg);
return 0;
}
static inline
-struct v3_shadow_region * __insert_shadow_region(struct guest_info * info,
+struct v3_shadow_region * __insert_shadow_region(struct v3_vm_info * vm,
struct v3_shadow_region * region) {
- struct rb_node ** p = &(info->mem_map.shdw_regions.rb_node);
+ struct rb_node ** p = &(vm->mem_map.shdw_regions.rb_node);
struct rb_node * parent = NULL;
struct v3_shadow_region * tmp_region;
static inline
-struct v3_shadow_region * insert_shadow_region(struct guest_info * info,
+struct v3_shadow_region * insert_shadow_region(struct v3_vm_info * vm,
struct v3_shadow_region * region) {
struct v3_shadow_region * ret;
+ int i = 0;
- if ((ret = __insert_shadow_region(info, region))) {
+ if ((ret = __insert_shadow_region(vm, region))) {
return ret;
}
- v3_rb_insert_color(&(region->tree_node), &(info->mem_map.shdw_regions));
+ v3_rb_insert_color(&(region->tree_node), &(vm->mem_map.shdw_regions));
- // flush virtual page tables
- // 3 cases shadow, shadow passthrough, and nested
- if (info->shdw_pg_mode == SHADOW_PAGING) {
- v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
+ for (i = 0; i < vm->num_cores; i++) {
+ struct guest_info * info = &(vm->cores[i]);
- if (mem_mode == PHYSICAL_MEM) {
- addr_t cur_addr;
+ // flush virtual page tables
+ // 3 cases shadow, shadow passthrough, and nested
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
+
+ if (mem_mode == PHYSICAL_MEM) {
+ addr_t cur_addr;
+
+ for (cur_addr = region->guest_start;
+ cur_addr < region->guest_end;
+ cur_addr += PAGE_SIZE_4KB) {
+ v3_invalidate_passthrough_addr(info, cur_addr);
+ }
+ } else {
+ v3_invalidate_shadow_pts(info);
+ }
+
+ } else if (info->shdw_pg_mode == NESTED_PAGING) {
+ addr_t cur_addr;
+
for (cur_addr = region->guest_start;
cur_addr < region->guest_end;
cur_addr += PAGE_SIZE_4KB) {
- v3_invalidate_passthrough_addr(info, cur_addr);
+
+ v3_invalidate_nested_addr(info, cur_addr);
}
- } else {
- v3_invalidate_shadow_pts(info);
- }
-
- } else if (info->shdw_pg_mode == NESTED_PAGING) {
- addr_t cur_addr;
-
- for (cur_addr = region->guest_start;
- cur_addr < region->guest_end;
- cur_addr += PAGE_SIZE_4KB) {
-
- v3_invalidate_nested_addr(info, cur_addr);
}
}
addr_t fault_gva, addr_t fault_gpa,
pf_error_t access_info)
{
- struct v3_shadow_region * reg = v3_get_shadow_region(info, fault_gpa);
+ struct v3_shadow_region * reg = v3_get_shadow_region(info->vm_info, fault_gpa);
PrintDebug("Handling Special Page Fault\n");
int v3_handle_mem_full_hook(struct guest_info * info, addr_t guest_va, addr_t guest_pa,
struct v3_shadow_region * reg, pf_error_t access_info) {
- addr_t op_addr = info->mem_map.hook_hva;
+ addr_t op_addr = get_hook_hva(info);
if (access_info.write == 1) {
if (v3_emulate_write_op(info, guest_va, guest_pa, op_addr,
-struct v3_shadow_region * v3_get_shadow_region(struct guest_info * info, addr_t guest_addr) {
- struct rb_node * n = info->mem_map.shdw_regions.rb_node;
+struct v3_shadow_region * v3_get_shadow_region(struct v3_vm_info * vm, addr_t guest_addr) {
+ struct rb_node * n = vm->mem_map.shdw_regions.rb_node;
struct v3_shadow_region * reg = NULL;
while (n) {
// There is not registered region, so we check if its a valid address in the base region
- if (guest_addr > info->mem_map.base_region.guest_end) {
+ if (guest_addr > vm->mem_map.base_region.guest_end) {
PrintError("Guest Address Exceeds Base Memory Size (ga=%p), (limit=%p)\n",
- (void *)guest_addr, (void *)info->mem_map.base_region.guest_end);
- v3_print_mem_map(info);
+ (void *)guest_addr, (void *)vm->mem_map.base_region.guest_end);
+ v3_print_mem_map(vm);
return NULL;
}
- return &(info->mem_map.base_region);
+ return &(vm->mem_map.base_region);
}
-void v3_delete_shadow_region(struct guest_info * info, struct v3_shadow_region * reg) {
+void v3_delete_shadow_region(struct v3_vm_info * vm, struct v3_shadow_region * reg) {
+ int i = 0;
+
if (reg == NULL) {
return;
}
- // flush virtual page tables
- // 3 cases shadow, shadow passthrough, and nested
- if (info->shdw_pg_mode == SHADOW_PAGING) {
- v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
+ for (i = 0; i < vm->num_cores; i++) {
+ struct guest_info * info = &(vm->cores[i]);
+
+ // flush virtual page tables
+ // 3 cases shadow, shadow passthrough, and nested
+
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ v3_mem_mode_t mem_mode = v3_get_vm_mem_mode(info);
- if (mem_mode == PHYSICAL_MEM) {
- addr_t cur_addr;
+ if (mem_mode == PHYSICAL_MEM) {
+ addr_t cur_addr;
+ for (cur_addr = reg->guest_start;
+ cur_addr < reg->guest_end;
+ cur_addr += PAGE_SIZE_4KB) {
+ v3_invalidate_passthrough_addr(info, cur_addr);
+ }
+ } else {
+ v3_invalidate_shadow_pts(info);
+ }
+
+ } else if (info->shdw_pg_mode == NESTED_PAGING) {
+ addr_t cur_addr;
+
for (cur_addr = reg->guest_start;
cur_addr < reg->guest_end;
cur_addr += PAGE_SIZE_4KB) {
- v3_invalidate_passthrough_addr(info, cur_addr);
+
+ v3_invalidate_nested_addr(info, cur_addr);
}
- } else {
- v3_invalidate_shadow_pts(info);
- }
-
- } else if (info->shdw_pg_mode == NESTED_PAGING) {
- addr_t cur_addr;
-
- for (cur_addr = reg->guest_start;
- cur_addr < reg->guest_end;
- cur_addr += PAGE_SIZE_4KB) {
-
- v3_invalidate_nested_addr(info, cur_addr);
}
}
-
- v3_rb_erase(&(reg->tree_node), &(info->mem_map.shdw_regions));
+ v3_rb_erase(&(reg->tree_node), &(vm->mem_map.shdw_regions));
V3_Free(reg);
-void v3_print_mem_map(struct guest_info * info) {
- struct rb_node * node = v3_rb_first(&(info->mem_map.shdw_regions));
- struct v3_shadow_region * reg = &(info->mem_map.base_region);
+void v3_print_mem_map(struct v3_vm_info * vm) {
+ struct rb_node * node = v3_rb_first(&(vm->mem_map.shdw_regions));
+ struct v3_shadow_region * reg = &(vm->mem_map.base_region);
int i = 0;
V3_Print("Memory Layout:\n");
#include <palacios/vm_guest.h>
-void v3_init_msr_map(struct guest_info * info) {
- struct v3_msr_map * msr_map = &(info->msr_map);
+void v3_init_msr_map(struct v3_vm_info * vm) {
+ struct v3_msr_map * msr_map = &(vm->msr_map);
INIT_LIST_HEAD(&(msr_map->hook_list));
msr_map->num_hooks = 0;
PrintDebug("MSR write for msr 0x%x\n", msr_num);
- hook = v3_get_msr_hook(info, msr_num);
+ hook = v3_get_msr_hook(info->vm_info, msr_num);
if (!hook) {
PrintError("Hook for MSR write %d not found\n", msr_num);
struct v3_msr msr_val;
struct v3_msr_hook * hook = NULL;
- hook = v3_get_msr_hook(info, msr_num);
+ hook = v3_get_msr_hook(info->vm_info, msr_num);
if (!hook) {
PrintError("Hook for MSR read %d not found\n", msr_num);
return 0;
}
-int v3_hook_msr(struct guest_info * info, uint_t msr,
+int v3_hook_msr(struct v3_vm_info * vm, uint_t msr,
int (*read)(uint_t msr, struct v3_msr * dst, void * priv_data),
int (*write)(uint_t msr, struct v3_msr src, void * priv_data),
void * priv_data) {
- struct v3_msr_map * msr_map = &(info->msr_map);
+ struct v3_msr_map * msr_map = &(vm->msr_map);
struct v3_msr_hook * hook = NULL;
hook = (struct v3_msr_hook *)V3_Malloc(sizeof(struct v3_msr_hook));
list_add(&(hook->link), &(msr_map->hook_list));
if (msr_map->update_map) {
- msr_map->update_map(info, msr,
+ msr_map->update_map(vm, msr,
(read == NULL) ? 0 : 1,
(write == NULL) ? 0 : 1);
}
}
-int v3_unhook_msr(struct guest_info * info, uint_t msr) {
+int v3_unhook_msr(struct v3_vm_info * vm, uint_t msr) {
PrintError("Unhooking MSRs currently not supported\n");
return -1;
}
-struct v3_msr_hook * v3_get_msr_hook(struct guest_info * info, uint_t msr) {
- struct v3_msr_map * msr_map = &(info->msr_map);
+struct v3_msr_hook * v3_get_msr_hook(struct v3_vm_info * vm, uint_t msr) {
+ struct v3_msr_map * msr_map = &(vm->msr_map);
struct v3_msr_hook * hook = NULL;
list_for_each_entry(hook, &(msr_map->hook_list), link) {
}
-void v3_refresh_msr_map(struct guest_info * info) {
- struct v3_msr_map * msr_map = &(info->msr_map);
+void v3_refresh_msr_map(struct v3_vm_info * vm) {
+ struct v3_msr_map * msr_map = &(vm->msr_map);
struct v3_msr_hook * hook = NULL;
if (msr_map->update_map == NULL) {
list_for_each_entry(hook, &(msr_map->hook_list), link) {
PrintDebug("updating MSR map for msr %d\n", hook->msr);
- msr_map->update_map(info, hook->msr,
+ msr_map->update_map(vm, hook->msr,
(hook->read == NULL) ? 0 : 1,
(hook->write == NULL) ? 0 : 1);
}
}
-void v3_print_msr_map(struct guest_info * info) {
- struct v3_msr_map * msr_map = &(info->msr_map);
+void v3_print_msr_map(struct v3_vm_info * vm) {
+ struct v3_msr_map * msr_map = &(vm->msr_map);
struct v3_msr_hook * hook = NULL;
list_for_each_entry(hook, &(msr_map->hook_list), link) {
INIT_LIST_HEAD(&(state->page_list));
#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
- if (info->enable_telemetry) {
+ if (info->vm_info->enable_telemetry) {
v3_add_telemetry_cb(info, telemetry_cb, NULL);
}
#endif
shadow_cr3->pcd = guest_cr3->pcd;
#ifdef CONFIG_SYMBIOTIC_SWAP
- v3_swap_flush(info);
+ v3_swap_flush(info->vm_info);
#endif
return 0;
pte32_t * shadow_pte = (pte32_t *)&(shadow_pt[PTE32_INDEX(fault_addr)]);
addr_t guest_pa = BASE_TO_PAGE_ADDR((addr_t)(guest_pte->page_base_addr)) + PAGE_OFFSET(fault_addr);
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_pa);
+ struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, guest_pa);
if (shdw_reg == NULL) {
// Inject a machine check in the guest
#ifdef CONFIG_SYMBIOTIC_SWAP_TELEMETRY
if (error_code.write == 0) {
- info->swap_state.read_faults++;
+ info->vm_info->swap_state.read_faults++;
} else {
- info->swap_state.write_faults++;
+ info->vm_info->swap_state.write_faults++;
}
#endif
- swp_pg_addr = v3_get_swapped_pg_addr(info, guest_pte);
+ swp_pg_addr = v3_get_swapped_pg_addr(info->vm_info, guest_pte);
if (swp_pg_addr != 0) {
PrintDebug("Swapped page address=%p\n", (void *)swp_pg_addr);
(error_code.user == 0) ) ) {
addr_t swp_pg_pa = 0;
- swp_pg_pa = v3_map_swp_page(info, shadow_pte, guest_pte, (void *)swp_pg_addr);
+ swp_pg_pa = v3_map_swp_page(info->vm_info, shadow_pte, guest_pte, (void *)swp_pg_addr);
PrintDebug("Page fault on swapped out page (vaddr=%p) (pte=%x) (error_code=%x)\n",
(void *)fault_addr, *(uint32_t *)guest_pte, *(uint32_t *)&error_code);
shadow_pte->page_base_addr = swp_pg_pa;
#ifdef CONFIG_SYMBIOTIC_SWAP_TELEMETRY
- info->swap_state.mapped_pages++;
+ info->vm_info->swap_state.mapped_pages++;
#endif
// PrintError("Swap fault handled\n");
return 0;
PrintDebug("Handling 4MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_fault_pa);
+ struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, guest_fault_pa);
if (shdw_reg == NULL) {
PrintDebug("Handling PTE fault\n");
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_pa);
+ struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, guest_pa);
PrintDebug("Handling 2MB fault (guest_fault_pa=%p) (error_code=%x)\n", (void *)guest_fault_pa, *(uint_t*)&error_code);
PrintDebug("ShadowPT=%p, LargeGuestPDE=%p\n", shadow_pt, large_guest_pde);
- struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info, guest_fault_pa);
+ struct v3_shadow_region * shdw_reg = v3_get_shadow_region(info->vm_info, guest_fault_pa);
if (shdw_reg == NULL) {
#define SYMCALL_GS_MSR 0x539
#define SYMCALL_FS_MSR 0x540
-
static int msr_read(uint_t msr, struct v3_msr * dst, void * priv_data) {
struct guest_info * info = (struct guest_info *)priv_data;
- struct v3_sym_state * state = &(info->sym_state);
+ struct v3_sym_state * state = &(info->vm_info->sym_state);
switch (msr) {
case SYM_PAGE_MSR:
dst->value = state->guest_pg_addr;
break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int symcall_msr_read(uint_t msr, struct v3_msr * dst, void * priv_data) {
+ struct guest_info * info = (struct guest_info *)priv_data;
+ struct v3_symcall_state * state = &(info->vm_info->sym_state.symcalls[info->cpu_id]);
+
+ switch (msr) {
case SYMCALL_RIP_MSR:
dst->value = state->sym_call_rip;
break;
static int msr_write(uint_t msr, struct v3_msr src, void * priv_data) {
struct guest_info * info = (struct guest_info *)priv_data;
- struct v3_sym_state * state = &(info->sym_state);
+ struct v3_sym_state * state = &(info->vm_info->sym_state);
if (msr == SYM_PAGE_MSR) {
PrintDebug("Symbiotic MSR write for page %p\n", (void *)src.value);
if (state->active == 1) {
// unmap page
- struct v3_shadow_region * old_reg = v3_get_shadow_region(info, (addr_t)state->guest_pg_addr);
+ struct v3_shadow_region * old_reg = v3_get_shadow_region(info->vm_info, (addr_t)state->guest_pg_addr);
if (old_reg == NULL) {
PrintError("Could not find previously active symbiotic page (%p)\n", (void *)state->guest_pg_addr);
return -1;
}
- v3_delete_shadow_region(info, old_reg);
+ v3_delete_shadow_region(info->vm_info, old_reg);
}
state->guest_pg_addr = src.value;
state->active = 1;
// map page
- v3_add_shadow_mem(info, (addr_t)state->guest_pg_addr,
+ v3_add_shadow_mem(info->vm_info, (addr_t)state->guest_pg_addr,
(addr_t)(state->guest_pg_addr + PAGE_SIZE_4KB - 1),
state->sym_page_pa);
-
-
- } else if (msr == SYMCALL_RIP_MSR) {
- state->sym_call_rip = src.value;
- } else if (msr == SYMCALL_RSP_MSR) {
- state->sym_call_rsp = src.value;
- } else if (msr == SYMCALL_CS_MSR) {
- state->sym_call_cs = src.value;
- } else if (msr == SYMCALL_GS_MSR) {
- state->sym_call_gs = src.value;
- } else if (msr == SYMCALL_FS_MSR) {
- state->sym_call_fs = src.value;
} else {
PrintError("Invalid Symbiotic MSR write (0x%x)\n", msr);
return -1;
return 0;
}
+
+static int symcall_msr_write(uint_t msr, struct v3_msr src, void * priv_data) {
+ struct guest_info * info = (struct guest_info *)priv_data;
+ struct v3_symcall_state * state = &(info->vm_info->sym_state.symcalls[info->cpu_id]);
+
+ switch (msr) {
+ case SYMCALL_RIP_MSR:
+ state->sym_call_rip = src.value;
+ break;
+ case SYMCALL_RSP_MSR:
+ state->sym_call_rsp = src.value;
+ break;
+ case SYMCALL_CS_MSR:
+ state->sym_call_cs = src.value;
+ break;
+ case SYMCALL_GS_MSR:
+ state->sym_call_gs = src.value;
+ break;
+ case SYMCALL_FS_MSR:
+ state->sym_call_fs = src.value;
+ break;
+ default:
+ PrintError("Invalid Symbiotic MSR write (0x%x)\n", msr);
+ return -1;
+ }
+ return 0;
+}
+
static int cpuid_fn(struct guest_info * info, uint32_t cpuid,
uint32_t * eax, uint32_t * ebx,
uint32_t * ecx, uint32_t * edx,
-int v3_init_sym_iface(struct guest_info * info) {
- struct v3_sym_state * state = &(info->sym_state);
+int v3_init_sym_iface(struct v3_vm_info * vm) {
+ struct v3_sym_state * state = &(vm->sym_state);
memset(state, 0, sizeof(struct v3_sym_state));
state->sym_page_pa = (addr_t)V3_AllocPages(1);
memcpy(&(state->sym_page->magic), "V3V", 3);
- v3_hook_msr(info, SYM_PAGE_MSR, msr_read, msr_write, info);
+ v3_hook_msr(vm, SYM_PAGE_MSR, msr_read, msr_write, info);
- v3_hook_cpuid(info, SYM_CPUID_NUM, cpuid_fn, info);
+ v3_hook_cpuid(vm, SYM_CPUID_NUM, cpuid_fn, info);
- v3_hook_msr(info, SYMCALL_RIP_MSR, msr_read, msr_write, info);
- v3_hook_msr(info, SYMCALL_RSP_MSR, msr_read, msr_write, info);
- v3_hook_msr(info, SYMCALL_CS_MSR, msr_read, msr_write, info);
- v3_hook_msr(info, SYMCALL_GS_MSR, msr_read, msr_write, info);
- v3_hook_msr(info, SYMCALL_FS_MSR, msr_read, msr_write, info);
+ v3_hook_msr(vm, SYMCALL_RIP_MSR, symcall_msr_read, symcall_msr_write, info);
+ v3_hook_msr(vm, SYMCALL_RSP_MSR, symcall_msr_read, symcall_msr_write, info);
+ v3_hook_msr(vm, SYMCALL_CS_MSR, symcall_msr_read, symcall_msr_write, info);
+ v3_hook_msr(vm, SYMCALL_GS_MSR, symcall_msr_read, symcall_msr_write, info);
+ v3_hook_msr(vm, SYMCALL_FS_MSR, symcall_msr_read, symcall_msr_write, info);
- v3_register_hypercall(info, SYM_CALL_RET_HCALL, sym_call_ret, NULL);
- v3_register_hypercall(info, SYM_CALL_ERR_HCALL, sym_call_err, NULL);
+ v3_register_hypercall(vm, SYM_CALL_RET_HCALL, sym_call_ret, NULL);
+ v3_register_hypercall(vm, SYM_CALL_ERR_HCALL, sym_call_err, NULL);
return 0;
}
-int v3_sym_map_pci_passthrough(struct guest_info * info, uint_t bus, uint_t dev, uint_t fn) {
- struct v3_sym_state * state = &(info->sym_state);
+int v3_sym_map_pci_passthrough(struct v3_vm_info * vm, uint_t bus, uint_t dev, uint_t fn) {
+ struct v3_sym_state * state = &(vm->sym_state);
uint_t dev_index = (bus << 8) + (dev << 3) + fn;
uint_t major = dev_index / 8;
uint_t minor = dev_index % 8;
return 0;
}
-int v3_sym_unmap_pci_passthrough(struct guest_info * info, uint_t bus, uint_t dev, uint_t fn) {
- struct v3_sym_state * state = &(info->sym_state);
+int v3_sym_unmap_pci_passthrough(struct v3_vm_info * vm, uint_t bus, uint_t dev, uint_t fn) {
+ struct v3_sym_state * state = &(vm->sym_state);
uint_t dev_index = (bus << 8) + (dev << 3) + fn;
uint_t major = dev_index / 8;
uint_t minor = dev_index % 8;
static int sym_call_err(struct guest_info * info, uint_t hcall_id, void * private_data) {
- struct v3_sym_state * state = (struct v3_sym_state *)&(info->sym_state);
+ struct v3_symcall_state * state = (struct v3_symcall_state *)&(info->vm_info->sym_state.symcalls[info->cpu_id]);
PrintError("sym call error\n");
}
static int sym_call_ret(struct guest_info * info, uint_t hcall_id, void * private_data) {
- struct v3_sym_state * state = (struct v3_sym_state *)&(info->sym_state);
+ struct v3_symcall_state * state = (struct v3_symcall_state *)&(info->vm_info->sym_state.symcalls[info->cpu_id]);
// PrintError("Return from sym call (ID=%x)\n", hcall_id);
// v3_print_guest_state(info);
}
static int execute_symcall(struct guest_info * info) {
+ struct v3_symcall_state * state = (struct v3_symcall_state *)&(info->vm_info->sym_state.symcalls[info->cpu_id]);
- while (info->sym_state.sym_call_returned == 0) {
+ while (state->sym_call_returned == 0) {
if (v3_vm_enter(info) == -1) {
PrintError("Error in Sym call\n");
return -1;
uint64_t call_num, sym_arg_t * arg0,
sym_arg_t * arg1, sym_arg_t * arg2,
sym_arg_t * arg3, sym_arg_t * arg4) {
- struct v3_sym_state * state = (struct v3_sym_state *)&(info->sym_state);
+ struct v3_sym_state * sym_state = (struct v3_sym_state *)&(info->vm_info->sym_state);
+ struct v3_symcall_state * state = (struct v3_symcall_state *)&(sym_state->symcalls[info->cpu_id]);
struct v3_sym_context * old_ctx = (struct v3_sym_context *)&(state->old_ctx);
struct v3_segment sym_cs;
struct v3_segment sym_ss;
// PrintDebug("Making Sym call\n");
// v3_print_guest_state(info);
- if ((state->sym_page->sym_call_enabled == 0) ||
+ if ((sym_state->sym_page->sym_call_enabled == 0) ||
(state->sym_call_active == 1)) {
return -1;
}
#ifdef CONFIG_SYMBIOTIC_SWAP_TELEMETRY
static void telemetry_cb(struct guest_info * info, void * private_data, char * hdr) {
- struct v3_sym_swap_state * swap_state = &(info->swap_state);
+ struct v3_sym_swap_state * swap_state = &(info->vm_info->swap_state);
V3_Print("%sSymbiotic Swap:\n", hdr);
V3_Print("%s\tRead faults=%d\n", hdr, swap_state->read_faults);
#endif
-int v3_init_sym_swap(struct guest_info * info) {
- struct v3_sym_swap_state * swap_state = &(info->swap_state);
+int v3_init_sym_swap(struct v3_vm_info * vm) {
+ struct v3_sym_swap_state * swap_state = &(vm->swap_state);
memset(swap_state, 0, sizeof(struct v3_sym_swap_state));
swap_state->shdw_ptr_ht = v3_create_htable(0, swap_hash_fn, swap_eq_fn);
#ifdef CONFIG_SYMBIOTIC_SWAP_TELEMETRY
- if (info->enable_telemetry) {
+ if (vm->enable_telemetry) {
- v3_add_telemetry_cb(info, telemetry_cb, NULL);
+ v3_add_telemetry_cb(vm, telemetry_cb, NULL);
}
#endif
}
-int v3_register_swap_disk(struct guest_info * info, int dev_index,
+int v3_register_swap_disk(struct v3_vm_info * vm, int dev_index,
struct v3_swap_ops * ops, void * private_data) {
- struct v3_sym_swap_state * swap_state = &(info->swap_state);
+ struct v3_sym_swap_state * swap_state = &(vm->swap_state);
swap_state->devs[dev_index].present = 1;
swap_state->devs[dev_index].private_data = private_data;
-int v3_swap_in_notify(struct guest_info * info, int pg_index, int dev_index) {
+int v3_swap_in_notify(struct v3_vm_info * vm, int pg_index, int dev_index) {
struct list_head * shdw_ptr_list = NULL;
- struct v3_sym_swap_state * swap_state = &(info->swap_state);
+ struct v3_sym_swap_state * swap_state = &(vm->swap_state);
struct shadow_pointer * tmp_shdw_ptr = NULL;
struct shadow_pointer * shdw_ptr = NULL;
struct swap_pte guest_pte = {0, dev_index, pg_index};
-int v3_swap_flush(struct guest_info * info) {
- struct v3_sym_swap_state * swap_state = &(info->swap_state);
+int v3_swap_flush(struct v3_vm_info * vm) {
+ struct v3_sym_swap_state * swap_state = &(vm->swap_state);
struct hashtable_iter * ht_iter = v3_create_htable_iter(swap_state->shdw_ptr_ht);
// PrintDebug("Flushing Symbiotic Swap table\n");
-addr_t v3_get_swapped_pg_addr(struct guest_info * info, pte32_t * guest_pte) {
- struct v3_sym_swap_state * swap_state = &(info->swap_state);
+addr_t v3_get_swapped_pg_addr(struct v3_vm_info * vm, pte32_t * guest_pte) {
+ struct v3_sym_swap_state * swap_state = &(vm->swap_state);
int dev_index = get_dev_index(guest_pte);
struct v3_swap_dev * swp_dev = &(swap_state->devs[dev_index]);
}
-addr_t v3_map_swp_page(struct guest_info * info, pte32_t * shadow_pte, pte32_t * guest_pte, void * swp_page_ptr) {
+addr_t v3_map_swp_page(struct v3_vm_info * vm, pte32_t * shadow_pte, pte32_t * guest_pte, void * swp_page_ptr) {
struct list_head * shdw_ptr_list = NULL;
- struct v3_sym_swap_state * swap_state = &(info->swap_state);
+ struct v3_sym_swap_state * swap_state = &(vm->swap_state);
struct shadow_pointer * shdw_ptr = NULL;
#ifdef CONFIG_TELEMETRY_GRANULARITY
#define DEFAULT_GRANULARITY CONFIG_TELEMETRY_GRANULARITY
#else
-#define DEFAULT_GRANULARITY 50000
+#define DEFAULT_GRANULARITY 50000000
#endif
V3_Print("%stelemetry window tsc cnt: %d\n", hdr_buf, (uint32_t)(invoke_tsc - telemetry->prev_tsc));
+ /*
// Exit Telemetry
{
struct exit_event * evt = NULL;
(uint32_t)(evt->handler_time / evt->cnt));
} while ((node = v3_rb_next(node)));
}
-
+ */
// Registered callbacks
{
-void v3_update_time(struct guest_info * info, ullong_t cycles) {
+void v3_update_time(struct guest_info * info, uint64_t cycles) {
struct vm_timer * tmp_timer;
-
+
+ // cycles *= 8;
+
+// cycles /= 150;
+
info->time_state.guest_tsc += cycles;
list_for_each_entry(tmp_timer, &(info->time_state.timers), timer_link) {
- tmp_timer->ops->update_time(cycles, info->time_state.cpu_freq, tmp_timer->private_data);
+ tmp_timer->ops->update_time(info, cycles, info->time_state.cpu_freq, tmp_timer->private_data);
}
/********** Setup and VMX Control Fields from MSR ***********/
/* Setup IO map */
- v3_init_vmx_io_map(info);
- v3_init_vmx_msr_map(info);
+ /***** THESE NEED TO BE MOVED TO A GLOBAL LOCATION ***/
+ v3_init_vmx_io_map(info->vm_info);
+ v3_init_vmx_msr_map(info->vm_info);
+ /**** ****/
struct v3_msr tmp_msr;
vmx_state->pri_proc_ctrls.use_msr_bitmap = 1;
vmx_state->pri_proc_ctrls.pause_exit = 1;
- vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(info->io_map.arch_data));
+ vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(info->vm_info->io_map.arch_data));
vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_B_ADDR,
- (addr_t)V3_PAddr(info->io_map.arch_data) + PAGE_SIZE_4KB);
+ (addr_t)V3_PAddr(info->vm_info->io_map.arch_data) + PAGE_SIZE_4KB);
- vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(info->msr_map.arch_data));
+
+ vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(info->vm_info->msr_map.arch_data));
v3_get_msr(VMX_EXIT_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
vmx_state->exit_ctrls.value = tmp_msr.lo;
check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
- if ((info->intr_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
+ if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
#ifdef CONFIG_DEBUG_INTERRUPTS
PrintDebug("Calling v3_injecting_intr\n");
#endif
- info->intr_state.irq_started = 0;
- v3_injecting_intr(info, info->intr_state.irq_vector, V3_EXTERNAL_IRQ);
+ info->intr_core_state.irq_started = 0;
+ v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
}
return 0;
static int update_irq_entry_state(struct guest_info * info) {
struct vmx_exit_idt_vec_info idt_vec_info;
- struct vmcs_interrupt_state intr_state;
+ struct vmcs_interrupt_state intr_core_state;
struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
- check_vmcs_read(VMCS_GUEST_INT_STATE, &(intr_state));
+ check_vmcs_read(VMCS_GUEST_INT_STATE, &(intr_core_state));
/* Check for pending exceptions to inject */
if (v3_excp_pending(info)) {
v3_injecting_excp(info, int_info.vector);
} else if ((((struct rflags *)&(info->ctrl_regs.rflags))->intr == 1) &&
- (intr_state.val == 0)) {
+ (intr_core_state.val == 0)) {
- if ((info->intr_state.irq_started == 1) && (idt_vec_info.valid == 1)) {
+ if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 1)) {
#ifdef CONFIG_DEBUG_INTERRUPTS
PrintDebug("IRQ pending from previous injection\n");
switch (v3_intr_pending(info)) {
case V3_EXTERNAL_IRQ: {
- info->intr_state.irq_vector = v3_get_intr(info);
- ent_int.vector = info->intr_state.irq_vector;
+ info->intr_core_state.irq_vector = v3_get_intr(info);
+ ent_int.vector = info->intr_core_state.irq_vector;
ent_int.type = 0;
ent_int.error_code = 0;
ent_int.valid = 1;
#ifdef CONFIG_DEBUG_INTERRUPTS
PrintDebug("Injecting Interrupt %d at exit %u(EIP=%p)\n",
- info->intr_state.irq_vector,
+ info->intr_core_state.irq_vector,
(uint32_t)info->num_exits,
(void *)info->rip);
#endif
check_vmcs_write(VMCS_ENTRY_INT_INFO, ent_int.value);
- info->intr_state.irq_started = 1;
+ info->intr_core_state.irq_started = 1;
break;
}
#ifdef CONFIG_SYMBIOTIC
- if (info->sym_state.sym_call_active == 0) {
+ if (info->vm_info->sym_state.symcalls[info->cpu_id].sym_call_active == 0) {
update_irq_entry_state(info);
}
#else
rdtscll(info->time_state.cached_host_tsc);
- if (info->run_state == VM_STOPPED) {
- info->run_state = VM_RUNNING;
+ if (info->vm_info->run_state == VM_STOPPED) {
+ info->vm_info->run_state = VM_RUNNING;
ret = v3_vmx_launch(&(info->vm_regs), info, &(info->ctrl_regs));
} else {
ret = v3_vmx_resume(&(info->vm_regs), info, &(info->ctrl_regs));
#ifdef CONFIG_SYMBIOTIC
- if (info->sym_state.sym_call_active == 0) {
+ if (info->vm_info->sym_state.symcalls[info->cpu_id].sym_call_active == 0) {
update_irq_exit_state(info);
}
#else
v3_print_vmcs();
*/
#ifdef CONFIG_TELEMETRY
- if (info->enable_telemetry) {
+ if (info->vm_info->enable_telemetry) {
v3_telemetry_start_exit(info);
}
#endif
}
#ifdef CONFIG_TELEMETRY
- if (info->enable_telemetry) {
+ if (info->vm_info->enable_telemetry) {
v3_telemetry_end_exit(info, exit_info->exit_reason);
}
#endif
/* Same as SVM */
-static int update_map(struct guest_info * info, uint16_t port, int hook_read, int hook_write) {
- uchar_t * bitmap = (uint8_t *)(info->io_map.arch_data);
+static int update_map(struct v3_vm_info * vm, uint16_t port, int hook_read, int hook_write) {
+ uchar_t * bitmap = (uint8_t *)(vm->io_map.arch_data);
int major = port / 8;
int minor = port % 8;
return 0;
}
-int v3_init_vmx_io_map(struct guest_info * info) {
- info->io_map.update_map = update_map;
+int v3_init_vmx_io_map(struct v3_vm_info * vm) {
+ vm->io_map.update_map = update_map;
- info->io_map.arch_data = V3_VAddr(V3_AllocPages(2));
- memset(info->io_map.arch_data, 0, PAGE_SIZE_4KB * 2);
+ vm->io_map.arch_data = V3_VAddr(V3_AllocPages(2));
+ memset(vm->io_map.arch_data, 0, PAGE_SIZE_4KB * 2);
- v3_refresh_io_map(info);
+ v3_refresh_io_map(vm);
return 0;
}
-int v3_handle_vmx_io_in(struct guest_info * info, struct vmx_exit_info * exit_info) {
+int v3_handle_vmx_io_in(struct guest_info * core, struct vmx_exit_info * exit_info) {
struct vmx_exit_io_qual io_qual = *(struct vmx_exit_io_qual *)&(exit_info->exit_qual);;
struct v3_io_hook * hook = NULL;
int read_size = 0;
- hook = v3_get_io_hook(info, io_qual.port);
+ hook = v3_get_io_hook(core->vm_info, io_qual.port);
if (hook == NULL) {
PrintError("Hook not present for IN on port %x\n", io_qual.port);
PrintDebug("IN of %d bytes on port %d (0x%x)\n", read_size, io_qual.port, io_qual.port);
- if (hook->read(io_qual.port, &(info->vm_regs.rax), read_size, hook->priv_data) != read_size) {
+ if (hook->read(core, io_qual.port, &(core->vm_regs.rax), read_size, hook->priv_data) != read_size) {
PrintError("Read failure for IN on port %x\n", io_qual.port);
return -1;
}
- info->rip += exit_info->instr_len;
+ core->rip += exit_info->instr_len;
return 0;
}
-int v3_handle_vmx_io_ins(struct guest_info * info, struct vmx_exit_info * exit_info) {
+int v3_handle_vmx_io_ins(struct guest_info * core, struct vmx_exit_info * exit_info) {
struct vmx_exit_io_qual io_qual = *(struct vmx_exit_io_qual *)&(exit_info->exit_qual);;
struct v3_io_hook * hook = NULL;
int read_size = 0;
addr_t host_addr = 0;
int rdi_change = 0;
ulong_t rep_num = 1;
- struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
+ struct rflags * flags = (struct rflags *)&(core->ctrl_regs.rflags);
- hook = v3_get_io_hook(info, io_qual.port);
+ hook = v3_get_io_hook(core->vm_info, io_qual.port);
if (hook == NULL) {
PrintError("Hook not present for INS on port 0x%x\n", io_qual.port);
struct vmx_exit_io_instr_info instr_info = *(struct vmx_exit_io_instr_info *)&(exit_info->instr_info);
if (instr_info.addr_size == 0) {
- rep_num = info->vm_regs.rcx & 0xffff;
+ rep_num = core->vm_regs.rcx & 0xffff;
} else if(instr_info.addr_size == 1) {
- rep_num = info->vm_regs.rcx & 0xffffffff;
+ rep_num = core->vm_regs.rcx & 0xffffffff;
} else if(instr_info.addr_size == 2) {
- rep_num = info->vm_regs.rcx & 0xffffffffffffffffLL;
+ rep_num = core->vm_regs.rcx & 0xffffffffffffffffLL;
} else {
PrintDebug("Unknown INS address size!\n");
return -1;
- if (guest_va_to_host_va(info, guest_va, &host_addr) == -1) {
+ if (guest_va_to_host_va(core, guest_va, &host_addr) == -1) {
PrintError("Could not convert Guest VA to host VA\n");
return -1;
}
do {
- if (hook->read(io_qual.port, (char *)host_addr, read_size, hook->priv_data) != read_size) {
+ if (hook->read(core, io_qual.port, (char *)host_addr, read_size, hook->priv_data) != read_size) {
PrintError("Read Failure for INS on port 0x%x\n", io_qual.port);
return -1;
}
host_addr += rdi_change;
- info->vm_regs.rdi += rdi_change;
+ core->vm_regs.rdi += rdi_change;
if (io_qual.rep) {
- info->vm_regs.rcx--;
+ core->vm_regs.rcx--;
}
} while (--rep_num > 0);
- info->rip += exit_info->instr_len;
+ core->rip += exit_info->instr_len;
return 0;
}
-int v3_handle_vmx_io_out(struct guest_info * info, struct vmx_exit_info * exit_info) {
+int v3_handle_vmx_io_out(struct guest_info * core, struct vmx_exit_info * exit_info) {
struct vmx_exit_io_qual io_qual = *(struct vmx_exit_io_qual *)&(exit_info->exit_qual);
struct v3_io_hook * hook = NULL;
int write_size = 0;
- hook = v3_get_io_hook(info, io_qual.port);
+ hook = v3_get_io_hook(core->vm_info, io_qual.port);
if (hook == NULL) {
PrintError("Hook not present for out on port %x\n", io_qual.port);
PrintDebug("OUT of %d bytes on port %d (0x%x)\n", write_size, io_qual.port, io_qual.port);
- if (hook->write(io_qual.port, &(info->vm_regs.rax), write_size, hook->priv_data) != write_size) {
+ if (hook->write(core, io_qual.port, &(core->vm_regs.rax), write_size, hook->priv_data) != write_size) {
PrintError("Write failure for out on port %x\n",io_qual.port);
return -1;
}
- info->rip += exit_info->instr_len;
+ core->rip += exit_info->instr_len;
return 0;
}
/*
 * NOTE(review): unified-diff fragment; context lines are elided (write_size,
 * rsi_change, and guest_va setup -- which presumably uses 'flags' for the
 * direction flag -- are not visible), so the body below is incomplete as
 * plain C.
 *
 * v3_handle_vmx_io_outs - handle a VM exit for a (possibly REP-prefixed)
 * OUTS string instruction: compute the REP count from RCX per the address
 * size, translate the guest source buffer, and feed each element through the
 * port's write hook. Returns 0 on success, -1 on failure.
 *
 * NOTE(review): the error strings in the else-branch and in the write loop
 * still say "INS" ("Unknown INS address size!", "Read failure for INS") --
 * copy-paste from the INS handler; they cannot be corrected here without
 * altering patch context lines, but should be fixed in the source tree.
 */
-int v3_handle_vmx_io_outs(struct guest_info * info, struct vmx_exit_info * exit_info) {
+int v3_handle_vmx_io_outs(struct guest_info * core, struct vmx_exit_info * exit_info) {
struct vmx_exit_io_qual io_qual = *(struct vmx_exit_io_qual *)&(exit_info->exit_qual);
struct v3_io_hook * hook = NULL;
int write_size;
addr_t host_addr;
int rsi_change;
ulong_t rep_num = 1;
/* RFLAGS view -- presumably consulted for DF to pick the stride sign in the
 * elided setup code; TODO confirm. */
- struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
+ struct rflags * flags = (struct rflags *)&(core->ctrl_regs.rflags);
/* Refactor: per-VM hook table, accessed through core->vm_info. */
- hook = v3_get_io_hook(info, io_qual.port);
+ hook = v3_get_io_hook(core->vm_info, io_qual.port);
if (hook == NULL) {
PrintError("Hook not present for OUTS on port 0x%x\n", io_qual.port);
/* instr_info encodes the address size used to mask the REP count below. */
struct vmx_exit_io_instr_info instr_info = *(struct vmx_exit_io_instr_info *)&(exit_info->instr_info);
/* addr_size: 0 = 16-bit (CX), 1 = 32-bit (ECX), 2 = 64-bit (RCX). */
if (instr_info.addr_size == 0) {
- rep_num = info->vm_regs.rcx & 0xffff;
+ rep_num = core->vm_regs.rcx & 0xffff;
} else if(instr_info.addr_size == 1) {
- rep_num = info->vm_regs.rcx & 0xffffffff;
+ rep_num = core->vm_regs.rcx & 0xffffffff;
} else if(instr_info.addr_size == 2) {
- rep_num = info->vm_regs.rcx & 0xffffffffffffffffLL;
+ rep_num = core->vm_regs.rcx & 0xffffffffffffffffLL;
} else {
PrintDebug("Unknown INS address size!\n");
return -1;
PrintDebug("OUTS size=%d for %ld steps\n", write_size, rep_num);
/* Translate the guest source (DS:[E/R]SI) to a host pointer before looping. */
- if (guest_va_to_host_va(info, guest_va, &host_addr) == -1) {
+ if (guest_va_to_host_va(core, guest_va, &host_addr) == -1) {
PrintError("Could not convert guest VA to host VA\n");
return -1;
}
do {
/* Hook write callbacks now receive the issuing core; a short write fails. */
- if (hook->write(io_qual.port, (char *)host_addr, write_size, hook->priv_data) != write_size) {
+ if (hook->write(core, io_qual.port, (char *)host_addr, write_size, hook->priv_data) != write_size) {
PrintError("Read failure for INS on port 0x%x\n", io_qual.port);
return -1;
}
/* Advance the host cursor and the guest's RSI by the per-element stride. */
host_addr += rsi_change;
- info->vm_regs.rsi += rsi_change;
+ core->vm_regs.rsi += rsi_change;
/* Only a REP-prefixed OUTS decrements RCX per iteration. */
if (io_qual.rep) {
- --info->vm_regs.rcx;
+ --core->vm_regs.rcx;
}
} while (--rep_num > 0);
/* Skip past the emulated instruction. */
- info->rip += exit_info->instr_len;
+ core->rip += exit_info->instr_len;
return 0;
}
}
/* Same logic as the SVM MSR-map implementation: only the bitmap layout details differ per architecture. */
/*
 * NOTE(review): unified-diff fragment; the 'minor' computation and the lines
 * that set the read/write intercept bits (using read_val/write_val) are in
 * elided context, so the body below is incomplete as plain C.
 *
 * update_map - set or clear the VMX MSR-bitmap intercept bits for one MSR.
 * Refactor: operates on VM-wide state (struct v3_vm_info) rather than a
 * single core, since the MSR bitmap is shared by all cores.
 */
-static int update_map(struct guest_info * info, uint_t msr, int hook_reads, int hook_writes) {
+static int update_map(struct v3_vm_info * vm, uint_t msr, int hook_reads, int hook_writes) {
/* Flat bit index of this MSR within the 4KB bitmap page. */
int index = get_bitmap_index(msr);
uint_t major = index / 8;      /* byte offset into the bitmap */
uchar_t mask = 0x1;
/* 1 = intercept the access, 0 = let the guest access pass through. */
uint8_t read_val = (hook_reads) ? 0x1 : 0x0;
uint8_t write_val = (hook_writes) ? 0x1 : 0x0;
- uint8_t * bitmap = (uint8_t *)(info->msr_map.arch_data);
+ uint8_t * bitmap = (uint8_t *)(vm->msr_map.arch_data);
/* Clear the bit first; the elided lines presumably OR in read_val/write_val
 * at the appropriate positions -- TODO confirm. */
*(bitmap + major) &= ~(mask << minor);
return 0;
}
/*
 * NOTE(review): unified-diff fragment ('-' old / '+' new).
 *
 * v3_init_vmx_msr_map - initialize the VM-wide VMX MSR permission bitmap:
 * install the update_map callback, allocate and zero one 4KB bitmap page
 * (zero = no intercepts), then re-apply any already-registered MSR hooks.
 * Refactor: takes the VM (struct v3_vm_info) instead of a single core.
 * Always returns 0; the V3_AllocPages result is not checked here.
 */
-int v3_init_vmx_msr_map(struct guest_info * info) {
- struct v3_msr_map * msr_map = &(info->msr_map);
+int v3_init_vmx_msr_map(struct v3_vm_info * vm) {
+ struct v3_msr_map * msr_map = &(vm->msr_map);
msr_map->update_map = update_map;
/* One 4KB page for the MSR bitmap, mapped into the host address space. */
msr_map->arch_data = V3_VAddr(V3_AllocPages(1));
memset(msr_map->arch_data, 0, PAGE_SIZE_4KB);
/* Re-sync the bitmap with hooks registered before this init ran. */
- v3_refresh_msr_map(info);
+ v3_refresh_msr_map(vm);
return 0;
}