endif
-ifdef CONFIG_LINUX
+ifdef V3_CONFIG_LINUX
DEFAULT_EXTRA_TARGETS=linux_module
else
DEFAULT_EXTRA_TARGETS=
all: palacios $(DEFAULT_EXTRA_TARGETS)
-ifdef CONFIG_LINUX
+ifdef V3_CONFIG_LINUX
CFLAGS += -mcmodel=kernel
else
CFLAGS += -fPIC
endif
-ifdef CONFIG_FRAME_POINTER
+ifdef V3_CONFIG_FRAME_POINTER
CFLAGS += -fno-omit-frame-pointer $(call cc-option,-fno-optimize-sibling-calls,)
else
CFLAGS += -fomit-frame-pointer
endif
-ifdef CONFIG_UNWIND_INFO
+ifdef V3_CONFIG_UNWIND_INFO
CFLAGS += -fasynchronous-unwind-tables
endif
-ifdef CONFIG_DEBUG_INFO
+ifdef V3_CONFIG_DEBUG_INFO
CFLAGS += -g
else
CFLAGS += -O
linux_module/v3vee.ko: linux_module/*.c linux_module/*.h libv3vee.a
- cd linux_module/ && make CONFIG_LINUX_KERN=$(CONFIG_LINUX_KERN)
+ cd linux_module/ && make
cp linux_module/v3vee.ko v3vee.ko
# Modules
/ %/: prepare scripts FORCE
- $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(V3_CONFIG_MODULES),1) \
$(build)=$(build-dir)
%.ko: prepare scripts FORCE
- $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(V3_CONFIG_MODULES),1) \
$(build)=$(build-dir) $(@:.ko=.o)
$(Q)$(MAKE) -rR -f $(srctree)/scripts/Makefile.modpost
+include $(PWD)/../.config
-LDFLAGS += --whole-archive --script=$(PWD)/link.cmd
+LDFLAGS += --whole-archive --script=$(PWD)/ld.cmd
-EXTRA_CFLAGS += -I$(PWD)/../palacios/include/ -DMODULE=1 -D__KERNEL__=1
+ifdef V3_CONFIG_SYMMOD
+LDFLAGS += --script=$(PWD)/ld.symmod.cmd
+endif
+
+EXTRA_CFLAGS += -I$(PWD)/../palacios/include/ -include autoconf.h -DMODULE=1 -D__KERNEL__=1
-v3vee-objs:= palacios.o \
+v3vee-objs := palacios.o \
palacios-dev.o \
palacios-vm.o \
- palacios-file.o \
- palacios-console.o \
palacios-mm.o \
- palacios-serial.o \
- palacios-stream.o \
- palacios-queue.o \
- palacios-ringbuffer.o \
- palacios-debugfs.o
+ palacios-queue.o
+
+ifdef V3_CONFIG_CONSOLE
+ v3vee-objs += palacios-console.o
+endif
+ifdef V3_CONFIG_FILE
+ v3vee-objs += palacios-file.o
+endif
-ifdef CONFIG_PALACIOS_VNET
- v3vee-objs += palacios-vnet.o
+ifdef V3_CONFIG_STREAM
+ v3vee-objs += palacios-stream.o \
+ palacios-ringbuffer.o
endif
-ifdef CONFIG_PALACIOS_PACKET
- v3vee-objs += palacios-packet.o
+
+ifdef V3_CONFIG_EXT_INSPECTOR
+ v3vee-objs += palacios-inspector.o
endif
-ifdef CONFIG_PALACIOS_SOCKET
- v3vee-objs += palacios-socket.o
+
+ifdef V3_CONFIG_VNET
+ v3vee-objs += palacios-vnet.o
+endif
+
+ifdef V3_CONFIG_PACKET
+ v3vee-objs += palacios-packet.o
+endif
+
+ifdef V3_CONFIG_SOCKET
+ v3vee-objs += palacios-socket.o
endif
v3vee-objs += ../libv3vee.a
all:
- $(MAKE) -C $(CONFIG_LINUX_KERN) M=$(PWD) modules
+ $(MAKE) -C $(V3_CONFIG_LINUX_KERN) M=$(PWD) modules
clean:
- $(MAKE) -C $(CONFIG_LINUX_KERN) M=$(PWD) clean
+ $(MAKE) -C $(V3_CONFIG_LINUX_KERN) M=$(PWD) clean
}
-/* _v3_capsules :
- {
- __start__v3_capsules = .;
- *(_v3_capsules);
- __stop__v3_capsules = .;
- }
-*/
_v3_shdw_pg_impls :
{
__start__v3_shdw_pg_impls = .;
--- /dev/null
+SECTIONS
+{
+ _v3_capsules :
+ {
+ __start__v3_capsules = .;
+ *(_v3_capsules);
+ __stop__v3_capsules = .;
+ }
+}
+
+++ /dev/null
-/*
- * DebugFS interface
- * (c) Jack Lange, 2011
- */
-
-#include "palacios.h"
-
-int palacios_init_debugfs( void );
-int palacios_deinit_debugfs( void );
-
-
-
-int dfs_register_vm(struct v3_guest * guest);
-
#include "palacios-vnet.h"
#include "palacios-packet.h"
-#ifdef CONFIG_DEBUG_FS
-#include "palacios-debugfs.h"
+#ifdef V3_CONFIG_EXT_INSPECTOR
+#include "palacios-inspector.h"
#endif
MODULE_LICENSE("GPL");
palacios_vmm_init();
+#ifdef V3_CONFIG_STREAM
palacios_init_stream();
+#endif
+
+#ifdef V3_CONFIG_FILE
palacios_file_init();
- palacios_init_console();
+#endif
+#ifdef V3_CONFIG_CONSOLE
+ palacios_init_console();
+#endif
-#ifdef CONFIG_DEBUG_FS
- palacios_init_debugfs();
+#ifdef V3_CONFIG_EXT_INSPECTOR
+ palacios_init_inspector();
#endif
-#ifdef CONFIG_PALACIOS_SOCKET
+#ifdef V3_CONFIG_SOCKET
palacios_socket_init();
#endif
-#ifdef CONFIG_PALACIOS_PACKET
+#ifdef V3_CONFIG_PACKET
palacios_init_packet(NULL);
#endif
-#ifdef CONFIG_PALACIOS_VNET
+#ifdef V3_CONFIG_VNET
palacios_init_vnet();
#endif
-#ifdef CONFIG_DEBUG_FS
- palacios_deinit_debugfs();
+#ifdef V3_CONFIG_EXT_INSPECTOR
+ palacios_deinit_inspector();
#endif
+#ifdef V3_CONFIG_FILE
palacios_file_deinit();
+#endif
+
+#ifdef V3_CONFIG_STREAM
palacios_deinit_stream();
+#endif
palacios_deinit_mm();
struct dentry * v3_dir = NULL;
-int palacios_init_debugfs( void ) {
+int palacios_init_inspector( void ) {
v3_dir = debugfs_create_dir("v3vee", NULL);
}
-int palacios_deinit_debugfs( void ) {
+int palacios_deinit_inspector( void ) {
debugfs_remove(v3_dir);
return 0;
}
}
-int dfs_register_vm(struct v3_guest * guest) {
+int inspect_vm(struct v3_guest * guest) {
v3_inspect_node_t * root = v3_get_inspection_root(guest->v3_ctx);
+ struct dentry * guest_dir = NULL;
+
if (root == NULL) {
printk("No inspection root found\n");
return -1;
}
- dfs_register_tree(v3_dir, root);
+ guest_dir = debugfs_create_dir(guest->name, v3_dir);
+
+ if (IS_ERR(guest_dir)) {
+ printk("Error Creating inspector tree for VM \"%s\"\n", guest->name);
+ return -1;
+ }
+
+ dfs_register_tree(guest_dir, root);
return 0;
}
--- /dev/null
+/*
+ * DebugFS interface
+ * (c) Jack Lange, 2011
+ */
+
+#include "palacios.h"
+
+int palacios_init_inspector( void );
+int palacios_deinit_inspector( void );
+
+
+
+int inspect_vm(struct v3_guest * guest);
+
+++ /dev/null
-/*
- * VM Serial Controls
- * (c) Lei Xia, 2010
- */
-
-#include <linux/device.h>
-#include <linux/cdev.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/poll.h>
-#include <linux/anon_inodes.h>
-#include <linux/file.h>
-
-#include <palacios/vmm.h>
-#include <palacios/vmm_host_events.h>
-
-#include "palacios.h"
-#include "palacios-stream.h"
-
-
-void
-send_serial_input_to_palacios( unsigned char *input,
- unsigned int len,
- struct v3_vm_info * vm ) {
- struct v3_serial_event event;
-
- if (len > 128) {
- len = 128;
- }
-
- memcpy(event.data, input, len);
- event.len = len;
-
- v3_deliver_serial_event(vm, &event);
-}
-
-static ssize_t
-serial_read(struct file * filp, char __user * buf, size_t size, loff_t * offset) {
-
- int len = 0;
- char temp[128];
- struct stream_buffer * stream = filp->private_data;
-
- memset(temp, 0, 128);
-
- if (size > 128) {
- size = 128;
- }
-
- len = stream_dequeue(stream, temp, size);
-
- if (copy_to_user(buf, temp, len)) {
- printk("Read fault\n");
- return -EFAULT;
- }
-
- printk("Returning %d bytes\n", len);
-
- return len;
-}
-
-static ssize_t
-serial_write(struct file * filp, const char __user * buf, size_t size, loff_t * offset) {
- char temp[128];
- struct stream_buffer * stream = filp->private_data;
- struct v3_vm_info * vm;
-
- memset(temp, 0, 128);
-
- if (size > 128) {
- size = 128;
- }
-
- if (copy_from_user(temp, buf, size)) {
- printk("Write fault\n");
- return -EFAULT;
- }
-
- vm = stream->guest->v3_ctx;
- send_serial_input_to_palacios(temp, size, vm);
-
- return size;
-}
-
-
-static unsigned int
-serial_poll(struct file * filp, struct poll_table_struct * poll_tb) {
- unsigned int mask = 0;
- struct stream_buffer *stream = filp->private_data;
-
- poll_wait(filp, &(stream->intr_queue), poll_tb);
-
- if(stream_datalen(stream) > 0){
- mask = POLLIN | POLLRDNORM;
- }
-
- printk("polling v3 serial\n");
-
- return mask;
-}
-
-static struct file_operations v3_cons_fops = {
- .read = serial_read,
- .write = serial_write,
- .poll = serial_poll,
-};
-
-
-int open_serial(char * name) {
- int cons_fd;
- void *stream;
-
- printk("open path: %s\n", name);
-
- stream = find_stream_by_name(NULL, name);
-
- if (stream == NULL) {
- return -1;
- }
-
- cons_fd = anon_inode_getfd("v3-cons", &v3_cons_fops, stream, 0);
-
- if (cons_fd < 0) {
- printk("Error creating serial inode\n");
- return cons_fd;
- }
-
- printk("Returning new serial fd\n");
-
- return cons_fd;
-}
static struct list_head global_streams;
-int stream_enqueue(struct stream_buffer * stream, char * buf, int len) {
+static int stream_enqueue(struct stream_buffer * stream, char * buf, int len) {
int bytes = 0;
bytes = ringbuf_write(stream->buf, buf, len);
}
+int open_stream(const char * name) {
+ return -1;
+}
+
+
+
struct stream_buffer * find_stream_by_name(struct v3_guest * guest, const char * name) {
struct stream_buffer * stream = NULL;
struct list_head * stream_list = NULL;
void palacios_init_stream(void);
void palacios_deinit_stream(void);
-int stream_enqueue(struct stream_buffer * stream, char * buf, int len);
+
int stream_dequeue(struct stream_buffer * stream, char * buf, int len);
int stream_datalen(struct stream_buffer * stream);
struct stream_buffer * find_stream_by_name(struct v3_guest * guest, const char * name);
+int open_stream(const char * name);
#endif
#include <linux/file.h>
#include <linux/spinlock.h>
-#ifdef CONFIG_DEBUG_FS
-#include "palacios-debugfs.h"
-#endif
#include <palacios/vmm.h>
#include "palacios.h"
-#include "palacios-console.h"
-#include "palacios-serial.h"
#include "palacios-vm.h"
+#ifdef V3_CONFIG_STREAM
+#include "palacios-stream.h"
+#endif
+
+#ifdef V3_CONFIG_CONSOLE
+#include "palacios-console.h"
+#endif
+
+#ifdef V3_CONFIG_EXT_INSPECTOR
+#include "palacios-inspector.h"
+#endif
+
+
extern struct class * v3_class;
#define STREAM_NAME_LEN 128
static long v3_vm_ioctl(struct file * filp,
unsigned int ioctl, unsigned long arg) {
- void __user * argp = (void __user *)arg;
- char path_name[STREAM_NAME_LEN];
struct v3_guest * guest = filp->private_data;
switch (ioctl) {
+ case V3_VM_STOP: {
+ printk("Stopping VM\n");
+ stop_palacios_vm(guest);
+ break;
+ }
+
case V3_VM_CONSOLE_CONNECT: {
+#ifdef V3_CONFIG_CONSOLE
return connect_console(guest);
+#else
+ printk("Console support not available\n");
+ return -EFAULT;
+#endif
break;
}
- case V3_VM_SERIAL_CONNECT: {
+ case V3_VM_STREAM_CONNECT: {
+#ifdef V3_CONFIG_STREAM
+ void __user * argp = (void __user *)arg;
+ char path_name[STREAM_NAME_LEN];
+
if (copy_from_user(path_name, argp, STREAM_NAME_LEN)) {
- printk("copy from user error getting guest image...\n");
+ printk("%s(%d): copy from user error...\n", __FILE__, __LINE__);
return -EFAULT;
}
- return open_serial(path_name);
- break;
- }
- case V3_VM_STOP: {
- printk("Stopping VM\n");
- stop_palacios_vm(guest);
+ return open_stream(path_name);
+#else
+ printk("Stream support Not available\n");
+ return -EFAULT;
+#endif
break;
}
+
default:
printk("\tUnhandled\n");
return -EINVAL;
-#if CONFIG_DEBUG_FS
- dfs_register_vm(guest);
+#ifdef V3_CONFIG_EXT_INSPECTOR
+ inspect_vm(guest);
#endif
#include <linux/sched.h>
#include <linux/slab.h>
+#ifdef V3_CONFIG_CONSOLE
#include "palacios-console.h"
+#endif
/* Global Control IOCTLs */
#define V3_START_GUEST 10
/* VM Specific IOCTLs */
#define V3_VM_CONSOLE_CONNECT 20
-#define V3_VM_SERIAL_CONNECT 21
+#define V3_VM_STREAM_CONNECT 21
#define V3_VM_STOP 22
struct v3_guest_img {
struct list_head streams;
struct list_head sockets;
+#ifdef V3_CONFIG_CONSOLE
struct palacios_console console;
+#endif
struct completion start_done;
struct completion thread_done;
-#ifdef CONFIG_TELEMETRY
+#ifdef V3_CONFIG_TELEMETRY
#include <palacios/vmm_telemetry.h>
#endif
-#ifdef CONFIG_SYMBIOTIC
+#ifdef V3_CONFIG_SYMBIOTIC
#include <palacios/vmm_symbiotic.h>
struct v3_sym_core_state;
#endif
uint64_t num_exits;
-#ifdef CONFIG_TELEMETRY
+#ifdef V3_CONFIG_TELEMETRY
struct v3_core_telemetry core_telem;
#endif
void * decoder_state;
-#ifdef CONFIG_SYMBIOTIC
+#ifdef V3_CONFIG_SYMBIOTIC
/* Symbiotic state */
struct v3_sym_core_state sym_core_state;
#endif
uint32_t mem_align;
struct v3_mem_map mem_map;
- v3_paging_size_t paging_size; // for nested paging
-
struct v3_mem_hooks mem_hooks;
struct v3_shdw_impl_state shdw_impl;
struct v3_extensions extensions;
-#ifdef CONFIG_SYMBIOTIC
+#ifdef V3_CONFIG_SYMBIOTIC
/* Symbiotic state */
struct v3_sym_vm_state sym_vm_state;
#endif
-#ifdef CONFIG_TELEMETRY
+#ifdef V3_CONFIG_TELEMETRY
uint_t enable_telemetry;
struct v3_telemetry_state telemetry;
#endif
typedef enum {
+ /* 16 bit control field */
+ VMCS_VPID = 0x00000000,
+ /* 16 bit guest state */
VMCS_GUEST_ES_SELECTOR = 0x00000800,
VMCS_GUEST_CS_SELECTOR = 0x00000802,
VMCS_GUEST_SS_SELECTOR = 0x00000804,
VMCS_VAPIC_ADDR_HIGH = 0x00002013,
VMCS_APIC_ACCESS_ADDR = 0x00002014,
VMCS_APIC_ACCESS_ADDR_HIGH = 0x00002015,
+ VMCS_EPT_PTR = 0x0000201A,
+ VMCS_EPT_PTR_HIGH = 0x0000201B,
+ /* 64 bit read only data field */
+ VMCS_GUEST_PHYS_ADDR = 0x00002400,
+ VMCS_GUEST_PHYS_ADDR_HIGH = 0x00002401,
/* 64 bit guest state fields */
VMCS_LINK_PTR = 0x00002800,
VMCS_LINK_PTR_HIGH = 0x00002801,
- VMCS_GUEST_DBG_CTL = 0x00002802,
- VMCS_GUEST_DBG_CTL_HIGH = 0x00002803,
+ VMCS_GUEST_DBG_CTL = 0x00002802,
+ VMCS_GUEST_DBG_CTL_HIGH = 0x00002803,
+ VMCS_GUEST_PAT = 0x00002804,
+ VMCS_GUEST_PAT_HIGH = 0x00002805,
VMCS_GUEST_EFER = 0x00002806,
VMCS_GUEST_EFER_HIGH = 0x00002807,
VMCS_GUEST_PERF_GLOBAL_CTRL = 0x00002808,
VMCS_GUEST_PERF_GLOBAL_CTRL_HIGH = 0x00002809,
-
+ VMCS_GUEST_PDPTE0 = 0x0000280A,
+ VMCS_GUEST_PDPTE0_HIGH = 0x0000280B,
+ VMCS_GUEST_PDPTE1 = 0x0000280C,
+ VMCS_GUEST_PDPTE1_HIGH = 0x0000280D,
+ VMCS_GUEST_PDPTE2 = 0x0000280E,
+ VMCS_GUEST_PDPTE2_HIGH = 0x0000280F,
+ VMCS_GUEST_PDPTE3 = 0x00002810,
+ VMCS_GUEST_PDPTE3_HIGH = 0x00002811,
+ /* 64 bit host state fields */
+ VMCS_HOST_PAT = 0x00002c00,
+ VMCS_HOST_PAT_HIGH = 0x00002c01,
+ VMCS_HOST_EFER = 0x00002c02,
+ VMCS_HOST_EFER_HIGH = 0x00002c03,
VMCS_HOST_PERF_GLOBAL_CTRL = 0x00002c04,
VMCS_HOST_PERF_GLOBAL_CTRL_HIGH = 0x00002c05,
/* 32 bit control fields */
VMCS_ENTRY_INSTR_LEN = 0x0000401A,
VMCS_TPR_THRESHOLD = 0x0000401C,
VMCS_SEC_PROC_CTRLS = 0x0000401e,
+ VMCS_PLE_GAP = 0x00004020,
+ VMCS_PLE_WINDOW = 0x00004022,
/* 32 bit Read Only data fields */
VMCS_INSTR_ERR = 0x00004400,
VMCS_EXIT_REASON = 0x00004402,
VMCS_IDT_VECTOR_INFO = 0x00004408,
VMCS_IDT_VECTOR_ERR = 0x0000440A,
VMCS_EXIT_INSTR_LEN = 0x0000440C,
- VMCS_EXIT_INSTR_INFO = 0x0000440E,
+ VMCS_EXIT_INSTR_INFO = 0x0000440E,
/* 32 bit Guest state fields */
VMCS_GUEST_ES_LIMIT = 0x00004800,
VMCS_GUEST_CS_LIMIT = 0x00004802,
VMCS_GUEST_ACTIVITY_STATE = 0x00004826,
VMCS_GUEST_SMBASE = 0x00004828,
VMCS_GUEST_SYSENTER_CS = 0x0000482A,
+ VMCS_PREEMPT_TIMER = 0x0000482E,
/* 32 bit host state field */
VMCS_HOST_SYSENTER_CS = 0x00004C00,
/* Natural Width Control Fields */
+struct vmx_intr_state {
+ union {
+ uint32_t value;
+ struct {
+ uint32_t block_sti : 1;
+ uint32_t block_mov_ss : 1;
+ uint32_t block_smi : 1;
+ uint32_t block_nmi : 1;
+ uint32_t rsvd : 28;
+ } __attribute__((packed));
+ } __attribute__((packed));
+} __attribute__((packed));
+
+
+struct vmx_pending_dbg_excps {
+ union {
+ uint64_t value;
+
+ struct {
+ uint32_t lo;
+ uint32_t hi;
+ } __attribute__((packed));
+
+ struct {
+ uint64_t b0 : 1;
+ uint64_t b1 : 1;
+ uint64_t b2 : 1;
+ uint64_t b3 : 1;
+ uint64_t rsvd1 : 8;
+ uint64_t bp_set : 1;
+ uint64_t rsvd2 : 1;
+ uint64_t bp_ss : 1;
+ uint64_t rsvd3 : 49;
+ } __attribute__((packed));
+ } __attribute__((packed));
+} __attribute__((packed));
/* Segment Selector Access Rights (32 bits) */
/* INTEL Manual: 20-4 vol 3B */
};
+struct vmcs_msr_entry {
+ uint32_t index;
+ uint32_t rsvd;
+ uint32_t lo;
+ uint32_t hi;
+} __attribute__((packed));
+
+
struct vmcs_interrupt_state {
union {
uint32_t val;
})
-#ifdef CONFIG_MULTITHREAD_OS
-
-#define V3_CREATE_THREAD(fn, arg, name) ({ \
- void * thread = NULL; \
- extern struct v3_os_hooks * os_hooks; \
- if ((os_hooks) && (os_hooks)->start_kernel_thread) { \
- thread = (os_hooks)->start_kernel_thread(fn, arg, name); \
- } \
- thread; \
- })
-
-
-#define V3_THREAD_SLEEP() \
- do{ \
- extern struct v3_os_hooks * os_hooks; \
- if ((os_hooks) && (os_hooks)->kernel_thread_sleep) { \
- (os_hooks)->kernel_thread_sleep(); \
- } \
- }while(0)
+#ifdef V3_CONFIG_MULTITHREAD_OS
+#define V3_CREATE_THREAD(fn, arg, name) \
+ do { \
+ extern struct v3_os_hooks * os_hooks; \
+ if ((os_hooks) && (os_hooks)->start_kernel_thread) { \
+ (os_hooks)->start_kernel_thread(fn, arg, name); \
+ } \
+    } while (0)
-#define V3_THREAD_WAKEUP(thread) \
- do{ \
- extern struct v3_os_hooks * os_hooks; \
- if ((os_hooks) && (os_hooks)->kernel_thread_wakeup) { \
- (os_hooks)->kernel_thread_wakeup(thread); \
- } \
- }while(0)
// Maybe make this a define....
-typedef enum v3_cpu_arch {V3_INVALID_CPU, V3_SVM_CPU, V3_SVM_REV3_CPU, V3_VMX_CPU, V3_VMX_EPT_CPU} v3_cpu_arch_t;
+typedef enum v3_cpu_arch {V3_INVALID_CPU, V3_SVM_CPU, V3_SVM_REV3_CPU, V3_VMX_CPU, V3_VMX_EPT_CPU, V3_VMX_EPT_UG_CPU} v3_cpu_arch_t;
v3_cpu_mode_t v3_get_host_cpu_mode();
void v3_print_cond(const char * fmt, ...);
-#ifdef CONFIG_MULTITHREAD_OS
+#ifdef V3_CONFIG_MULTITHREAD_OS
void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector);
#endif
- void * (*start_kernel_thread)(int (*fn)(void * arg), void * arg, char * thread_name);
- void (*kernel_thread_sleep)(void);
- void (*kernel_thread_wakeup)(void * thread);
+ void (*start_kernel_thread)(int (*fn)(void * arg), void * arg, char * thread_name);
void (*interrupt_cpu)(struct v3_vm_info * vm, int logical_cpu, int vector);
void (*call_on_cpu)(int logical_cpu, void (*fn)(void * arg), void * arg);
void * (*start_thread_on_cpu)(int cpu_id, int (*fn)(void * arg), void * arg, char * thread_name);
#include <util/vmm_lock.h>
struct v3_barrier {
-
-
+ uint64_t cpus;
int active; // If 1, barrier is active, everyone must wait
// If 0, barrier is clear, can proceed
};
+int v3_init_barrier(struct v3_barrier * barrier);
+
+int v3_activate_barrier(struct guest_info * core);
+int v3_check_barrier(struct guest_info * core);
#endif
int V3_deinit_devices();
-#ifdef CONFIG_KEYED_STREAMS
+#ifdef V3_CONFIG_KEYED_STREAMS
#include <interfaces/vmm_keyed_stream.h>
#endif
struct v3_device_ops {
int (*free)(void * private_data);
-#ifdef CONFIG_KEYED_STREAMS
+#ifdef V3_CONFIG_KEYED_STREAMS
int (*checkpoint)(struct vm_device *dev, v3_keyed_stream_t stream);
int (*restore)(struct vm_device *dev, v3_keyed_stream_t stream);
#endif
struct v3_dev_net_ops {
/* Backend implemented functions */
- int (*send)(uint8_t * buf, uint32_t count, void * private_data);
+ int (*send)(uint8_t * buf, uint32_t len, int synchronize, void * private_data);
/* Frontend implemented functions */
- int (*recv)(uint8_t * buf, uint32_t count, void * frnt_data);
- void (*poll)(struct v3_vm_info * vm, int budget, void * frnt_data);
+ int (*recv)(uint8_t * buf, uint32_t len, void * frnt_data);
/* This is ugly... */
void * frontend_data;
#define ETHERNET_PACKET_LEN (ETHERNET_HEADER_LEN + ETHERNET_MTU)
#define ETH_ALEN 6
+#define MIN_MTU 68
+//#define MAX_MTU 65535
+#define MAX_MTU 9000
+
+#define MAX_PACKET_LEN (ETHERNET_HEADER_LEN + MAX_MTU)
+
+
+extern int v3_net_debug;
#ifdef __V3VEE__
#include <palacios/vmm.h>
+#define V3_Net_Print(level, fmt, args...) \
+ do { \
+ if(level <= v3_net_debug) { \
+ extern struct v3_os_hooks * os_hooks; \
+ if ((os_hooks) && (os_hooks)->print) { \
+ (os_hooks)->print((fmt), ##args); \
+ } \
+ } \
+ } while (0)
+
struct nic_statistics {
- uint32_t tx_pkts;
+ uint64_t tx_pkts;
uint64_t tx_bytes;
- uint32_t tx_dropped;
+ uint64_t tx_dropped;
- uint32_t rx_pkts;
+ uint64_t rx_pkts;
uint64_t rx_bytes;
- uint32_t rx_dropped;
+ uint64_t rx_dropped;
- uint32_t interrupts;
+ uint32_t tx_interrupts;
+ uint32_t rx_interrupts;
};
static inline int is_multicast_ethaddr(const uint8_t * addr)
#include <palacios/vmm_types.h>
-
/* .... Giant fucking switch tables */
struct v3_ctrl_regs * crs = &(core->ctrl_regs);
- PrintDebug("\t Ctrl regs %d\n", reg_code);
+// PrintDebug("\t Ctrl regs %d\n", reg_code);
switch (reg_code) {
case 0:
+++ /dev/null
-/*
- * This file is part of the Palacios Virtual Machine Monitor developed
- * by the V3VEE Project with funding from the United States National
- * Science Foundation and the Department of Energy.
- *
- * The V3VEE Project is a joint project between Northwestern University
- * and the University of New Mexico. You can find out more at
- * http://www.v3vee.org
- *
- * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
- * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
- * All rights reserved.
- *
- * Author: Chang Bae <c.s.bae@u.northwestern.edu>
- *
- * This is free software. You are permitted to use,
- * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
- */
-
-#ifndef __VMM_INSTRUMENT_H__
-#define __VMM_INSTRUMENT_H__
-
-#ifdef __V3VEE__
-
-#ifdef CONFIG_INSTRUMENT_VMM
-
-#include <palacios/vmm_types.h>
-#include <palacios/vmm_ringbuffer.h>
-
-
-void v3_init_instrumentation() __attribute__((__no_instrument_function__));
-
-#endif // INSTRUMENT_VMM
-
-#endif // __V3VEE__
-
-#endif //
-
-
+++ /dev/null
-/*
- * This file is part of the Palacios Virtual Machine Monitor developed
- * by the V3VEE Project with funding from the United States National
- * Science Foundation and the Department of Energy.
- *
- * The V3VEE Project is a joint project between Northwestern University
- * and the University of New Mexico. You can find out more at
- * http://www.v3vee.org
- *
- * Copyright (c) 2010, Peter Dinda <pdinda@cs.northwestern.edu>
- * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
- * All rights reserved.
- *
- * Author: Peter Dinda <pdinda@cs.northwestern.edu>
- *
- * This is free software. You are permitted to use,
- * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
- */
-
-#ifndef __VMM_MPTABLE_H__
-#define __VMM_MPTABLE_H__
-
-/*
- This module is responsible for injecting an appropriate description of
- the multicore guest into the the guest's memory in the form
- of an Intel Multiprocessor Specification-compatible MP table.
-
- The guest BIOS must cooperate in having preallocated space for the table
-*/
-
-#include <palacios/vm_guest.h>
-#include <palacios/vmm_mem.h>
-#include <palacios/vmm_types.h>
-
-// Note that this must be run *after* the rombios has been mapped in
-// AND the rombios needs to be COPIED in so that we can edit it
-int v3_inject_mptable(struct v3_vm_info *vm);
-
-#endif
void PrintPTEntry(struct guest_info * info, page_type_t type, addr_t vaddr, void * entry);
-void PrintHostPageTables(struct guest_info * info, addr_t cr3);
+void PrintHostPageTables(struct guest_info * info, v3_cpu_mode_t cpu_mode, addr_t cr3);
void PrintGuestPageTables(struct guest_info * info, addr_t cr3);
void PrintHostPageTree(struct guest_info * info, addr_t virtual_addr, addr_t cr3);
void PrintGuestPageTree(struct guest_info * info, addr_t virtual_addr, addr_t cr3);
void * local_impl_data;
-#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
+#ifdef V3_CONFIG_SHADOW_PAGING_TELEMETRY
uint_t guest_faults;
#endif
#include <palacios/vmm_symspy.h>
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
#include <palacios/vmm_symcall.h>
#endif
-#ifdef CONFIG_SYMMOD
+#ifdef V3_CONFIG_SYMMOD
#include <palacios/vmm_symmod.h>
#endif
struct v3_sym_vm_state {
struct v3_symspy_global_state symspy_state;
-#ifdef CONFIG_SYMMOD
+#ifdef V3_CONFIG_SYMMOD
struct v3_symmod_state symmod_state;
#endif
};
struct v3_sym_core_state {
struct v3_symspy_local_state symspy_state;
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
struct v3_symcall_state symcall_state;
#endif
#ifdef __V3VEE__
-#ifdef CONFIG_TELEMETRY
+#ifdef V3_CONFIG_TELEMETRY
#include <palacios/vmm_rbtree.h>
#include <palacios/vmm_list.h>
// Returns *monotonic* guest time.
static inline uint64_t v3_get_guest_time(struct vm_time *t) {
-#ifdef CONFIG_TIME_HIDE_VM_COST
+#ifdef V3_CONFIG_TIME_HIDE_VM_COST
V3_ASSERT(t->exit_time);
return t->exit_time + t->guest_host_offset;
#else
typedef enum {VM_RUNNING, VM_STOPPED, VM_SUSPENDED, VM_ERROR, VM_EMULATING} v3_vm_operating_mode_t;
typedef enum {CORE_RUNNING, CORE_STOPPED} v3_core_operating_mode_t;
-typedef enum {PAGING_4KB, PAGING_2MB} v3_paging_size_t;
-
typedef enum {REAL, /*UNREAL,*/ PROTECTED, PROTECTED_PAE, LONG, LONG_32_COMPAT, LONG_16_COMPAT} v3_cpu_mode_t;
typedef enum {PHYSICAL_MEM, VIRTUAL_MEM} v3_mem_mode_t;
* redistribute, and modify it as specified in the file "V3VEE_LICENSE".
*/
-#ifndef __VNET_H__
-#define __VNET_H__
+#ifndef __VNET_CORE_H__
+#define __VNET_CORE_H__
#include <palacios/vmm.h>
#include <palacios/vmm_ethernet.h>
#define VNET_HASH_SIZE 17
-//routing table entry
+extern int v3_vnet_debug;
+
struct v3_vnet_route {
uint8_t src_mac[ETH_ALEN];
uint8_t dst_mac[ETH_ALEN];
uint8_t type,
void * priv_data);
int v3_vnet_add_route(struct v3_vnet_route route);
-int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data);
+int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data, int synchronize);
int v3_vnet_find_dev(uint8_t * mac);
int v3_vnet_stat(struct vnet_stat * stats);
int (*input)(struct v3_vm_info * vm,
struct v3_vnet_pkt * pkt,
void * dev_data);
- void (*poll) (struct v3_vm_info * vm, int budget, void * dev_data);
};
int v3_init_vnet(void);
void v3_deinit_vnet(void);
-void v3_vnet_do_poll(struct v3_vm_info * vm);
-
int v3_vnet_add_dev(struct v3_vm_info * info, uint8_t * mac,
struct v3_vnet_dev_ops * ops,
void * priv_data);
int v3_vnet_del_dev(int dev_id);
+
#endif
#endif
* and the University of New Mexico. You can find out more at
* http://www.v3vee.org
*
- * Copyright (c) 2008, Peter Dinda <pdinda@northwestern.edu>
- * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
- * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
+ * Copyright (c) 2011, Jack Lange <jarusl@cs.northwestern.edu>
+ * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
* All rights reserved.
*
- * Author: Peter Dinda <pdinda@northwestern.edu>
* Author: Jack Lange <jarusl@cs.northwestern.edu>
*
* This is free software. You are permitted to use,
#define VMM_ERROR 3
-
-
-
struct vmx_pin_ctrls {
union {
uint32_t value;
uint_t enable_rdtscp : 1;
uint_t virt_x2apic : 1;
uint_t enable_vpid : 1;
- uint_t unrstrct_guest : 1;
+ uint_t wbinvd_exit : 1;
+ uint_t unrstrct_guest : 1; /* un restricted guest (CAN RUN IN REAL MODE) */
uint_t rsvd1 : 2;
uint_t pause_loop_exit : 1;
uint_t rsvd2 : 21;
uint_t zero4 : 5;
uint_t rsvd2 : 19;
#endif
-}__attribute__((packed));
+} __attribute__((packed));
struct vmcs_host_state {
struct v3_segment gdtr;
-
-
struct vmx_data {
vmx_state_t state;
vmxassist_state_t assist_state;
struct vmcs_host_state host_state;
- addr_t vmcs_ptr_phys;
- uint8_t ia32e_avail;
+
+ addr_t vmcs_ptr_phys;
v3_reg_t guest_cr4; /// corresponds to the CR4 Read shadow
struct vmx_sec_proc_ctrls sec_proc_ctrls;
struct vmx_exit_ctrls exit_ctrls;
struct vmx_entry_ctrls entry_ctrls;
+
+ struct vmx_exception_bitmap excp_bmap;
+
+ void * msr_area;
};
int v3_is_vmx_capable();
-/*
- * vmx_assist.h: Context definitions for the VMXASSIST world switch.
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
*
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
+ * Copyright (c) 2011, Jack Lange <jarusl@cs.northwestern.edu>
+ * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * Author: Jack Lange <jarusl@cs.northwestern.edu>
*
- * Leendert van Doorn, leendert@watson.ibm.com
- * Copyright (c) 2005, International Business Machines Corporation.
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
*/
#ifndef _VMX_ASSIST_H_
#ifdef __V3VEE__
#include <palacios/vm_guest.h>
+#include <palacios/vmx.h>
-#define VMXASSIST_BASE 0xD0000
-#define VMXASSIST_MAGIC 0x17101966
+#define VMXASSIST_GDT 0x10000
+#define VMXASSIST_TSS 0x40000
+#define VMXASSIST_START 0xd0000
+#define VMXASSIST_1to1_PT 0xde000 // We'll shove this at the end, and pray to god VMXASSIST doesn't mess with it
-struct vmx_assist_header {
- uint64_t rsvd; // 8 bytes of nothing
- uint32_t magic;
- uint32_t new_ctx_gpa;
- uint32_t old_ctx_gpa;
-} __attribute__((packed));
-
-
-union vmcs_arbytes {
- struct arbyte_fields {
- unsigned int seg_type : 4,
- s : 1,
- dpl : 2,
- p : 1,
- reserved0 : 4,
- avl : 1,
- reserved1 : 1,
- default_ops_size: 1,
- g : 1,
- null_bit : 1,
- reserved2 : 15;
- } __attribute__((packed)) fields;
- unsigned int bytes;
-} __attribute__((packed));
-
-struct vmx_assist_segment {
- uint32_t sel;
- uint32_t limit;
- uint32_t base;
- union vmcs_arbytes arbytes;
-} __attribute__((packed));
-
-/*
- * World switch state
- */
-struct vmx_assist_context {
- uint32_t eip; /* execution pointer */
- uint32_t esp; /* stack pointer */
- uint32_t eflags; /* flags register */
- uint32_t cr0;
- uint32_t cr3; /* page table directory */
- uint32_t cr4;
-
- uint32_t idtr_limit; /* idt */
- uint32_t idtr_base;
-
- uint32_t gdtr_limit; /* gdt */
- uint32_t gdtr_base;
-
- struct vmx_assist_segment cs;
- struct vmx_assist_segment ds;
- struct vmx_assist_segment es;
- struct vmx_assist_segment ss;
- struct vmx_assist_segment fs;
- struct vmx_assist_segment gs;
- struct vmx_assist_segment tr;
- struct vmx_assist_segment ldtr;
-
-
- unsigned char rm_irqbase[2];
-} __attribute__((packed));
-
-typedef struct vmx_assist_context vmx_assist_context_t;
int v3_vmxassist_ctx_switch(struct guest_info * info);
-
+int v3_vmxassist_init(struct guest_info * core, struct vmx_data * vmx_state);
#endif
#endif /* _VMX_ASSIST_H_ */
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
#ifdef __V3VEE__
+#include <palacios/vmx_hw_info.h>
+
/* The actual format of these data structures is specified as being machine
dependent. Thus the lengths of the base address fields are defined as variable.
To be safe we assume the maximum(?) size fields
+
+ From Intel Manual...
+ N is the physical-address width supported by the logical processor. Software can determine a processor's
+ physical-address width by executing CPUID with 80000008H in EAX. The physical address
+ width is returned in bits 7:0 of EAX.
*/
+struct ept_exit_qual {
+ union {
+ uint64_t value;
+ struct {
+ uint64_t rd_op : 1;
+ uint64_t wr_op : 1;
+ uint64_t ifetch : 1;
+ uint64_t present : 1;
+ uint64_t write : 1;
+ uint64_t exec : 1;
+ uint64_t rsvd1 : 1;
+ uint64_t addr_valid : 1;
+ uint64_t addr_type : 1;
+ uint64_t rsvd2 : 1;
+ uint64_t nmi_unblock : 1;
+ uint64_t rsvd3 : 53;
+ } __attribute__((packed));
+ } __attribute__((packed));
+} __attribute__((packed));
+
+
+
+
typedef struct vmx_eptp {
- uint8_t psmt : 3;
- uint8_t pwl1 : 3;
- uint8_t rsvd1 : 6;
- uint64_t pml_base_addr : 39;
+ uint64_t psmt : 3; /* (0=UC, 6=WB) */
+ uint64_t pwl1 : 3; /* 1 less than EPT page-walk length (?)*/
+ uint64_t rsvd1 : 6;
+ uint64_t pml_base_addr : 39;
uint16_t rsvd2 : 13;
} __attribute__((packed)) vmx_eptp_t;
-typedef struct vmx_pml4 {
- uint8_t read : 1;
- uint8_t write : 1;
- uint8_t exec : 1;
- uint8_t rsvd1 : 5;
- uint8_t ignore1 : 4;
+typedef struct ept_pml4 {
+ uint64_t read : 1;
+ uint64_t write : 1;
+ uint64_t exec : 1;
+ uint64_t rsvd1 : 5;
+ uint64_t ignore1 : 4;
uint64_t pdp_base_addr : 39;
- uint8_t rsvd2 : 1;
- uint32_t ignore2 : 12;
-} __attribute__((packed)) vmx_pml4_t;
-
-
-typedef struct vmx_pdp_1GB {
- uint8_t read : 1;
- uint8_t write : 1;
- uint8_t exec : 1;
- uint8_t mt : 3;
- uint8_t ipat : 1;
- uint8_t large_page : 1;
- uint8_t ignore1 : 4;
- uint32_t rsvd1 : 18;
- uint32_t page_base_addr : 21;
- uint8_t rsvd2 : 1;
- uint32_t ignore2 : 12;
-} __attribute__((packed)) vmx_pdp_1GB_t;
-
-typedef struct vmx_pdp {
- uint8_t read : 1;
- uint8_t write : 1;
- uint8_t exec : 1;
- uint8_t rsvd1 : 4;
- uint8_t large_page : 1;
- uint8_t ignore1 : 4;
- uint32_t page_base_addr : 39;
- uint8_t rsvd2 : 1;
- uint32_t ignore2 : 12;
-} __attribute__((packed)) vmx_pdp_t;
-
-
-typedef struct vmx_pde_2MB {
- uint8_t read : 1;
- uint8_t write : 1;
- uint8_t exec : 1;
- uint8_t mt : 3;
- uint8_t ipat : 1;
- uint8_t large_page : 1;
- uint8_t ignore1 : 4;
- uint32_t rsvd1 : 9;
- uint32_t page_base_addr : 30;
- uint8_t rsvd2 : 1;
- uint32_t ignore2 : 12;
-} __attribute__((packed)) vmx_pde_2MB_t;
-
-
-typedef struct vmx_pde {
- uint8_t read : 1;
- uint8_t write : 1;
- uint8_t exec : 1;
- uint8_t rsvd1 : 4;
- uint8_t large_page : 1;
- uint8_t ignore1 : 4;
- uint32_t page_base_addr : 39;
- uint8_t rsvd2 : 1;
- uint32_t ignore2 : 12;
-} __attribute__((packed)) vmx_pde_t;
-
-
-
-typedef struct vmx_pte {
- uint8_t read : 1;
- uint8_t write : 1;
- uint8_t exec : 1;
- uint8_t mt : 3;
- uint8_t ipat : 1;
- uint8_t ignore1 : 5;
- uint32_t page_base_addr : 39;
- uint8_t rsvd2 : 1;
- uint32_t ignore2 : 12;
-} __attribute__((packed)) vmx_pte_t;
+ uint64_t rsvd2 : 1;
+ uint64_t ignore2 : 12;
+} __attribute__((packed)) ept_pml4_t;
+
+
+typedef struct ept_pdp_1GB {
+ uint64_t read : 1;
+ uint64_t write : 1;
+ uint64_t exec : 1;
+ uint64_t mt : 3;
+ uint64_t ipat : 1;
+ uint64_t large_page : 1;
+ uint64_t ignore1 : 4;
+ uint64_t rsvd1 : 18;
+ uint64_t page_base_addr : 21;
+ uint64_t rsvd2 : 1;
+ uint64_t ignore2 : 12;
+} __attribute__((packed)) ept_pdp_1GB_t;
+
+typedef struct ept_pdp {
+ uint64_t read : 1;
+ uint64_t write : 1;
+ uint64_t exec : 1;
+ uint64_t rsvd1 : 4;
+ uint64_t large_page : 1;
+ uint64_t ignore1 : 4;
+ uint64_t pd_base_addr : 39;
+ uint64_t rsvd2 : 1;
+ uint64_t ignore2 : 12;
+} __attribute__((packed)) ept_pdp_t;
+
+
+typedef struct ept_pde_2MB {
+ uint64_t read : 1;
+ uint64_t write : 1;
+ uint64_t exec : 1;
+ uint64_t mt : 3;
+ uint64_t ipat : 1;
+ uint64_t large_page : 1;
+ uint64_t ignore1 : 4;
+ uint64_t rsvd1 : 9;
+ uint64_t page_base_addr : 30;
+ uint64_t rsvd2 : 1;
+ uint64_t ignore2 : 12;
+} __attribute__((packed)) ept_pde_2MB_t;
+
+
+typedef struct ept_pde {
+ uint64_t read : 1;
+ uint64_t write : 1;
+ uint64_t exec : 1;
+ uint64_t rsvd1 : 4;
+ uint64_t large_page : 1;
+ uint64_t ignore1 : 4;
+ uint64_t pt_base_addr : 39;
+ uint64_t rsvd2 : 1;
+ uint64_t ignore2 : 12;
+} __attribute__((packed)) ept_pde_t;
+
+
+
+typedef struct ept_pte {
+ uint64_t read : 1;
+ uint64_t write : 1;
+ uint64_t exec : 1;
+ uint64_t mt : 3;
+ uint64_t ipat : 1;
+ uint64_t ignore1 : 5;
+ uint64_t page_base_addr : 39;
+ uint64_t rsvd2 : 1;
+ uint64_t ignore2 : 12;
+} __attribute__((packed)) ept_pte_t;
+
+int v3_init_ept(struct guest_info * core, struct vmx_hw_info * hw_info);
+int v3_handle_ept_fault(struct guest_info * core, addr_t fault_addr, struct ept_exit_qual * ept_qual);
+
#endif
VMEXIT_IO_INSTR = 30,
VMEXIT_RDMSR = 31,
VMEXIT_WRMSR = 32,
- VMEXIT_ENTRY_FAIL_INVALID_GUEST_STATE = 33,
- VMEXIT_ENTRY_FAIL_MSR_LOAD = 34,
+ VMEXIT_INVALID_GUEST_STATE = 33,
+ VMEXIT_INVALID_MSR_LOAD = 34,
VMEXIT_MWAIT = 36,
VMEXIT_MONITOR = 39,
VMEXIT_PAUSE = 40,
- VMEXIT_ENTRY_FAILURE_MACHINE_CHECK = 41,
+ VMEXIT_INVALID_MACHINE_CHECK = 41,
VMEXIT_TPR_BELOW_THRESHOLD = 43,
VMEXIT_APIC = 44,
VMEXIT_GDTR_IDTR = 46,
} __attribute__((packed));
-struct VMExitTSQual {
+ struct VMExitTSQual {
uint32_t selector : 16; // selector of destination TSS
uint32_t rsvd : 14; // reserved to 0
uint32_t src : 2; // (0: CALL ; 1: IRET ; 2: JMP ; 3: Task gate in IDT)
+struct vmx_basic_exit_info {
+ union {
+ uint32_t value;
+ struct {
+ uint16_t reason;
+ uint16_t rsvd1 :12;
+ uint8_t mtf_pending : 1;
+ uint8_t vmx_root_op : 1;
+ uint8_t rsvd2 : 1;
+ uint8_t entry_error : 1;
+ } __attribute__((packed));
+ } __attribute__((packed));
+} __attribute__((packed));
+
struct vmx_exit_info {
uint32_t instr_len;
uint32_t int_err;
addr_t guest_linear_addr;
+
+ /* EPT INFO */
+ addr_t ept_fault_addr;
+
};
} __attribute__((packed));
struct { uint32_t revision;
- uint32_t regionSize : 13;
- uint8_t rsvd1 : 3; /* Always 0 */
- uint8_t physWidth : 1; /* VMCS address field widths
+ uint64_t regionSize : 13;
+ uint64_t rsvd1 : 3; /* Always 0 */
+ uint64_t physWidth : 1; /* VMCS address field widths
(1=32bits, 0=natural width) */
- uint8_t smm : 1;
- uint8_t memType : 4; /* 0 = UC, 6 = WriteBack */
- uint8_t io_str_info : 1;
- uint8_t def1_maybe_0 : 1; /* 1="Any VMX ctrls that default to 1 may be cleared to 0" */
- uint32_t rsvd2 : 8; /* Always 0 */
+ uint64_t smm : 1;
+ uint64_t memType : 4; /* 0 = UC, 6 = WriteBack */
+ uint64_t io_str_info : 1;
+ uint64_t def1_maybe_0 : 1; /* 1="Any VMX ctrls that default to 1 may be cleared to 0" */
+ uint64_t rsvd2 : 8; /* Always 0 */
} __attribute__((packed));
} __attribute__((packed));
} __attribute__((packed));
} __attribute__((packed));
struct {
- uint8_t tsc_multiple : 5; /* Bit position in TSC field that drives vmx timer step */
- uint8_t exits_store_LMA : 1;
- uint8_t can_halt : 1;
- uint8_t can_shtdown : 1;
- uint8_t can_wait_for_sipi : 1;
- uint8_t rsvd1 : 7;
- uint16_t num_cr3_targets : 9;
- uint8_t max_msr_cache_size : 3; /* (512 * (max_msr_cache_size + 1)) == max msr load/store list size */
- uint8_t SMM_ctrl_avail : 1;
- uint8_t rsvd2 : 3;
- uint32_t MSEG_rev_id;
+ uint64_t tsc_multiple : 5; /* Bit position in TSC field that drives vmx timer step */
+ uint64_t exits_store_LMA : 1;
+ uint64_t can_halt : 1;
+ uint64_t can_shtdown : 1;
+ uint64_t can_wait_for_sipi : 1;
+ uint64_t rsvd1 : 7;
+ uint64_t num_cr3_targets : 9;
+ uint64_t max_msr_cache_size : 3; /* (512 * (max_msr_cache_size + 1)) == max msr load/store list size */
+ uint64_t SMM_ctrl_avail : 1;
+ uint64_t rsvd2 : 3;
+ uint64_t MSEG_rev_id;
} __attribute__((packed));
} __attribute__((packed));
} __attribute__((packed));
} __attribute__((packed));
struct {
- uint8_t exec_only_ok : 1;
- uint8_t rsvd1 : 5;
- uint8_t pg_walk_len4 : 1; /* support for a page walk of length 4 */
- uint8_t rsvd2 : 1;
- uint8_t ept_uc_ok : 1; /* EPT page tables can be uncacheable */
- uint8_t rsvd3 : 5;
- uint8_t ept_wb_ok : 1; /* EPT page tables can be writeback */
- uint8_t rsvd4 : 1;
- uint8_t ept_2MB_ok : 1; /* 2MB EPT pages supported */
- uint8_t ept_1GB_ok : 1; /* 1GB EPT pages supported */
- uint8_t rsvd5 : 2;
- uint8_t INVEPT_avail : 1; /* INVEPT instruction is available */
- uint8_t rsvd6 : 4;
- uint8_t INVEPT_single_ctx_avail : 1;
- uint8_t INVEPT_all_ctx_avail : 1;
- uint8_t rsvd7 : 5;
- uint8_t INVVPID_avail : 1;
- uint8_t rsvd8 : 7;
- uint8_t INVVPID_1addr_avail : 1;
- uint8_t INVVPID_single_ctx_avail : 1;
- uint8_t INVVPID_all_ctx_avail : 1;
- uint8_t INVVPID_single_ctx_w_glbls_avail : 1;
- uint32_t rsvd9 : 20;
+ uint64_t exec_only_ok : 1;
+ uint64_t rsvd1 : 5;
+ uint64_t pg_walk_len4 : 1; /* support for a page walk of length 4 */
+ uint64_t rsvd2 : 1;
+ uint64_t ept_uc_ok : 1; /* EPT page tables can be uncacheable */
+ uint64_t rsvd3 : 5;
+ uint64_t ept_wb_ok : 1; /* EPT page tables can be writeback */
+ uint64_t rsvd4 : 1;
+ uint64_t ept_2MB_ok : 1; /* 2MB EPT pages supported */
+ uint64_t ept_1GB_ok : 1; /* 1GB EPT pages supported */
+ uint64_t rsvd5 : 2;
+ uint64_t INVEPT_avail : 1; /* INVEPT instruction is available */
+ uint64_t rsvd6 : 4;
+ uint64_t INVEPT_single_ctx_avail : 1;
+ uint64_t INVEPT_all_ctx_avail : 1;
+ uint64_t rsvd7 : 5;
+ uint64_t INVVPID_avail : 1;
+ uint64_t rsvd8 : 7;
+ uint64_t INVVPID_1addr_avail : 1;
+ uint64_t INVVPID_single_ctx_avail : 1;
+ uint64_t INVVPID_all_ctx_avail : 1;
+ uint64_t INVVPID_single_ctx_w_glbls_avail : 1;
+ uint64_t rsvd9 : 20;
} __attribute__((packed));
} __attribute__((packed));
}__attribute__((packed));
struct vmx_ctrl_field proc_ctrls;
struct vmx_ctrl_field exit_ctrls;
struct vmx_ctrl_field entry_ctrls;
- struct vmx_ctrl_field proc_ctrls_2;
+ struct vmx_ctrl_field sec_proc_ctrls;
struct vmx_cr_field cr0;
struct vmx_cr_field cr4;
+
};
+
+
int v3_init_vmx_hw(struct vmx_hw_info * hw_info);
+uint32_t v3_vmx_get_ctrl_features(struct vmx_ctrl_field * fields);
--- /dev/null
+/*
+ * This file is part of the Palacios Virtual Machine Monitor developed
+ * by the V3VEE Project with funding from the United States National
+ * Science Foundation and the Department of Energy.
+ *
+ * The V3VEE Project is a joint project between Northwestern University
+ * and the University of New Mexico. You can find out more at
+ * http://www.v3vee.org
+ *
+ * Copyright (c) 2011, Lei Xia <lxia@northwestern.edu>
+ * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
+ * All rights reserved.
+ *
+ * Author: Lei Xia <lxia@northwestern.edu>
+ *
+ * This is free software. You are permitted to use,
+ * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
+ */
+
+#ifndef __VNET_INTERFACE_H__
+#define __VNET_INTERFACE_H__
+
+/* Opaque handles implemented by the host OS glue layer.
+ * Forward-declared and typedef'd here so the prototypes below compile as C:
+ * the original declared an empty 'struct v3_thread {}' (a GNU extension,
+ * invalid in ISO C) and then used the bare tags 'v3_thread' / 'v3_timer'
+ * without 'struct' or a typedef ('v3_timer' was never declared at all),
+ * which is only legal in C++. */
+typedef struct v3_thread v3_thread;
+typedef struct v3_timer v3_timer;
+
+/* Thread control: create a host kernel thread running func(arg) under the
+ * given name, plus sleep/wakeup/kill/stop/continue operations on it. */
+v3_thread * v3_thread_create(int (*func)(void *), void *arg, char * name);
+void v3_thread_sleep(int cond);
+void v3_thread_wakeup(v3_thread *);
+void v3_thread_kill(v3_thread *);
+void v3_thread_stop(v3_thread *);
+void v3_thread_continue(v3_thread *);
+
+
+// Palacios already provides timers, but those are bound to a specific VM and
+// their granularity is not guaranteed.  VNET needs a timer that is global
+// (not tied to any VM) and fine-grained.
+// NOTE(review): uint64_t is used below but no types header is included here —
+// confirm callers include the Palacios types header before this file.
+v3_timer * v3_create_timer(int interval /*in us*/, void (*timer_fun)(uint64_t elapsed_cycles, void * priv_data), void * pri_data);
+int v3_del_timer(v3_timer *);
+int v3_start_timer(v3_timer *);
+int v3_stop_timer(v3_timer *);
+
+
+#endif
+
+
#include <palacios/vmm_io.h>
-#ifndef CONFIG_DEBUG_PIT
+#ifndef V3_CONFIG_DEBUG_PIT
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
return 0;
}
-#ifdef CONFIG_KEYED_STREAMS
+#ifdef V3_CONFIG_KEYED_STREAMS
static int pit_checkpoint(struct vm_device *dev, v3_keyed_stream_t stream)
{
struct pit *p = (struct pit *) (dev->private_data);
static struct v3_device_ops dev_ops = {
.free = (int (*)(void *))pit_free,
-#ifdef CONFIG_KEYED_STREAMS
+#ifdef V3_CONFIG_KEYED_STREAMS
.checkpoint = pit_checkpoint,
.restore = pit_restore,
#endif
return -1;
}
-#ifdef CONFIG_DEBUG_PIT
+#ifdef V3_CONFIG_DEBUG_PIT
PrintDebug("8254 PIT: OSC_HZ=%d, reload_val=", OSC_HZ);
//PrintTrace(reload_val);
PrintDebug("\n");
init_channel(&(pit_state->ch_1));
init_channel(&(pit_state->ch_2));
-#ifdef CONFIG_DEBUG_PIT
+#ifdef V3_CONFIG_DEBUG_PIT
PrintDebug("8254 PIT: CPU MHZ=%d -- pit count=", cpu_khz / 1000);
//PrintTraceLL(pit_state->pit_counter);
PrintDebug("\n");
#include <palacios/vmm_dev_mgr.h>
#include <palacios/vm_guest.h>
-#ifndef CONFIG_DEBUG_PIC
+#ifndef V3_CONFIG_DEBUG_PIC
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
return -1;
}
-#ifdef CONFIG_MULTITHREAD_OS
+#ifdef V3_CONFIG_MULTITHREAD_OS
v3_interrupt_cpu(vm, 0, 0);
#endif
config IO_APIC
bool "IOAPIC"
+ depends on APIC
default y
help
Includes the Virtual IO APIC
Enable debugging for the IO APIC
+config MPTABLE
+ bool "MPTABLE"
+ default y
+ depends on APIC
+ help
+ Includes the MPTABLE to map the APICs and IO-APIC
+
config BOCHS_DEBUG
bool "Bochs Debug Console Device"
-obj-$(CONFIG_APIC) += apic.o
-obj-$(CONFIG_IO_APIC) += io_apic.o
-obj-$(CONFIG_PIT) += 8254.o
-obj-$(CONFIG_PIC) += 8259a.o
-obj-$(CONFIG_BOCHS_DEBUG) += bochs_debug.o
-obj-$(CONFIG_GENERIC) += generic.o
-obj-$(CONFIG_I440FX) += i440fx.o
-obj-$(CONFIG_IDE) += ide.o
-obj-$(CONFIG_SERIAL_UART) += serial.o
-
-obj-$(CONFIG_KEYBOARD) += keyboard.o
-obj-$(CONFIG_LINUX_VIRTIO_BALLOON) += lnx_virtio_balloon.o
-obj-$(CONFIG_LINUX_VIRTIO_BLOCK) += lnx_virtio_blk.o
-obj-$(CONFIG_LINUX_VIRTIO_SYM) += lnx_virtio_sym.o
-obj-$(CONFIG_LINUX_VIRTIO_NET) += lnx_virtio_nic.o
-obj-$(CONFIG_LINUX_VIRTIO_VNET) += lnx_virtio_vnet.o
-obj-$(CONFIG_VNET_NIC) += vnet_nic.o
-obj-$(CONFIG_NVRAM) += nvram.o
-obj-$(CONFIG_OS_DEBUG) += os_debug.o
-obj-$(CONFIG_PCI) += pci.o
-obj-$(CONFIG_PIIX3) += piix3.o
-obj-$(CONFIG_SWAPBYPASS_DISK_CACHE) += swapbypass_cache.o
-obj-$(CONFIG_SWAPBYPASS_DISK_CACHE2) += swapbypass_cache2.o
-obj-$(CONFIG_DISK_MODEL) += disk_model.o
-obj-$(CONFIG_NIC_BRIDGE) += nic_bridge.o
-
-obj-$(CONFIG_NE2K) += ne2k.o
-obj-$(CONFIG_RTL8139) += rtl8139.o
-
-obj-$(CONFIG_TMPDISK) += tmpdisk.o
-obj-$(CONFIG_RAMDISK) += ramdisk.o
-obj-$(CONFIG_NETDISK) += netdisk.o
-obj-$(CONFIG_FILEDISK) += filedisk.o
-
-obj-$(CONFIG_CGA) += cga.o
-obj-$(CONFIG_TELNET_CONSOLE) += telnet_cons.o
-obj-$(CONFIG_CURSES_CONSOLE) += curses_cons.o
-
-obj-$(CONFIG_PASSTHROUGH_PCI) += pci_passthrough.o
-
-obj-$(CONFIG_SYMMOD) += lnx_virtio_symmod.o
-obj-$(CONFIG_CHAR_STREAM) += char_stream.o
-
-obj-$(CONFIG_MCHECK) += mcheck.o
-
-obj-$(CONFIG_VGA) += vga.o
-
-obj-$(CONFIG_PCI_FRONT) += pci_front.o
+obj-$(V3_CONFIG_APIC) += apic.o
+obj-$(V3_CONFIG_IO_APIC) += io_apic.o
+obj-$(V3_CONFIG_MPTABLE) += mptable.o
+obj-$(V3_CONFIG_PIT) += 8254.o
+obj-$(V3_CONFIG_PIC) += 8259a.o
+obj-$(V3_CONFIG_BOCHS_DEBUG) += bochs_debug.o
+obj-$(V3_CONFIG_GENERIC) += generic.o
+obj-$(V3_CONFIG_I440FX) += i440fx.o
+obj-$(V3_CONFIG_IDE) += ide.o
+obj-$(V3_CONFIG_SERIAL_UART) += serial.o
+
+obj-$(V3_CONFIG_KEYBOARD) += keyboard.o
+obj-$(V3_CONFIG_LINUX_VIRTIO_BALLOON) += lnx_virtio_balloon.o
+obj-$(V3_CONFIG_LINUX_VIRTIO_BLOCK) += lnx_virtio_blk.o
+obj-$(V3_CONFIG_LINUX_VIRTIO_SYM) += lnx_virtio_sym.o
+obj-$(V3_CONFIG_LINUX_VIRTIO_NET) += lnx_virtio_nic.o
+obj-$(V3_CONFIG_LINUX_VIRTIO_VNET) += lnx_virtio_vnet.o
+obj-$(V3_CONFIG_VNET_NIC) += vnet_nic.o
+obj-$(V3_CONFIG_NVRAM) += nvram.o
+obj-$(V3_CONFIG_OS_DEBUG) += os_debug.o
+obj-$(V3_CONFIG_PCI) += pci.o
+obj-$(V3_CONFIG_PIIX3) += piix3.o
+obj-$(V3_CONFIG_SWAPBYPASS_DISK_CACHE) += swapbypass_cache.o
+obj-$(V3_CONFIG_SWAPBYPASS_DISK_CACHE2) += swapbypass_cache2.o
+obj-$(V3_CONFIG_DISK_MODEL) += disk_model.o
+obj-$(V3_CONFIG_NIC_BRIDGE) += nic_bridge.o
+
+obj-$(V3_CONFIG_NE2K) += ne2k.o
+obj-$(V3_CONFIG_RTL8139) += rtl8139.o
+
+obj-$(V3_CONFIG_TMPDISK) += tmpdisk.o
+obj-$(V3_CONFIG_RAMDISK) += ramdisk.o
+obj-$(V3_CONFIG_NETDISK) += netdisk.o
+obj-$(V3_CONFIG_FILEDISK) += filedisk.o
+
+obj-$(V3_CONFIG_CGA) += cga.o
+obj-$(V3_CONFIG_TELNET_CONSOLE) += telnet_cons.o
+obj-$(V3_CONFIG_CURSES_CONSOLE) += curses_cons.o
+
+obj-$(V3_CONFIG_PASSTHROUGH_PCI) += pci_passthrough.o
+
+obj-$(V3_CONFIG_SYMMOD) += lnx_virtio_symmod.o
+obj-$(V3_CONFIG_CHAR_STREAM) += char_stream.o
+
+obj-$(V3_CONFIG_MCHECK) += mcheck.o
+
+obj-$(V3_CONFIG_VGA) += vga.o
+
+obj-$(V3_CONFIG_PCI_FRONT) += pci_front.o
-#ifndef CONFIG_DEBUG_APIC
+#ifndef V3_CONFIG_DEBUG_APIC
#undef PrintDebug
#define PrintDebug(fmt, args...)
#else
*svc_location &= ~flag;
-#ifdef CONFIG_CRAY_XT
+#ifdef V3_CONFIG_CRAY_XT
if ((isr_irq == 238) ||
(isr_irq == 239)) {
// host maitains logical proc->phsysical proc
PrintDebug(" non-local core with new interrupt, forcing it to exit now\n");
-#ifdef CONFIG_MULTITHREAD_OS
+#ifdef V3_CONFIG_MULTITHREAD_OS
v3_interrupt_cpu(dst_core->vm_info, dst_core->cpu_id, 0);
#else
V3_ASSERT(0);
}
if (do_xcall > 0 && (V3_Get_CPU() != dst)) {
-#ifdef CONFIG_MULTITHREAD_OS
+#ifdef V3_CONFIG_MULTITHREAD_OS
v3_interrupt_cpu(vm, dst, 0);
#else
V3_ASSERT(0);
PrintDebug("apic %u: (setup device): done, my id is %u\n", i, apic->lapic_id.val);
}
-#ifdef CONFIG_DEBUG_APIC
+#ifdef V3_CONFIG_DEBUG_APIC
for (i = 0; i < vm->num_cores; i++) {
struct apic_state * apic = &(apic_dev->apics[i]);
PrintDebug("apic: sanity check: apic %u (at %p) has id %u and msr value %llx and core at %p\n",
#include <devices/console.h>
-#if CONFIG_DEBUG_CGA >= 2
+#if V3_CONFIG_DEBUG_CGA >= 2
#define PrintVerbose PrintDebug
#else
#define PrintVerbose(fmt, args...)
#endif
-#if CONFIG_DEBUG_CGA == 0
+#if V3_CONFIG_DEBUG_CGA == 0
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
}
}
-#if CONFIG_DEBUG_CGA >= 2
+#if V3_CONFIG_DEBUG_CGA >= 2
static unsigned long get_value(const void *ptr, int len) {
unsigned long value = 0;
#include <interfaces/vmm_file.h>
#include <palacios/vm_guest.h>
-#ifndef CONFIG_DEBUG_FILEDISK
+#ifndef V3_CONFIG_DEBUG_FILEDISK
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
#include <palacios/vmm_dev_mgr.h>
#include <palacios/vm_guest_mem.h>
-#ifdef CONFIG_HOST_DEVICE
+#ifdef V3_CONFIG_HOST_DEVICE
#include <interfaces/vmm_host_dev.h>
#endif
-#ifndef CONFIG_DEBUG_GENERIC
+#ifndef V3_CONFIG_DEBUG_GENERIC
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
struct generic_internal {
enum {GENERIC_PHYSICAL, GENERIC_HOST} forward_type;
-#ifdef CONFIG_HOST_DEVICE
+#ifdef V3_CONFIG_HOST_DEVICE
v3_host_dev_t host_dev;
#endif
struct vm_device *dev; // me
}
return length;
break;
-#ifdef CONFIG_HOST_DEVICE
+#ifdef V3_CONFIG_HOST_DEVICE
case GENERIC_HOST:
if (state->host_dev) {
return v3_host_dev_write_io(state->host_dev,port,src,length);
uint_t i;
int rc;
-#ifdef CONFIG_DEBUG_GENERIC
+#ifdef V3_CONFIG_DEBUG_GENERIC
struct generic_internal *state = (struct generic_internal *) priv_data;
#endif
}
return length;
break;
-#ifdef CONFIG_HOST_DEVICE
+#ifdef V3_CONFIG_HOST_DEVICE
case GENERIC_HOST:
if (state->host_dev) {
return v3_host_dev_read_io(state->host_dev,port,dst,length);
uint_t i;
int rc;
-#ifdef CONFIG_DEBUG_GENERIC
+#ifdef V3_CONFIG_DEBUG_GENERIC
struct generic_internal *state = (struct generic_internal *) priv_data;
#endif
static int generic_read_port_print_and_ignore(struct guest_info * core, uint16_t port, void * src,
uint_t length, void * priv_data) {
-#ifdef CONFIG_DEBUG_GENERIC
+#ifdef V3_CONFIG_DEBUG_GENERIC
struct generic_internal *state = (struct generic_internal *) priv_data;
#endif
uint_t length, void * priv_data) {
int i;
-#ifdef CONFIG_DEBUG_GENERIC
+#ifdef V3_CONFIG_DEBUG_GENERIC
struct generic_internal *state = (struct generic_internal *) priv_data;
#endif
memcpy(V3_VAddr((void*)gpa),src,len);
return len;
break;
-#ifdef CONFIG_HOST_DEVICE
+#ifdef V3_CONFIG_HOST_DEVICE
case GENERIC_HOST:
if (state->host_dev) {
return v3_host_dev_write_mem(state->host_dev,gpa,src,len);
uint_t len,
void * priv)
{
-#ifdef CONFIG_DEBUG_GENERIC
+#ifdef V3_CONFIG_DEBUG_GENERIC
struct vm_device *dev = (struct vm_device *) priv;
struct generic_internal *state = (struct generic_internal *) dev->private_data;
#endif
uint_t len,
void * priv)
{
-#ifdef CONFIG_DEBUG_GENERIC
+#ifdef V3_CONFIG_DEBUG_GENERIC
struct vm_device *dev = (struct vm_device *) priv;
struct generic_internal *state = (struct generic_internal *) dev->private_data;
#endif
memcpy(dst,V3_VAddr((void*)gpa),len);
return len;
break;
-#ifdef CONFIG_HOST_DEVICE
+#ifdef V3_CONFIG_HOST_DEVICE
case GENERIC_HOST:
if (state->host_dev) {
return v3_host_dev_read_mem(state->host_dev,gpa,dst,len);
uint_t len,
void * priv)
{
-#ifdef CONFIG_DEBUG_GENERIC
+#ifdef V3_CONFIG_DEBUG_GENERIC
struct vm_device *dev = (struct vm_device *) priv;
struct generic_internal *state = (struct generic_internal *) dev->private_data;
#endif
uint_t len,
void * priv)
{
-#ifdef CONFIG_DEBUG_GENERIC
+#ifdef V3_CONFIG_DEBUG_GENERIC
struct vm_device *dev = (struct vm_device *) priv;
struct generic_internal *state = (struct generic_internal *) dev->private_data;
#endif
PrintDebug("generic (%s): deinit_device\n", state->name);
-#ifdef CONFIG_HOST_DEVICE
+#ifdef V3_CONFIG_HOST_DEVICE
if (state->host_dev) {
v3_host_dev_close(state->host_dev);
state->host_dev=0;
struct generic_internal * state = NULL;
char * dev_id = v3_cfg_val(cfg, "ID");
char * forward = v3_cfg_val(cfg, "forward");
-#ifdef CONFIG_HOST_DEVICE
+#ifdef V3_CONFIG_HOST_DEVICE
char * host_dev = v3_cfg_val(cfg, "hostdev");
#endif
v3_cfg_tree_t * port_cfg = v3_cfg_subtree(cfg, "ports");
if (!strcasecmp(forward,"physical_device")) {
state->forward_type=GENERIC_PHYSICAL;
} else if (!strcasecmp(forward,"host_device")) {
-#ifdef CONFIG_HOST_DEVICE
+#ifdef V3_CONFIG_HOST_DEVICE
state->forward_type=GENERIC_HOST;
#else
PrintError("generic (%s): cannot configure host device since host device support is not built in\n", state->name);
state->dev=dev;
-#ifdef CONFIG_HOST_DEVICE
+#ifdef V3_CONFIG_HOST_DEVICE
if (state->forward_type==GENERIC_HOST) {
if (!host_dev) {
PrintError("generic (%s): host forwarding requested, but no host device given\n", state->name);
#include "ide-types.h"
#include "atapi-types.h"
-#ifndef CONFIG_DEBUG_IDE
+#ifndef V3_CONFIG_DEBUG_IDE
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
#include "ata.h"
-#ifdef CONFIG_DEBUG_IDE
+#ifdef V3_CONFIG_DEBUG_IDE
static void print_prd_table(struct ide_internal * ide, struct ide_channel * channel) {
struct ide_dma_prd prd_entry;
int index = 0;
// Read in the data buffer....
// Read a sector/block at a time until the prd entry is full.
-#ifdef CONFIG_DEBUG_IDE
+#ifdef V3_CONFIG_DEBUG_IDE
print_prd_table(ide, channel);
#endif
#include <devices/apic.h>
#include <palacios/vm_guest.h>
-#ifndef CONFIG_DEBUG_IO_APIC
+#ifndef V3_CONFIG_DEBUG_IO_APIC
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
#include <palacios/vm_guest.h>
-#ifndef CONFIG_DEBUG_KEYBOARD
+#ifndef V3_CONFIG_DEBUG_KEYBOARD
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
#include <palacios/vmm_telemetry.h>
-#ifdef CONFIG_SYMMOD
+#ifdef V3_CONFIG_SYMMOD
#include <palacios/vmm_symmod.h>
#endif
}
// PrintGuestPageTables(info, info->shdw_pg_state.guest_cr3);
}
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
else if (evt->scan_code == 0x43) { // F9 Sym test
struct guest_info * core = &(vm->cores[0]);
PrintDebug("Testing sym call\n");
v3_dbg_enable ^= 1;
}
-#ifdef CONFIG_TELEMETRY
+#ifdef V3_CONFIG_TELEMETRY
else if (evt->scan_code == 0x41) { // F7 telemetry dump
v3_print_telemetry(vm);
}
#endif
-#ifdef CONFIG_SYMMOD
+#ifdef V3_CONFIG_SYMMOD
else if (evt->scan_code == 0x40) { // F6 Test symmod load
v3_load_sym_capsule(vm, "lnx_test");
}
-#ifndef CONFIG_DEBUG_VIRTIO_BLK
+#ifndef V3_CONFIG_DEBUG_VIRTIO_BLK
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
#include <palacios/vmm_time.h>
-#ifndef CONFIG_DEBUG_VIRTIO_NET
+#ifndef V3_CONFIG_DEBUG_VIRTIO_NET
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
+#define TX_QUEUE_SIZE 4096
+#define RX_QUEUE_SIZE 4096
+#define CTRL_QUEUE_SIZE 64
+
+/* Feature bitmap for the virtio NIC
+ * (definitions taken from Linux) */
+#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
+#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
+#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
+#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */
+#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
+#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
+#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */
+#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */
+#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */
+#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */
+#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */
+#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
+#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
+#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */
+
+/* Port to get virtio config */
+#define VIRTIO_NET_CONFIG 20
+
#define VIRTIO_NET_MAX_BUFSIZE (sizeof(struct virtio_net_hdr) + (64 << 10))
+/* for gso_type in virtio_net_hdr */
+#define VIRTIO_NET_HDR_GSO_NONE 0
+#define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */
+#define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */
+#define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */
+#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */
-struct virtio_net_hdr {
- uint8_t flags;
-
- uint8_t gso_type;
- uint16_t hdr_len; /* Ethernet + IP + tcp/udp hdrs */
- uint16_t gso_size; /* Bytes to append to hdr_len per frame */
- uint16_t csum_start; /* Position to start checksumming from */
- uint16_t csum_offset; /* Offset after that to place checksum */
-}__attribute__((packed));
+/* for flags in virtio_net_hdr */
+#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start, csum_offset */
-struct virtio_net_hdr_mrg_rxbuf {
- struct virtio_net_hdr hdr;
- uint16_t num_buffers; /* Number of merged rx buffers */
-};
-
-#define TX_QUEUE_SIZE 256
-#define RX_QUEUE_SIZE 4096
-#define CTRL_QUEUE_SIZE 64
+/* First element of the scatter-gather list, used with GSO or CSUM features */
+struct virtio_net_hdr
+{
+ uint8_t flags;
+ uint8_t gso_type;
+ uint16_t hdr_len; /* Ethernet + IP + tcp/udp hdrs */
+ uint16_t gso_size; /* Bytes to append to hdr_len per frame */
+ uint16_t csum_start; /* Position to start checksumming from */
+ uint16_t csum_offset; /* Offset after that to place checksum */
+}__attribute__((packed));
-#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
-#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
-#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */
-#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */
-#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
-/* Port to get virtio config */
-#define VIRTIO_NET_CONFIG 20
+/* The header to use when the MRG_RXBUF
+ * feature has been negotiated. */
+struct virtio_net_hdr_mrg_rxbuf {
+ struct virtio_net_hdr hdr;
+ uint16_t num_buffers; /* Number of merged rx buffers */
+};
struct virtio_net_config
{
struct virtio_net_config net_cfg;
struct virtio_config virtio_cfg;
+ struct v3_vm_info * vm;
struct vm_device * dev;
struct pci_device * pci_dev;
int io_range_size;
+
+ uint16_t status;
struct virtio_queue rx_vq; /* idx 0*/
struct virtio_queue tx_vq; /* idx 1*/
struct virtio_queue ctrl_vq; /* idx 2*/
+ uint8_t mergeable_rx_bufs;
+
struct v3_timer * timer;
+ void * poll_thread;
- struct nic_statistics statistics;
+ struct nic_statistics stats;
struct v3_dev_net_ops * net_ops;
v3_lock_t rx_lock, tx_lock;
uint8_t tx_notify, rx_notify;
uint32_t tx_pkts, rx_pkts;
- uint64_t past_ms;
+ uint64_t past_us;
void * backend_data;
struct virtio_dev_state * virtio_dev;
struct list_head dev_link;
};
+
static int virtio_init_state(struct virtio_net_state * virtio)
{
virtio->rx_vq.queue_size = RX_QUEUE_SIZE;
virtio->ctrl_vq.cur_avail_idx = 0;
virtio->virtio_cfg.pci_isr = 0;
+
+ virtio->mergeable_rx_bufs = 0;
- virtio->virtio_cfg.host_features = 0 | (1 << VIRTIO_NET_F_MAC) |
- (1 << VIRTIO_NET_F_HOST_UFO) |
- (1 << VIRTIO_NET_F_HOST_TSO4);
+ virtio->virtio_cfg.host_features = 0 | (1 << VIRTIO_NET_F_MAC);
+ if(virtio->mergeable_rx_bufs) {
+ virtio->virtio_cfg.host_features |= (1 << VIRTIO_NET_F_MRG_RXBUF);
+ }
if ((v3_lock_init(&(virtio->rx_lock)) == -1) ||
(v3_lock_init(&(virtio->tx_lock)) == -1)){
}
static int tx_one_pkt(struct guest_info * core,
- struct virtio_net_state * virtio,
- struct vring_desc * buf_desc)
+ struct virtio_net_state * virtio,
+ struct vring_desc * buf_desc)
{
uint8_t * buf = NULL;
uint32_t len = buf_desc->length;
+ int synchronize = virtio->tx_notify;
if (v3_gpa_to_hva(core, buf_desc->addr_gpa, (addr_t *)&(buf)) == -1) {
PrintDebug("Could not translate buffer address\n");
return -1;
}
- if(virtio->net_ops->send(buf, len, virtio->backend_data) >= 0){
- virtio->statistics.tx_pkts ++;
- virtio->statistics.tx_bytes += len;
+ V3_Net_Print(2, "Virtio-NIC: virtio_tx: size: %d\n", len);
+ if(v3_net_debug >= 4){
+ v3_hexdump(buf, len, NULL, 0);
+ }
- return 0;
+ if(virtio->net_ops->send(buf, len, synchronize, virtio->backend_data) < 0){
+ virtio->stats.tx_dropped ++;
+ return -1;
}
- virtio->statistics.tx_dropped ++;
+ virtio->stats.tx_pkts ++;
+ virtio->stats.tx_bytes += len;
- return -1;
+ return 0;
}
-static int
-copy_data_to_desc(struct guest_info * core,
+static inline int copy_data_to_desc(struct guest_info * core,
struct virtio_net_state * virtio_state,
struct vring_desc * desc,
uchar_t * buf,
uint_t buf_len,
- uint_t offset)
-{
+ uint_t dst_offset){
uint32_t len;
uint8_t * desc_buf = NULL;
if (v3_gpa_to_hva(core, desc->addr_gpa, (addr_t *)&(desc_buf)) == -1) {
- PrintError("Could not translate buffer address\n");
+ PrintDebug("Could not translate buffer address\n");
return -1;
}
- len = (desc->length < buf_len)?(desc->length - offset):buf_len;
- memcpy(desc_buf+offset, buf, len);
+ len = (desc->length < buf_len)?(desc->length - dst_offset):buf_len;
+ memcpy(desc_buf+dst_offset, buf, len);
return len;
}
-static int get_desc_count(struct virtio_queue * q, int index) {
+static inline int get_desc_count(struct virtio_queue * q, int index) {
struct vring_desc * tmp_desc = &(q->desc[index]);
int cnt = 1;
queue->used->flags |= VRING_NO_NOTIFY_FLAG;
}
-
-/* interrupt the guest, so the guest core get EXIT to Palacios */
-static inline void notify_guest(struct virtio_net_state * virtio){
- v3_interrupt_cpu(virtio->virtio_dev->vm, virtio->virtio_dev->vm->cores[0].cpu_id, 0);
-}
-
-
-/* guest free some pkts for rx queue */
-static int handle_rx_queue_kick(struct guest_info * core,
- struct virtio_net_state * virtio)
-{
- return 0;
-}
-
-
-static int handle_ctrl(struct guest_info * core,
- struct virtio_net_state * virtio) {
-
- return 0;
-}
-
static int handle_pkt_tx(struct guest_info * core,
struct virtio_net_state * virtio_state)
{
struct virtio_queue *q = &(virtio_state->tx_vq);
- struct virtio_net_hdr *hdr = NULL;
int txed = 0;
- unsigned long flags;
+ unsigned long flags;
if (!q->ring_avail_addr) {
return -1;
flags = v3_lock_irqsave(virtio_state->tx_lock);
while (q->cur_avail_idx != q->avail->index) {
+ struct virtio_net_hdr_mrg_rxbuf * hdr = NULL;
struct vring_desc * hdr_desc = NULL;
addr_t hdr_addr = 0;
uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
int desc_cnt = get_desc_count(q, desc_idx);
- uint32_t req_len = 0;
- int i = 0;
+
+ if(desc_cnt > 2){
+ PrintError("VNIC: merged rx buffer not supported, desc_cnt %d\n", desc_cnt);
+ goto exit_error;
+ }
hdr_desc = &(q->desc[desc_idx]);
if (v3_gpa_to_hva(core, hdr_desc->addr_gpa, &(hdr_addr)) == -1) {
goto exit_error;
}
- hdr = (struct virtio_net_hdr *)hdr_addr;
+ hdr = (struct virtio_net_hdr_mrg_rxbuf *)hdr_addr;
desc_idx = hdr_desc->next;
- if(desc_cnt > 2){
- PrintError("VNIC: merged rx buffer not supported, desc_cnt %d\n", desc_cnt);
+ V3_Net_Print(2, "Virtio NIC: TX hdr count : %d\n", hdr->num_buffers);
+
+ /* here we assumed that one ethernet pkt is not splitted into multiple buffer */
+ struct vring_desc * buf_desc = &(q->desc[desc_idx]);
+ if (tx_one_pkt(core, virtio_state, buf_desc) == -1) {
+ PrintError("Virtio NIC: Error handling nic operation\n");
goto exit_error;
}
-
- /* here we assumed that one ethernet pkt is not splitted into multiple virtio buffer */
- for (i = 0; i < desc_cnt - 1; i++) {
- struct vring_desc * buf_desc = &(q->desc[desc_idx]);
- if (tx_one_pkt(core, virtio_state, buf_desc) == -1) {
- PrintError("Error handling nic operation\n");
- goto exit_error;
- }
-
- req_len += buf_desc->length;
- desc_idx = buf_desc->next;
+ if(buf_desc->next & VIRTIO_NEXT_FLAG){
+ V3_Net_Print(2, "Virtio NIC: TX more buffer need to read\n");
}
-
+
q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
- q->used->ring[q->used->index % q->queue_size].length = req_len; /* What do we set this to???? */
+ q->used->ring[q->used->index % q->queue_size].length = buf_desc->length; /* What do we set this to???? */
q->used->index ++;
q->cur_avail_idx ++;
}
v3_unlock_irqrestore(virtio_state->tx_lock, flags);
-
+
if (txed && !(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
v3_pci_raise_irq(virtio_state->virtio_dev->pci_bus, 0, virtio_state->pci_dev);
virtio_state->virtio_cfg.pci_isr = 0x1;
- virtio_state->statistics.interrupts ++;
+ virtio_state->stats.rx_interrupts ++;
+ }
+
+ if(txed > 0) {
+ V3_Net_Print(2, "Virtio Handle TX: txed pkts: %d\n", txed);
}
return 0;
break;
case 1:
virtio_setup_queue(core, virtio, &virtio->tx_vq, pfn, page_addr);
+ if(virtio->tx_notify == 0){
+ disable_cb(&virtio->tx_vq);
+ V3_THREAD_WAKEUP(virtio->poll_thread);
+ }
break;
case 2:
virtio_setup_queue(core, virtio, &virtio->ctrl_vq, pfn, page_addr);
{
uint16_t queue_idx = *(uint16_t *)src;
if (queue_idx == 0){
- if(handle_rx_queue_kick(core, virtio) == -1){
- PrintError("Could not handle Virtio NIC rx kick\n");
- return -1;
- }
+ /* receive queue refill */
+ virtio->stats.tx_interrupts ++;
} else if (queue_idx == 1){
if (handle_pkt_tx(core, virtio) == -1) {
PrintError("Could not handle Virtio NIC tx kick\n");
return -1;
}
+ virtio->stats.tx_interrupts ++;
} else if (queue_idx == 2){
- if (handle_ctrl(core, virtio) == -1) {
- PrintError("Could not handle Virtio NIC ctrl kick\n");
- return -1;
- }
+ /* ctrl */
} else {
PrintError("Wrong queue index %d\n", queue_idx);
}
case HOST_FEATURES_PORT:
if (length != 4) {
PrintError("Illegal read length for host features\n");
- return -1;
+ //return -1;
}
*(uint32_t *)dst = virtio->virtio_cfg.host_features;
break;
struct virtio_net_state * virtio = (struct virtio_net_state *)private_data;
struct virtio_queue * q = &(virtio->rx_vq);
struct virtio_net_hdr_mrg_rxbuf hdr;
- uint32_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
uint32_t data_len;
- uint32_t offset = 0;
unsigned long flags;
-#ifdef CONFIG_DEBUG_VIRTIO_NET
- PrintDebug("Virtio-NIC: virtio_rx: size: %d\n", size);
- v3_hexdump(buf, size, NULL, 0);
-#endif
+ V3_Net_Print(2, "Virtio-NIC: virtio_rx: size: %d\n", size);
+ if(v3_net_debug >= 4){
+ v3_hexdump(buf, size, NULL, 0);
+ }
flags = v3_lock_irqsave(virtio->rx_lock);
memset(&hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
if (q->ring_avail_addr == 0) {
- PrintDebug("Queue is not set\n");
+ V3_Net_Print(2, "Virtio NIC: RX Queue not set\n");
+ virtio->stats.rx_dropped ++;
goto err_exit;
}
if (q->cur_avail_idx != q->avail->index){
addr_t hdr_addr = 0;
- uint16_t hdr_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
uint16_t buf_idx = 0;
+ uint16_t hdr_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
struct vring_desc * hdr_desc = NULL;
+ struct vring_desc * buf_desc = NULL;
+ uint32_t hdr_len = 0;
+ uint32_t len;
hdr_desc = &(q->desc[hdr_idx]);
if (v3_gpa_to_hva(&(virtio->virtio_dev->vm->cores[0]), hdr_desc->addr_gpa, &(hdr_addr)) == -1) {
- PrintDebug("Could not translate receive buffer address\n");
+ V3_Net_Print(2, "Virtio NIC: Could not translate receive buffer address\n");
+ virtio->stats.rx_dropped ++;
goto err_exit;
}
- hdr.num_buffers = 1;
- memcpy((void *)hdr_addr, &hdr, sizeof(struct virtio_net_hdr_mrg_rxbuf));
- if (offset >= data_len) {
- hdr_desc->flags &= ~VIRTIO_NEXT_FLAG;
- }
- struct vring_desc * buf_desc = NULL;
- for (buf_idx = hdr_desc->next; offset < data_len; buf_idx = q->desc[hdr_idx].next) {
- uint32_t len = 0;
- buf_desc = &(q->desc[buf_idx]);
+ hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+
+ if(virtio->mergeable_rx_bufs){/* merged buffer */
+ uint32_t offset = 0;
+ len = 0;
+ hdr.num_buffers = 0;
- len = copy_data_to_desc(&(virtio->virtio_dev->vm->cores[0]), virtio, buf_desc, buf + offset, data_len - offset, 0);
+ hdr_desc = &(q->desc[buf_idx]);
+ hdr_desc->flags &= ~VIRTIO_NEXT_FLAG;
+
+ len = copy_data_to_desc(&(virtio->virtio_dev->vm->cores[0]), virtio, hdr_desc, buf, data_len, hdr_len);
offset += len;
- if (offset < data_len) {
- buf_desc->flags = VIRTIO_NEXT_FLAG;
+
+ hdr.num_buffers ++;
+ q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
+ q->used->ring[q->used->index % q->queue_size].length = hdr_len + len;
+ q->cur_avail_idx ++;
+
+ while(offset < data_len) {
+ buf_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
+ buf_desc = &(q->desc[buf_idx]);
+
+ len = copy_data_to_desc(&(virtio->virtio_dev->vm->cores[0]), virtio, buf_desc, buf + offset, data_len - offset, 0);
+ if (len <= 0){
+ V3_Net_Print(2, "Virtio NIC:merged buffer, %d buffer size %d\n", hdr.num_buffers, data_len);
+ virtio->stats.rx_dropped ++;
+ goto err_exit;
+ }
+ offset += len;
+ buf_desc->flags &= ~VIRTIO_NEXT_FLAG;
+
+ hdr.num_buffers ++;
+ q->used->ring[(q->used->index + hdr.num_buffers) % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
+ q->used->ring[(q->used->index + hdr.num_buffers) % q->queue_size].length = len;
+ q->cur_avail_idx ++;
}
- buf_desc->length = len;
- }
- buf_desc->flags &= ~VIRTIO_NEXT_FLAG;
-
- q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
- q->used->ring[q->used->index % q->queue_size].length = data_len + hdr_len; /* This should be the total length of data sent to guest (header+pkt_data) */
- q->used->index++;
- q->cur_avail_idx++;
+ q->used->index += hdr.num_buffers;
+ copy_data_to_desc(&(virtio->virtio_dev->vm->cores[0]), virtio, hdr_desc, (uchar_t *)&hdr, hdr_len, 0);
+ }else{
+ hdr_desc = &(q->desc[buf_idx]);
+ copy_data_to_desc(&(virtio->virtio_dev->vm->cores[0]), virtio, hdr_desc, (uchar_t *)&hdr, hdr_len, 0);
- virtio->statistics.rx_pkts ++;
- virtio->statistics.rx_bytes += size;
+ buf_idx = hdr_desc->next;
+ buf_desc = &(q->desc[buf_idx]);
+ len = copy_data_to_desc(&(virtio->virtio_dev->vm->cores[0]), virtio, buf_desc, buf, data_len, 0);
+ if (len < data_len) {
+ V3_Net_Print(2, "Virtio NIC: ring buffer len less than pkt size, merged buffer not supported, buffer size %d\n", len);
+ virtio->stats.rx_dropped ++;
+
+ goto err_exit;
+ }
+ buf_desc->flags &= ~VIRTIO_NEXT_FLAG;
+
+ q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
+ q->used->ring[q->used->index % q->queue_size].length = data_len + hdr_len; /* This should be the total length of data sent to guest (header+pkt_data) */
+ q->used->index++;
+ q->cur_avail_idx++;
+ }
+
+ virtio->stats.rx_pkts ++;
+ virtio->stats.rx_bytes += size;
} else {
- virtio->statistics.rx_dropped ++;
+ V3_Net_Print(2, "Virtio NIC: Guest RX queue is full\n");
+ virtio->stats.rx_dropped ++;
+
+ /* kick guest to refill the queue */
+ virtio->virtio_cfg.pci_isr = 0x1;
+ v3_pci_raise_irq(virtio->virtio_dev->pci_bus, 0, virtio->pci_dev);
+ v3_interrupt_cpu(virtio->virtio_dev->vm, virtio->virtio_dev->vm->cores[0].cpu_id, 0);
+ virtio->stats.rx_interrupts ++;
goto err_exit;
}
if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
- PrintDebug("Raising IRQ %d\n", virtio->pci_dev->config_header.intr_line);
-
+ V3_Net_Print(2, "Raising IRQ %d\n", virtio->pci_dev->config_header.intr_line);
+
+ virtio->virtio_cfg.pci_isr = 0x1;
v3_pci_raise_irq(virtio->virtio_dev->pci_bus, 0, virtio->pci_dev);
- virtio->virtio_cfg.pci_isr = 0x1;
- virtio->statistics.interrupts ++;
+
+ virtio->stats.rx_interrupts ++;
}
v3_unlock_irqrestore(virtio->rx_lock, flags);
- /* notify guest if guest is running */
- if(virtio->rx_notify == 1){
+ /* notify guest if it is in guest mode */
+ if(virtio->rx_notify == 1 &&
+ V3_Get_CPU() != virtio->virtio_dev->vm->cores[0].cpu_id){
v3_interrupt_cpu(virtio->virtio_dev->vm, virtio->virtio_dev->vm->cores[0].cpu_id, 0);
}
};
-static void virtio_nic_poll(struct v3_vm_info * vm, int budget, void * data){
- struct virtio_net_state * virtio = (struct virtio_net_state *)data;
+static int virtio_tx_flush(void * args){
+ struct virtio_net_state *virtio = (struct virtio_net_state *)args;
- if(virtio->tx_notify == 0){
- handle_pkt_tx(&(vm->cores[0]), virtio);
+ V3_Print("Virtio TX Poll Thread Starting for %s\n", virtio->vm->name);
+
+ while(1){
+ if(virtio->tx_notify == 0){
+ handle_pkt_tx(&(virtio->vm->cores[0]), virtio);
+ v3_yield(NULL);
+ }else {
+ V3_THREAD_SLEEP();
+ }
}
+
+ return 0;
}
static int register_dev(struct virtio_dev_state * virtio,
#define RATE_UPPER_THRESHOLD 10 /* 10000 pkts per second, around 100Mbits */
#define RATE_LOWER_THRESHOLD 1
-#define PROFILE_PERIOD 50 /*50ms*/
+#define PROFILE_PERIOD 10000 /*us*/
-/* Timer Functions */
static void virtio_nic_timer(struct guest_info * core,
uint64_t cpu_cycles, uint64_t cpu_freq,
void * priv_data) {
struct virtio_net_state * net_state = (struct virtio_net_state *)priv_data;
- uint64_t period_ms;
+ uint64_t period_us;
+ static int profile_ms = 0;
- period_ms = cpu_cycles/cpu_freq;
- net_state->past_ms += period_ms;
+ period_us = (1000*cpu_cycles)/cpu_freq;
+ net_state->past_us += period_us;
- if(net_state->past_ms > PROFILE_PERIOD){
+ if(net_state->past_us > PROFILE_PERIOD){
uint32_t tx_rate, rx_rate;
- tx_rate = (net_state->statistics.tx_pkts - net_state->tx_pkts)/net_state->past_ms; /* pkts/per ms */
- rx_rate = (net_state->statistics.rx_pkts - net_state->rx_pkts)/net_state->past_ms;
+ tx_rate = (net_state->stats.tx_pkts - net_state->tx_pkts)/(net_state->past_us/1000); /* pkts/per ms */
+ rx_rate = (net_state->stats.rx_pkts - net_state->rx_pkts)/(net_state->past_us/1000);
- net_state->tx_pkts = net_state->statistics.tx_pkts;
- net_state->rx_pkts = net_state->statistics.rx_pkts;
+ net_state->tx_pkts = net_state->stats.tx_pkts;
+ net_state->rx_pkts = net_state->stats.rx_pkts;
if(tx_rate > RATE_UPPER_THRESHOLD && net_state->tx_notify == 1){
V3_Print("Virtio NIC: Switch TX to VMM driven mode\n");
disable_cb(&(net_state->tx_vq));
net_state->tx_notify = 0;
+ V3_THREAD_WAKEUP(net_state->poll_thread);
}
if(tx_rate < RATE_LOWER_THRESHOLD && net_state->tx_notify == 0){
}
if(rx_rate > RATE_UPPER_THRESHOLD && net_state->rx_notify == 1){
- PrintDebug("Virtio NIC: Switch RX to VMM None notify mode\n");
+ V3_Print("Virtio NIC: Switch RX to VMM None notify mode\n");
net_state->rx_notify = 0;
}
if(rx_rate < RATE_LOWER_THRESHOLD && net_state->rx_notify == 0){
- PrintDebug("Virtio NIC: Switch RX to VMM notify mode\n");
+ V3_Print("Virtio NIC: Switch RX to VMM notify mode\n");
net_state->rx_notify = 1;
}
- net_state->past_ms = 0;
+ net_state->past_us = 0;
}
-}
+ profile_ms += period_us/1000;
+ if(profile_ms > 20000){
+ V3_Net_Print(1, "Virtio NIC: TX: Pkt: %lld, Bytes: %lld\n\t\tRX Pkt: %lld. Bytes: %lld\n\t\tDropped: tx %lld, rx %lld\nInterrupts: tx %d, rx %d\nTotal Exit: %lld\n",
+ net_state->stats.tx_pkts, net_state->stats.tx_bytes,
+ net_state->stats.rx_pkts, net_state->stats.rx_bytes,
+ net_state->stats.tx_dropped, net_state->stats.rx_dropped,
+ net_state->stats.tx_interrupts, net_state->stats.rx_interrupts,
+ net_state->vm->cores[0].num_exits);
+ profile_ms = 0;
+ }
+}
static struct v3_timer_ops timer_ops = {
.update_timer = virtio_nic_timer,
memset(net_state, 0, sizeof(struct virtio_net_state));
register_dev(virtio, net_state);
+ net_state->vm = info;
net_state->net_ops = ops;
net_state->backend_data = private_data;
net_state->virtio_dev = virtio;
- net_state->tx_notify = 1;
- net_state->rx_notify = 1;
-
+ net_state->tx_notify = 0;
+ net_state->rx_notify = 0;
+
net_state->timer = v3_add_timer(&(info->cores[0]),&timer_ops,net_state);
ops->recv = virtio_rx;
- ops->poll = virtio_nic_poll;
ops->frontend_data = net_state;
memcpy(ops->fnt_mac, virtio->mac, ETH_ALEN);
+ net_state->poll_thread = V3_CREATE_THREAD(virtio_tx_flush, (void *)net_state, "Virtio_Poll");
+
return 0;
}
if (macstr != NULL && !str2mac(macstr, virtio_state->mac)) {
PrintDebug("Virtio NIC: Mac specified %s\n", macstr);
- PrintDebug("MAC: %x:%x:%x:%x:%x:%x\n", virtio_state->mac[0],
- virtio_state->mac[1],
- virtio_state->mac[2],
- virtio_state->mac[3],
- virtio_state->mac[4],
- virtio_state->mac[5]);
}else {
- PrintDebug("Virtio NIC: MAC not specified\n");
random_ethaddr(virtio_state->mac);
}
#include <devices/pci.h>
-#ifndef CONFIG_DEBUG_LINUX_VIRTIO_VNET
+#ifndef V3_CONFIG_DEBUG_LINUX_VIRTIO_VNET
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
memcpy(pkt.header, virtio_pkt->pkt, ETHERNET_HEADER_LEN);
pkt.data = virtio_pkt->pkt;
- v3_vnet_send_pkt(&pkt, NULL);
+ v3_vnet_send_pkt(&pkt, NULL, 1);
q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
q->used->ring[q->used->index % q->queue_size].length = pkt_desc->length; // What do we set this to????
#include <palacios/vmm_string.h>
#include <palacios/vmm_cpuid.h>
-#ifndef CONFIG_DEBUG_MCHECK
+#ifndef V3_CONFIG_DEBUG_MCHECK
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
*/
#include <palacios/vmm.h>
-#include <palacios/vmm_mptable.h>
#include <palacios/vmm_string.h>
#include <palacios/vm_guest_mem.h>
return 0;
}
-
-int v3_inject_mptable(struct v3_vm_info * vm) {
+static int mptable_init(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) {
void * target = NULL;
if (v3_gpa_to_hva(&(vm->cores[0]), BIOS_MP_TABLE_DEFAULT_LOCATION, (addr_t *)&target) == -1) {
return 0;
}
+
+
+
+device_register("MPTABLE", mptable_init)
#include <palacios/vm_guest.h>
#include <palacios/vmm_sprintf.h>
-#ifndef CONFIG_DEBUG_NE2K
+#ifndef V3_CONFIG_DEBUG_NE2K
#undef PrintDebug
#define PrintDebug(fmts, args...)
#endif
static int tx_one_pkt(struct ne2k_state * nic_state, uchar_t *pkt, uint32_t length) {
-#ifdef CONFIG_DEBUG_NE2K
+#ifdef V3_CONFIG_DEBUG_NE2K
PrintDebug("NE2000: Send Packet:\n");
v3_hexdump(pkt, length, NULL, 0);
#endif
static int ne2k_rx(uint8_t * buf, uint32_t size, void * private_data){
struct ne2k_state * nic_state = (struct ne2k_state *)private_data;
-#ifdef CONFIG_DEBUG_NE2K
+#ifdef V3_CONFIG_DEBUG_NE2K
PrintDebug("\nNe2k: Packet Received:\n");
v3_hexdump(buf, size, NULL, 0);
#endif
#include <palacios/vmm_dev_mgr.h>
#include <interfaces/vmm_socket.h>
-#ifndef CONFIG_DEBUG_IDE
+#ifndef V3_CONFIG_DEBUG_IDE
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
#include <palacios/vmm_sprintf.h>
#include <interfaces/vmm_packet.h>
-#ifndef CONFIG_DEBUG_NIC_BRIDGE
+#ifndef V3_CONFIG_DEBUG_NIC_BRIDGE
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
};
static int bridge_send(uint8_t * buf, uint32_t len,
+ int synchronize,
void * private_data) {
-#ifdef CONFIG_DEBUG_NIC_BRIDGE
+#ifdef V3_CONFIG_DEBUG_NIC_BRIDGE
{
PrintDebug("NIC Bridge: send pkt size: %d\n", len);
v3_hexdump(buf, len, NULL, 0);
void * private_data) {
struct nic_bridge_state * bridge = (struct nic_bridge_state *)private_data;
-#ifdef CONFIG_DEBUG_NIC_BRIDGE
+#ifdef V3_CONFIG_DEBUG_NIC_BRIDGE
{
PrintDebug("NIC Bridge: recv pkt size: %d\n", evt->size);
v3_hexdump(evt->pkt, evt->size, NULL, 0);
#include <palacios/vm_guest.h>
-#ifndef CONFIG_DEBUG_NVRAM
+#ifndef V3_CONFIG_DEBUG_NVRAM
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
-#ifndef CONFIG_DEBUG_PCI
+#ifndef V3_CONFIG_DEBUG_PCI
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
-#ifdef CONFIG_DEBUG_PCI
+#ifdef V3_CONFIG_DEBUG_PCI
static void pci_dump_state(struct pci_internal * pci_state) {
struct rb_node * node = v3_rb_first(&(pci_state->bus_list[0].devices));
// add the device
add_device_to_bus(bus, pci_dev);
-#ifdef CONFIG_DEBUG_PCI
+#ifdef V3_CONFIG_DEBUG_PCI
pci_dump_state(pci_state);
#endif
// add the device
add_device_to_bus(bus, pci_dev);
-#ifdef CONFIG_DEBUG_PCI
+#ifdef V3_CONFIG_DEBUG_PCI
pci_dump_state(pci_state);
#endif
#include <interfaces/vmm_host_dev.h>
-#ifndef CONFIG_DEBUG_PCI_FRONT
+#ifndef V3_CONFIG_DEBUG_PCI_FRONT
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
#include <palacios/vmm_dev_mgr.h>
-#ifndef CONFIG_DEBUG_RAMDISK
+#ifndef V3_CONFIG_DEBUG_RAMDISK
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
-#ifndef CONFIG_DEBUG_RTL8139
+#ifndef V3_CONFIG_DEBUG_RTL8139
#undef PrintDebug
#define PrintDebug(fmts, args...)
#endif
v3_gpa_to_hva(&(nic_state->vm->cores[0]), (addr_t)pkt_gpa, &hostva);
pkt = (uchar_t *)hostva;
-#ifdef CONFIG_DEBUG_RTL8139
+#ifdef V3_CONFIG_DEBUG_RTL8139
v3_hexdump(pkt, txsize, NULL, 0);
#endif
#include <devices/serial.h>
-#ifndef CONFIG_DEBUG_SERIAL
+#ifndef V3_CONFIG_DEBUG_SERIAL
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
#include <palacios/vm_guest.h>
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
#include <palacios/vmm_telemetry.h>
#endif
union swap_header * hdr;
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
uint32_t pages_in;
uint32_t pages_out;
#endif
int i = 0;
// Notify the shadow paging layer
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
swap->pages_in += length / 4096;
#endif
if ((swap->active == 1) && (offset != 0)) {
int i = 0;
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
swap->pages_out += length / 4096;
#endif
};
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
static void telemetry_cb(struct v3_vm_info * vm, void * private_data, char * hdr) {
struct vm_device * dev = (struct vm_device *)private_data;
struct swap_state * swap = (struct swap_state *)(dev->private_data);
return -1;
}
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
if (vm->enable_telemetry == 1) {
v3_add_telemetry_cb(vm, telemetry_cb, dev);
}
#include <palacios/vmm_hashtable.h>
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
#include <palacios/vmm_telemetry.h>
#endif
struct v3_dev_blk_ops * ops;
void * private_data;
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
uint32_t pages_in;
uint32_t pages_out;
#endif
swap->unswapped_pages += (length / 4096);
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
swap->pages_in += length / 4096;
#endif
swap->swapped_pages += written_pages;
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
swap->pages_out += length / 4096;
#endif
};
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
static void telemetry_cb(struct v3_vm_info * vm, void * private_data, char * hdr) {
struct swap_state * swap = (struct swap_state *)private_data;
}
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
if (vm->enable_telemetry == 1) {
v3_add_telemetry_cb(vm, telemetry_cb, swap);
#include "vga_regs.h"
-#ifndef CONFIG_DEBUG_VGA
+#ifndef V3_CONFIG_DEBUG_VGA
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
#include <palacios/vmm_sprintf.h>
#include <palacios/vmm_ethernet.h>
-#ifndef CONFIG_DEBUG_VNET_NIC
+#ifndef V3_CONFIG_DEBUG_VNET_NIC
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
/* called by frontend, send pkt to VNET */
static int vnet_nic_send(uint8_t * buf, uint32_t len,
- void * private_data) {
+ int synchronize, void * private_data) {
struct vnet_nic_state * vnetnic = (struct vnet_nic_state *)private_data;
struct v3_vnet_pkt pkt;
memcpy(pkt.header, buf, ETHERNET_HEADER_LEN);
pkt.data = buf;
-#ifdef CONFIG_DEBUG_VNET_NIC
- {
- PrintDebug("VNET-NIC: send pkt (size: %d, src_id: %d, src_type: %d)\n",
+ V3_Net_Print(2, "VNET-NIC: send pkt (size: %d, src_id: %d, src_type: %d)\n",
pkt.size, pkt.src_id, pkt.src_type);
- v3_hexdump(buf, len, NULL, 0);
+ if(v3_net_debug >= 4){
+ v3_hexdump(buf, len, NULL, 0);
}
-#endif
- return v3_vnet_send_pkt(&pkt, NULL);;
+ return v3_vnet_send_pkt(&pkt, NULL, synchronize);
}
void * private_data){
struct vnet_nic_state *vnetnic = (struct vnet_nic_state *)private_data;
- PrintDebug("VNET-NIC: receive pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
+ V3_Net_Print(2, "VNET-NIC: receive pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
pkt->size, pkt->src_id, pkt->src_type, pkt->dst_id, pkt->dst_type);
return vnetnic->net_ops.recv(pkt->data, pkt->size,
vnetnic->net_ops.frontend_data);
}
-/* poll data from front-end */
-static void virtio_poll(struct v3_vm_info * info,
- int budget,
- void * private_data){
- struct vnet_nic_state *vnetnic = (struct vnet_nic_state *)private_data;
-
- vnetnic->net_ops.poll(info, budget, vnetnic->net_ops.frontend_data);
-}
-
static int vnet_nic_free(struct vnet_nic_state * vnetnic) {
static struct v3_vnet_dev_ops vnet_dev_ops = {
.input = virtio_input,
- .poll = virtio_poll,
};
obj-y += null.o
-obj-$(CONFIG_EXT_MTRRS) += ext_mtrr.o
-obj-$(CONFIG_EXT_VTSC) += ext_vtsc.o
-obj-$(CONFIG_EXT_VTIME) += ext_vtime.o
-obj-$(CONFIG_EXT_INSPECTOR) += ext_inspector.o
+obj-$(V3_CONFIG_EXT_MTRRS) += ext_mtrr.o
+obj-$(V3_CONFIG_EXT_VTSC) += ext_vtsc.o
+obj-$(V3_CONFIG_EXT_VTIME) += ext_vtime.o
+obj-$(V3_CONFIG_EXT_INSPECTOR) += ext_inspector.o
-obj-$(CONFIG_SOCKET) += vmm_socket.o
-obj-$(CONFIG_PACKET) += vmm_packet.o
-obj-$(CONFIG_FILE) += vmm_file.o
-obj-$(CONFIG_CONSOLE) += vmm_console.o
-obj-$(CONFIG_STREAM) += vmm_stream.o
-obj-$(CONFIG_GRAPHICS_CONSOLE) += vmm_graphics_console.o
-obj-$(CONFIG_KEYED_STREAMS) += vmm_keyed_stream.o
-obj-$(CONFIG_HOST_DEVICE) += vmm_host_dev.o
+obj-$(V3_CONFIG_SOCKET) += vmm_socket.o
+obj-$(V3_CONFIG_PACKET) += vmm_packet.o
+obj-$(V3_CONFIG_FILE) += vmm_file.o
+obj-$(V3_CONFIG_CONSOLE) += vmm_console.o
+obj-$(V3_CONFIG_STREAM) += vmm_stream.o
+obj-$(V3_CONFIG_GRAPHICS_CONSOLE) += vmm_graphics_console.o
+obj-$(V3_CONFIG_KEYED_STREAMS) += vmm_keyed_stream.o
+obj-$(V3_CONFIG_HOST_DEVICE) += vmm_host_dev.o
vmm_cpuid.o \
vmm_xml.o \
vmm_mem_hook.o \
- vmm_mptable.o \
vmm_extensions.o \
vmm_mtrr.o \
vmm_multitree.o \
-obj-$(CONFIG_XED) += vmm_xed.o
-obj-$(CONFIG_V3_DECODER) += vmm_v3dec.o
+obj-$(V3_CONFIG_XED) += vmm_xed.o
+obj-$(V3_CONFIG_V3_DECODER) += vmm_v3dec.o
-obj-$(CONFIG_SVM) += svm.o \
- svm_io.o \
- svm_lowlevel.o \
- svm_msr.o \
- svm_pause.o \
- svm_wbinvd.o \
- svm_handler.o \
- vmcb.o
+obj-$(V3_CONFIG_SVM) += svm.o \
+ svm_io.o \
+ svm_lowlevel.o \
+ svm_msr.o \
+ svm_pause.o \
+ svm_wbinvd.o \
+ svm_handler.o \
+ vmcb.o
-obj-$(CONFIG_VMX) += vmx.o \
- vmx_handler.o \
- vmx_io.o \
- vmx_lowlevel.o \
- vmx_msr.o \
- vmx_hw_info.o \
- vmcs.o \
- vmx_ctrl_regs.o \
- vmx_assist.o
+obj-$(V3_CONFIG_VMX) += vmx.o \
+ vmx_handler.o \
+ vmx_io.o \
+ vmx_lowlevel.o \
+ vmx_msr.o \
+ vmx_hw_info.o \
+ vmcs.o \
+ vmx_ctrl_regs.o \
+ vmx_assist.o \
+ vmx_ept.o
-obj-$(CONFIG_INSTRUMENT_VMM) += vmm_instrument.o
-obj-$(CONFIG_TELEMETRY) += vmm_telemetry.o
+obj-$(V3_CONFIG_TELEMETRY) += vmm_telemetry.o
-obj-$(CONFIG_VNET) += vmm_vnet_core.o
+obj-$(V3_CONFIG_VNET) += vmm_vnet_core.o
-obj-$(CONFIG_SYMBIOTIC) += vmm_symbiotic.o vmm_symspy.o
-obj-$(CONFIG_SYMCALL) += vmm_symcall.o
-obj-$(CONFIG_SYMMOD) += vmm_symmod.o
+obj-$(V3_CONFIG_SYMBIOTIC) += vmm_symbiotic.o vmm_symspy.o
+obj-$(V3_CONFIG_SYMCALL) += vmm_symcall.o
+obj-$(V3_CONFIG_SYMMOD) += vmm_symmod.o
obj-y += mmu/
-obj-$(CONFIG_SHADOW_PAGING_VTLB) += vmm_shdw_pg_tlb.o
-obj-$(CONFIG_SWAPBYPASS) += vmm_shdw_pg_swapbypass.o
-obj-$(CONFIG_SHADOW_PAGING_CACHE1) += vmm_shdw_pg_cache.o
+obj-$(V3_CONFIG_SHADOW_PAGING_VTLB) += vmm_shdw_pg_tlb.o
+obj-$(V3_CONFIG_SWAPBYPASS) += vmm_shdw_pg_swapbypass.o
+obj-$(V3_CONFIG_SHADOW_PAGING_CACHE1) += vmm_shdw_pg_cache.o
#define V3_CACHED_PG 0x1
-#ifndef CONFIG_DEBUG_SHDW_PG_CACHE
+#ifndef V3_CONFIG_DEBUG_SHDW_PG_CACHE
#undef PrintDebug
#define PrintDebug(fmt, ...)
#endif
#include <palacios/vmm_paging.h>
-#ifndef CONFIG_DEBUG_SHDW_CACHE
+#ifndef V3_CONFIG_DEBUG_SHDW_CACHE
#undef PrintDebug
#define PrintDebug(fmt, ...)
#endif
-#ifdef CONFIG_SHADOW_CACHE
+#ifdef V3_CONFIG_SHADOW_CACHE
struct pde_chain {
addr_t shadow_pdes[NR_PTE_CHAIN_ENTRIES];
-#ifdef CONFIG_SHADOW_CACHE
+#ifdef V3_CONFIG_SHADOW_CACHE
static inline int activate_shadow_pt_32(struct guest_info * core) {
struct cr3_32 * shadow_cr3 = (struct cr3_32 *)&(core->ctrl_regs.cr3);
-#ifdef CONFIG_SHADOW_CACHE
+#ifdef V3_CONFIG_SHADOW_CACHE
static inline int activate_shadow_pt_32pae(struct guest_info * info) {
PrintError("Activating 32 bit PAE page tables not implemented\n");
-#ifdef CONFIG_SHADOW_CACHE
+#ifdef V3_CONFIG_SHADOW_CACHE
#define PT64_NX_MASK (1ULL << 63)
//#define SHOW_ALL
#include <palacios/vmm_hashtable.h>
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
#include <palacios/vmm_telemetry.h>
#endif
struct swapbypass_vm_state {
struct v3_swap_dev devs[256];
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
uint32_t read_faults;
uint32_t write_faults;
uint32_t flushes;
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
static void telemetry_cb(struct v3_vm_info * vm, void * private_data, char * hdr) {
struct swapbypass_vm_state * swap_state = (struct swapbypass_vm_state *)(vm->shdw_impl.impl_data);
if (shdw_ptr_list == NULL) {
shdw_ptr_list = (struct list_head *)V3_Malloc(sizeof(struct list_head));
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
swap_state->list_size++;
#endif
INIT_LIST_HEAD(shdw_ptr_list);
if (shdw_ptr == NULL) {
PrintError("MEMORY LEAK\n");
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
telemetry_cb(vm, NULL, "");
#endif
return 0;
// PrintDebug("Flushing Symbiotic Swap table\n");
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
swap_state->flushes++;
#endif
memset(sb_state, 0, sizeof(struct swapbypass_vm_state));
sb_state->shdw_ptr_ht = v3_create_htable(0, swap_hash_fn, swap_eq_fn);
-#ifdef CONFIG_SWAPBYPASS_TELEMETRY
+#ifdef V3_CONFIG_SWAPBYPASS_TELEMETRY
if (vm->enable_telemetry) {
v3_add_telemetry_cb(vm, telemetry_cb, NULL);
}
-#ifdef CONFIG_SYMBIOTIC_SWAP_TELEMETRY
+#ifdef V3_CONFIG_SYMBIOTIC_SWAP_TELEMETRY
if (error_code.write == 0) {
info->vm_info->swap_state.read_faults++;
} else {
shadow_pte->page_base_addr = swp_pg_pa;
-#ifdef CONFIG_SYMBIOTIC_SWAP_TELEMETRY
+#ifdef V3_CONFIG_SYMBIOTIC_SWAP_TELEMETRY
info->vm_info->swap_state.mapped_pages++;
#endif
// PrintError("Swap fault handled\n");
#include <palacios/vm_guest_mem.h>
-#ifndef CONFIG_DEBUG_SHDW_PG_VTLB
+#ifndef V3_CONFIG_DEBUG_SHDW_PG_VTLB
#undef PrintDebug
#define PrintDebug(fmt, ...)
#endif
#include <palacios/vmm_sprintf.h>
-#ifndef CONFIG_DEBUG_SVM
+#ifndef V3_CONFIG_DEBUG_SVM
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
uint32_t v3_last_exit;
// This is a global pointer to the host's VMCB
-static addr_t host_vmcbs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
+static addr_t host_vmcbs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};
ctrl_area->instrs.HLT = 1;
-#ifdef CONFIG_TIME_VIRTUALIZE_TSC
+#ifdef V3_CONFIG_TIME_VIRTUALIZE_TSC
ctrl_area->instrs.RDTSC = 1;
ctrl_area->svm_instrs.RDTSCP = 1;
#endif
if ((info->intr_core_state.irq_pending == 1) && (guest_ctrl->guest_ctrl.V_IRQ == 0)) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
PrintDebug("INTAK cycle completed for irq %d\n", info->intr_core_state.irq_vector);
#endif
}
if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 0)) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
PrintDebug("Interrupt %d taken by guest\n", info->intr_core_state.irq_vector);
#endif
info->intr_core_state.irq_started = 0;
} else if ((info->intr_core_state.irq_started == 1) && (guest_ctrl->exit_int_info.valid == 1)) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
PrintDebug("EXIT INT INFO is set (vec=%d)\n", guest_ctrl->exit_int_info.vector);
#endif
}
if (info->excp_state.excp_error_code_valid) {
guest_ctrl->EVENTINJ.error_code = info->excp_state.excp_error_code;
guest_ctrl->EVENTINJ.ev = 1;
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
PrintDebug("Injecting exception %d with error code %x\n", excp, guest_ctrl->EVENTINJ.error_code);
#endif
}
guest_ctrl->EVENTINJ.valid = 1;
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
PrintDebug("<%d> Injecting Exception %d (CR2=%p) (EIP=%p)\n",
(int)info->num_exits,
guest_ctrl->EVENTINJ.vector,
v3_injecting_excp(info, excp);
} else if (info->intr_core_state.irq_started == 1) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
PrintDebug("IRQ pending from previous injection\n");
#endif
guest_ctrl->guest_ctrl.V_IRQ = 1;
guest_ctrl->guest_ctrl.V_IGN_TPR = 1;
guest_ctrl->guest_ctrl.V_INTR_PRIO = 0xf;
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
PrintDebug("Injecting Interrupt %d (EIP=%p)\n",
guest_ctrl->guest_ctrl.V_INTR_VECTOR,
(void *)(addr_t)info->rip);
guest_state->rip = info->rip;
guest_state->rsp = info->vm_regs.rsp;
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
if (info->sym_core_state.symcall_state.sym_call_active == 0) {
update_irq_entry_state(info);
}
(void *)(addr_t)info->rip);
*/
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
if (info->sym_core_state.symcall_state.sym_call_active == 1) {
if (guest_ctrl->guest_ctrl.V_IRQ == 1) {
V3_Print("!!! Injecting Interrupt during Sym call !!!\n");
exit_info2 = guest_ctrl->exit_info2;
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
if (info->sym_core_state.symcall_state.sym_call_active == 0) {
update_irq_exit_state(info);
}
#include <palacios/vmm_cpuid.h>
#include <palacios/vmm_direct_paging.h>
-#ifndef CONFIG_DEBUG_SVM
+#ifndef V3_CONFIG_DEBUG_SVM
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
-#ifdef CONFIG_TELEMETRY
+#ifdef V3_CONFIG_TELEMETRY
#include <palacios/vmm_telemetry.h>
#endif
int v3_handle_svm_exit(struct guest_info * info, addr_t exit_code, addr_t exit_info1, addr_t exit_info2) {
-#ifdef CONFIG_TELEMETRY
+#ifdef V3_CONFIG_TELEMETRY
if (info->vm_info->enable_telemetry) {
v3_telemetry_start_exit(info);
}
break;
case VMEXIT_CR0_WRITE:
-#ifdef CONFIG_DEBUG_CTRL_REGS
+#ifdef V3_CONFIG_DEBUG_CTRL_REGS
PrintDebug("CR0 Write\n");
#endif
if (v3_handle_cr0_write(info) == -1) {
}
break;
case VMEXIT_CR0_READ:
-#ifdef CONFIG_DEBUG_CTRL_REGS
+#ifdef V3_CONFIG_DEBUG_CTRL_REGS
PrintDebug("CR0 Read\n");
#endif
if (v3_handle_cr0_read(info) == -1) {
}
break;
case VMEXIT_CR3_WRITE:
-#ifdef CONFIG_DEBUG_CTRL_REGS
+#ifdef V3_CONFIG_DEBUG_CTRL_REGS
PrintDebug("CR3 Write\n");
#endif
if (v3_handle_cr3_write(info) == -1) {
break;
case VMEXIT_CR3_READ:
-#ifdef CONFIG_DEBUG_CTRL_REGS
+#ifdef V3_CONFIG_DEBUG_CTRL_REGS
PrintDebug("CR3 Read\n");
#endif
if (v3_handle_cr3_read(info) == -1) {
}
break;
case VMEXIT_CR4_WRITE:
-#ifdef CONFIG_DEBUG_CTRL_REGS
+#ifdef V3_CONFIG_DEBUG_CTRL_REGS
PrintDebug("CR4 Write\n");
#endif
if (v3_handle_cr4_write(info) == -1) {
}
break;
case VMEXIT_CR4_READ:
-#ifdef CONFIG_DEBUG_CTRL_REGS
+#ifdef V3_CONFIG_DEBUG_CTRL_REGS
PrintDebug("CR4 Read\n");
#endif
if (v3_handle_cr4_read(info) == -1) {
case VMEXIT_EXCP14: {
addr_t fault_addr = exit_info2;
pf_error_t * error_code = (pf_error_t *)&(exit_info1);
-#ifdef CONFIG_DEBUG_SHADOW_PAGING
+#ifdef V3_CONFIG_DEBUG_SHADOW_PAGING
PrintDebug("PageFault at %p (error=%d)\n",
(void *)fault_addr, *(uint_t *)error_code);
#endif
}
case VMEXIT_INVLPG:
if (info->shdw_pg_mode == SHADOW_PAGING) {
-#ifdef CONFIG_DEBUG_SHADOW_PAGING
+#ifdef V3_CONFIG_DEBUG_SHADOW_PAGING
PrintDebug("Invlpg\n");
#endif
if (v3_handle_shadow_invlpg(info) == -1) {
// handle_svm_smi(info); // ignored for now
break;
case VMEXIT_HLT:
-#ifdef CONFIG_DEBUG_HALT
+#ifdef V3_CONFIG_DEBUG_HALT
PrintDebug("Guest halted\n");
#endif
if (v3_handle_halt(info) == -1) {
}
break;
case VMEXIT_WBINVD:
-#ifdef CONFIG_DEBUG_EMULATOR
+#ifdef V3_CONFIG_DEBUG_EMULATOR
PrintDebug("WBINVD\n");
#endif
if (v3_handle_svm_wbinvd(info) == -1) {
}
break;
case VMEXIT_RDTSC:
-#ifdef CONFIG_DEBUG_TIME
+#ifdef V3_CONFIG_DEBUG_TIME
PrintDebug("RDTSC/RDTSCP\n");
#endif
if (v3_handle_rdtsc(info) == -1) {
}
break;
case VMEXIT_RDTSCP:
-#ifdef CONFIG_DEBUG_TIME
+#ifdef V3_CONFIG_DEBUG_TIME
PrintDebug("RDTSCP\n");
#endif
if (v3_handle_rdtscp(info) == -1) {
}
// END OF SWITCH (EXIT_CODE)
-#ifdef CONFIG_TELEMETRY
+#ifdef V3_CONFIG_TELEMETRY
if (info->vm_info->enable_telemetry) {
v3_telemetry_end_exit(info, exit_code);
}
#include <palacios/vmm_decoder.h>
#include <palacios/vm_guest_mem.h>
-#ifndef CONFIG_DEBUG_IO
+#ifndef V3_CONFIG_DEBUG_IO
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
v3_print_guest_state(core);
// init SVM/VMX
-#ifdef CONFIG_SVM
+#ifdef V3_CONFIG_SVM
if ((cpu_type == V3_SVM_CPU) || (cpu_type == V3_SVM_REV3_CPU)) {
cpu_valid = 1;
PrintDebugVMCB((vmcb_t *)(core->vmm_data));
}
#endif
-#ifdef CONFIG_VMX
- if ((cpu_type == V3_VMX_CPU) || (cpu_type == V3_VMX_EPT_CPU)) {
+#ifdef V3_CONFIG_VMX
+ if ((cpu_type == V3_VMX_CPU) || (cpu_type == V3_VMX_EPT_CPU) || (cpu_type == V3_VMX_EPT_UG_CPU)) {
cpu_valid = 1;
v3_print_vmcs();
}
}
-#ifdef CONFIG_SVM
+#ifdef V3_CONFIG_SVM
#include <palacios/svm.h>
#include <palacios/svm_io.h>
#include <palacios/svm_msr.h>
#endif
-#ifdef CONFIG_VMX
+#ifdef V3_CONFIG_VMX
#include <palacios/vmx.h>
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>
-#ifdef CONFIG_TELEMETRY
+#ifdef V3_CONFIG_TELEMETRY
v3_init_telemetry(vm);
#endif
v3_init_time_vm(vm);
-#ifdef CONFIG_SYMBIOTIC
+#ifdef V3_CONFIG_SYMBIOTIC
v3_init_symbiotic_vm(vm);
#endif
// init SVM/VMX
switch (cpu_type) {
-#ifdef CONFIG_SVM
+#ifdef V3_CONFIG_SVM
case V3_SVM_CPU:
case V3_SVM_REV3_CPU:
v3_init_svm_io_map(vm);
v3_init_svm_msr_map(vm);
break;
#endif
-#ifdef CONFIG_VMX
+#ifdef V3_CONFIG_VMX
case V3_VMX_CPU:
case V3_VMX_EPT_CPU:
+ case V3_VMX_EPT_UG_CPU:
v3_init_vmx_io_map(vm);
v3_init_vmx_msr_map(vm);
break;
-#ifdef CONFIG_SYMBIOTIC
+#ifdef V3_CONFIG_SYMBIOTIC
v3_deinit_symbiotic_vm(vm);
#endif
// init SVM/VMX
switch (cpu_type) {
-#ifdef CONFIG_SVM
+#ifdef V3_CONFIG_SVM
case V3_SVM_CPU:
case V3_SVM_REV3_CPU:
v3_deinit_svm_io_map(vm);
v3_deinit_svm_msr_map(vm);
break;
#endif
-#ifdef CONFIG_VMX
+#ifdef V3_CONFIG_VMX
case V3_VMX_CPU:
case V3_VMX_EPT_CPU:
+ case V3_VMX_EPT_UG_CPU:
v3_deinit_vmx_io_map(vm);
v3_deinit_vmx_msr_map(vm);
break;
v3_deinit_io_map(vm);
v3_deinit_hypercall_map(vm);
-#ifdef CONFIG_TELEMETRY
+#ifdef V3_CONFIG_TELEMETRY
v3_deinit_telemetry(vm);
#endif
/*
* Initialize the subsystem data strutures
*/
-#ifdef CONFIG_TELEMETRY
+#ifdef V3_CONFIG_TELEMETRY
v3_init_core_telemetry(core);
#endif
v3_init_decoder(core);
-#ifdef CONFIG_SYMBIOTIC
+#ifdef V3_CONFIG_SYMBIOTIC
v3_init_symbiotic_core(core);
#endif
switch (cpu_type) {
-#ifdef CONFIG_SVM
+#ifdef V3_CONFIG_SVM
case V3_SVM_CPU:
case V3_SVM_REV3_CPU:
if (v3_init_svm_vmcb(core, vm->vm_class) == -1) {
}
break;
#endif
-#ifdef CONFIG_VMX
+#ifdef V3_CONFIG_VMX
case V3_VMX_CPU:
case V3_VMX_EPT_CPU:
+ case V3_VMX_EPT_UG_CPU:
if (v3_init_vmx_vmcs(core, vm->vm_class) == -1) {
PrintError("Error in VMX initialization\n");
return -1;
v3_cpu_arch_t cpu_type = v3_get_cpu_type(V3_Get_CPU());
-#ifdef CONFIG_SYMBIOTIC
+#ifdef V3_CONFIG_SYMBIOTIC
v3_deinit_symbiotic_core(core);
#endif
v3_free_passthrough_pts(core);
-#ifdef CONFIG_TELEMETRY
+#ifdef V3_CONFIG_TELEMETRY
v3_deinit_core_telemetry(core);
#endif
switch (cpu_type) {
-#ifdef CONFIG_SVM
+#ifdef V3_CONFIG_SVM
case V3_SVM_CPU:
case V3_SVM_REV3_CPU:
if (v3_deinit_svm_vmcb(core) == -1) {
}
break;
#endif
-#ifdef CONFIG_VMX
+#ifdef V3_CONFIG_VMX
case V3_VMX_CPU:
case V3_VMX_EPT_CPU:
+ case V3_VMX_EPT_UG_CPU:
if (v3_deinit_vmx_vmcs(core) == -1) {
PrintError("Error in VMX initialization\n");
return -1;
vmx_ret |= check_vmcs_write(VMCS_EXIT_CTRLS, arch_data->exit_ctrls.value);
vmx_ret |= check_vmcs_write(VMCS_ENTRY_CTRLS, arch_data->entry_ctrls.value);
+ vmx_ret |= check_vmcs_write(VMCS_EXCP_BITMAP, arch_data->excp_bmap.value);
+
+ if (info->shdw_pg_mode == NESTED_PAGING) {
+ vmx_ret |= check_vmcs_write(VMCS_EPT_PTR, info->direct_map_pt);
+ }
return vmx_ret;
}
check_vmcs_read(VMCS_GUEST_DR7, &(info->dbg_regs.dr7));
check_vmcs_read(VMCS_GUEST_RFLAGS, &(info->ctrl_regs.rflags));
- if (((struct vmx_data *)info->vmm_data)->ia32e_avail) {
+
#ifdef __V3_64BIT__
- check_vmcs_read(VMCS_GUEST_EFER, &(info->ctrl_regs.efer));
-#else
- uint32_t hi, lo;
- check_vmcs_read(VMCS_GUEST_EFER, &hi);
- check_vmcs_read(VMCS_GUEST_EFER_HIGH, &lo);
- info->ctrl_regs.efer = ((uint64_t) hi << 32) | lo;
+ check_vmcs_read(VMCS_GUEST_EFER, &(info->ctrl_regs.efer));
#endif
- }
-
+
error = v3_read_vmcs_segments(&(info->segments));
return error;
check_vmcs_write(VMCS_GUEST_RFLAGS, info->ctrl_regs.rflags);
- if (((struct vmx_data *)info->vmm_data)->ia32e_avail) {
- check_vmcs_write(VMCS_GUEST_EFER, info->ctrl_regs.efer);
- }
+#ifdef __V3_64BIT__
+ check_vmcs_write(VMCS_GUEST_EFER, info->ctrl_regs.efer);
+#endif
+
+
+
error = v3_write_vmcs_segments(&(info->segments));
vmx_ret |= check_vmcs_write(VMCS_HOST_IDTR_BASE, arch_data->host_state.idtr.base);
vmx_ret |= check_vmcs_write(VMCS_HOST_TR_BASE, arch_data->host_state.tr.base);
-#define FS_BASE_MSR 0xc0000100
-#define GS_BASE_MSR 0xc0000101
-
- // FS.BASE MSR
- v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);
-
- // GS.BASE MSR
- v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);
#define SYSENTER_CS_MSR 0x00000174
#define SYSENTER_ESP_MSR 0x00000175
#define SYSENTER_EIP_MSR 0x00000176
+#define FS_BASE_MSR 0xc0000100
+#define GS_BASE_MSR 0xc0000101
+#define EFER_MSR 0xc0000080
- // SYSENTER CS MSR
+
+ // SYSENTER CS MSR
v3_get_msr(SYSENTER_CS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_CS, tmp_msr.lo);
v3_get_msr(SYSENTER_EIP_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
vmx_ret |= check_vmcs_write(VMCS_HOST_SYSENTER_EIP, tmp_msr.value);
+
+ // FS.BASE MSR
+ v3_get_msr(FS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_FS_BASE, tmp_msr.value);
+
+ // GS.BASE MSR
+ v3_get_msr(GS_BASE_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_GS_BASE, tmp_msr.value);
+
+
+ // EFER
+ v3_get_msr(EFER_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_HOST_EFER, tmp_msr.value);
+
+ // PERF GLOBAL CONTROL
+
+ // PAT
+
+
+ // save STAR, LSTAR, FMASK, KERNEL_GS_BASE MSRs in MSR load/store area
+
+
+
+
+
+
+
return vmx_ret;
}
print_vmcs_field(VMCS_GUEST_CR4);
print_vmcs_field(VMCS_GUEST_DR7);
+ // if save IA32_EFER
+ print_vmcs_field(VMCS_GUEST_EFER);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_GUEST_EFER_HIGH);
+#endif
+
PrintDebug("\n");
print_vmcs_field(VMCS_GUEST_SYSENTER_ESP);
print_vmcs_field(VMCS_GUEST_SYSENTER_EIP);
+
+ // if save IA32_PAT
+ print_vmcs_field(VMCS_GUEST_PAT);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_GUEST_PAT_HIGH);
+#endif
+
+ //if load IA32_PERF_GLOBAL_CTRL
print_vmcs_field(VMCS_GUEST_PERF_GLOBAL_CTRL);
#ifdef __V3_32BIT__
print_vmcs_field(VMCS_GUEST_PERF_GLOBAL_CTRL_HIGH);
print_vmcs_field(VMCS_GUEST_SMBASE);
+
+
PrintDebug("GUEST_NON_REGISTER_STATE\n");
print_vmcs_field(VMCS_GUEST_ACTIVITY_STATE);
print_vmcs_field(VMCS_GUEST_INT_STATE);
print_vmcs_field(VMCS_GUEST_PENDING_DBG_EXCP);
+ // if VMX preempt timer
+ print_vmcs_field(VMCS_PREEMPT_TIMER);
+
}
static void print_host_state()
print_vmcs_field(VMCS_HOST_CR3);
print_vmcs_field(VMCS_HOST_CR4);
+
+
+ // if load IA32_EFER
+ print_vmcs_field(VMCS_HOST_EFER);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_HOST_EFER_HIGH);
+#endif
+
+
PrintDebug("\n");
print_vmcs_field(VMCS_HOST_CS_SELECTOR);
print_vmcs_field(VMCS_HOST_SS_SELECTOR);
print_vmcs_field(VMCS_HOST_SYSENTER_ESP);
print_vmcs_field(VMCS_HOST_SYSENTER_EIP);
+
+ // if load IA32_PAT
+ print_vmcs_field(VMCS_HOST_PAT);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_HOST_PAT_HIGH);
+#endif
+
+ // if load IA32_PERF_GLOBAL_CTRL
print_vmcs_field(VMCS_HOST_PERF_GLOBAL_CTRL);
#ifdef __V3_32BIT__
print_vmcs_field(VMCS_HOST_PERF_GLOBAL_CTRL_HIGH);
PrintDebug("VMCS_EXEC_CTRL_FIELDS\n");
print_vmcs_field(VMCS_PIN_CTRLS);
print_vmcs_field(VMCS_PROC_CTRLS);
+
+ // if activate secondary controls
print_vmcs_field(VMCS_SEC_PROC_CTRLS);
print_vmcs_field(VMCS_EXCP_BITMAP);
print_vmcs_field(VMCS_CR3_TGT_VAL_2);
print_vmcs_field(VMCS_CR3_TGT_VAL_3);
+ // Check max number of CR3 targets... may continue...
+
+
PrintDebug("\n");
+ // if virtualize apic accesses
print_vmcs_field(VMCS_APIC_ACCESS_ADDR);
#ifdef __V3_32BIT__
print_vmcs_field(VMCS_APIC_ACCESS_ADDR_HIGH);
#endif
+ // if use tpr shadow
print_vmcs_field(VMCS_VAPIC_ADDR);
#ifdef __V3_32BIT__
print_vmcs_field(VMCS_VAPIC_ADDR_HIGH);
#endif
+ // if use tpr shadow
print_vmcs_field(VMCS_TPR_THRESHOLD);
+
+ // if use MSR bitmaps
print_vmcs_field(VMCS_MSR_BITMAP);
#ifdef __V3_32BIT__
print_vmcs_field(VMCS_MSR_BITMAP_HIGH);
#ifdef __V3_32BIT__
print_vmcs_field(VMCS_EXEC_PTR_HIGH);
#endif
+
+
+}
+
+/* Dump the EPT/VPID-related VMCS fields: the VPID, the EPT pointer,
+ * the guest-physical address recorded on the last EPT violation, and
+ * the four guest PDPTE shadow registers (used for PAE guests).
+ * On 32-bit hosts each 64-bit field is read as low/high halves. */
+static void print_ept_state() {
+ V3_Print("VMCS EPT INFO\n");
+
+ // if enable vpid
+ print_vmcs_field(VMCS_VPID);
+
+ print_vmcs_field(VMCS_EPT_PTR);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_EPT_PTR_HIGH);
+#endif
+
+ print_vmcs_field(VMCS_GUEST_PHYS_ADDR);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_GUEST_PHYS_ADDR_HIGH);
+#endif
+
+
+
+ print_vmcs_field(VMCS_GUEST_PDPTE0);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_GUEST_PDPTE0_HIGH);
+#endif
+
+ print_vmcs_field(VMCS_GUEST_PDPTE1);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_GUEST_PDPTE1_HIGH);
+#endif
+
+ print_vmcs_field(VMCS_GUEST_PDPTE2);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_GUEST_PDPTE2_HIGH);
+#endif
+
+ print_vmcs_field(VMCS_GUEST_PDPTE3);
+#ifdef __V3_32BIT__
+ print_vmcs_field(VMCS_GUEST_PDPTE3_HIGH);
+#endif
+
+
+
}
print_vmcs_field(VMCS_EXIT_MSR_LOAD_ADDR_HIGH);
#endif
+
+ // if pause loop exiting
+ print_vmcs_field(VMCS_PLE_GAP);
+ print_vmcs_field(VMCS_PLE_WINDOW);
+
}
print_guest_state();
print_host_state();
+ print_ept_state();
+
print_exec_ctrls();
print_exit_ctrls();
print_entry_ctrls();
print_exit_info();
-
-
-
}
if (enc->access_type == 1) {
return 4;
} else {
-#ifdef __V3_64BIT__
- return 8;
-#else
- return 4;
-#endif
+ return sizeof(addr_t);
}
}
case 2:
-
+static const char VMCS_VPID_STR[] = "VPID";
static const char VMCS_GUEST_ES_SELECTOR_STR[] = "GUEST_ES_SELECTOR";
static const char VMCS_GUEST_CS_SELECTOR_STR[] = "GUEST_CS_SELECTOR";
static const char VMCS_GUEST_SS_SELECTOR_STR[] = "GUEST_SS_SELECTOR";
static const char VMCS_VAPIC_ADDR_HIGH_STR[] = "VAPIC_PAGE_ADDR_HIGH";
static const char VMCS_APIC_ACCESS_ADDR_STR[] = "APIC_ACCESS_ADDR";
static const char VMCS_APIC_ACCESS_ADDR_HIGH_STR[] = "APIC_ACCESS_ADDR_HIGH";
+static const char VMCS_EPT_PTR_STR[] = "VMCS_EPT_PTR";
+static const char VMCS_EPT_PTR_HIGH_STR[] = "VMCS_EPT_PTR_HIGH";
+static const char VMCS_GUEST_PHYS_ADDR_STR[] = "VMCS_GUEST_PHYS_ADDR";
+static const char VMCS_GUEST_PHYS_ADDR_HIGH_STR[] = "VMCS_GUEST_PHYS_ADDR_HIGH";
static const char VMCS_LINK_PTR_STR[] = "VMCS_LINK_PTR";
static const char VMCS_LINK_PTR_HIGH_STR[] = "VMCS_LINK_PTR_HIGH";
static const char VMCS_GUEST_DBG_CTL_STR[] = "GUEST_DEBUG_CTL";
static const char VMCS_GUEST_DBG_CTL_HIGH_STR[] = "GUEST_DEBUG_CTL_HIGH";
+static const char VMCS_GUEST_PAT_STR[] = "GUEST_PAT";
+static const char VMCS_GUEST_PAT_HIGH_STR[] = "GUEST_PAT_HIGH";
+static const char VMCS_GUEST_EFER_STR[] = "GUEST_EFER";
+static const char VMCS_GUEST_EFER_HIGH_STR[] = "GUEST_EFER_HIGH";
static const char VMCS_GUEST_PERF_GLOBAL_CTRL_STR[] = "GUEST_PERF_GLOBAL_CTRL";
static const char VMCS_GUEST_PERF_GLOBAL_CTRL_HIGH_STR[] = "GUEST_PERF_GLOBAL_CTRL_HIGH";
+static const char VMCS_GUEST_PDPTE0_STR[] = "GUEST_PDPTE0";
+static const char VMCS_GUEST_PDPTE0_HIGH_STR[] = "GUEST_PDPTE0_HIGH";
+static const char VMCS_GUEST_PDPTE1_STR[] = "GUEST_PDPTE1";
+static const char VMCS_GUEST_PDPTE1_HIGH_STR[] = "GUEST_PDPTE1_HIGH";
+static const char VMCS_GUEST_PDPTE2_STR[] = "GUEST_PDPTE2";
+static const char VMCS_GUEST_PDPTE2_HIGH_STR[] = "GUEST_PDPTE2_HIGH";
+static const char VMCS_GUEST_PDPTE3_STR[] = "GUEST_PDPTE3";
+static const char VMCS_GUEST_PDPTE3_HIGH_STR[] = "GUEST_PDPTE3_HIGH";
+static const char VMCS_HOST_PAT_STR[] = "HOST_PAT";
+static const char VMCS_HOST_PAT_HIGH_STR[] = "HOST_PAT_HIGH";
+static const char VMCS_HOST_EFER_STR[] = "VMCS_HOST_EFER";
+static const char VMCS_HOST_EFER_HIGH_STR[] = "VMCS_HOST_EFER_HIGH";
static const char VMCS_HOST_PERF_GLOBAL_CTRL_STR[] = "HOST_PERF_GLOBAL_CTRL";
static const char VMCS_HOST_PERF_GLOBAL_CTRL_HIGH_STR[] = "HOST_PERF_GLOBAL_CTRL_HIGH";
static const char VMCS_PIN_CTRLS_STR[] = "PIN_VM_EXEC_CTRLS";
static const char VMCS_ENTRY_INSTR_LEN_STR[] = "VM_ENTRY_INSTR_LENGTH";
static const char VMCS_TPR_THRESHOLD_STR[] = "TPR_THRESHOLD";
static const char VMCS_SEC_PROC_CTRLS_STR[] = "VMCS_SEC_PROC_CTRLS";
+static const char VMCS_PLE_GAP_STR[] = "PLE_GAP";
+static const char VMCS_PLE_WINDOW_STR[] = "PLE_WINDOW";
static const char VMCS_INSTR_ERR_STR[] = "VM_INSTR_ERROR";
static const char VMCS_EXIT_REASON_STR[] = "EXIT_REASON";
static const char VMCS_EXIT_INT_INFO_STR[] = "VM_EXIT_INT_INFO";
static const char VMCS_GUEST_ACTIVITY_STATE_STR[] = "GUEST_ACTIVITY_STATE";
static const char VMCS_GUEST_SMBASE_STR[] = "GUEST_SMBASE";
static const char VMCS_GUEST_SYSENTER_CS_STR[] = "GUEST_SYSENTER_CS";
+static const char VMCS_PREEMPT_TIMER_STR[] = "PREEMPT_TIMER";
static const char VMCS_HOST_SYSENTER_CS_STR[] = "HOST_SYSENTER_CS";
static const char VMCS_CR0_MASK_STR[] = "CR0_GUEST_HOST_MASK";
static const char VMCS_CR4_MASK_STR[] = "CR4_GUEST_HOST_MASK";
const char * v3_vmcs_field_to_str(vmcs_field_t field) {
switch (field) {
+ case VMCS_VPID:
+ return VMCS_VPID_STR;
case VMCS_GUEST_ES_SELECTOR:
return VMCS_GUEST_ES_SELECTOR_STR;
case VMCS_GUEST_CS_SELECTOR:
return VMCS_APIC_ACCESS_ADDR_STR;
case VMCS_APIC_ACCESS_ADDR_HIGH:
return VMCS_APIC_ACCESS_ADDR_HIGH_STR;
+ case VMCS_EPT_PTR:
+ return VMCS_EPT_PTR_STR;
+ case VMCS_EPT_PTR_HIGH:
+ return VMCS_EPT_PTR_HIGH_STR;
+ case VMCS_GUEST_PHYS_ADDR:
+ return VMCS_GUEST_PHYS_ADDR_STR;
+ case VMCS_GUEST_PHYS_ADDR_HIGH:
+ return VMCS_GUEST_PHYS_ADDR_HIGH_STR;
case VMCS_LINK_PTR:
return VMCS_LINK_PTR_STR;
case VMCS_LINK_PTR_HIGH:
return VMCS_GUEST_DBG_CTL_STR;
case VMCS_GUEST_DBG_CTL_HIGH:
return VMCS_GUEST_DBG_CTL_HIGH_STR;
- case VMCS_GUEST_PERF_GLOBAL_CTRL:
+ case VMCS_GUEST_PAT:
+ return VMCS_GUEST_PAT_STR;
+ case VMCS_GUEST_PAT_HIGH:
+ return VMCS_GUEST_PAT_HIGH_STR;
+ case VMCS_GUEST_EFER:
+ return VMCS_GUEST_EFER_STR;
+ case VMCS_GUEST_EFER_HIGH:
+ return VMCS_GUEST_EFER_HIGH_STR;
+ case VMCS_GUEST_PERF_GLOBAL_CTRL:
return VMCS_GUEST_PERF_GLOBAL_CTRL_STR;
case VMCS_GUEST_PERF_GLOBAL_CTRL_HIGH:
return VMCS_GUEST_PERF_GLOBAL_CTRL_HIGH_STR;
+ case VMCS_GUEST_PDPTE0:
+ return VMCS_GUEST_PDPTE0_STR;
+ case VMCS_GUEST_PDPTE0_HIGH:
+ return VMCS_GUEST_PDPTE0_HIGH_STR;
+ case VMCS_GUEST_PDPTE1:
+ return VMCS_GUEST_PDPTE1_STR;
+ case VMCS_GUEST_PDPTE1_HIGH:
+ return VMCS_GUEST_PDPTE1_HIGH_STR;
+ case VMCS_GUEST_PDPTE2:
+ return VMCS_GUEST_PDPTE2_STR;
+ case VMCS_GUEST_PDPTE2_HIGH:
+ return VMCS_GUEST_PDPTE2_HIGH_STR;
+ case VMCS_GUEST_PDPTE3:
+ return VMCS_GUEST_PDPTE3_STR;
+ case VMCS_GUEST_PDPTE3_HIGH:
+ return VMCS_GUEST_PDPTE3_HIGH_STR;
+ case VMCS_HOST_PAT:
+ return VMCS_HOST_PAT_STR;
+ case VMCS_HOST_PAT_HIGH:
+ return VMCS_HOST_PAT_HIGH_STR;
+ case VMCS_HOST_EFER:
+ return VMCS_HOST_EFER_STR;
+ case VMCS_HOST_EFER_HIGH:
+ return VMCS_HOST_EFER_HIGH_STR;
case VMCS_HOST_PERF_GLOBAL_CTRL:
return VMCS_HOST_PERF_GLOBAL_CTRL_STR;
case VMCS_HOST_PERF_GLOBAL_CTRL_HIGH:
return VMCS_TPR_THRESHOLD_STR;
case VMCS_SEC_PROC_CTRLS:
return VMCS_SEC_PROC_CTRLS_STR;
+ case VMCS_PLE_GAP:
+ return VMCS_PLE_GAP_STR;
+ case VMCS_PLE_WINDOW:
+ return VMCS_PLE_WINDOW_STR;
case VMCS_INSTR_ERR:
return VMCS_INSTR_ERR_STR;
case VMCS_EXIT_REASON:
return VMCS_GUEST_SMBASE_STR;
case VMCS_GUEST_SYSENTER_CS:
return VMCS_GUEST_SYSENTER_CS_STR;
- case VMCS_HOST_SYSENTER_CS:
+ case VMCS_PREEMPT_TIMER:
+ return VMCS_PREEMPT_TIMER_STR;
+ case VMCS_HOST_SYSENTER_CS:
return VMCS_HOST_SYSENTER_CS_STR;
case VMCS_CR0_MASK:
return VMCS_CR0_MASK_STR;
#include <palacios/vmm_sprintf.h>
#include <palacios/vmm_extensions.h>
-#ifdef CONFIG_SVM
+#ifdef V3_CONFIG_SVM
#include <palacios/svm.h>
#endif
-#ifdef CONFIG_VMX
+#ifdef V3_CONFIG_VMX
#include <palacios/vmx.h>
#endif
-#ifdef CONFIG_VNET
+#ifdef V3_CONFIG_VNET
#include <palacios/vmm_vnet.h>
#endif
-v3_cpu_arch_t v3_cpu_types[CONFIG_MAX_CPUS];
+v3_cpu_arch_t v3_cpu_types[V3_CONFIG_MAX_CPUS];
struct v3_os_hooks * os_hooks = NULL;
int v3_dbg_enable = 0;
static void init_cpu(void * arg) {
uint32_t cpu_id = (uint32_t)(addr_t)arg;
-#ifdef CONFIG_SVM
+#ifdef V3_CONFIG_SVM
if (v3_is_svm_capable()) {
PrintDebug("Machine is SVM Capable\n");
v3_init_svm_cpu(cpu_id);
} else
#endif
-#ifdef CONFIG_VMX
+#ifdef V3_CONFIG_VMX
if (v3_is_vmx_capable()) {
PrintDebug("Machine is VMX Capable\n");
v3_init_vmx_cpu(cpu_id);
switch (v3_cpu_types[cpu_id]) {
-#ifdef CONFIG_SVM
+#ifdef V3_CONFIG_SVM
case V3_SVM_CPU:
case V3_SVM_REV3_CPU:
PrintDebug("Deinitializing SVM CPU %d\n", cpu_id);
v3_deinit_svm_cpu(cpu_id);
break;
#endif
-#ifdef CONFIG_VMX
+#ifdef V3_CONFIG_VMX
case V3_VMX_CPU:
case V3_VMX_EPT_CPU:
+ case V3_VMX_EPT_UG_CPU:
PrintDebug("Deinitializing VMX CPU %d\n", cpu_id);
v3_deinit_vmx_cpu(cpu_id);
break;
// Set global variables.
os_hooks = hooks;
- for (i = 0; i < CONFIG_MAX_CPUS; i++) {
+ for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
v3_cpu_types[i] = V3_INVALID_CPU;
}
V3_init_extensions();
-#ifdef CONFIG_SYMMOD
+#ifdef V3_CONFIG_SYMMOD
V3_init_symmod();
#endif
-#ifdef CONFIG_VNET
+#ifdef V3_CONFIG_VNET
v3_init_vnet();
#endif
-#ifdef CONFIG_MULTITHREAD_OS
+#ifdef V3_CONFIG_MULTITHREAD_OS
if ((hooks) && (hooks->call_on_cpu)) {
for (i = 0; i < num_cpus; i++) {
V3_deinit_extensions();
-#ifdef CONFIG_SYMMOD
+#ifdef V3_CONFIG_SYMMOD
V3_deinit_symmod();
#endif
-#ifdef CONFIG_VNET
+#ifdef V3_CONFIG_VNET
v3_deinit_vnet();
#endif
-#ifdef CONFIG_MULTITHREAD_OS
+#ifdef V3_CONFIG_MULTITHREAD_OS
if ((os_hooks) && (os_hooks->call_on_cpu)) {
- for (i = 0; i < CONFIG_MAX_CPUS; i++) {
+ for (i = 0; i < V3_CONFIG_MAX_CPUS; i++) {
if (v3_cpu_types[i] != V3_INVALID_CPU) {
deinit_cpu((void *)(addr_t)i);
}
core->cpu_id, (void *)(addr_t)core->rip);
switch (v3_cpu_types[0]) {
-#ifdef CONFIG_SVM
+#ifdef V3_CONFIG_SVM
case V3_SVM_CPU:
case V3_SVM_REV3_CPU:
return v3_start_svm_guest(core);
break;
#endif
-#if CONFIG_VMX
+#if V3_CONFIG_VMX
case V3_VMX_CPU:
case V3_VMX_EPT_CPU:
+ case V3_VMX_EPT_UG_CPU:
return v3_start_vmx_guest(core);
break;
#endif
// For the moment very ugly. Eventually we will shift the cpu_mask to an arbitrary sized type...
-#ifdef CONFIG_MULTITHREAD_OS
+#ifdef V3_CONFIG_MULTITHREAD_OS
#define MAX_CORES 32
#else
#define MAX_CORES 1
return -1;
}
-#ifdef CONFIG_MULTITHREAD_OS
+#ifdef V3_CONFIG_MULTITHREAD_OS
// spawn off new threads, for other cores
for (i = 0, vcore_id = 1; (i < MAX_CORES) && (vcore_id < vm->num_cores); i++) {
int major = 0;
}
-#ifdef CONFIG_MULTITHREAD_OS
+#ifdef V3_CONFIG_MULTITHREAD_OS
void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector) {
extern struct v3_os_hooks * os_hooks;
int v3_vm_enter(struct guest_info * info) {
switch (v3_cpu_types[0]) {
-#ifdef CONFIG_SVM
+#ifdef V3_CONFIG_SVM
case V3_SVM_CPU:
case V3_SVM_REV3_CPU:
return v3_svm_enter(info);
break;
#endif
-#if CONFIG_VMX
+#if V3_CONFIG_VMX
case V3_VMX_CPU:
case V3_VMX_EPT_CPU:
+ case V3_VMX_EPT_UG_CPU:
return v3_vmx_enter(info);
break;
#endif
#include <util/vmm_barrier.h>
+
+
+
+/* Zero the barrier state and initialize its spinlock.
+ * Must be called once before any other barrier operation.
+ * Always returns 0. */
+int v3_init_barrier(struct v3_barrier * barrier) {
+ memset(barrier, 0, sizeof(struct v3_barrier));
+ v3_lock_init(&(barrier->lock));
+
+ return 0;
+}
+
+
+/* Attempt to become the barrier owner.
+ * The 'active' flag is tested and set under the barrier lock (with IRQs
+ * saved) so exactly one caller can win a concurrent race.
+ * Returns 0 on success, -1 if another core already holds the barrier.
+ * NOTE(review): the "wait for barrier catch" step (blocking until all
+ * other cores have checked in) is not implemented yet in this patch. */
+int v3_activate_barrier(struct guest_info * core, struct v3_barrier * barrier) {
+ addr_t flag;
+ int acquired = 0;
+
+ flag = v3_lock_irqsave(barrier->lock);
+
+ if (barrier->active == 0) {
+ barrier->active = 1;
+ acquired = 1;
+ }
+
+ v3_unlock_irqrestore(barrier->lock, flag);
+
+ if (acquired == 0) {
+ return -1;
+ }
+
+
+ // wait for barrier catch
+
+
+ return 0;
+}
+
+
+
+
+/* Release a barrier previously acquired with v3_activate_barrier().
+ * Clears the 'active' flag under the barrier lock (mirroring the
+ * test-and-set in v3_activate_barrier) so a later activation can
+ * succeed.  Always returns 0.
+ * Fixes: the original stub had an empty body, so an int function fell
+ * off its end (undefined behavior) and the barrier could never be
+ * re-acquired once activated. */
+int v3_deactivate_barrier(struct v3_barrier * barrier) {
+ addr_t flag;
+
+ flag = v3_lock_irqsave(barrier->lock);
+
+ barrier->active = 0;
+
+ v3_unlock_irqrestore(barrier->lock, flag);
+
+ return 0;
+}
+
+
+/* Called by each core to see whether a barrier is pending.
+ * Returns 0 immediately when no barrier is active.
+ * NOTE(review): the check-in protocol (set this core's bit, then spin
+ * until the owner clears it) is not implemented yet in this patch.
+ * Fixes: the original read 'barrier->activated', but the field used by
+ * v3_activate_barrier/v3_deactivate_barrier is 'active' (a mismatch
+ * that cannot compile), and the int function had no final return. */
+int v3_check_barrier(struct guest_info * core, struct v3_barrier * barrier) {
+
+ if (barrier->active == 0) {
+ return 0;
+ }
+
+ // set cpu bit
+
+ // wait for cpu bit to clear
+
+ return 0;
+}
.data
-#ifdef CONFIG_VMX
+#ifdef V3_CONFIG_VMX
.globl v3_vmxassist_start
v3_vmxassist_start:
-.incbin CONFIG_VMXASSIST_PATH
+.incbin V3_CONFIG_VMXASSIST_PATH
.globl v3_vmxassist_end
v3_vmxassist_end:
#endif
.globl v3_vgabios_start
v3_vgabios_start:
-.incbin CONFIG_VGABIOS_PATH
+.incbin V3_CONFIG_VGABIOS_PATH
.global v3_vgabios_end
v3_vgabios_end:
.globl v3_rombios_start
v3_rombios_start:
-.incbin CONFIG_ROMBIOS_PATH
+.incbin V3_CONFIG_ROMBIOS_PATH
.globl v3_rombios_end
v3_rombios_end:
-#ifdef CONFIG_USE_PXE_BIOS
+#ifdef V3_CONFIG_USE_PXE_BIOS
.globl pxebios_start
pxebios_start:
-.incbin CONFIG_PXEBIOS_PATH
+.incbin V3_CONFIG_PXEBIOS_PATH
.globl pxebios_end
pxebios_end:
#endif
#include <palacios/vmm_xml.h>
#include <palacios/vmm_io.h>
#include <palacios/vmm_msr.h>
-#include <palacios/vmm_mptable.h>
#include <palacios/vmm_sprintf.h>
}
}
-#ifndef CONFIG_ALIGNED_PG_ALLOC
+#ifndef V3_CONFIG_ALIGNED_PG_ALLOC
if (alignment != PAGE_SIZE_4KB) {
PrintError("Aligned page allocations are not supported in this host (requested alignment=%d)\n", alignment);
PrintError("Ignoring alignment request\n");
return -1;
}
-#ifdef CONFIG_TELEMETRY
+#ifdef V3_CONFIG_TELEMETRY
{
char * telemetry = v3_cfg_val(vm_cfg, "telemetry");
v3_cfg_tree_t * vm_tree = info->vm_info->cfg_data->cfg;
v3_cfg_tree_t * pg_tree = v3_cfg_subtree(vm_tree, "paging");
char * pg_mode = v3_cfg_val(pg_tree, "mode");
- char * page_size = v3_cfg_val(pg_tree, "page_size");
PrintDebug("Paging mode specified as %s\n", pg_mode);
if (pg_mode) {
if ((strcasecmp(pg_mode, "nested") == 0)) {
// we assume symmetric cores, so if core 0 has nested paging they all do
- if (v3_cpu_types[0] == V3_SVM_REV3_CPU) {
+ if ((v3_cpu_types[0] == V3_SVM_REV3_CPU) ||
+ (v3_cpu_types[0] == V3_VMX_EPT_CPU) ||
+ (v3_cpu_types[0] == V3_VMX_EPT_UG_CPU)) {
info->shdw_pg_mode = NESTED_PAGING;
} else {
PrintError("Nested paging not supported on this hardware. Defaulting to shadow paging\n");
}
- if (info->shdw_pg_mode == NESTED_PAGING) {
- PrintDebug("Guest Paging Mode: NESTED_PAGING\n");
- if (strcasecmp(page_size, "4kb") == 0) { /* TODO: this may not be an ideal place for this */
- info->vm_info->paging_size = PAGING_4KB;
- } else if (strcasecmp(page_size, "2mb") == 0) {
- info->vm_info->paging_size = PAGING_2MB;
- } else {
- PrintError("Invalid VM paging size: '%s'\n", page_size);
- return -1;
- }
- PrintDebug("VM page size=%s\n", page_size);
- } else if (info->shdw_pg_mode == SHADOW_PAGING) {
- PrintDebug("Guest Paging Mode: SHADOW_PAGING\n");
- } else {
- PrintError("Guest paging mode incorrectly set.\n");
- return -1;
- }
-
if (v3_cfg_val(pg_tree, "large_pages") != NULL) {
if (strcasecmp(v3_cfg_val(pg_tree, "large_pages"), "true") == 0) {
info->use_large_pages = 1;
return -1;
}
- /*
- * Initialize configured extensions
- */
- if (setup_extensions(vm, cfg) == -1) {
- PrintError("Failed to setup extensions\n");
- return -1;
- }
/*
* Initialize configured devices
return -1;
}
+ /*
+ * Initialize configured extensions
+ */
+ if (setup_extensions(vm, cfg) == -1) {
+ PrintError("Failed to setup extensions\n");
+ return -1;
+ }
+
+
return 0;
}
}
- if (vm->num_cores>1 && !v3_find_dev(vm,"apic")) {
- PrintError("palacios: VM has more than one core, but no device named \"apic\"!\n");
- return -1;
- }
-
- if (v3_find_dev(vm,"apic")) {
- if (!v3_find_dev(vm,"ioapic")) {
- PrintError("palacios: VM cores have apics, but there is no device named \"ioapic\"!\n");
- }
- if (v3_inject_mptable(vm) == -1) {
- PrintError("Failed to inject mptable during configuration\n");
- return -1;
- }
- }
-
return 0;
}
#include <palacios/vmm_direct_paging.h>
#include <palacios/svm.h>
-#ifndef CONFIG_DEBUG_CTRL_REGS
+#ifndef V3_CONFIG_DEBUG_CTRL_REGS
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
#include <palacios/vmm_decoder.h>
-#ifndef CONFIG_DEBUG_DEV_MGR
+#ifndef V3_CONFIG_DEBUG_DEV_MGR
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
struct v3_device_info * tmp_dev = __start__v3_devices;
int i = 0;
-#ifdef CONFIG_DEBUG_DEV_MGR
+#ifdef V3_CONFIG_DEBUG_DEV_MGR
{
int num_devices = (__stop__v3_devices - __start__v3_devices) / sizeof(struct v3_device_info);
PrintDebug("%d Virtual devices registered with Palacios\n", num_devices);
#include <palacios/vm_guest.h>
-#ifndef CONFIG_DEBUG_NESTED_PAGING
+#ifndef V3_CONFIG_DEBUG_NESTED_PAGING
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
int v3_invalidate_nested_addr(struct guest_info * info, addr_t inv_addr) {
+
+#ifdef __V3_64BIT__
v3_cpu_mode_t mode = LONG;
+#else
+ v3_cpu_mode_t mode = PROTECTED;
+#endif
switch(mode) {
case REAL:
return 0;
} else if (pde[pde_index].large_page) {
pde[pde_index].present = 0;
+ pde[pde_index].writable = 0;
+ pde[pde_index].user_page = 0;
return 0;
}
pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
pte[pte_index].present = 0;
+ pte[pte_index].writable = 0;
+ pte[pte_index].user_page = 0;
return 0;
}
return 0;
} else if (pdpe[pdpe_index].large_page == 1) { // 1GiB
pdpe[pdpe_index].present = 0;
+ pdpe[pdpe_index].writable = 0;
+ pdpe[pdpe_index].user_page = 0;
return 0;
}
return 0;
} else if (pde[pde_index].large_page == 1) { // 2MiB
pde[pde_index].present = 0;
+ pde[pde_index].writable = 0;
+ pde[pde_index].user_page = 0;
return 0;
}
pte = V3_VAddr((void*)BASE_TO_PAGE_ADDR(pde[pde_index].pt_base_addr));
pte[pte_index].present = 0; // 4KiB
+ pte[pte_index].writable = 0;
+ pte[pte_index].user_page = 0;
return 0;
}
#include <palacios/vmm_instr_emulator.h>
#include <palacios/vmm_ctrl_regs.h>
-#ifndef CONFIG_DEBUG_EMULATOR
+#ifndef V3_CONFIG_DEBUG_EMULATOR
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
#include <palacios/vmm_intr.h>
-#ifndef CONFIG_DEBUG_HALT
+#ifndef V3_CONFIG_DEBUG_HALT
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
+++ /dev/null
-/*
- * This file is part of the Palacios Virtual Machine Monitor developed
- * by the V3VEE Project with funding from the United States National
- * Science Foundation and the Department of Energy.
- *
- * The V3VEE Project is a joint project between Northwestern University
- * and the University of New Mexico. You can find out more at
- * http://www.v3vee.org
- *
- * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
- * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
- * All rights reserved.
- *
- * Author: Chang Bae <c.s.bae@u.northwestern.edu>
- *
- * This is free software. You are permitted to use,
- * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
- */
-
-
-
-#include <palacios/svm_handler.h>
-#include <palacios/vmm_instrument.h>
-
-#define NO_INSTRUMENTATION
-#include <palacios/vmm_ringbuffer.h>
-#undef NO_INSTRUMENTATION
-
-#define RING_SIZE 2000
-
-static ullong_t last = 0;
-static struct v3_ringbuf * func_ring = NULL;
-
-struct instrumented_func {
- ullong_t time;
- uint_t exiting;
- void * called_fn;
- void * calling_fn;
-} __attribute__((packed));
-
-
-
-static void print_instrumentation() __attribute__((__no_instrument_function__));
-
-void __cyg_profile_func_enter(void * this, void * callsite) __attribute__((__no_instrument_function__));
-void __cyg_profile_func_exit(void * this, void * callsite) __attribute__((__no_instrument_function__));
-
-void v3_init_instrumentation() {
- PrintDebug("Creating Ring Buffer (unit size = %d)\n", (uint_t)sizeof(struct instrumented_func));
- // initialize
- func_ring = v3_create_ringbuf(sizeof(struct instrumented_func) * RING_SIZE); //dequeue at every 4095
-}
-
-
-
-__attribute__((__no_instrument_function__))
- void __cyg_profile_func_enter(void * this, void * callsite) {
-
- if (func_ring != NULL) {
-
- struct instrumented_func tmp_fn;
- ullong_t now = 0;
-
- rdtscll(now);
-
- //PrintDebug("Entering Function\n");
-
- if (v3_ringbuf_avail_space(func_ring) < sizeof(struct instrumented_func)) {
- print_instrumentation();
- }
-
- tmp_fn.time = now - last; // current tsc
- tmp_fn.exiting = 0; //enter to be 0
- tmp_fn.called_fn = this; //this
- tmp_fn.calling_fn = callsite; //callsite
-
- // PrintDebug("Writing Function: fn_data=%p, size=%d\n",
- // (void *)&tmp_fn, (uint_t)sizeof(struct instrumented_func));
- v3_ringbuf_write(func_ring, (uchar_t *)&tmp_fn, sizeof(struct instrumented_func));
-
- rdtscll(last);
- }
-}
-
-
-__attribute__((__no_instrument_function__))
- void __cyg_profile_func_exit(void * this, void * callsite){
-
- if (func_ring != NULL) {
-
- struct instrumented_func tmp_fn;
- ullong_t now = 0;
-
- rdtscll(now);
-
- // PrintDebug("Exiting Function\n");
-
- if (v3_ringbuf_avail_space(func_ring) < sizeof(struct instrumented_func)) {
- print_instrumentation();
- }
-
- tmp_fn.time = now - last; // current tsc
- tmp_fn.exiting = 1; //exit to be 0
- tmp_fn.called_fn = this; //this
- tmp_fn.calling_fn = callsite; //callsite
-
- // PrintDebug("Writing Function: fn_data=%p, size=%d\n",
- // (void *)&tmp_fn, (uint_t)sizeof(struct instrumented_func));
- v3_ringbuf_write(func_ring, (uchar_t *)&tmp_fn, sizeof(struct instrumented_func));
-
- rdtscll(last);
- }
-}
-
-
-
-static void print_instrumentation() {
-
- struct instrumented_func tmp_fn;
-
- // PrintDebug("Printing Instrumentation\n");
- while (v3_ringbuf_data_len(func_ring) >= sizeof(struct instrumented_func)) {
-
- v3_ringbuf_read(func_ring, (uchar_t *)&tmp_fn, sizeof(struct instrumented_func));
-
- PrintDebug("CYG_PROF: %d %p %p %p\n",
- tmp_fn.exiting,
- (void *)(addr_t)(tmp_fn.time),
- tmp_fn.called_fn,
- tmp_fn.calling_fn);
- }
-}
-
-
-
#include <palacios/vmm_lock.h>
-#ifndef CONFIG_DEBUG_INTERRUPTS
+#ifndef V3_CONFIG_DEBUG_INTERRUPTS
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
}
}
-#ifdef CONFIG_DEBUG_INTERRUPTS
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
if (type == V3_INVALID_INTR) {
PrintError("[get_intr_type] Invalid_Intr\n");
}
-#ifndef CONFIG_DEBUG_IO
+#ifndef V3_CONFIG_DEBUG_IO
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
static int unhandled_err(struct guest_info * core, addr_t guest_va, addr_t guest_pa,
struct v3_mem_region * reg, pf_error_t access_info) {
- PrintError("Unhandled memory access error\n");
+ PrintError("Unhandled memory access error (gpa=%p, gva=%p, error_code=%d)\n",
+ (void *)guest_pa, (void *)guest_va, *(uint32_t *)&access_info);
v3_print_mem_map(core->vm_info);
map->base_region.guest_start = 0;
map->base_region.guest_end = mem_pages * PAGE_SIZE_4KB;
-#ifdef CONFIG_ALIGNED_PG_ALLOC
+#ifdef V3_CONFIG_ALIGNED_PG_ALLOC
map->base_region.host_addr = (addr_t)V3_AllocAlignedPages(mem_pages, vm->mem_align);
#else
map->base_region.host_addr = (addr_t)V3_AllocPages(mem_pages);
-#ifndef CONFIG_DEBUG_SHADOW_PAGING
+#ifndef V3_CONFIG_DEBUG_SHADOW_PAGING
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
}
-void PrintHostPageTables(struct guest_info * info, addr_t cr3) {
+void PrintHostPageTables(struct guest_info * info, v3_cpu_mode_t cpu_mode, addr_t cr3) {
PrintDebug("CR3: %p\n", (void *)cr3);
- switch (info->cpu_mode) {
+ switch (cpu_mode) {
case PROTECTED:
v3_walk_host_pt_32(info, cr3, print_page_walk_cb, NULL);
break;
void PrintGuestPageTables(struct guest_info * info, addr_t cr3) {
PrintDebug("CR3: %p\n", (void *)cr3);
switch (info->cpu_mode) {
+ case REAL:
case PROTECTED:
v3_walk_guest_pt_32(info, cr3, print_page_walk_cb, NULL);
break;
-#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
+#ifdef V3_CONFIG_SHADOW_PAGING_TELEMETRY
#include <palacios/vmm_telemetry.h>
#endif
-#ifdef CONFIG_SYMBIOTIC_SWAP
+#ifdef V3_CONFIG_SYMBIOTIC_SWAP
#include <palacios/vmm_sym_swap.h>
#endif
-#ifndef CONFIG_DEBUG_SHADOW_PAGING
+#ifndef V3_CONFIG_DEBUG_SHADOW_PAGING
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
***/
-#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
+#ifdef V3_CONFIG_SHADOW_PAGING_TELEMETRY
static void telemetry_cb(struct v3_vm_info * vm, void * private_data, char * hdr) {
int i = 0;
for (i = 0; i < vm->num_cores; i++) {
}
-#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
+#ifdef V3_CONFIG_SHADOW_PAGING_TELEMETRY
v3_add_telemetry_cb(core->vm_info, telemetry_cb, NULL);
#endif
return -1;
}
-#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
+#ifdef V3_CONFIG_SHADOW_PAGING_TELEMETRY
v3_remove_telemetry_cb(core->vm_info, telemetry_cb, NULL);
#endif
int v3_inject_guest_pf(struct guest_info * core, addr_t fault_addr, pf_error_t error_code) {
core->ctrl_regs.cr2 = fault_addr;
-#ifdef CONFIG_SHADOW_PAGING_TELEMETRY
+#ifdef V3_CONFIG_SHADOW_PAGING_TELEMETRY
core->shdw_pg_state.guest_faults++;
#endif
-#if defined(CONFIG_BUILT_IN_STDIO) && \
- ( defined(CONFIG_BUILT_IN_SPRINTF) || \
- defined(CONFIG_BUILT_IN_SNPRINTF) || \
- defined(CONFIG_BUILT_IN_VSPRINTF) || \
- defined(CONFIG_BUILT_IN_VSNRPRINTF ))
+#if defined(V3_CONFIG_BUILT_IN_STDIO) && \
+ ( defined(V3_CONFIG_BUILT_IN_SPRINTF) || \
+ defined(V3_CONFIG_BUILT_IN_SNPRINTF) || \
+ defined(V3_CONFIG_BUILT_IN_VSPRINTF) || \
+ defined(V3_CONFIG_BUILT_IN_VSNRPRINTF ))
static char * ksprintn(char * nbuf, uint64_t num, int base, int *len, int upper);
static void snprintf_func(int ch, void * arg);
static int kvprintf(char const * fmt, void (*func)(int, void *), void * arg, int radix, va_list ap);
-#ifdef CONFIG_BUILT_IN_SPRINTF
+#ifdef V3_CONFIG_BUILT_IN_SPRINTF
/*
* Scaled down version of sprintf(3).
*/
#endif
-#ifdef CONFIG_BUILT_IN_VSPRINTF
+#ifdef V3_CONFIG_BUILT_IN_VSPRINTF
/*
* Scaled down version of vsprintf(3).
*/
#endif
-#ifdef CONFIG_BUILT_IN_SNPRINTF
+#ifdef V3_CONFIG_BUILT_IN_SNPRINTF
/*
* Scaled down version of snprintf(3).
*/
-#ifdef CONFIG_BUILT_IN_VSNRPRINTF
+#ifdef V3_CONFIG_BUILT_IN_VSNRPRINTF
/*
* Kernel version which takes radix argument vsnprintf(3).
*/
}
-#endif // CONFIG_BUILT_IN_STDIO
+#endif // V3_CONFIG_BUILT_IN_STDIO
#include <palacios/vmm.h>
-#ifdef CONFIG_BUILT_IN_MEMSET
+#ifdef V3_CONFIG_BUILT_IN_MEMSET
void * memset(void * s, int c, size_t n) {
uchar_t * p = (uchar_t *) s;
}
#endif
-#ifdef CONFIG_BUILT_IN_MEMCPY
+#ifdef V3_CONFIG_BUILT_IN_MEMCPY
void * memcpy(void * dst, const void * src, size_t n) {
uchar_t * d = (uchar_t *) dst;
const uchar_t * s = (const uchar_t *)src;
}
#endif
-#ifdef CONFIG_BUILT_IN_MEMMOVE
+#ifdef V3_CONFIG_BUILT_IN_MEMMOVE
void * memmove(void * dst, const void * src, size_t n) {
uint8_t * tmp = (uint8_t *)V3_Malloc(n);
#endif
-#ifdef CONFIG_BUILT_IN_MEMCMP
+#ifdef V3_CONFIG_BUILT_IN_MEMCMP
int memcmp(const void * s1_, const void * s2_, size_t n) {
const char * s1 = s1_;
const char * s2 = s2_;
#endif
-#ifdef CONFIG_BUILT_IN_STRLEN
+#ifdef V3_CONFIG_BUILT_IN_STRLEN
size_t strlen(const char * s) {
size_t len = 0;
-#ifdef CONFIG_BUILT_IN_STRNLEN
+#ifdef V3_CONFIG_BUILT_IN_STRNLEN
/*
* This it a GNU extension.
* It is like strlen(), but it will check at most maxlen
#endif
-#ifdef CONFIG_BUILT_IN_STRCMP
+#ifdef V3_CONFIG_BUILT_IN_STRCMP
int strcmp(const char * s1, const char * s2) {
while (1) {
int cmp = (*s1 - *s2);
}
#endif
-#ifdef CONFIG_BUILT_IN_STRCASECMP
+#ifdef V3_CONFIG_BUILT_IN_STRCASECMP
int strcasecmp(const char * s1, const char * s2) {
while (1) {
int cmp = (tolower(*s1) - tolower(*s2));
#endif
-#ifdef CONFIG_BUILT_IN_STRNCMP
+#ifdef V3_CONFIG_BUILT_IN_STRNCMP
int strncmp(const char * s1, const char * s2, size_t limit) {
size_t i = 0;
}
#endif
-#ifdef CONFIG_BUILT_IN_STRNCASECMP
+#ifdef V3_CONFIG_BUILT_IN_STRNCASECMP
int strncasecmp(const char * s1, const char * s2, size_t limit) {
size_t i = 0;
#endif
-#ifdef CONFIG_BUILT_IN_STRCAT
+#ifdef V3_CONFIG_BUILT_IN_STRCAT
char * strcat(char * s1, const char * s2) {
char * t1 = s1;
#endif
-#ifdef CONFIG_BUILT_IN_STRNCAT
+#ifdef V3_CONFIG_BUILT_IN_STRNCAT
char * strncat(char * s1, const char * s2, size_t limit) {
size_t i = 0;
char * t1;
-#ifdef CONFIG_BUILT_IN_STRCPY
+#ifdef V3_CONFIG_BUILT_IN_STRCPY
char * strcpy(char * dest, const char * src)
{
char *ret = dest;
#endif
-#ifdef CONFIG_BUILT_IN_STRNCPY
+#ifdef V3_CONFIG_BUILT_IN_STRNCPY
char * strncpy(char * dest, const char * src, size_t limit) {
char * ret = dest;
-#ifdef CONFIG_BUILT_IN_STRDUP
+#ifdef V3_CONFIG_BUILT_IN_STRDUP
char * strdup(const char * s1) {
char *ret;
-#ifdef CONFIG_BUILT_IN_ATOI
+#ifdef V3_CONFIG_BUILT_IN_ATOI
int atoi(const char * buf) {
int ret = 0;
-#ifdef CONFIG_BUILT_IN_STRCHR
+#ifdef V3_CONFIG_BUILT_IN_STRCHR
char * strchr(const char * s, int c) {
while (*s != '\0') {
if (*s == c)
#endif
-#ifdef CONFIG_BUILT_IN_STRRCHR
+#ifdef V3_CONFIG_BUILT_IN_STRRCHR
char * strrchr(const char * s, int c) {
size_t len = strlen(s);
const char * p = s + len;
}
#endif
-#ifdef CONFIG_BUILT_IN_STRPBRK
+#ifdef V3_CONFIG_BUILT_IN_STRPBRK
char * strpbrk(const char * s, const char * accept) {
size_t setLen = strlen(accept);
}
#endif
-#ifdef CONFIG_BUILT_IN_STRSPN
+#ifdef V3_CONFIG_BUILT_IN_STRSPN
size_t strspn(const char * s, const char * accept) {
int match = 1;
int cnt = 0;
#endif
-#ifdef CONFIG_BUILT_IN_STRCSPN
+#ifdef V3_CONFIG_BUILT_IN_STRCSPN
size_t strcspn(const char * s, const char * reject) {
int match = 0;
int cnt = 0;
#endif
-#ifdef CONFIG_BUILT_IN_STRSTR
+#ifdef V3_CONFIG_BUILT_IN_STRSTR
char *strstr(const char *haystack, const char *needle)
{
int l1, l2;
return -1;
}
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
if (v3_init_symcall_vm(vm) == -1) {
PrintError("Error intializing global SymCall state\n");
return -1;
}
#endif
-#ifdef CONFIG_SYMMOD
+#ifdef V3_CONFIG_SYMMOD
if (v3_init_symmod_vm(vm, vm->cfg_data->cfg) == -1) {
PrintError("Error initializing global SymMod state\n");
return -1;
int v3_deinit_symbiotic_vm(struct v3_vm_info * vm) {
-#ifdef CONFIG_SYMMOD
+#ifdef V3_CONFIG_SYMMOD
if (v3_deinit_symmod_vm(vm) == -1) {
PrintError("Error deinitializing global SymMod state\n");
return -1;
#include <palacios/vmm_sprintf.h>
-#ifdef CONFIG_TELEMETRY_GRANULARITY
-#define DEFAULT_GRANULARITY CONFIG_TELEMETRY_GRANULARITY
+#ifdef V3_CONFIG_TELEMETRY_GRANULARITY
+#define DEFAULT_GRANULARITY V3_CONFIG_TELEMETRY_GRANULARITY
#else
#define DEFAULT_GRANULARITY 50000
#endif
#include <palacios/vmm_time.h>
#include <palacios/vm_guest.h>
-#ifndef CONFIG_DEBUG_TIME
+#ifndef V3_CONFIG_DEBUG_TIME
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
#include <palacios/vmm_decoder.h>
#include <palacios/vmm_instr_decoder.h>
-#ifndef CONFIG_DEBUG_DECODER
+#ifndef V3_CONFIG_DEBUG_DECODER
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
int length = 0;
- V3_Print("Decoding Instruction at %p\n", (void *)instr_ptr);
+ PrintDebug("Decoding Instruction at %p\n", (void *)instr_ptr);
memset(instr, 0, sizeof(struct x86_instr));
form = op_code_to_form((uint8_t *)(instr_ptr + length), &length);
- V3_Print("\t decoded as (%s)\n", op_form_to_str(form));
+ PrintDebug("\t decoded as (%s)\n", op_form_to_str(form));
if (form == INVALID_INSTR) {
PrintError("Could not find instruction form (%x)\n", *(uint32_t *)(instr_ptr + length));
instr->instr_length += length;
+#ifdef V3_CONFIG_DEBUG_DECODER
v3_print_instr(instr);
+#endif
return 0;
}
#include <palacios/vmm_sprintf.h>
#include <palacios/vmm_ethernet.h>
-#ifndef CONFIG_DEBUG_VNET
+#ifndef V3_CONFIG_DEBUG_VNET
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
+int v3_net_debug = 0;
+
struct eth_hdr {
uint8_t dst_mac[ETH_ALEN];
uint8_t src_mac[ETH_ALEN];
struct v3_vnet_dev_ops dev_ops;
void * private_data;
- int active;
-
- uint64_t bytes_tx, bytes_rx;
- uint32_t pkts_tx, pkt_rx;
-
struct list_head node;
} __attribute__((packed));
uint8_t type;
- int active;
void * private_data;
} __attribute__((packed));
} __attribute__((packed));
+struct queue_entry{
+ uint8_t use;
+ struct v3_vnet_pkt pkt;
+ uint8_t * data;
+ uint32_t size_alloc;
+};
+
+#define VNET_QUEUE_SIZE 1024
+struct vnet_queue {
+ struct queue_entry buf[VNET_QUEUE_SIZE];
+ int head, tail;
+ int count;
+ v3_lock_t lock;
+};
+
static struct {
struct list_head routes;
struct list_head devs;
v3_lock_t lock;
struct vnet_stat stats;
- struct hashtable * route_cache;
-} vnet_state;
+ void * pkt_flush_thread;
+ struct vnet_queue pkt_q;
+ struct hashtable * route_cache;
+} vnet_state;
+
-#ifdef CONFIG_DEBUG_VNET
+#ifdef V3_CONFIG_DEBUG_VNET
static inline void mac_to_string(uint8_t * mac, char * buf) {
snprintf(buf, 100, "%2x:%2x:%2x:%2x:%2x:%2x",
mac[0], mac[1], mac[2],
return 0;
}
-static int look_into_cache(const struct v3_vnet_pkt * pkt, struct route_list ** routes) {
+static int look_into_cache(const struct v3_vnet_pkt * pkt,
+ struct route_list ** routes) {
*routes = (struct route_list *)v3_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
return 0;
new_route = (struct vnet_route_info *)V3_Malloc(sizeof(struct vnet_route_info));
memset(new_route, 0, sizeof(struct vnet_route_info));
-#ifdef CONFIG_DEBUG_VNET
+#ifdef V3_CONFIG_DEBUG_VNET
PrintDebug("VNET/P Core: add_route_entry:\n");
print_route(&route);
#endif
v3_unlock_irqrestore(vnet_state.lock, flags);
-#ifdef CONFIG_DEBUG_VNET
+#ifdef V3_CONFIG_DEBUG_VNET
dump_routes();
#endif
int max_rank = 0;
struct list_head match_list;
struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
-// uint8_t src_type = pkt->src_type;
- // uint32_t src_link = pkt->src_id;
+ // uint8_t src_type = pkt->src_type;
+ // uint32_t src_link = pkt->src_id;
-#ifdef CONFIG_DEBUG_VNET
+#ifdef V3_CONFIG_DEBUG_VNET
{
char dst_str[100];
char src_str[100];
}
-int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
+int vnet_tx_one_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
struct route_list * matched_routes = NULL;
unsigned long flags;
int i;
-#ifdef CONFIG_DEBUG_VNET
- {
- int cpu = V3_Get_CPU();
- PrintDebug("VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
+ int cpu = V3_Get_CPU();
+ V3_Net_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
cpu, pkt->size, pkt->src_id,
pkt->src_type, pkt->dst_id, pkt->dst_type);
- }
-#endif
+ if(v3_net_debug >= 4){
+ v3_hexdump(pkt->data, pkt->size, NULL, 0);
+ }
flags = v3_lock_irqsave(vnet_state.lock);
for (i = 0; i < matched_routes->num_routes; i++) {
struct vnet_route_info * route = matched_routes->routes[i];
- if (route->route_def.dst_type == LINK_EDGE) {
- struct vnet_brg_dev *bridge = vnet_state.bridge;
- pkt->dst_type = LINK_EDGE;
- pkt->dst_id = route->route_def.dst_id;
+ if (route->route_def.dst_type == LINK_EDGE) {
+ struct vnet_brg_dev * bridge = vnet_state.bridge;
+ pkt->dst_type = LINK_EDGE;
+ pkt->dst_id = route->route_def.dst_id;
- if (bridge == NULL || (bridge->active == 0)) {
- PrintDebug("VNET/P Core: No active bridge to sent data to\n");
+ if (bridge == NULL) {
+ V3_Net_Print(2, "VNET/P Core: No active bridge to sent data to\n");
continue;
}
if(bridge->brg_ops.input(bridge->vm, pkt, bridge->private_data) < 0){
- PrintDebug("VNET/P Core: Packet not sent properly to bridge\n");
+ V3_Net_Print(2, "VNET/P Core: Packet not sent properly to bridge\n");
continue;
}
vnet_state.stats.tx_bytes += pkt->size;
vnet_state.stats.tx_pkts ++;
} else if (route->route_def.dst_type == LINK_INTERFACE) {
- if (route->dst_dev == NULL || route->dst_dev->active == 0){
- PrintDebug("VNET/P Core: No active device to sent data to\n");
+ if (route->dst_dev == NULL){
+ V3_Net_Print(2, "VNET/P Core: No active device to sent data to\n");
continue;
}
if(route->dst_dev->dev_ops.input(route->dst_dev->vm, pkt, route->dst_dev->private_data) < 0) {
- PrintDebug("VNET/P Core: Packet not sent properly\n");
+ V3_Net_Print(2, "VNET/P Core: Packet not sent properly\n");
continue;
}
vnet_state.stats.tx_bytes += pkt->size;
return 0;
}
+
+static int vnet_pkt_enqueue(struct v3_vnet_pkt * pkt){
+ unsigned long flags;
+ struct queue_entry * entry;
+ struct vnet_queue * q = &(vnet_state.pkt_q);
+ uint16_t num_pages;
+
+ flags = v3_lock_irqsave(q->lock);
+
+ if (q->count >= VNET_QUEUE_SIZE){
+ V3_Net_Print(1, "VNET Queue overflow!\n");
+ v3_unlock_irqrestore(q->lock, flags);
+ return -1;
+ }
+
+ q->count ++;
+ entry = &(q->buf[q->tail++]);
+ q->tail %= VNET_QUEUE_SIZE;
+
+ v3_unlock_irqrestore(q->lock, flags);
+
+ /* this is ugly, but should happen very unlikely */
+ while(entry->use);
+
+ if(entry->size_alloc < pkt->size){
+ if(entry->data != NULL){
+ V3_FreePages(V3_PAddr(entry->data), (entry->size_alloc / PAGE_SIZE));
+ entry->data = NULL;
+ }
+
+ num_pages = 1 + (pkt->size / PAGE_SIZE);
+ entry->data = V3_VAddr(V3_AllocPages(num_pages));
+ if(entry->data == NULL){
+ return -1;
+ }
+ entry->size_alloc = PAGE_SIZE * num_pages;
+ }
+
+    memcpy(&(entry->pkt), pkt, sizeof(struct v3_vnet_pkt));
+    entry->pkt.data = entry->data;
+    memcpy(entry->data, pkt->data, pkt->size);
+
+ entry->use = 1;
+
+ return 0;
+}
+
+
+int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data, int synchronize) {
+ if(synchronize){
+ vnet_tx_one_pkt(pkt, NULL);
+ }else {
+ vnet_pkt_enqueue(pkt);
+ V3_Net_Print(2, "VNET/P Core: Put pkt into Queue: pkt size %d\n", pkt->size);
+ }
+
+ return 0;
+}
+
int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac,
struct v3_vnet_dev_ops *ops,
void * priv_data){
memcpy(new_dev->mac_addr, mac, 6);
new_dev->dev_ops.input = ops->input;
- new_dev->dev_ops.poll = ops->poll;
new_dev->private_data = priv_data;
new_dev->vm = vm;
new_dev->dev_id = 0;
- new_dev->active = 1;
flags = v3_lock_irqsave(vnet_state.lock);
}
-
int v3_vnet_del_dev(int dev_id){
struct vnet_dev * dev = NULL;
unsigned long flags;
return 0;
}
+
int v3_vnet_stat(struct vnet_stat * stats){
stats->rx_bytes = vnet_state.stats.rx_bytes;
struct vnet_brg_dev * tmp_bridge = NULL;
flags = v3_lock_irqsave(vnet_state.lock);
-
if (vnet_state.bridge == NULL) {
bridge_free = 1;
vnet_state.bridge = (void *)1;
}
-
v3_unlock_irqrestore(vnet_state.lock, flags);
if (bridge_free == 0) {
tmp_bridge->brg_ops.input = ops->input;
tmp_bridge->brg_ops.poll = ops->poll;
tmp_bridge->private_data = priv_data;
- tmp_bridge->active = 1;
tmp_bridge->type = type;
/* make this atomic to avoid possible race conditions */
}
-void v3_vnet_do_poll(struct v3_vm_info * vm){
- struct vnet_dev * dev = NULL;
+static int vnet_tx_flush(void *args){
+ unsigned long flags;
+ struct queue_entry * entry;
+ struct vnet_queue * q = &(vnet_state.pkt_q);
- /* TODO: run this on separate threads
- * round-robin schedule, with maximal budget for each poll
- */
- list_for_each_entry(dev, &(vnet_state.devs), node) {
- if(dev->dev_ops.poll != NULL){
- dev->dev_ops.poll(vm, -1, dev->private_data);
- }
+ V3_Print("VNET/P Handing Pkt Thread Starting ....\n");
+
+ //V3_THREAD_SLEEP();
+ /* we need thread sleep/wakeup in Palacios */
+ while(1){
+ flags = v3_lock_irqsave(q->lock);
+
+ if (q->count <= 0){
+ v3_unlock_irqrestore(q->lock, flags);
+ v3_yield(NULL);
+ //V3_THREAD_SLEEP();
+ }else {
+ q->count --;
+ entry = &(q->buf[q->head++]);
+ q->head %= VNET_QUEUE_SIZE;
+
+ v3_unlock_irqrestore(q->lock, flags);
+
+ /* this is ugly, but should happen very unlikely */
+ while(!entry->use);
+ vnet_tx_one_pkt(&(entry->pkt), NULL);
+
+	    /* asynchronously release allocated memory for buffer entry here */
+ entry->use = 0;
+
+ V3_Net_Print(2, "vnet_tx_flush: pkt (size %d)\n", entry->pkt.size);
+ }
}
}
-
int v3_init_vnet() {
memset(&vnet_state, 0, sizeof(vnet_state));
}
vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);
-
if (vnet_state.route_cache == NULL) {
PrintError("VNET/P Core: Fails to initiate route cache\n");
return -1;
}
+ v3_lock_init(&(vnet_state.pkt_q.lock));
+
+ vnet_state.pkt_flush_thread = V3_CREATE_THREAD(vnet_tx_flush, NULL, "VNET_Pkts");
+
PrintDebug("VNET/P Core is initiated\n");
return 0;
-#ifndef CONFIG_DEBUG_DECODER
+#ifndef V3_CONFIG_DEBUG_DECODER
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
xed_iform_enum_t iform = xed_decoded_inst_get_iform_enum(&xed_instr);
-#ifdef CONFIG_DEBUG_DECODER
+#ifdef V3_CONFIG_DEBUG_DECODER
xed_iclass_enum_t iclass = xed_decoded_inst_get_iclass(&xed_instr);
PrintDebug("iform=%s, iclass=%s\n", xed_iform_enum_t2str(iform), xed_iclass_enum_t2str(iclass));
* and the University of New Mexico. You can find out more at
* http://www.v3vee.org
*
- * Copyright (c) 2008, Peter Dinda <pdinda@northwestern.edu>
- * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
- * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
+ * Copyright (c) 2011, Jack Lange <jarusl@cs.northwestern.edu>
+ * Copyright (c) 2011, The V3VEE Project <http://www.v3vee.org>
* All rights reserved.
*
- * Author: Peter Dinda <pdinda@northwestern.edu>
- * Jack Lange <jarusl@cs.northwestern.edu>
+ * Author: Jack Lange <jarusl@cs.northwestern.edu>
*
* This is free software. You are permitted to use,
* redistribute, and modify it as specified in the file "V3VEE_LICENSE".
#include <palacios/vmx_io.h>
#include <palacios/vmx_msr.h>
+#include <palacios/vmx_ept.h>
+#include <palacios/vmx_assist.h>
#include <palacios/vmx_hw_info.h>
-#ifndef CONFIG_DEBUG_VMX
+#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
/* These fields contain the hardware feature sets supported by the local CPU */
static struct vmx_hw_info hw_info;
+extern v3_cpu_arch_t v3_cpu_types[];
-static addr_t active_vmcs_ptrs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
-static addr_t host_vmcs_ptrs[CONFIG_MAX_CPUS] = { [0 ... CONFIG_MAX_CPUS - 1] = 0};
+static addr_t active_vmcs_ptrs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};
+static addr_t host_vmcs_ptrs[V3_CONFIG_MAX_CPUS] = { [0 ... V3_CONFIG_MAX_CPUS - 1] = 0};
extern int v3_vmx_launch(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
extern int v3_vmx_resume(struct v3_gprs * vm_regs, struct guest_info * info, struct v3_ctrl_regs * ctrl_regs);
-static int init_vmcs_bios(struct guest_info * info, struct vmx_data * vmx_state) {
+static int init_vmcs_bios(struct guest_info * core, struct vmx_data * vmx_state) {
int vmx_ret = 0;
- struct vmx_data * vmx_info = (struct vmx_data *)(info->vmm_data);
// disable global interrupts for vm state initialization
v3_disable_ints();
PrintDebug("Loading VMCS\n");
vmx_ret = vmcs_load(vmx_state->vmcs_ptr_phys);
- active_vmcs_ptrs[V3_Get_CPU()] = vmx_info->vmcs_ptr_phys;
+ active_vmcs_ptrs[V3_Get_CPU()] = vmx_state->vmcs_ptr_phys;
vmx_state->state = VMX_UNLAUNCHED;
if (vmx_ret != VMX_SUCCESS) {
}
+ /*** Setup default state from HW ***/
+
+ vmx_state->pin_ctrls.value = hw_info.pin_ctrls.def_val;
+ vmx_state->pri_proc_ctrls.value = hw_info.proc_ctrls.def_val;
+ vmx_state->exit_ctrls.value = hw_info.exit_ctrls.def_val;
+ vmx_state->entry_ctrls.value = hw_info.entry_ctrls.def_val;
+ vmx_state->sec_proc_ctrls.value = hw_info.sec_proc_ctrls.def_val;
+
+ /* Print Control MSRs */
+ PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)hw_info.cr0.value);
+ PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)hw_info.cr4.value);
+
+
/******* Setup Host State **********/
vmx_state->host_state.tr.base = tmp_seg.base;
-
-
- /********** Setup and VMX Control Fields from MSR ***********/
- /* Setup IO map */
-
- struct v3_msr tmp_msr;
-
- v3_get_msr(VMX_PINBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ /********** Setup VMX Control Fields ***********/
/* Add external interrupts, NMI exiting, and virtual NMI */
- vmx_state->pin_ctrls.value = tmp_msr.lo;
vmx_state->pin_ctrls.nmi_exit = 1;
vmx_state->pin_ctrls.ext_int_exit = 1;
- v3_get_msr(VMX_PROCBASED_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_state->pri_proc_ctrls.value = tmp_msr.lo;
- vmx_state->pri_proc_ctrls.use_io_bitmap = 1;
vmx_state->pri_proc_ctrls.hlt_exit = 1;
- vmx_state->pri_proc_ctrls.invlpg_exit = 1;
- vmx_state->pri_proc_ctrls.use_msr_bitmap = 1;
- vmx_state->pri_proc_ctrls.pause_exit = 1;
+
+
+ vmx_state->pri_proc_ctrls.pause_exit = 0;
vmx_state->pri_proc_ctrls.tsc_offset = 1;
-#ifdef CONFIG_TIME_VIRTUALIZE_TSC
+#ifdef V3_CONFIG_TIME_VIRTUALIZE_TSC
vmx_state->pri_proc_ctrls.rdtsc_exit = 1;
#endif
- vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(info->vm_info->io_map.arch_data));
+ /* Setup IO map */
+ vmx_state->pri_proc_ctrls.use_io_bitmap = 1;
+ vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_A_ADDR, (addr_t)V3_PAddr(core->vm_info->io_map.arch_data));
vmx_ret |= check_vmcs_write(VMCS_IO_BITMAP_B_ADDR,
- (addr_t)V3_PAddr(info->vm_info->io_map.arch_data) + PAGE_SIZE_4KB);
+ (addr_t)V3_PAddr(core->vm_info->io_map.arch_data) + PAGE_SIZE_4KB);
- vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(info->vm_info->msr_map.arch_data));
+ vmx_state->pri_proc_ctrls.use_msr_bitmap = 1;
+ vmx_ret |= check_vmcs_write(VMCS_MSR_BITMAP, (addr_t)V3_PAddr(core->vm_info->msr_map.arch_data));
- v3_get_msr(VMX_EXIT_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_state->exit_ctrls.value = tmp_msr.lo;
- vmx_state->exit_ctrls.host_64_on = 1;
- if ((vmx_state->exit_ctrls.save_efer == 1) || (vmx_state->exit_ctrls.ld_efer == 1)) {
- vmx_state->ia32e_avail = 1;
- }
- v3_get_msr(VMX_ENTRY_CTLS_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_state->entry_ctrls.value = tmp_msr.lo;
-
- {
- struct vmx_exception_bitmap excp_bmap;
- excp_bmap.value = 0;
-
- excp_bmap.pf = 1;
- vmx_ret |= check_vmcs_write(VMCS_EXCP_BITMAP, excp_bmap.value);
- }
- /******* Setup VMXAssist guest state ***********/
- info->rip = 0xd0000;
- info->vm_regs.rsp = 0x80000;
- struct rflags * flags = (struct rflags *)&(info->ctrl_regs.rflags);
- flags->rsvd1 = 1;
- /* Print Control MSRs */
- v3_get_msr(VMX_CR0_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- PrintDebug("CR0 MSR: %p\n", (void *)(addr_t)tmp_msr.value);
+#ifdef __V3_64BIT__
+ vmx_state->exit_ctrls.host_64_on = 1;
+#endif
+
+
+ /* Not sure how exactly to handle this... */
+ v3_hook_msr(core->vm_info, EFER_MSR,
+ &v3_handle_efer_read,
+ &v3_handle_efer_write,
+ core);
- v3_get_msr(VMX_CR4_FIXED0_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- PrintDebug("CR4 MSR: %p\n", (void *)(addr_t)tmp_msr.value);
+ // Or is it this???
+ vmx_state->entry_ctrls.ld_efer = 1;
+ vmx_state->exit_ctrls.ld_efer = 1;
+ vmx_state->exit_ctrls.save_efer = 1;
+ /* *** */
+ vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);
-#define GUEST_CR0 0x80000031
-#define GUEST_CR4 0x00002000
- info->ctrl_regs.cr0 = GUEST_CR0;
- info->ctrl_regs.cr4 = GUEST_CR4;
- ((struct cr0_32 *)&(info->shdw_pg_state.guest_cr0))->pe = 1;
-
/* Setup paging */
- if (info->shdw_pg_mode == SHADOW_PAGING) {
+ if (core->shdw_pg_mode == SHADOW_PAGING) {
PrintDebug("Creating initial shadow page table\n");
- if (v3_init_passthrough_pts(info) == -1) {
+ if (v3_init_passthrough_pts(core) == -1) {
PrintError("Could not initialize passthrough page tables\n");
return -1;
}
#define CR0_PE 0x00000001
#define CR0_PG 0x80000000
+#define CR0_WP 0x00010000 // To ensure mem hooks work
+ vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));
-
- vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG) );
- vmx_ret |= check_vmcs_write(VMCS_CR4_MASK, CR4_VMXE);
-
- info->ctrl_regs.cr3 = info->direct_map_pt;
+ core->ctrl_regs.cr3 = core->direct_map_pt;
// vmx_state->pinbased_ctrls |= NMI_EXIT;
/* Add CR exits */
vmx_state->pri_proc_ctrls.cr3_ld_exit = 1;
vmx_state->pri_proc_ctrls.cr3_str_exit = 1;
- }
+
+ vmx_state->pri_proc_ctrls.invlpg_exit = 1;
+
+ /* Add page fault exits */
+ vmx_state->excp_bmap.pf = 1;
- // Setup segment registers
- {
- struct v3_segment * seg_reg = (struct v3_segment *)&(info->segments);
+ // Setup VMX Assist
+ v3_vmxassist_init(core, vmx_state);
+
+ } else if ((core->shdw_pg_mode == NESTED_PAGING) &&
+ (v3_cpu_types[core->cpu_id] == V3_VMX_EPT_CPU)) {
+
+#define CR0_PE 0x00000001
+#define CR0_PG 0x80000000
+#define CR0_WP 0x00010000 // To ensure mem hooks work
+ vmx_ret |= check_vmcs_write(VMCS_CR0_MASK, (CR0_PE | CR0_PG | CR0_WP));
+
+ // vmx_state->pinbased_ctrls |= NMI_EXIT;
+
+ /* Disable CR exits */
+ vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
+ vmx_state->pri_proc_ctrls.cr3_str_exit = 0;
- int i;
+ vmx_state->pri_proc_ctrls.invlpg_exit = 0;
- for (i = 0; i < 10; i++) {
- seg_reg[i].selector = 3 << 3;
- seg_reg[i].limit = 0xffff;
- seg_reg[i].base = 0x0;
+ /* Add page fault exits */
+ // vmx_state->excp_bmap.pf = 1; // This should never happen..., enabled to catch bugs
+
+ // Setup VMX Assist
+ v3_vmxassist_init(core, vmx_state);
+
+ /* Enable EPT */
+ vmx_state->pri_proc_ctrls.sec_ctrls = 1; // Enable secondary proc controls
+ vmx_state->sec_proc_ctrls.enable_ept = 1; // enable EPT paging
+
+
+
+ if (v3_init_ept(core, &hw_info) == -1) {
+ PrintError("Error initializing EPT\n");
+ return -1;
}
- info->segments.cs.selector = 2<<3;
-
- /* Set only the segment registers */
- for (i = 0; i < 6; i++) {
- seg_reg[i].limit = 0xfffff;
- seg_reg[i].granularity = 1;
- seg_reg[i].type = 3;
- seg_reg[i].system = 1;
- seg_reg[i].dpl = 0;
- seg_reg[i].present = 1;
- seg_reg[i].db = 1;
+ } else if ((core->shdw_pg_mode == NESTED_PAGING) &&
+ (v3_cpu_types[core->cpu_id] == V3_VMX_EPT_UG_CPU)) {
+ int i = 0;
+ // For now we will assume that unrestricted guest mode is assured w/ EPT
+
+
+ core->vm_regs.rsp = 0x00;
+ core->rip = 0xfff0;
+ core->vm_regs.rdx = 0x00000f00;
+ core->ctrl_regs.rflags = 0x00000002; // The reserved bit is always 1
+ core->ctrl_regs.cr0 = 0x60010010; // Set the WP flag so the memory hooks work in real-mode
+
+
+ core->segments.cs.selector = 0xf000;
+ core->segments.cs.limit = 0xffff;
+ core->segments.cs.base = 0x0000000f0000LL;
+
+ // (raw attributes = 0xf3)
+ core->segments.cs.type = 0xb;
+ core->segments.cs.system = 0x1;
+ core->segments.cs.dpl = 0x0;
+ core->segments.cs.present = 1;
+
+
+
+ struct v3_segment * segregs [] = {&(core->segments.ss), &(core->segments.ds),
+ &(core->segments.es), &(core->segments.fs),
+ &(core->segments.gs), NULL};
+
+ for ( i = 0; segregs[i] != NULL; i++) {
+ struct v3_segment * seg = segregs[i];
+
+ seg->selector = 0x0000;
+ // seg->base = seg->selector << 4;
+ seg->base = 0x00000000;
+ seg->limit = 0xffff;
+
+
+ seg->type = 0x3;
+ seg->system = 0x1;
+ seg->dpl = 0x0;
+ seg->present = 1;
+ // seg->granularity = 1;
+
}
- info->segments.cs.type = 0xb;
- info->segments.ldtr.selector = 0x20;
- info->segments.ldtr.type = 2;
- info->segments.ldtr.system = 0;
- info->segments.ldtr.present = 1;
- info->segments.ldtr.granularity = 0;
+ core->segments.gdtr.limit = 0x0000ffff;
+ core->segments.gdtr.base = 0x0000000000000000LL;
-
- /************* Map in GDT and vmxassist *************/
-
- uint64_t gdt[] __attribute__ ((aligned(32))) = {
- 0x0000000000000000ULL, /* 0x00: reserved */
- 0x0000830000000000ULL, /* 0x08: 32-bit TSS */
- //0x0000890000000000ULL, /* 0x08: 32-bit TSS */
- 0x00CF9b000000FFFFULL, /* 0x10: CS 32-bit */
- 0x00CF93000000FFFFULL, /* 0x18: DS 32-bit */
- 0x000082000000FFFFULL, /* 0x20: LDTR 32-bit */
- };
-
-#define VMXASSIST_GDT 0x10000
- addr_t vmxassist_gdt = 0;
-
- if (v3_gpa_to_hva(info, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
- PrintError("Could not find VMXASSIST GDT destination\n");
+ core->segments.idtr.limit = 0x0000ffff;
+ core->segments.idtr.base = 0x0000000000000000LL;
+
+ core->segments.ldtr.selector = 0x0000;
+ core->segments.ldtr.limit = 0x0000ffff;
+ core->segments.ldtr.base = 0x0000000000000000LL;
+ core->segments.ldtr.type = 2;
+ core->segments.ldtr.present = 1;
+
+ core->segments.tr.selector = 0x0000;
+ core->segments.tr.limit = 0x0000ffff;
+ core->segments.tr.base = 0x0000000000000000LL;
+ core->segments.tr.type = 0xb;
+ core->segments.tr.present = 1;
+
+ // core->dbg_regs.dr6 = 0x00000000ffff0ff0LL;
+ core->dbg_regs.dr7 = 0x0000000000000400LL;
+
+ /* Enable EPT */
+ vmx_state->pri_proc_ctrls.sec_ctrls = 1; // Enable secondary proc controls
+ vmx_state->sec_proc_ctrls.enable_ept = 1; // enable EPT paging
+ vmx_state->sec_proc_ctrls.unrstrct_guest = 1; // enable unrestricted guest operation
+
+
+ /* Disable shadow paging stuff */
+ vmx_state->pri_proc_ctrls.cr3_ld_exit = 0;
+ vmx_state->pri_proc_ctrls.cr3_str_exit = 0;
+
+ vmx_state->pri_proc_ctrls.invlpg_exit = 0;
+
+
+ if (v3_init_ept(core, &hw_info) == -1) {
+ PrintError("Error initializing EPT\n");
return -1;
}
- memcpy((void *)vmxassist_gdt, gdt, sizeof(uint64_t) * 5);
-
- info->segments.gdtr.base = VMXASSIST_GDT;
-
-#define VMXASSIST_TSS 0x40000
- uint64_t vmxassist_tss = VMXASSIST_TSS;
- gdt[0x08 / sizeof(gdt[0])] |=
- ((vmxassist_tss & 0xFF000000) << (56 - 24)) |
- ((vmxassist_tss & 0x00FF0000) << (32 - 16)) |
- ((vmxassist_tss & 0x0000FFFF) << (16)) |
- (8392 - 1);
-
- info->segments.tr.selector = 0x08;
- info->segments.tr.base = vmxassist_tss;
-
- //info->segments.tr.type = 0x9;
- info->segments.tr.type = 0x3;
- info->segments.tr.system = 0;
- info->segments.tr.present = 1;
- info->segments.tr.granularity = 0;
+ } else {
+ PrintError("Invalid Virtual paging mode\n");
+ return -1;
}
-
- // setup VMXASSIST
- {
-#define VMXASSIST_START 0x000d0000
- extern uint8_t v3_vmxassist_start[];
- extern uint8_t v3_vmxassist_end[];
- addr_t vmxassist_dst = 0;
-
- if (v3_gpa_to_hva(info, VMXASSIST_START, &vmxassist_dst) == -1) {
- PrintError("Could not find VMXASSIST destination\n");
+
+
+ // hook vmx msrs
+
+ // Setup SYSCALL/SYSENTER MSRs in load/store area
+
+ // save STAR, LSTAR, FMASK, KERNEL_GS_BASE MSRs in MSR load/store area
+ {
+#define IA32_STAR 0xc0000081
+#define IA32_LSTAR 0xc0000082
+#define IA32_FMASK 0xc0000084
+#define IA32_KERN_GS_BASE 0xc0000102
+
+#define IA32_CSTAR 0xc0000083 // Compatibility mode STAR (ignored for now... hopefully its not that important...)
+
+ int msr_ret = 0;
+
+ struct vmcs_msr_entry * exit_store_msrs = NULL;
+ struct vmcs_msr_entry * exit_load_msrs = NULL;
+	struct vmcs_msr_entry * entry_load_msrs = NULL;
+ int max_msrs = (hw_info.misc_info.max_msr_cache_size + 1) * 4;
+
+ V3_Print("Setting up MSR load/store areas (max_msr_count=%d)\n", max_msrs);
+
+ if (max_msrs < 4) {
+ PrintError("Max MSR cache size is too small (%d)\n", max_msrs);
return -1;
}
- memcpy((void *)vmxassist_dst, v3_vmxassist_start, v3_vmxassist_end - v3_vmxassist_start);
+ vmx_state->msr_area = V3_VAddr(V3_AllocPages(1));
+
+ if (vmx_state->msr_area == NULL) {
+ PrintError("could not allocate msr load/store area\n");
+ return -1;
+ }
+
+ msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_CNT, 4);
+ msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_CNT, 4);
+ msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_CNT, 4);
+
+
+ exit_store_msrs = (struct vmcs_msr_entry *)(vmx_state->msr_area);
+ exit_load_msrs = (struct vmcs_msr_entry *)(vmx_state->msr_area + (sizeof(struct vmcs_msr_entry) * 4));
+ entry_load_msrs = (struct vmcs_msr_entry *)(vmx_state->msr_area + (sizeof(struct vmcs_msr_entry) * 8));
+
+
+ exit_store_msrs[0].index = IA32_STAR;
+ exit_store_msrs[1].index = IA32_LSTAR;
+ exit_store_msrs[2].index = IA32_FMASK;
+ exit_store_msrs[3].index = IA32_KERN_GS_BASE;
+
+	memcpy(exit_load_msrs, exit_store_msrs, sizeof(struct vmcs_msr_entry) * 4);
+	memcpy(entry_load_msrs, exit_store_msrs, sizeof(struct vmcs_msr_entry) * 4);
+
+
+ v3_get_msr(IA32_STAR, &(exit_load_msrs[0].hi), &(exit_load_msrs[0].lo));
+ v3_get_msr(IA32_LSTAR, &(exit_load_msrs[1].hi), &(exit_load_msrs[1].lo));
+ v3_get_msr(IA32_FMASK, &(exit_load_msrs[2].hi), &(exit_load_msrs[2].lo));
+ v3_get_msr(IA32_KERN_GS_BASE, &(exit_load_msrs[3].hi), &(exit_load_msrs[3].lo));
+
+ msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_STORE_ADDR, (addr_t)V3_PAddr(exit_store_msrs));
+ msr_ret |= check_vmcs_write(VMCS_EXIT_MSR_LOAD_ADDR, (addr_t)V3_PAddr(exit_load_msrs));
+ msr_ret |= check_vmcs_write(VMCS_ENTRY_MSR_LOAD_ADDR, (addr_t)V3_PAddr(entry_load_msrs));
+
}
- /*** Write all the info to the VMCS ***/
+ /* Sanity check ctrl/reg fields against hw_defaults */
-#define DEBUGCTL_MSR 0x1d9
- v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
- vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
- info->dbg_regs.dr7 = 0x400;
+
+
+ /*** Write all the info to the VMCS ***/
+
+ /*
+ {
+ // IS THIS NECESSARY???
+#define DEBUGCTL_MSR 0x1d9
+ struct v3_msr tmp_msr;
+ v3_get_msr(DEBUGCTL_MSR, &(tmp_msr.hi), &(tmp_msr.lo));
+ vmx_ret |= check_vmcs_write(VMCS_GUEST_DBG_CTL, tmp_msr.value);
+ core->dbg_regs.dr7 = 0x400;
+ }
+ */
#ifdef __V3_64BIT__
vmx_ret |= check_vmcs_write(VMCS_LINK_PTR, (addr_t)0xffffffffffffffffULL);
vmx_ret |= check_vmcs_write(VMCS_LINK_PTR_HIGH, (addr_t)0xffffffffUL);
#endif
- if (v3_update_vmcs_ctrl_fields(info)) {
+
+
+
+ if (v3_update_vmcs_ctrl_fields(core)) {
PrintError("Could not write control fields!\n");
return -1;
}
- if (v3_update_vmcs_host_state(info)) {
+ if (v3_update_vmcs_host_state(core)) {
PrintError("Could not write host state\n");
return -1;
}
-
- vmx_state->assist_state = VMXASSIST_DISABLED;
-
// reenable global interrupts for vm state initialization now
// that the vm state is initialized. If another VM kicks us off,
// it'll update our vmx state so that we know to reload ourself
return 0;
}
-int v3_init_vmx_vmcs(struct guest_info * info, v3_vm_class_t vm_class) {
+int v3_init_vmx_vmcs(struct guest_info * core, v3_vm_class_t vm_class) {
struct vmx_data * vmx_state = NULL;
int vmx_ret = 0;
vmx_state = (struct vmx_data *)V3_Malloc(sizeof(struct vmx_data));
+ memset(vmx_state, 0, sizeof(struct vmx_data));
PrintDebug("vmx_data pointer: %p\n", (void *)vmx_state);
PrintDebug("VMCS pointer: %p\n", (void *)(vmx_state->vmcs_ptr_phys));
- info->vmm_data = vmx_state;
+ core->vmm_data = vmx_state;
vmx_state->state = VMX_UNLAUNCHED;
- PrintDebug("Initializing VMCS (addr=%p)\n", info->vmm_data);
+ PrintDebug("Initializing VMCS (addr=%p)\n", core->vmm_data);
// TODO: Fix vmcs fields so they're 32-bit
if (vm_class == V3_PC_VM) {
PrintDebug("Initializing VMCS\n");
- init_vmcs_bios(info, vmx_state);
+ if (init_vmcs_bios(core, vmx_state) == -1) {
+ PrintError("Error initializing VMCS to BIOS state\n");
+ return -1;
+ }
} else {
PrintError("Invalid VM Class\n");
return -1;
struct vmx_data * vmx_state = core->vmm_data;
V3_FreePages((void *)(vmx_state->vmcs_ptr_phys), 1);
+ V3_FreePages(vmx_state->msr_area, 1);
V3_Free(vmx_state);
check_vmcs_read(VMCS_IDT_VECTOR_INFO, &(idt_vec_info.value));
if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 0)) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("Calling v3_injecting_intr\n");
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+ V3_Print("Calling v3_injecting_intr\n");
#endif
info->intr_core_state.irq_started = 0;
v3_injecting_intr(info, info->intr_core_state.irq_vector, V3_EXTERNAL_IRQ);
check_vmcs_write(VMCS_ENTRY_EXCP_ERR, info->excp_state.excp_error_code);
int_info.error_code = 1;
-#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("Injecting exception %d with error code %x\n",
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+ V3_Print("Injecting exception %d with error code %x\n",
int_info.vector, info->excp_state.excp_error_code);
#endif
}
int_info.valid = 1;
-#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)(addr_t)info->rip);
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+ V3_Print("Injecting exception %d (EIP=%p)\n", int_info.vector, (void *)(addr_t)info->rip);
#endif
check_vmcs_write(VMCS_ENTRY_INT_INFO, int_info.value);
if ((info->intr_core_state.irq_started == 1) && (idt_vec_info.valid == 1)) {
-#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("IRQ pending from previous injection\n");
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+ V3_Print("IRQ pending from previous injection\n");
#endif
// Copy the IDT vectoring info over to reinject the old interrupt
ent_int.error_code = 0;
ent_int.valid = 1;
-#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("Injecting Interrupt %d at exit %u(EIP=%p)\n",
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+ V3_Print("Injecting Interrupt %d at exit %u(EIP=%p)\n",
info->intr_core_state.irq_vector,
(uint32_t)info->num_exits,
(void *)(addr_t)info->rip);
check_vmcs_read(VMCS_EXIT_INSTR_LEN, &instr_len);
-#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("Enabling Interrupt-Window exiting: %d\n", instr_len);
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+ V3_Print("Enabling Interrupt-Window exiting: %d\n", instr_len);
#endif
vmx_info->pri_proc_ctrls.int_wndw_exit = 1;
v3_vmx_restore_vmcs(info);
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
if (info->sym_core_state.symcall_state.sym_call_active == 0) {
update_irq_entry_state(info);
}
check_vmcs_write(VMCS_TSC_OFFSET_HIGH, tsc_offset_high);
check_vmcs_write(VMCS_TSC_OFFSET, tsc_offset_low);
+ if (v3_update_vmcs_host_state(info)) {
+ v3_enable_ints();
+ PrintError("Could not write host state\n");
+ return -1;
+ }
+
if (vmx_info->state == VMX_UNLAUNCHED) {
vmx_info->state = VMX_LAUNCHED;
uint32_t error = 0;
vmcs_read(VMCS_INSTR_ERR, &error);
- PrintError("VMENTRY Error: %d\n", error);
+ v3_enable_ints();
+
+ PrintError("VMENTRY Error: %d\n", error);
return -1;
}
check_vmcs_read(VMCS_EXIT_INT_ERR, &(exit_info.int_err));
check_vmcs_read(VMCS_GUEST_LINEAR_ADDR, &(exit_info.guest_linear_addr));
+ if (info->shdw_pg_mode == NESTED_PAGING) {
+ check_vmcs_read(VMCS_GUEST_PHYS_ADDR, &(exit_info.ept_fault_addr));
+ }
+
//PrintDebug("VMX Exit taken, id-qual: %u-%lu\n", exit_info.exit_reason, exit_info.exit_qual);
exit_log[info->num_exits % 10] = exit_info;
-#ifdef CONFIG_SYMCALL
+#ifdef V3_CONFIG_SYMCALL
if (info->sym_core_state.symcall_state.sym_call_active == 0) {
update_irq_exit_state(info);
}
vmx_info->pri_proc_ctrls.int_wndw_exit = 0;
vmcs_write(VMCS_PROC_CTRLS, vmx_info->pri_proc_ctrls.value);
-#ifdef CONFIG_DEBUG_INTERRUPTS
- PrintDebug("Interrupts available again! (RIP=%llx)\n", info->rip);
+#ifdef V3_CONFIG_DEBUG_INTERRUPTS
+ V3_Print("Interrupts available again! (RIP=%llx)\n", info->rip);
#endif
}
void v3_init_vmx_cpu(int cpu_id) {
- extern v3_cpu_arch_t v3_cpu_types[];
if (cpu_id == 0) {
if (v3_init_vmx_hw(&hw_info) == -1) {
}
}
-
enable_vmx();
}
- v3_cpu_types[cpu_id] = V3_VMX_CPU;
-
-
+ {
+ struct vmx_sec_proc_ctrls sec_proc_ctrls;
+ sec_proc_ctrls.value = v3_vmx_get_ctrl_features(&(hw_info.sec_proc_ctrls));
+
+ if (sec_proc_ctrls.enable_ept == 0) {
+ V3_Print("VMX EPT (Nested) Paging not supported\n");
+ v3_cpu_types[cpu_id] = V3_VMX_CPU;
+ } else if (sec_proc_ctrls.unrstrct_guest == 0) {
+ V3_Print("VMX EPT (Nested) Paging supported\n");
+ v3_cpu_types[cpu_id] = V3_VMX_EPT_CPU;
+ } else {
+ V3_Print("VMX EPT (Nested) Paging + Unrestricted guest supported\n");
+ v3_cpu_types[cpu_id] = V3_VMX_EPT_UG_CPU;
+ }
+ }
}
#include <palacios/vmx_lowlevel.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmx.h>
+#include <palacios/vmm_ctrl_regs.h>
-#ifndef CONFIG_DEBUG_VMX
+#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
+
+
+
+// Magic constant identifying a valid vmxassist context header.
+#define VMXASSIST_MAGIC 0x17101966
+
+
+// Header exposed by the vmxassist blob in guest memory. Field names
+// suggest the *_gpa members are guest-physical addresses of the
+// world-switch contexts -- confirm against the vmxassist sources.
+struct vmx_assist_header {
+ uint64_t rsvd; // 8 bytes of nothing
+ uint32_t magic; // presumably must equal VMXASSIST_MAGIC -- verify in consumer
+ uint32_t new_ctx_gpa; // context being switched to
+ uint32_t old_ctx_gpa; // context being saved
+} __attribute__((packed));
+
+
+// Segment access-rights ("AR bytes") in the layout used by the VMCS
+// guest segment access-rights fields (see Intel SDM Vol. 3C).
+union vmcs_arbytes {
+ struct arbyte_fields {
+ unsigned int seg_type : 4, // segment type
+ s : 1, // descriptor type: 0=system, 1=code/data
+ dpl : 2, // descriptor privilege level
+ p : 1, // present
+ reserved0 : 4,
+ avl : 1, // available for software use
+ reserved1 : 1,
+ default_ops_size: 1, // D/B: default operand size
+ g : 1, // granularity
+ null_bit : 1, // segment unusable when set
+ reserved2 : 15;
+ } __attribute__((packed)) fields;
+ unsigned int bytes; // raw 32-bit view of the same bits
+} __attribute__((packed));
+
+// One segment-register snapshot in the vmxassist world-switch context.
+struct vmx_assist_segment {
+ uint32_t sel; // selector
+ uint32_t limit;
+ uint32_t base;
+ union vmcs_arbytes arbytes; // access rights in VMCS AR-byte format
+} __attribute__((packed));
+
+
+/*
+ * World switch state
+ */
+struct vmx_assist_context {
+ uint32_t eip; /* execution pointer */
+ uint32_t esp; /* stack pointer */
+ uint32_t eflags; /* flags register */
+ uint32_t cr0;
+ uint32_t cr3; /* page table directory */
+ uint32_t cr4;
+
+ uint32_t idtr_limit; /* idt */
+ uint32_t idtr_base;
+
+ uint32_t gdtr_limit; /* gdt */
+ uint32_t gdtr_base;
+
+ struct vmx_assist_segment cs;
+ struct vmx_assist_segment ds;
+ struct vmx_assist_segment es;
+ struct vmx_assist_segment ss;
+ struct vmx_assist_segment fs;
+ struct vmx_assist_segment gs;
+ struct vmx_assist_segment tr;
+ struct vmx_assist_segment ldtr;
+
+
+ unsigned char rm_irqbase[2];
+} __attribute__((packed));
+
+
+
static void vmx_save_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx);
static void vmx_restore_world_ctx(struct guest_info * info, struct vmx_assist_context * ctx);
- if (v3_gpa_to_hva(info, VMXASSIST_BASE, (addr_t *)&hdr) == -1) {
+ if (v3_gpa_to_hva(info, VMXASSIST_START, (addr_t *)&hdr) == -1) {
PrintError("Could not translate address for vmxassist header\n");
return -1;
}
}
+/* Prepare a guest core to run the embedded vmxassist blob: seeds
+ * RIP/RSP, control registers, flat protected-mode segments, a small GDT,
+ * an optional 1:1 page table (nested paging), and copies the blob into
+ * guest memory. Returns 0 on success, -1 on any gpa->hva translation
+ * failure. */
+int v3_vmxassist_init(struct guest_info * core, struct vmx_data * vmx_state) {
+
+ // Entry point and stack of the vmxassist blob inside the guest.
+ core->rip = 0xd0000;
+ core->vm_regs.rsp = 0x80000;
+ ((struct rflags *)&(core->ctrl_regs.rflags))->rsvd1 = 1; // RFLAGS bit 1 is architecturally always 1
+
+#define GUEST_CR0 0x80010031
+#define GUEST_CR4 0x00002010
+ core->ctrl_regs.cr0 = GUEST_CR0; // PG | WP | NE | ET | PE
+ core->ctrl_regs.cr4 = GUEST_CR4; // VMXE | PSE
+
+ // Shadowed guest view of CR0: guest believes it is in protected mode.
+ ((struct cr0_32 *)&(core->shdw_pg_state.guest_cr0))->pe = 1;
+ ((struct cr0_32 *)&(core->shdw_pg_state.guest_cr0))->wp = 1;
+
+
+ // Setup segment registers
+ {
+ // Treat core->segments as a flat array so all slots can be initialized.
+ struct v3_segment * seg_reg = (struct v3_segment *)&(core->segments);
+
+ int i;
+
+ // First pass: default every slot to selector 0x18 (DS entry in the GDT below).
+ for (i = 0; i < 10; i++) {
+ seg_reg[i].selector = 3 << 3;
+ seg_reg[i].limit = 0xffff;
+ seg_reg[i].base = 0x0;
+ }
+
+ core->segments.cs.selector = 2 << 3; // 0x10: CS entry in the GDT below
+
+ /* Set only the segment registers */
+ // Second pass: flat 4GiB segments for the first six slots.
+ for (i = 0; i < 6; i++) {
+ seg_reg[i].limit = 0xfffff; // with granularity=1 => 4GiB span
+ seg_reg[i].granularity = 1;
+ seg_reg[i].type = 3; // data, read/write, accessed
+ seg_reg[i].system = 1; // code/data (non-system) descriptor
+ seg_reg[i].dpl = 0;
+ seg_reg[i].present = 1;
+ seg_reg[i].db = 1; // 32-bit default operand size
+ }
+
+ core->segments.cs.type = 0xb; // code, execute/read, accessed
+
+ // LDTR points at the LDT system descriptor (GDT entry 4, selector 0x20).
+ core->segments.ldtr.selector = 0x20;
+ core->segments.ldtr.type = 2; // LDT system-segment type
+ core->segments.ldtr.system = 0; // system descriptor
+ core->segments.ldtr.present = 1;
+ core->segments.ldtr.granularity = 0;
+
+
+ /************* Map in GDT and vmxassist *************/
+
+ uint64_t gdt[] __attribute__ ((aligned(32))) = {
+ 0x0000000000000000ULL, /* 0x00: reserved */
+ 0x0000830000000000ULL, /* 0x08: 32-bit TSS */
+ //0x0000890000000000ULL, /* 0x08: 32-bit TSS */
+ 0x00CF9b000000FFFFULL, /* 0x10: CS 32-bit */
+ 0x00CF93000000FFFFULL, /* 0x18: DS 32-bit */
+ 0x000082000000FFFFULL, /* 0x20: LDTR 32-bit */
+ };
+
+
+ addr_t vmxassist_gdt = 0;
+
+ // Translate the fixed guest-physical GDT slot and copy the table in.
+ if (v3_gpa_to_hva(core, VMXASSIST_GDT, &vmxassist_gdt) == -1) {
+ PrintError("Could not find VMXASSIST GDT destination\n");
+ return -1;
+ }
+
+ memcpy((void *)vmxassist_gdt, gdt, sizeof(uint64_t) * 5);
+
+ core->segments.gdtr.base = VMXASSIST_GDT;
+
+
+ // NOTE(review): the TSS base/limit are patched into the LOCAL gdt[]
+ // AFTER the table was copied into guest memory, so the in-guest TSS
+ // descriptor keeps base 0. TR is seeded directly below from this
+ // structure instead -- confirm this ordering is intentional.
+ uint64_t vmxassist_tss = VMXASSIST_TSS;
+ gdt[0x08 / sizeof(gdt[0])] |=
+ ((vmxassist_tss & 0xFF000000) << (56 - 24)) |
+ ((vmxassist_tss & 0x00FF0000) << (32 - 16)) |
+ ((vmxassist_tss & 0x0000FFFF) << (16)) |
+ (8392 - 1); // NOTE(review): 8392 looks like a typo for 8192 (TSS limit) -- confirm
+
+ core->segments.tr.selector = 0x08;
+ core->segments.tr.base = vmxassist_tss;
+
+ //core->segments.tr.type = 0x9;
+ core->segments.tr.type = 0x3;
+ core->segments.tr.system = 0; // system descriptor
+ core->segments.tr.present = 1;
+ core->segments.tr.granularity = 0;
+ }
+
+ // Under nested paging the guest still needs its own page tables;
+ // build a 4GiB identity map out of 4MiB large pages at a fixed GPA.
+ if (core->shdw_pg_mode == NESTED_PAGING) {
+ // setup 1to1 page table internally.
+ int i = 0;
+ pde32_4MB_t * pde = NULL;
+
+ PrintError("Setting up internal VMXASSIST page tables\n"); // NOTE(review): informational message via PrintError -- PrintDebug?
+
+ if (v3_gpa_to_hva(core, VMXASSIST_1to1_PT, (addr_t *)(&pde)) == -1) {
+ PrintError("Could not find VMXASSIST 1to1 PT destination\n");
+ return -1;
+ }
+
+ memset(pde, 0, PAGE_SIZE);
+
+ // 1024 PDEs x 4MiB pages = full 32-bit address space, identity mapped.
+ for (i = 0; i < 1024; i++) {
+ pde[i].present = 1;
+ pde[i].writable = 1;
+ pde[i].user_page = 1;
+ pde[i].large_page = 1;
+ pde[i].page_base_addr = PAGE_BASE_ADDR_4MB(i * PAGE_SIZE_4MB);
+
+ // PrintError("PDE %d: %x\n", i, *(uint32_t *)&(pde[i]));
+ }
+
+ core->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
+
+ }
+
+ // setup VMXASSIST
+ {
+
+ // Linker-provided bounds of the embedded vmxassist binary image.
+ extern uint8_t v3_vmxassist_start[];
+ extern uint8_t v3_vmxassist_end[];
+ addr_t vmxassist_dst = 0;
+
+ if (v3_gpa_to_hva(core, VMXASSIST_START, &vmxassist_dst) == -1) {
+ PrintError("Could not find VMXASSIST destination\n");
+ return -1;
+ }
+
+ memcpy((void *)vmxassist_dst, v3_vmxassist_start, v3_vmxassist_end - v3_vmxassist_start);
+
+
+ vmx_state->assist_state = VMXASSIST_DISABLED; // blob present, not active yet
+ }
+
+
+ return 0;
+}
#include <palacios/vmm_direct_paging.h>
#include <palacios/vmm_ctrl_regs.h>
-#ifndef CONFIG_DEBUG_VMX
+#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
guest_cr0->pg = 1;
guest_cr0->ne = 1;
- if (paging_transition) {
+ if ((paging_transition)) {
// Paging transition
if (v3_get_vm_mem_mode(info) == VIRTUAL_MEM) {
// PrintDebug("Activating Shadow Page tables\n");
- if (v3_activate_shadow_pt(info) == -1) {
- PrintError("Failed to activate shadow page tables\n");
- return -1;
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ if (v3_activate_shadow_pt(info) == -1) {
+ PrintError("Failed to activate shadow page tables\n");
+ return -1;
+ }
}
- } else if (v3_activate_passthrough_pt(info) == -1) {
- PrintError("Failed to activate passthrough page tables\n");
- return -1;
+ } else {
+
+ if (info->shdw_pg_mode == SHADOW_PAGING) {
+ if (v3_activate_passthrough_pt(info) == -1) {
+ PrintError("Failed to activate passthrough page tables\n");
+ return -1;
+ }
+ } else {
+ // This is hideous... Let's hope that the 1to1 page table has not been nuked...
+ info->ctrl_regs.cr3 = VMXASSIST_1to1_PT;
+ }
}
}
}
* redistribute, and modify it as specified in the file "V3VEE_LICENSE".
*/
+#include <palacios/vmm.h>
+#include <palacios/vmx_ept.h>
+#include <palacios/vmx_lowlevel.h>
+#include <palacios/vmm_paging.h>
+#include <palacios/vm_guest_mem.h>
+
+static struct vmx_ept_msr * ept_info = NULL;
+
+
+// Allocate and zero one host page for use as an EPT paging structure;
+// returns its host-virtual address.
+// NOTE(review): the V3_AllocPages result is not checked -- on failure the
+// memset would fault. Confirm allocation cannot fail here or add a check.
+static addr_t create_ept_page() {
+ void * page = 0;
+ page = V3_VAddr(V3_AllocPages(1));
+ memset(page, 0, PAGE_SIZE);
+
+ return (addr_t)page;
+}
+
+
+
+
+/* Initialize the EPT pointer (EPTP) for this core: allocates the root
+ * paging-structure page and encodes its address, the paging-structure
+ * memory type, and the page-walk length into core->direct_map_pt.
+ * Returns 0 on success, -1 if 4-level walks are not reported by HW. */
+int v3_init_ept(struct guest_info * core, struct vmx_hw_info * hw_info) {
+ addr_t ept_pa = (addr_t)V3_PAddr((void *)create_ept_page());
+ vmx_eptp_t * ept_ptr = (vmx_eptp_t *)&(core->direct_map_pt);
+
+
+ // Cache the EPT capability info at file scope for later handlers.
+ ept_info = &(hw_info->ept_info);
+
+ /* TODO: Should we set this to WB?? */
+ ept_ptr->psmt = 0; // 0 = uncacheable paging-structure memory type (Intel SDM)
+
+ if (ept_info->pg_walk_len4) {
+ ept_ptr->pwl1 = 3; // EPTP encodes (walk length - 1): 3 => 4-level walk
+ } else {
+ PrintError("Unsupported EPT Table depth\n");
+ return -1;
+ }
+
+ ept_ptr->pml_base_addr = PAGE_BASE_ADDR(ept_pa);
+
+
+ return 0;
+}
+
+
+/* We can use the default paging macros, since the formats are close enough to allow it */
+
+/* EPT-violation handler: lazily populates the 4-level EPT mapping for
+ * fault_addr. Permissions are taken from the backing v3_mem_region; a
+ * fault on an entry that is already mapped (permissions were fixed on
+ * the first pass) is forwarded to region->unhandled(). Uses 2MiB large
+ * pages when the core allows them. Returns 0 on success, -1 on error. */
+int v3_handle_ept_fault(struct guest_info * core, addr_t fault_addr, struct ept_exit_qual * ept_qual) {
+ ept_pml4_t * pml = NULL;
+ // ept_pdp_1GB_t * pdpe1gb = NULL;
+ ept_pdp_t * pdpe = NULL;
+ ept_pde_2MB_t * pde2mb = NULL;
+ ept_pde_t * pde = NULL;
+ ept_pte_t * pte = NULL;
+ addr_t host_addr = 0;
+
+ // EPT entries reuse the long-mode paging index macros (formats are compatible).
+ int pml_index = PML4E64_INDEX(fault_addr);
+ int pdpe_index = PDPE64_INDEX(fault_addr);
+ int pde_index = PDE64_INDEX(fault_addr);
+ int pte_index = PTE64_INDEX(fault_addr);
+
+ struct v3_mem_region * region = v3_get_mem_region(core->vm_info, core->cpu_id, fault_addr);
+ int page_size = PAGE_SIZE_4KB;
+
+
+
+ // Translate the EPT exit qualification into a generic page-fault error code.
+ pf_error_t error_code = {0};
+ error_code.present = ept_qual->present;
+ error_code.write = ept_qual->write;
+
+ if (region == NULL) {
+ PrintError("invalid region, addr=%p\n", (void *)fault_addr);
+ return -1;
+ }
+
+ if ((core->use_large_pages == 1) || (core->use_giant_pages == 1)) {
+ page_size = v3_get_max_page_size(core, fault_addr, LONG);
+ }
+
+
+
+ pml = (ept_pml4_t *)CR3_TO_PML4E64_VA(core->direct_map_pt);
+
+
+
+ //Fix up the PML entry
+ // A cleared read bit marks a not-yet-populated entry at every level below.
+ // NOTE(review): create_ept_page return values are used unchecked here.
+ if (pml[pml_index].read == 0) {
+ pdpe = (ept_pdp_t *)create_ept_page();
+
+ // Set default PML Flags...
+ pml[pml_index].read = 1;
+ pml[pml_index].write = 1;
+ pml[pml_index].exec = 1;
+
+ pml[pml_index].pdp_base_addr = PAGE_BASE_ADDR_4KB((addr_t)V3_PAddr(pdpe));
+ } else {
+ pdpe = V3_VAddr((void *)BASE_TO_PAGE_ADDR_4KB(pml[pml_index].pdp_base_addr));
+ }
+
+
+ // Fix up the PDPE entry
+ if (pdpe[pdpe_index].read == 0) {
+ pde = (ept_pde_t *)create_ept_page();
+
+ // Set default PDPE Flags...
+ pdpe[pdpe_index].read = 1;
+ pdpe[pdpe_index].write = 1;
+ pdpe[pdpe_index].exec = 1;
+
+ pdpe[pdpe_index].pd_base_addr = PAGE_BASE_ADDR_4KB((addr_t)V3_PAddr(pde));
+ } else {
+ pde = V3_VAddr((void *)BASE_TO_PAGE_ADDR_4KB(pdpe[pdpe_index].pd_base_addr));
+ }
+
+
+
+ // Fix up the 2MiB PDE and exit here
+ if (page_size == PAGE_SIZE_2MB) {
+ pde2mb = (ept_pde_2MB_t *)pde; // all but these two lines are the same for PTE
+ pde2mb[pde_index].large_page = 1;
+
+ if (pde2mb[pde_index].read == 0) {
+
+ // Only alloced+readable regions are mapped; everything else is
+ // delegated to the region's unhandled() callback.
+ if ( (region->flags.alloced == 1) &&
+ (region->flags.read == 1)) {
+ // Full access
+ pde2mb[pde_index].read = 1;
+ pde2mb[pde_index].exec = 1;
+
+ if (region->flags.write == 1) {
+ pde2mb[pde_index].write = 1;
+ } else {
+ pde2mb[pde_index].write = 0;
+ }
+
+ if (v3_gpa_to_hpa(core, fault_addr, &host_addr) == -1) {
+ PrintError("Error: Could not translate fault addr (%p)\n", (void *)fault_addr);
+ return -1;
+ }
+
+ pde2mb[pde_index].page_base_addr = PAGE_BASE_ADDR_2MB(host_addr);
+ } else {
+ return region->unhandled(core, fault_addr, fault_addr, region, error_code);
+ }
+ } else {
+ // We fix all permissions on the first pass,
+ // so we only get here if its an unhandled exception
+
+ return region->unhandled(core, fault_addr, fault_addr, region, error_code);
+ }
+
+ return 0;
+ }
+
+ // Continue with the 4KiB page hierarchy
+
+
+ // Fix up the PDE entry
+ if (pde[pde_index].read == 0) {
+ pte = (ept_pte_t *)create_ept_page();
+
+ pde[pde_index].read = 1;
+ pde[pde_index].write = 1;
+ pde[pde_index].exec = 1;
+
+ pde[pde_index].pt_base_addr = PAGE_BASE_ADDR_4KB((addr_t)V3_PAddr(pte));
+ } else {
+ pte = V3_VAddr((void *)BASE_TO_PAGE_ADDR_4KB(pde[pde_index].pt_base_addr));
+ }
+
+
+
+
+ // Fix up the PTE entry
+ // Mirrors the 2MiB path above at 4KiB granularity.
+ if (pte[pte_index].read == 0) {
+
+ if ( (region->flags.alloced == 1) &&
+ (region->flags.read == 1)) {
+ // Full access
+ pte[pte_index].read = 1;
+ pte[pte_index].exec = 1;
+
+ if (region->flags.write == 1) {
+ pte[pte_index].write = 1;
+ } else {
+ pte[pte_index].write = 0;
+ }
+
+ if (v3_gpa_to_hpa(core, fault_addr, &host_addr) == -1) {
+ PrintError("Error Could not translate fault addr (%p)\n", (void *)fault_addr);
+ return -1;
+ }
+
+
+ pte[pte_index].page_base_addr = PAGE_BASE_ADDR_4KB(host_addr);
+ } else {
+ return region->unhandled(core, fault_addr, fault_addr, region, error_code);
+ }
+ } else {
+ // We fix all permissions on the first pass,
+ // so we only get here if its an unhandled exception
+
+ return region->unhandled(core, fault_addr, fault_addr, region, error_code);
+ }
+
+
+ return 0;
+}
#include <palacios/vmx_ctrl_regs.h>
#include <palacios/vmx_assist.h>
#include <palacios/vmm_halt.h>
+#include <palacios/vmx_ept.h>
-#ifndef CONFIG_DEBUG_VMX
+#ifndef V3_CONFIG_DEBUG_VMX
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
-#ifdef CONFIG_TELEMETRY
+#ifdef V3_CONFIG_TELEMETRY
#include <palacios/vmm_telemetry.h>
#endif
/* At this point the GPRs are already copied into the guest_info state */
int v3_handle_vmx_exit(struct guest_info * info, struct vmx_exit_info * exit_info) {
+ struct vmx_basic_exit_info * basic_info = (struct vmx_basic_exit_info *)&(exit_info->exit_reason);
+
/*
PrintError("Handling VMEXIT: %s (%u), %lu (0x%lx)\n",
v3_vmx_exit_code_to_str(exit_info->exit_reason),
v3_print_vmcs();
*/
-#ifdef CONFIG_TELEMETRY
+
+ if (basic_info->entry_error == 1) {
+ switch (basic_info->reason) {
+ case VMEXIT_INVALID_GUEST_STATE:
+ PrintError("VM Entry failed due to invalid guest state\n");
+ PrintError("Printing VMCS: (NOTE: This VMCS may not belong to the correct guest)\n");
+ v3_print_vmcs();
+ break;
+ case VMEXIT_INVALID_MSR_LOAD:
+ PrintError("VM Entry failed due to error loading MSRs\n");
+ break;
+ default:
+ PrintError("Entry failed for unknown reason (%d)\n", basic_info->reason);
+ break;
+ }
+
+ return -1;
+ }
+
+
+#ifdef V3_CONFIG_TELEMETRY
if (info->vm_info->enable_telemetry) {
v3_telemetry_start_exit(info);
}
#endif
- switch (exit_info->exit_reason) {
+ switch (basic_info->reason) {
case VMEXIT_INFO_EXCEPTION_OR_NMI: {
pf_error_t error_code = *(pf_error_t *)&(exit_info->int_err);
// JRL: Change "0x0e" to a macro value
- if ((uint8_t)exit_info->int_info == 0x0e) {
-#ifdef CONFIG_DEBUG_SHADOW_PAGING
+ if ((uint8_t)exit_info->int_info == 14) {
+#ifdef V3_CONFIG_DEBUG_SHADOW_PAGING
PrintDebug("Page Fault at %p error_code=%x\n", (void *)exit_info->exit_qual, *(uint32_t *)&error_code);
#endif
PrintError("Error handling shadow page fault\n");
return -1;
}
+
} else {
PrintError("Page fault in unimplemented paging mode\n");
return -1;
break;
}
+ case VMEXIT_EPT_VIOLATION: {
+ struct ept_exit_qual * ept_qual = (struct ept_exit_qual *)&(exit_info->exit_qual);
+
+ if (v3_handle_ept_fault(info, exit_info->ept_fault_addr, ept_qual) == -1) {
+ PrintError("Error handling EPT fault\n");
+ return -1;
+ }
+
+ break;
+ }
case VMEXIT_INVLPG:
if (info->shdw_pg_mode == SHADOW_PAGING) {
if (v3_handle_shadow_invlpg(info) == -1) {
break;
case VMEXIT_RDTSC:
-#ifdef CONFIG_DEBUG_TIME
+#ifdef V3_CONFIG_DEBUG_TIME
PrintDebug("RDTSC\n");
#endif
if (v3_handle_rdtsc(info) == -1) {
}
break;
+
+
+
case VMEXIT_PAUSE:
// Handled as NOP
info->rip += 2;
// This is handled in the atomic part of the vmx code,
// not in the generic (interruptable) vmx handler
break;
+
+
default:
PrintError("Unhandled VMEXIT: %s (%u), %lu (0x%lx)\n",
- v3_vmx_exit_code_to_str(exit_info->exit_reason),
- exit_info->exit_reason,
+ v3_vmx_exit_code_to_str(basic_info->reason),
+ basic_info->reason,
exit_info->exit_qual, exit_info->exit_qual);
return -1;
}
-#ifdef CONFIG_TELEMETRY
+#ifdef V3_CONFIG_TELEMETRY
if (info->vm_info->enable_telemetry) {
v3_telemetry_end_exit(info, exit_info->exit_reason);
}
static const char VMEXIT_IO_INSTR_STR[] = "VMEXIT_IO_INSTR";
static const char VMEXIT_RDMSR_STR[] = "VMEXIT_RDMSR";
static const char VMEXIT_WRMSR_STR[] = "VMEXIT_WRMSR";
-static const char VMEXIT_ENTRY_FAIL_INVALID_GUEST_STATE_STR[] = "VMEXIT_ENTRY_FAIL_INVALID_GUEST_STATE";
-static const char VMEXIT_ENTRY_FAIL_MSR_LOAD_STR[] = "VMEXIT_ENTRY_FAIL_MSR_LOAD";
+static const char VMEXIT_INVALID_GUEST_STATE_STR[] = "VMEXIT_INVALID_GUEST_STATE";
+static const char VMEXIT_INVALID_MSR_LOAD_STR[] = "VMEXIT_INVALID_MSR_LOAD";
static const char VMEXIT_MWAIT_STR[] = "VMEXIT_MWAIT";
static const char VMEXIT_MONITOR_STR[] = "VMEXIT_MONITOR";
static const char VMEXIT_PAUSE_STR[] = "VMEXIT_PAUSE";
-static const char VMEXIT_ENTRY_FAILURE_MACHINE_CHECK_STR[] = "VMEXIT_ENTRY_FAILURE_MACHINE_CHECK";
+static const char VMEXIT_INVALID_MACHINE_CHECK_STR[] = "VMEXIT_INVALID_MACHINE_CHECK"; // fixed typo: was "VMEXIT_INVALIDE_..."
static const char VMEXIT_TPR_BELOW_THRESHOLD_STR[] = "VMEXIT_TPR_BELOW_THRESHOLD";
static const char VMEXIT_APIC_STR[] = "VMEXIT_APIC";
static const char VMEXIT_GDTR_IDTR_STR[] = "VMEXIT_GDTR_IDTR";
return VMEXIT_RDMSR_STR;
case VMEXIT_WRMSR:
return VMEXIT_WRMSR_STR;
- case VMEXIT_ENTRY_FAIL_INVALID_GUEST_STATE:
- return VMEXIT_ENTRY_FAIL_INVALID_GUEST_STATE_STR;
- case VMEXIT_ENTRY_FAIL_MSR_LOAD:
- return VMEXIT_ENTRY_FAIL_MSR_LOAD_STR;
+ case VMEXIT_INVALID_GUEST_STATE:
+ return VMEXIT_INVALID_GUEST_STATE_STR;
+ case VMEXIT_INVALID_MSR_LOAD:
+ return VMEXIT_INVALID_MSR_LOAD_STR;
case VMEXIT_MWAIT:
return VMEXIT_MWAIT_STR;
case VMEXIT_MONITOR:
return VMEXIT_MONITOR_STR;
case VMEXIT_PAUSE:
return VMEXIT_PAUSE_STR;
- case VMEXIT_ENTRY_FAILURE_MACHINE_CHECK:
- return VMEXIT_ENTRY_FAILURE_MACHINE_CHECK_STR;
+ case VMEXIT_INVALID_MACHINE_CHECK:
+ return VMEXIT_INVALID_MACHINE_CHECK_STR;
case VMEXIT_TPR_BELOW_THRESHOLD:
return VMEXIT_TPR_BELOW_THRESHOLD_STR;
case VMEXIT_APIC:
// Intel VMX Feature MSRs
+/* Report which control bits this CPU can have set: a bit is usable when
+ * it is required to be 1 (set in req_val) or when it is not fixed at
+ * all (its req_mask bit is clear, meaning it may be changed). */
+uint32_t v3_vmx_get_ctrl_features(struct vmx_ctrl_field * fields) {
+ uint32_t avail_bits = fields->req_val | ~(fields->req_mask);
+
+ return avail_bits;
+}
+
static int get_ex_ctrl_caps(struct vmx_hw_info * hw_info, struct vmx_ctrl_field * field,
- uint32_t old_msr, uint32_t true_msr) {
+ uint32_t old_msr, uint32_t true_msr) {
uint32_t old_0; /* Bit is 1 => MB1 */
uint32_t old_1; /* Bit is 0 => MBZ */
uint32_t true_0; /* Bit is 1 => MB1 */
/* Intel Manual 3B. Sect. G.3.3 */
if ( ((hw_info->proc_ctrls.req_mask & 0x80000000) == 0) ||
((hw_info->proc_ctrls.req_val & 0x80000000) == 1) ) {
- get_ctrl_caps(&(hw_info->proc_ctrls_2), VMX_PROCBASED_CTLS2_MSR);
+ get_ctrl_caps(&(hw_info->sec_proc_ctrls), VMX_PROCBASED_CTLS2_MSR);
}
get_cr_fields(&(hw_info->cr0), VMX_CR0_FIXED1_MSR, VMX_CR0_FIXED0_MSR);
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_decoder.h>
-#ifndef CONFIG_DEBUG_IO
+#ifndef V3_CONFIG_DEBUG_IO
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
sym = NULL;
switch (line[0]) {
case '#':
- if (memcmp(line + 2, "CONFIG_", 7))
+ if (memcmp(line + 2, "V3_CONFIG_", 10))
continue;
- p = strchr(line + 9, ' ');
+ p = strchr(line + 12, ' ');
if (!p)
continue;
*p++ = 0;
if (strncmp(p, "is not set", 10))
continue;
- sym = sym_find(line + 9);
+ sym = sym_find(line + 12);
if (!sym) {
- conf_warning("trying to assign nonexistent symbol %s", line + 9);
+ conf_warning("trying to assign nonexistent symbol %s", line + 12);
break;
} else if (!(sym->flags & SYMBOL_NEW)) {
conf_warning("trying to reassign symbol %s", sym->name);
;
}
break;
- case 'C':
- if (memcmp(line, "CONFIG_", 7)) {
- conf_warning("unexpected data");
+ case 'V':
+ if (memcmp(line, "V3_CONFIG_", 10)) {
+ conf_warning("unexpected data (1)");
continue;
}
- p = strchr(line + 7, '=');
+ p = strchr(line + 10, '=');
if (!p)
continue;
*p++ = 0;
p2 = strchr(p, '\n');
if (p2)
*p2 = 0;
- sym = sym_find(line + 7);
+ sym = sym_find(line + 10);
if (!sym) {
- conf_warning("trying to assign nonexistent symbol %s", line + 7);
+ conf_warning("trying to assign nonexistent symbol %s", line + 10);
break;
} else if (!(sym->flags & SYMBOL_NEW)) {
conf_warning("trying to reassign symbol %s", sym->name);
case '\n':
break;
default:
- conf_warning("unexpected data");
+ conf_warning("unexpected data (2)");
continue;
}
if (sym && sym_is_choice_value(sym)) {
case S_TRISTATE:
switch (sym_get_tristate_value(sym)) {
case no:
- fprintf(out, "# CONFIG_%s is not set\n", sym->name);
+ fprintf(out, "# V3_CONFIG_%s is not set\n", sym->name);
if (out_h)
- fprintf(out_h, "#undef CONFIG_%s\n", sym->name);
+ fprintf(out_h, "#undef V3_CONFIG_%s\n", sym->name);
break;
case mod:
- fprintf(out, "CONFIG_%s=m\n", sym->name);
+ fprintf(out, "V3_CONFIG_%s=m\n", sym->name);
if (out_h)
- fprintf(out_h, "#define CONFIG_%s_MODULE 1\n", sym->name);
+ fprintf(out_h, "#define V3_CONFIG_%s_MODULE 1\n", sym->name);
break;
case yes:
- fprintf(out, "CONFIG_%s=y\n", sym->name);
+ fprintf(out, "V3_CONFIG_%s=y\n", sym->name);
if (out_h)
- fprintf(out_h, "#define CONFIG_%s 1\n", sym->name);
+ fprintf(out_h, "#define V3_CONFIG_%s 1\n", sym->name);
break;
}
break;
case S_STRING:
// fix me
str = sym_get_string_value(sym);
- fprintf(out, "CONFIG_%s=\"", sym->name);
+ fprintf(out, "V3_CONFIG_%s=\"", sym->name);
if (out_h)
- fprintf(out_h, "#define CONFIG_%s \"", sym->name);
+ fprintf(out_h, "#define V3_CONFIG_%s \"", sym->name);
do {
l = strcspn(str, "\"\\");
if (l) {
case S_HEX:
str = sym_get_string_value(sym);
if (str[0] != '0' || (str[1] != 'x' && str[1] != 'X')) {
- fprintf(out, "CONFIG_%s=%s\n", sym->name, str);
+ fprintf(out, "V3_CONFIG_%s=%s\n", sym->name, str);
if (out_h)
- fprintf(out_h, "#define CONFIG_%s 0x%s\n", sym->name, str);
+ fprintf(out_h, "#define V3_CONFIG_%s 0x%s\n", sym->name, str);
break;
}
case S_INT:
str = sym_get_string_value(sym);
- fprintf(out, "CONFIG_%s=%s\n", sym->name, str);
+ fprintf(out, "V3_CONFIG_%s=%s\n", sym->name, str);
if (out_h)
- fprintf(out_h, "#define CONFIG_%s %s\n", sym->name, str);
+ fprintf(out_h, "#define V3_CONFIG_%s %s\n", sym->name, str);
break;
}
}