EXTRA_CFLAGS += -I$(PWD)/../palacios/include/ -include autoconf.h -DMODULE=1 -D__KERNEL__=1
-v3vee-y := palacios.o \
- palacios-dev.o \
- palacios-vm.o \
- palacios-mm.o \
- palacios-queue.o \
- palacios-hashtable.o \
+v3vee-y := palacios-stubs.o \
+ main.o \
+ vm.o \
+ mm.o \
+ util-queue.o \
+ util-hashtable.o \
linux-exts.o
-v3vee-$(V3_CONFIG_CONSOLE) += palacios-console.o
-v3vee-$(V3_CONFIG_FILE) += palacios-file.o
-v3vee-$(V3_CONFIG_STREAM) += palacios-stream.o \
- palacios-ringbuffer.o
-v3vee-$(V3_CONFIG_EXT_INSPECTOR) += palacios-inspector.o
-v3vee-$(V3_CONFIG_PACKET) += palacios-packet.o
-v3vee-$(V3_CONFIG_SOCKET) += palacios-socket.o
-v3vee-$(V3_CONFIG_KEYED_STREAMS) += palacios-keyed-stream.o
-v3vee-$(V3_CONFIG_HOST_DEVICE) += palacios-host-dev.o
-v3vee-$(V3_CONFIG_GRAPHICS_CONSOLE) += palacios-graphics-console.o
+v3vee-$(V3_CONFIG_CONSOLE) += iface-console.o
+v3vee-$(V3_CONFIG_FILE) += iface-file.o
+v3vee-$(V3_CONFIG_STREAM) += iface-stream.o \
+ util-ringbuffer.o
+v3vee-$(V3_CONFIG_EXT_INSPECTOR) += inspector.o
+v3vee-$(V3_CONFIG_PACKET) += iface-packet.o
+v3vee-$(V3_CONFIG_SOCKET) += iface-socket.o
+v3vee-$(V3_CONFIG_KEYED_STREAMS) += iface-keyed-stream.o
+v3vee-$(V3_CONFIG_HOST_DEVICE) += iface-host-dev.o
+v3vee-$(V3_CONFIG_GRAPHICS_CONSOLE) += iface-graphics-console.o
v3vee-$(V3_CONFIG_VNET) += palacios-vnet.o \
palacios-vnet-ctrl.o \
palacios-vnet-brg.o
-
-
-
v3vee-objs := $(v3vee-y) ../libv3vee.a
-
obj-m := v3vee.o
-
all:
$(MAKE) -C $(V3_CONFIG_LINUX_KERN) M=$(PWD) modules
-
-
clean:
$(MAKE) -C $(V3_CONFIG_LINUX_KERN) M=$(PWD) clean
#include <interfaces/vmm_console.h>
#include <palacios/vmm_host_events.h>
-#include "palacios-vm.h"
+#include "vm.h"
#include "palacios.h"
-#include "palacios-queue.h"
+#include "util-queue.h"
#include "linux-exts.h"
typedef enum { CONSOLE_CURS_SET = 1,
#include <interfaces/vmm_console.h>
#include <palacios/vmm_host_events.h>
-#include "palacios-graphics-console.h"
+#include "iface-graphics-console.h"
#include "palacios.h"
#include "linux-exts.h"
-#include "palacios-vm.h"
+#include "vm.h"
#include <linux/vmalloc.h>
#include <interfaces/vmm_host_dev.h>
#include "palacios.h"
-#include "palacios-host-dev-user.h"
+#include "iface-host-dev.h"
#include "linux-exts.h"
-#include "palacios-vm.h"
+#include "vm.h"
/*
There are two things in this file:
#include "palacios.h"
-#include "palacios-hashtable.h"
+#include "util-hashtable.h"
#include "linux-exts.h"
#define sint64_t int64_t
#include "palacios.h"
#include "linux-exts.h"
+
struct palacios_packet_state {
struct socket * raw_sock;
uint8_t inited;
}
static int palacios_packet_del_recver(const char * mac,
- struct v3_vm_info * vm){
+ struct v3_vm_info * vm){
return 0;
}
-static int init_raw_socket (const char * eth_dev){
+static int init_raw_socket(const char * eth_dev){
int err;
struct sockaddr_ll sock_addr;
struct ifreq if_req;
int dev_idx;
err = sock_create(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL), &(packet_state.raw_sock));
+
if (err < 0) {
printk(KERN_WARNING "Could not create a PF_PACKET Socket, err %d\n", err);
return -1;
}
- if(eth_dev == NULL){
+ if (eth_dev == NULL){
eth_dev = "eth0"; /* default "eth0" */
}
memset(&if_req, 0, sizeof(if_req));
strncpy(if_req.ifr_name, eth_dev, IFNAMSIZ); //sizeof(if_req.ifr_name));
+
err = packet_state.raw_sock->ops->ioctl(packet_state.raw_sock, SIOCGIFINDEX, (long)&if_req);
- if(err < 0){
- printk(KERN_WARNING "Palacios Packet: Unable to get index for device %s, error %d\n", if_req.ifr_name, err);
+
+ if (err < 0){
+ printk(KERN_WARNING "Palacios Packet: Unable to get index for device %s, error %d\n",
+ if_req.ifr_name, err);
dev_idx = 2; /* match ALL 2:"eth0" */
- }
- else{
+ } else {
dev_idx = if_req.ifr_ifindex;
}
sock_addr.sll_protocol = htons(ETH_P_ALL);
sock_addr.sll_ifindex = dev_idx;
- err = packet_state.raw_sock->ops->bind(packet_state.raw_sock, (struct sockaddr *)&sock_addr, sizeof(sock_addr));
+ err = packet_state.raw_sock->ops->bind(packet_state.raw_sock,
+ (struct sockaddr *)&sock_addr,
+ sizeof(sock_addr));
+
if (err < 0){
printk(KERN_WARNING "Error binding raw packet to device %s, %d\n", eth_dev, err);
return -1;
while (!kthread_should_stop()) {
size = recv_pkt(pkt, ETHERNET_PACKET_LEN);
+
if (size < 0) {
printk(KERN_WARNING "Palacios raw packet receive error, Server terminated\n");
break;
vm = (struct v3_vm_info *)v3_htable_search(packet_state.mac_vm_cache, (addr_t)pkt);
- if(vm != NULL){
+
+ if (vm != NULL){
printk("Find destinated VM 0x%p\n", vm);
send_raw_packet_to_palacios(pkt, size, vm);
}
const char * eth_dev = NULL;
- if(packet_state.inited == 0){
+ if (packet_state.inited == 0){
packet_state.inited = 1;
- if(init_raw_socket(eth_dev) == -1){
+ if (init_raw_socket(eth_dev) == -1){
printk("Error to initiate palacios packet interface\n");
return -1;
}
}
- // REGISTER GLOBAL CONTROL to add devices...
+ // REGISTER GLOBAL CONTROL to add interfaces...
return 0;
}
#include <interfaces/vmm_stream.h>
#include "linux-exts.h"
-#include "palacios-ringbuffer.h"
-#include "palacios-vm.h"
+#include "util-ringbuffer.h"
+#include "vm.h"
+#include "iface-stream.h"
#define STREAM_BUF_SIZE 1024
-#define STREAM_NAME_LEN 128
+
return -EFAULT;
}
+
+
printk("ERROR: Opening Streams is currently not implemented...\n");
return -EFAULT;
--- /dev/null
+#ifndef __IFACE_STREAM_H__
+#define __IFACE_STREAM_H__
+
+
+// Stream Connection IOCTL number
+#define V3_VM_STREAM_CONNECT 21
+
+// Buffer size of the stream name being connected to
+#define STREAM_NAME_LEN 128
+
+
+#endif
#include <interfaces/inspector.h>
#include "palacios.h"
-#include "palacios-vm.h"
+#include "vm.h"
#include "linux-exts.h"
-struct dentry * v3_dir = NULL;
+static struct dentry * v3_dir = NULL;
--- /dev/null
+Registry of ioctl numbers currently in use
+To add a new ioctl, find an available number and add it to this list.
+
+Global commands (/dev/v3vee)
+
+10 -- (VMM) Start guest
+
+50 -- (VMM) Add physical memory to VMM manager
+
+
+
+VM Commands (/dev/v3-vm*)
+
+20 -- (IFACE) Connect CGA Console
+21 -- (IFACE) Connect Stream
+22 -- (VMM) Stop Guest
+
+30 -- (EXT) Activate Inspector
+
+257 -- (IFACE) VGA Console Framebuf Input
+258 -- (IFACE) VGA Console Framebuf Query
+
+10245 -- (IFACE) Connect Host Device
\ No newline at end of file
#include <linux/kthread.h>
#include "palacios.h"
-#include "palacios-mm.h"
-#include "palacios-vm.h"
+#include "mm.h"
+#include "vm.h"
#include "linux-exts.h"
+++ /dev/null
-/*
- * Palacios VM Stream Serial interface
- * (c) Jack Lange, 2010
- */
-
-#ifndef __PALACIOS_SERIAL_H__
-#define __PALACIOS_SERIAL_H__
-
-int open_serial(char * name);
-
-#endif
#include <linux/kthread.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <palacios/vmm.h>
#include <palacios/vmm_host_events.h>
-#include "palacios-mm.h"
+#include "mm.h"
u32 pg_allocs = 0;
struct lnx_thread_arg * thread_info = (struct lnx_thread_arg *)arg;
/*
- lock_kernel();
printk("Daemonizing new Palacios thread (name=%s)\n", thread_info->name);
daemonize(thread_info->name);
- unlock_kernel();
allow_signal(SIGKILL);
*/
} else if (strnicmp("DEL", token, strlen("DEL")) == 0) {
char * idx_str = NULL;
uint32_t d_idx;
-
+ struct vnet_route_iter * route = NULL;
+
idx_str = strsep(&buf_iter, " ");
if (!idx_str) {
d_idx = simple_strtoul(idx_str, &idx_str, 10);
- v3_vnet_del_route(d_idx);
-
- printk("VNET Control: One route deleted\n");
+ printk("VNET: deleting route %d\n", d_idx);
+ list_for_each_entry(route, &(vnet_ctrl_s.route_list), node) {
+ if (route->idx == d_idx) {
+ delete_route(route);
+ break;
+ }
+ }
} else {
printk("Invalid Route command string\n");
}
#include <linux/timer.h>
#include <vnet/vnet.h>
-#include "palacios-mm.h"
+#include "mm.h"
#include "palacios-vnet.h"
+#include "linux-exts.h"
static void host_print(const char * fmt, ...) {
#include <vnet/vnet.h>
-int palacios_vnet_init(void);
-void palacios_vnet_deinit(void);
typedef enum {UDP, TCP, RAW, NONE} vnet_brg_proto_t;
uint32_t vnet_brg_add_link(uint32_t ip, uint16_t port, vnet_brg_proto_t proto);
int vnet_brg_link_stats(uint32_t link_idx, struct nic_statistics * stats);
int vnet_brg_stats(struct vnet_brg_stats * stats);
+
+
int vnet_bridge_init(void);
void vnet_bridge_deinit(void);
/* Global Control IOCTLs */
#define V3_START_GUEST 10
#define V3_ADD_MEMORY 50
-#define V3_START_NETWORK 60
/* VM Specific IOCTLs */
#define V3_VM_CONSOLE_CONNECT 20
-#define V3_VM_STREAM_CONNECT 21
+
#define V3_VM_STOP 22
#define V3_VM_INSPECT 30
#include <linux/preempt.h>
#include <linux/sched.h>
-#include "palacios-hashtable.h"
+#include "util-hashtable.h"
struct hash_entry {
#include <linux/slab.h>
-#include "palacios-queue.h"
+#include "util-queue.h"
void init_queue(struct gen_queue * queue, unsigned int max_entries) {
queue->num_entries = 0;
-/*
+/* \r
* Ringbuffer Routines for VM\r
- * (c) Lei Xia, 2010
+ * (c) Lei Xia, 2010\r
*/\r
-#include <linux/errno.h>
-#include <linux/percpu.h>
+#include <linux/errno.h>\r
+#include <linux/percpu.h>\r
#include <linux/sched.h>\r
\r
#include "palacios.h"\r
-#include "palacios-ringbuffer.h"\r
-
+#include "util-ringbuffer.h"\r
+\r
void init_ringbuf(struct ringbuf * ring, unsigned int size) {\r
ring->buf = kmalloc(size, GFP_KERNEL);\r
- ring->size = size;
-
- ring->start = 0;
- ring->end = 0;
- ring->current_len = 0;
-}
+ ring->size = size;\r
+ \r
+ ring->start = 0;\r
+ ring->end = 0;\r
+ ring->current_len = 0;\r
+}\r
\r
struct ringbuf * create_ringbuf(unsigned int size) {\r
struct ringbuf * ring = (struct ringbuf *)kmalloc(sizeof(struct ringbuf), GFP_KERNEL);\r
init_ringbuf(ring, size);\r
-
- return ring;
-}
+\r
+ return ring;\r
+}\r
\r
void free_ringbuf(struct ringbuf * ring) {\r
kfree(ring->buf);\r
\r
static inline unsigned char * get_read_ptr(struct ringbuf * ring) {\r
return (unsigned char *)(ring->buf + ring->start);\r
-}
-
+}\r
+\r
\r
static inline unsigned char * get_write_ptr(struct ringbuf * ring) {\r
return (unsigned char *)(ring->buf + ring->end);\r
}\r
\r
static inline int get_read_section_size(struct ringbuf * ring) {\r
- return ring->size - ring->start;
-}
-
+ return ring->size - ring->start;\r
+}\r
+\r
\r
static inline int get_write_section_size(struct ringbuf * ring) {\r
- return ring->size - ring->end;
-}
-
+ return ring->size - ring->end;\r
+}\r
+\r
\r
static inline int is_read_loop(struct ringbuf * ring, unsigned int len) {\r
- if ((ring->start >= ring->end) && (ring->current_len > 0)) {
- // end is past the end of the buffer
- if (get_read_section_size(ring) < len) {
- return 1;
- }
- }
- return 0;
-}
-
+ if ((ring->start >= ring->end) && (ring->current_len > 0)) {\r
+ // end is past the end of the buffer\r
+ if (get_read_section_size(ring) < len) {\r
+ return 1;\r
+ }\r
+ }\r
+ return 0;\r
+}\r
+\r
\r
static inline int is_write_loop(struct ringbuf * ring, unsigned int len) {\r
- if ((ring->end >= ring->start) && (ring->current_len < ring->size)) {
- // end is past the end of the buffer
- if (get_write_section_size(ring) < len) {
- return 1;
- }
- }
- return 0;
-}
-
+ if ((ring->end >= ring->start) && (ring->current_len < ring->size)) {\r
+ // end is past the end of the buffer\r
+ if (get_write_section_size(ring) < len) {\r
+ return 1;\r
+ }\r
+ }\r
+ return 0;\r
+}\r
+\r
\r
static inline int ringbuf_avail_space(struct ringbuf * ring) {\r
- return ring->size - ring->current_len;
-}
-
+ return ring->size - ring->current_len;\r
+}\r
+\r
\r
int ringbuf_data_len(struct ringbuf * ring) {\r
- return ring->current_len;
-}
-
+ return ring->current_len;\r
+}\r
+\r
\r
static inline int ringbuf_capacity(struct ringbuf * ring) {\r
- return ring->size;
-}
-
+ return ring->size;\r
+}\r
+\r
\r
int ringbuf_read(struct ringbuf * ring, unsigned char * dst, unsigned int len) {\r
- int read_len = 0;
- int ring_data_len = ring->current_len;
-
- read_len = (len > ring_data_len) ? ring_data_len : len;
-
- if (is_read_loop(ring, read_len)) {
- int section_len = get_read_section_size(ring);
-
- memcpy(dst, get_read_ptr(ring), section_len);
- memcpy(dst + section_len, ring->buf, read_len - section_len);
-
- ring->start = read_len - section_len;
- } else {
- memcpy(dst, get_read_ptr(ring), read_len);
-
- ring->start += read_len;
- }
-
- ring->current_len -= read_len;
-
- return read_len;
-}
-
+ int read_len = 0;\r
+ int ring_data_len = ring->current_len;\r
+\r
+ read_len = (len > ring_data_len) ? ring_data_len : len;\r
+\r
+ if (is_read_loop(ring, read_len)) {\r
+ int section_len = get_read_section_size(ring);\r
+\r
+ memcpy(dst, get_read_ptr(ring), section_len);\r
+ memcpy(dst + section_len, ring->buf, read_len - section_len);\r
+ \r
+ ring->start = read_len - section_len;\r
+ } else {\r
+ memcpy(dst, get_read_ptr(ring), read_len);\r
+ \r
+ ring->start += read_len;\r
+ }\r
+\r
+ ring->current_len -= read_len;\r
+\r
+ return read_len;\r
+}\r
+\r
\r
#if 0\r
\r
static int ringbuf_peek(struct ringbuf * ring, unsigned char * dst, unsigned int len) {\r
- int read_len = 0;
- int ring_data_len = ring->current_len;
-
- read_len = (len > ring_data_len) ? ring_data_len : len;
-
- if (is_read_loop(ring, read_len)) {
- int section_len = get_read_section_size(ring);
-
- memcpy(dst, get_read_ptr(ring), section_len);
- memcpy(dst + section_len, ring->buf, read_len - section_len);
- } else {
- memcpy(dst, get_read_ptr(ring), read_len);
- }
-
- return read_len;
-}
-
+ int read_len = 0;\r
+ int ring_data_len = ring->current_len;\r
+\r
+ read_len = (len > ring_data_len) ? ring_data_len : len;\r
+\r
+ if (is_read_loop(ring, read_len)) {\r
+ int section_len = get_read_section_size(ring);\r
+\r
+ memcpy(dst, get_read_ptr(ring), section_len);\r
+ memcpy(dst + section_len, ring->buf, read_len - section_len);\r
+ } else {\r
+ memcpy(dst, get_read_ptr(ring), read_len);\r
+ }\r
+\r
+ return read_len;\r
+}\r
+\r
\r
static int ringbuf_delete(struct ringbuf * ring, unsigned int len) {\r
- int del_len = 0;
- int ring_data_len = ring->current_len;
-
- del_len = (len > ring_data_len) ? ring_data_len : len;
-
- if (is_read_loop(ring, del_len)) {
- int section_len = get_read_section_size(ring);
- ring->start = del_len - section_len;
- } else {
- ring->start += del_len;
- }
-
- ring->current_len -= del_len;
- return del_len;
-}
+ int del_len = 0;\r
+ int ring_data_len = ring->current_len;\r
+\r
+ del_len = (len > ring_data_len) ? ring_data_len : len;\r
+\r
+ if (is_read_loop(ring, del_len)) {\r
+ int section_len = get_read_section_size(ring);\r
+ ring->start = del_len - section_len;\r
+ } else {\r
+ ring->start += del_len;\r
+ }\r
+\r
+ ring->current_len -= del_len;\r
+ return del_len;\r
+}\r
#endif\r
\r
int ringbuf_write(struct ringbuf * ring, unsigned char * src, unsigned int len) {\r
- int write_len = 0;
- int ring_avail_space = ring->size - ring->current_len;
-
+ int write_len = 0;\r
+ int ring_avail_space = ring->size - ring->current_len;\r
+ \r
write_len = (len > ring_avail_space) ? ring_avail_space : len;\r
-
- if (is_write_loop(ring, write_len)) {
- int section_len = get_write_section_size(ring);
+\r
+ if (is_write_loop(ring, write_len)) {\r
+ int section_len = get_write_section_size(ring);\r
\r
- memcpy(get_write_ptr(ring), src, section_len);
- ring->end = 0;
-
- memcpy(get_write_ptr(ring), src + section_len, write_len - section_len);
-
- ring->end += write_len - section_len;
+ memcpy(get_write_ptr(ring), src, section_len);\r
+ ring->end = 0;\r
+\r
+ memcpy(get_write_ptr(ring), src + section_len, write_len - section_len);\r
+\r
+ ring->end += write_len - section_len;\r
} else {\r
- memcpy(get_write_ptr(ring), src, write_len);
-
- ring->end += write_len;
- }
-
- ring->current_len += write_len;
-
- return write_len;
-}
+ memcpy(get_write_ptr(ring), src, write_len);\r
+\r
+ ring->end += write_len;\r
+ }\r
+\r
+ ring->current_len += write_len;\r
+\r
+ return write_len;\r
+}\r
\r
#include <linux/anon_inodes.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <palacios/vmm.h>
#include "palacios.h"
-#include "palacios-vm.h"
+#include "vm.h"
#include "linux-exts.h"
struct v3_guest * guest = (struct v3_guest *)arg;
int err;
- lock_kernel();
+
daemonize(guest->name);
// allow_signal(SIGKILL);
- unlock_kernel();
+
init_vm_extensions(guest);
#define _V3_USER_HOST_DEV_
#include <stdint.h>
-#include "palacios-host-dev-user.h"
+#include "iface-host-dev.h"
int v3_user_host_dev_rendezvous(char *vmdev, char *url); // returns devfd for use in poll/select
int v3_user_host_dev_depart(int devfd);
*/
#define MASK_DISPLACEMENT(reg, mode) ({ \
sint64_t val = 0; \
- if (mode == DISP0) { \
- val = reg; \
- } else if (mode == DISP8) { \
+ if (mode == DISP8) { \
val = (sint8_t)(reg & 0xff); \
} else if (mode == DISP16) { \
val = (sint16_t)(reg & 0xffff); \
#define ADDR_MASK(val, length) ({ \
ullong_t mask = 0x0LL; \
switch (length) { \
- case 1: \
- mask = 0x0000000000000ffLL; \
- break; \
case 2: \
mask = 0x00000000000fffffLL; \
break; \
uint_t channel_cycles = 0;
uint_t output_changed = 0;
- // PrintDebug("8254 PIT: %d crystal tics\n", oscillations);
+ // PrintDebug("8254 PIT (channel %d): %d crystal tics\n",
+ // ch - pit->ch0, oscillations);
if (ch->run_state == PENDING) {
oscillations--;
ch->counter = ch->reload_value;
if (ch->counter > oscillations) {
ch->counter -= oscillations;
+ //PrintDebug("8254 PIT: Counter at %u after %u oscillations.\n",
+ // (unsigned int)ch->counter, oscillations);
return output_changed;
} else {
ushort_t reload_val = ch->reload_value;
oscillations = oscillations % reload_val;
ch->counter = reload_val - oscillations;
+ // PrintDebug("8254 PIT: Counter reset to %u.\n",
+ // (unsigned int)ch->counter);
+
}
- // PrintDebug("8254 PIT: Channel Cycles: %d\n", channel_cycles);
+ //PrintDebug("8254 PIT: Channel %ld (mode = %u) Cycles: %d\n",
+ //(ch - &pit->ch_0), ch->op_mode, channel_cycles);
-
-
switch (ch->op_mode) {
case IRQ_ON_TERM_CNT:
- if ((channel_cycles > 0) && (ch->output_pin == 0)) {
- ch->output_pin = 1;
- output_changed = 1;
+ if (channel_cycles > 0) {
+ if (ch->output_pin == 0) {
+ ch->output_pin = 1;
+ output_changed = 1;
+ } else {
+ // PrintDebug("8254: Output not changed in TERM_CNT mode.\n");
+ }
}
break;
case ONE_SHOT:
- if ((channel_cycles > 0) && (ch->output_pin == 0)) {
- ch->output_pin = 1;
- output_changed = 1;
+ if (channel_cycles > 0) {
+ if ((ch->output_pin == 0)) {
+ ch->output_pin = 1;
+ output_changed = 1;
+ } else {
+ // PrintDebug("8254: Output not changed in ONE_SHOT mode.\n");
+ }
}
break;
case RATE_GEN:
output_changed = 1;
}
+
break;
case SW_STROBE:
state->pit_counter = state->pit_reload - cpu_cycles;
if (oscillations) {
- PrintDebug("8254 PIT: Handling %d crystal tics\n", oscillations);
- }
+ // PrintDebug("8254 PIT: Handling %d crystal tics\n", oscillations);
- if (handle_crystal_tics(state, &(state->ch_0), oscillations) == 1) {
- // raise interrupt
- // PrintDebug("8254 PIT: Injecting Timer interrupt to guest\n");
- v3_raise_irq(info->vm_info, 0);
- }
+ if (handle_crystal_tics(state, &(state->ch_0), oscillations) == 1) {
+ // raise interrupt
+ PrintDebug("8254 PIT: Injecting Timer interrupt to guest (run_state = %d)\n",
+ state->ch_0.run_state);
+ v3_raise_irq(info->vm_info, 0);
+ }
- //handle_crystal_tics(state, &(state->ch_1), oscillations);
- handle_crystal_tics(state, &(state->ch_2), oscillations);
+ //handle_crystal_tics(state, &(state->ch_1), oscillations);
+ handle_crystal_tics(state, &(state->ch_2), oscillations);
+ }
}
-
-
-
return;
}
static int handle_channel_cmd(struct channel * ch, struct pit_cmd_word cmd) {
- ch->access_mode = cmd.access_mode;
+ if (cmd.op_mode != ch->op_mode) {
+ PrintDebug("8254 PIT: Changing channel from op mode %d to op mode %d.\n",
+ ch->op_mode, cmd.op_mode);
+ }
- if (ch->access_mode != 0) {
- ch->op_mode = cmd.op_mode;
+ if (cmd.access_mode != 0) {
+ ch->op_mode = cmd.op_mode;
}
+ if (cmd.access_mode != ch->access_mode) {
+ PrintDebug("8254 PIT: Changing channel from access mode %d to access mode %d.\n",
+ ch->access_mode, cmd.access_mode);
+ }
+ ch->access_mode = cmd.access_mode;
switch (cmd.access_mode) {
case LATCH_COUNT:
apic->tmr_cur_cnt = op_val;
break;
case TMR_DIV_CFG_OFFSET:
+ PrintDebug("apic %u: core %u: setting tmr_div_cfg to 0x%x\n",
+ apic->lapic_id.val, core->vcpu_id, op_val);
apic->tmr_div_cfg.val = op_val;
break;
}
if (apic->tmr_vec_tbl.tmr_mode == APIC_TMR_PERIODIC) {
+ static unsigned int nexits = 0;
+ static unsigned int missed_ints = 0;
+
+ nexits++;
+ missed_ints += tmr_ticks / apic->tmr_init_cnt;
+
+ if ((missed_ints > 0) && (nexits >= 5000)) {
+ V3_Print("apic %u: core %u: missed %u timer interrupts total in last %u exits.\n",
+ apic->lapic_id.val, core->vcpu_id, missed_ints, nexits);
+ missed_ints = 0;
+ nexits = 0;
+ }
+
tmr_ticks = tmr_ticks % apic->tmr_init_cnt;
apic->tmr_cur_cnt = apic->tmr_init_cnt - tmr_ticks;
}
// disable global interrupts for vm state transition
v3_clgi();
- // Update timer devices prior to entering VM.
+ // Update timer devices after being in the VM, with interrupts
+ // disabled, but before doing IRQ updates, so that any interrupts they
+ // raise get seen immediately.
v3_update_timers(info);
// Synchronize the guest state to the VMCB
info->mem_mode = v3_get_vm_mem_mode(info);
/* ** */
-
// save exit info here
exit_code = guest_ctrl->exit_code;
exit_info1 = guest_ctrl->exit_info1;
exit_info2 = guest_ctrl->exit_info2;
-
#ifdef V3_CONFIG_SYMCALL
if (info->sym_core_state.symcall_state.sym_call_active == 0) {
update_irq_exit_state(info);
update_irq_exit_state(info);
#endif
-
// reenable global interrupts after vm exit
v3_stgi();
-
// Conditionally yield the CPU if the timeslice has expired
v3_yield_cond(info);
-
-
if (v3_handle_svm_exit(info, exit_code, exit_info1, exit_info2) != 0) {
PrintError("Error in SVM exit handler\n");
PrintError(" last exit was %d\n", v3_last_exit);
#include <palacios/vmm_halt.h>
#include <palacios/vmm_intr.h>
-
+#include <palacios/vmm_lowlevel.h>
#ifndef V3_CONFIG_DEBUG_HALT
#undef PrintDebug
while (!v3_intr_pending(info)) {
/* Since we're in an exit, time is already paused here, so no need to pause again. */
+ // V3_Print("palacios: halt->yield\n");
+
v3_yield(info);
- v3_update_timers(info);
+ v3_disable_ints();
+ v3_update_timers(info);
+ v3_enable_ints();
+
/* At this point, we either have some combination of
interrupts, including perhaps a timer interrupt, or
no interrupt.
*/
if (!v3_intr_pending(info)) {
/* if no interrupt, then we do halt */
- asm("hlt");
+ /* asm("hlt"); */
}
}
+
+ /* V3_Print("palacios: done with halt\n"); */
info->rip += 1;
}
return 0;
}
-// Control guest time in relation to host time so that the two stay
-// appropriately synchronized to the extent possible.
-int v3_adjust_time(struct guest_info * info) {
+static uint64_t compute_target_host_time(struct guest_info * info)
+{
+ struct vm_time * time_state = &(info->time_state);
+ uint64_t guest_elapsed, desired_elapsed;
+
+ guest_elapsed = (v3_get_guest_time(time_state) - time_state->initial_time);
+ desired_elapsed = (guest_elapsed * time_state->host_cpu_freq) / time_state->guest_cpu_freq;
+ return time_state->initial_time + desired_elapsed;
+}
+
+static uint64_t compute_target_guest_time(struct guest_info *info)
+{
+ struct vm_time * time_state = &(info->time_state);
+ uint64_t host_elapsed, desired_elapsed;
+
+ host_elapsed = v3_get_host_time(time_state) - time_state->initial_time;
+ desired_elapsed = (host_elapsed * time_state->guest_cpu_freq) / time_state->host_cpu_freq;
+
+ return time_state->initial_time + desired_elapsed;
+
+}
+
+/* Yield time in the host to deal with a guest that wants to run slower than
+ * the native host cycle frequency */
+static int yield_host_time(struct guest_info * info) {
struct vm_time * time_state = &(info->time_state);
uint64_t host_time, target_host_time;
- uint64_t guest_time, target_guest_time, old_guest_time;
- uint64_t guest_elapsed, host_elapsed, desired_elapsed;
+ uint64_t guest_time, old_guest_time;
/* Compute the target host time given how much time has *already*
* passed in the guest */
- guest_time = v3_get_guest_time(time_state);
- guest_elapsed = (guest_time - time_state->initial_time);
- desired_elapsed = (guest_elapsed * time_state->host_cpu_freq) / time_state->guest_cpu_freq;
- target_host_time = time_state->initial_time + desired_elapsed;
-
+ target_host_time = compute_target_host_time(info);
+
/* Now, let the host run while the guest is stopped to make the two
- * sync up. */
+ * sync up. Note that this doesn't assume that guest time is stopped;
+ * the offsetting in the next step will add an offset to guest
+ * time to account for the time paused even if the guest isn't
+ * usually paused in the VMM. */
host_time = v3_get_host_time(time_state);
old_guest_time = v3_get_guest_time(time_state);
guest_time = v3_get_guest_time(time_state);
- // We do *not* assume the guest timer was paused in the VM. If it was
- // this offseting is 0. If it wasn't we need this.
+ /* We do *not* assume the guest timer was paused in the VM. If it was,
+ * this offsetting is 0. If it wasn't, we need this. */
v3_offset_time(info, (sint64_t)old_guest_time - (sint64_t)guest_time);
+ return 0;
+}
+
+static int skew_guest_time(struct guest_info * info) {
+ struct vm_time * time_state = &(info->time_state);
+ uint64_t target_guest_time, guest_time;
/* Now the host may have gotten ahead of the guest because
* yielding is a coarse grained thing. Figure out what guest time
* we want to be at, and use the use the offsetting mechanism in
* the VMM to make the guest run forward. We limit *how* much we skew
* it forward to prevent the guest time making large jumps,
* however. */
- host_elapsed = host_time - time_state->initial_time;
- desired_elapsed = (host_elapsed * time_state->guest_cpu_freq) / time_state->host_cpu_freq;
- target_guest_time = time_state->initial_time + desired_elapsed;
+ target_guest_time = compute_target_guest_time(info);
+ guest_time = v3_get_guest_time(time_state);
if (guest_time < target_guest_time) {
uint64_t max_skew, desired_skew, skew;
if (time_state->enter_time) {
- max_skew = (time_state->exit_time - time_state->enter_time) / 10;
+ /* Limit forward skew to 10% of the amount the guest has
+ * run since we last could skew time */
+ max_skew = (guest_time - time_state->enter_time) / 10;
} else {
max_skew = 0;
}
desired_skew = target_guest_time - guest_time;
skew = desired_skew > max_skew ? max_skew : desired_skew;
-/* PrintDebug("Guest %llu cycles behind where it should be.\n",
+ PrintDebug("Guest %llu cycles behind where it should be.\n",
desired_skew);
PrintDebug("Limit on forward skew is %llu. Skewing forward %llu.\n",
- max_skew, skew); */
+ max_skew, skew);
v3_offset_time(info, skew);
}
+
+ return 0;
+}
+
+// Control guest time in relation to host time so that the two stay
+// appropriately synchronized to the extent possible.
+int v3_adjust_time(struct guest_info * info) {
+
+ /* First deal with yielding if we want to slow down the guest */
+ yield_host_time(info);
+
+ /* Now, if the guest is too slow, (either from excess yielding above,
+ * or because the VMM is doing something that takes a long time to emulate)
+ * allow guest time to jump forward a bit */
+ skew_guest_time(info);
return 0;
}
time_state->enter_time = host_time;
time_state->guest_host_offset = guest_time - host_time;
- // Because we just modified the offset - shouldn't matter as this should be
- // the last time-related call prior to entering the VMM, but worth it
- // just in case.
- time_state->exit_time = host_time;
-
return 0;
}
time_state->last_update = v3_get_guest_time(time_state);
cycles = time_state->last_update - old_time;
+ V3_ASSERT(cycles >= 0);
+ // V3_Print("Updating timers with %lld elapsed cycles.\n", cycles);
list_for_each_entry(tmp_timer, &(time_state->timers), timer_link) {
tmp_timer->ops->update_timer(info, cycles, time_state->guest_cpu_freq, tmp_timer->private_data);
}
// Perform any additional yielding needed for time adjustment
v3_adjust_time(info);
- // Update timer devices prior to entering VM.
- v3_update_timers(info);
-
// disable global interrupts for vm state transition
v3_disable_ints();
+ // Update timer devices late after being in the VM so that as much
+ // of the time in the VM is accounted for as possible. Also do it before
+ // updating IRQ entry state so that any interrupts the timers raise get
+ // handled on the next VM entry. Must be done with interrupts disabled.
+ v3_update_timers(info);
if (vmcs_store() != vmx_info->vmcs_ptr_phys) {
vmcs_load(vmx_info->vmcs_ptr_phys);
}
-
v3_vmx_restore_vmcs(info);
exit_log[info->num_exits % 10] = exit_info;
-
#ifdef V3_CONFIG_SYMCALL
if (info->sym_core_state.symcall_state.sym_call_active == 0) {
update_irq_exit_state(info);
static void dump_routes(){
struct vnet_route_info *route;
- int i = 0;
Vnet_Debug("\n========Dump routes starts ============\n");
list_for_each_entry(route, &(vnet_state.routes), node) {
- Vnet_Debug("\nroute %d:\n", i++);
+ Vnet_Debug("\nroute %d:\n", route->idx);
print_route(&(route->route_def));
if (route->route_def.dst_type == LINK_INTERFACE) {
flags = vnet_lock_irqsave(vnet_state.lock);
list_for_each_entry(route, &(vnet_state.routes), node) {
+ V3_Print("v3_vnet_del_route, route idx: %d\n", route->idx);
if(route->idx == route_idx){
list_del(&(route->node));
- list_del(&(route->match_node));
- Vnet_Free(route);
+ Vnet_Free(route);
+ break;
}
}
vnet_unlock_irqrestore(vnet_state.lock, flags);
+ clear_hash_cache();
+
+#ifdef V3_CONFIG_DEBUG_VNET
+ dump_routes();
+#endif
}