Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are similar.
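To see which branches exist before switching, you can list them first; the release branch name in the second command is only an example and may not match an actual release branch:

  git branch -r
  git checkout --track -b Release-1.3 origin/Release-1.3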


VNET fixes and adaptation to new API
Peter Dinda [Tue, 26 Jun 2012 21:29:17 +0000 (16:29 -0500)]
The most important fixes here eliminate the use of blocking receives
in favor of nonblocking receives combined with timed yields

linux_module/palacios-vnet-brg.c
linux_module/palacios-vnet-ctrl.c
linux_module/palacios-vnet.c
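
The pattern those fixes introduce is sketched below in simplified form: a server loop that uses a nonblocking receive and, when no packet is available, yields the CPU for a bounded time so the kthread still notices kthread_should_stop(). This is only an illustration built from the identifiers in the diff (_udp_recv, palacios_yield_cpu_timed, vnet_brg_s, MAX_PACKET_LEN), not a drop-in replacement for _udp_server().

/* Illustration only: nonblocking receive / timed-yield loop for the
 * bridge's UDP server thread.  _udp_recv(), palacios_yield_cpu_timed(),
 * vnet_brg_s, and MAX_PACKET_LEN are assumed from the surrounding module. */
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/in.h>

#define VNET_YIELD_TIME_USEC 1000

static int example_udp_server(void * arg) {
    struct sockaddr_in pkt_addr;
    unsigned char * pkt;
    int len;

    pkt = kmalloc(MAX_PACKET_LEN, GFP_KERNEL);
    if (!pkt) {
        return -ENOMEM;
    }

    while (!kthread_should_stop()) {
        /* Nonblocking: never park in the socket layer, or the stop
         * request from kthread_stop() will never be seen. */
        len = _udp_recv(vnet_brg_s.serv_sock, &pkt_addr, pkt, MAX_PACKET_LEN, 1);

        if (len == -EAGAIN || len == -EWOULDBLOCK || len == -EINTR) {
            /* Nothing to read: give other threads on this core a chance. */
            palacios_yield_cpu_timed(VNET_YIELD_TIME_USEC);
            continue;
        }

        if (len < 0) {
            continue;  /* transient receive error */
        }

        /* ... look up the link by source IP and hand the packet to Palacios ... */
    }

    kfree(pkt);
    return 0;
}

The yield time bounds how long a stop request can go unnoticed while still avoiding a busy spin on an idle socket.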

diff --git a/linux_module/palacios-vnet-brg.c b/linux_module/palacios-vnet-brg.c
index 4efbb86..7f4ab13 100644
 #include "palacios-vnet.h"
 #include "palacios.h"
 
+
 #define VNET_SERVER_PORT 9000
 
+#define VNET_YIELD_TIME_USEC 1000
+
 struct vnet_link {
     uint32_t dst_ip;
     uint16_t dst_port;
@@ -255,8 +258,8 @@ _udp_send(struct socket * sock,
 
 static int 
 _udp_recv(struct socket * sock, 
-        struct sockaddr_in * addr,
-        unsigned char * buf, int len) {
+         struct sockaddr_in * addr,
+         unsigned char * buf, int len, int nonblocking) {
     struct msghdr msg;
     struct iovec iov;
     mm_segment_t oldfs;
@@ -269,7 +272,7 @@ _udp_recv(struct socket * sock,
     iov.iov_base = buf;
     iov.iov_len = len;
     
-    msg.msg_flags = 0;
+    msg.msg_flags = MSG_NOSIGNAL | (nonblocking ? MSG_DONTWAIT : 0);
     msg.msg_name = addr;
     msg.msg_namelen = sizeof(struct sockaddr_in);
     msg.msg_control = NULL;
@@ -349,7 +352,7 @@ bridge_send_pkt(struct v3_vm_info * vm,
        link->stats.tx_bytes += pkt->size;
        link->stats.tx_pkts ++;
     } else {
-       INFO("VNET Bridge Linux Host: wrong dst link, idx: %d, discards the packet\n", pkt->dst_id);
+       INFO("VNET Bridge Linux Host: wrong dst link, idx: %d, discarding the packet\n", pkt->dst_id);
        vnet_brg_s.stats.pkt_drop_vmm ++;
     }
 
@@ -411,17 +414,34 @@ static int _udp_server(void * arg) {
     INFO("Palacios VNET Bridge: UDP receiving server ..... \n");
 
     pkt = kmalloc(MAX_PACKET_LEN, GFP_KERNEL);
+
+
     while (!kthread_should_stop()) {
+
+       // This is a NONBLOCKING receive
+       // If we block here, we will never detect that this thread
+       // is being signaled to stop, plus we might go uninterrupted on this core
+       // blocking out access to other threads - leave this NONBLOCKING
+       // unless you know what you are doing
+       len = _udp_recv(vnet_brg_s.serv_sock, &pkt_addr, pkt, MAX_PACKET_LEN, 1); 
+
+
+       // If it would have blocked, we have no packet, and so
+       // we will give other threads on this core a chance
+       if (len==-EAGAIN || len==-EWOULDBLOCK || len==-EINTR) { 
+           palacios_yield_cpu_timed(VNET_YIELD_TIME_USEC);
+           continue;
+       }
        
-       len = _udp_recv(vnet_brg_s.serv_sock, &pkt_addr, pkt, MAX_PACKET_LEN); 
        if(len < 0) {
            WARNING("Receive error: Could not get packet, error %d\n", len);
            continue;
        }
 
        link = _link_by_ip(pkt_addr.sin_addr.s_addr);
+
        if (link == NULL){
-           WARNING("VNET Server: No VNET Link match the src IP\n");
+           WARNING("VNET Server: No VNET Link matches the src IP\n");
            vnet_brg_s.stats.pkt_drop_phy ++;
            continue;
        }
@@ -433,6 +453,8 @@ static int _udp_server(void * arg) {
        send_to_palacios(pkt, len, link->idx);
     }
 
+    INFO("VNET Server: UDP thread exiting\n");
+
     kfree(pkt);
 
     return 0;
@@ -447,6 +469,8 @@ static int _rx_server(void * arg) {
        //accept new connection
        //use select to receive pkt from physical network
        //or create new kthread to handle each connection?
+       WARNING("VNET Server: TCP is not currently supported\n");
+       return -1;
     }else {
        WARNING ("VNET Server: Unsupported Protocol\n");
        return -1;
@@ -507,16 +531,29 @@ int vnet_bridge_init(void) {
 
 void vnet_bridge_deinit(void){
 
+    INFO("VNET LNX Bridge Deinit Started\n");
+
     v3_vnet_del_bridge(HOST_LNX_BRIDGE);
 
+    //DEBUG("Stopping bridge service thread\n");
+
     kthread_stop(vnet_brg_s.serv_thread);
+
+    //DEBUG("Releasing bridee service socket\n");
+
     vnet_brg_s.serv_sock->ops->release(vnet_brg_s.serv_sock);
 
+    //DEBUG("Deiniting bridge links\n");
+
     deinit_links_list();
 
+    //DEBUG("Freeing bridge hash tables\n");
+
     vnet_free_htable(vnet_brg_s.ip2link, 0, 0);
 
     vnet_brg_s.status = 0;
+
+    INFO("VNET LNX Bridge Deinit Finished\n");
 }
 
 
diff --git a/linux_module/palacios-vnet-ctrl.c b/linux_module/palacios-vnet-ctrl.c
index 992556c..9495cb4 100644
@@ -877,7 +877,9 @@ static int init_proc_files(void) {
     struct proc_dir_entry * debug_entry = NULL;
     struct proc_dir_entry * vnet_root = NULL;
 
-    vnet_root = proc_mkdir("vnet", NULL);
+
+    vnet_root = proc_mkdir("vnet", palacios_get_procdir());
+
     if (vnet_root == NULL) {
        return -1;
     }
@@ -957,12 +959,17 @@ int vnet_ctrl_init(void) {
 
 
 void vnet_ctrl_deinit(void){
+    
+    INFO("VNET Control Deinit Started\n");
+
     destroy_proc_files();
 
     deinit_links_list();
     deinit_routes_list();
 
     vnet_ctrl_s.status = 0;
+
+    INFO("VNET Control Deinit Finished\n");
 }
 
 
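As an aside on the proc filesystem change in palacios-vnet-ctrl.c above: passing a parent directory entry to proc_mkdir() nests the vnet directory under Palacios's own proc directory instead of creating it at the /proc root. A minimal sketch, assuming the parent entry is obtained elsewhere (e.g., via palacios_get_procdir() as in the diff):

/* Sketch only: create /proc/<parent>/vnet instead of /proc/vnet. */
#include <linux/proc_fs.h>

static struct proc_dir_entry * example_make_vnet_dir(struct proc_dir_entry * parent) {
    /* A NULL parent would place the directory directly under /proc;
     * passing the Palacios directory entry nests "vnet" beneath it. */
    return proc_mkdir("vnet", parent);
}
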
diff --git a/linux_module/palacios-vnet.c b/linux_module/palacios-vnet.c
index 2020e48..297852d 100644
 #include "palacios-vnet.h"
 #include "linux-exts.h"
 
-static void host_print(const char *    fmt, ...) {
-#if V3_PRINTK_OLD_STYLE_OUTPUT
 
-  va_list ap;
 
-  va_start(ap, fmt);
-  vprintk(fmt, ap);
-  va_end(ap);
-
-  return
-
-#else 
-
-  va_list ap;
-  char *buf;
-
-  // Allocate space atomically, in case we are called
-  // with a lock held
-  buf = kmalloc(V3_PRINTK_BUF_SIZE, GFP_ATOMIC);
-  if (!buf) { 
-      printk("palacios: output skipped - unable to allocate\n");
-      return;
-  } 
-
-  va_start(ap, fmt);
-  vsnprintf(buf,V3_PRINTK_BUF_SIZE, fmt, ap);
-  va_end(ap);
-
-  printk(KERN_INFO "palacios: %s",buf);
-
-  kfree(buf);
-
-  return;
-
-#endif
-}
-
-
-static void * host_allocate_pages(int num_pages, unsigned int alignment){
-    uintptr_t addr = 0; 
-    struct page * pgs = NULL;
-    int order = get_order(num_pages * PAGE_SIZE);
-        
-    pgs = alloc_pages(GFP_KERNEL, order);
-    
-    WARN(!pgs, "Could not allocate pages\n");
-       
-    addr = page_to_pfn(pgs) << PAGE_SHIFT; 
-   
-    return (void *)addr;
-}
-
-
-static void host_free_pages(void * page_paddr, int num_pages) {
-    uintptr_t pg_addr = (uintptr_t)page_paddr;
-       
-    __free_pages(pfn_to_page(pg_addr >> PAGE_SHIFT), get_order(num_pages * PAGE_SIZE));
-}
-
-
-static void *
-host_alloc(unsigned int size) {
-    void * addr;
-    addr =  kmalloc(size, GFP_KERNEL);
-
-    return addr;
-}
-
-static void
-host_free(
-       void *                  addr
-)
-{
-    kfree(addr);
-    return;
-}
-
-static void *
-host_vaddr_to_paddr(void * vaddr)
-{
-    return (void*) __pa(vaddr);
-
-}
-
-static void *
-host_paddr_to_vaddr(void * paddr)
-{
-    return __va(paddr);
-}
-
-
-static void *
-host_start_kernel_thread(
-       int (*fn)(void * arg),
-       void * arg,
-       char * thread_name) {
-    struct task_struct * thread = NULL;
-
-    thread = kthread_run(fn, arg, thread_name );
-
-    return thread;
-}
 
 static void host_kthread_sleep(long timeout){
     set_current_state(TASK_INTERRUPTIBLE);
@@ -143,7 +43,8 @@ static void host_kthread_wakeup(void * thread){
 static void host_kthread_stop(void * thread){
     struct task_struct * kthread = (struct task_struct *)thread;
 
-    kthread_stop(kthread);
+    while (kthread_stop(kthread)==-EINTR)
+       ;
 }
 
 static int host_kthread_should_stop(void){
@@ -157,45 +58,6 @@ static void host_udelay(unsigned long usecs){
 
 
 
-static void
-host_yield_cpu(void)
-{
-    schedule();
-    return;
-}
-
-static void *
-host_mutex_alloc(void)
-{
-    spinlock_t * lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
-
-    if (lock) {
-       spin_lock_init(lock);
-    }
-
-    return lock;
-}
-
-static void
-host_mutex_free(
-       void * mutex
-) 
-{
-    kfree(mutex);
-}
-
-static void 
-host_mutex_lock(void * mutex, 
-               int must_spin)
-{
-    spin_lock((spinlock_t *)mutex);
-}
-
-static void 
-host_mutex_unlock(void * mutex) 
-{
-    spin_unlock((spinlock_t *)mutex);
-}
 
 
 
@@ -276,32 +138,32 @@ host_del_timer(void * vnet_timer){
 
 
 static struct vnet_host_hooks vnet_host_hooks = {
-    .timer_create      = host_create_timer,
-    .timer_del         = host_del_timer,
+    .timer_create              = host_create_timer,
+    .timer_del                 = host_del_timer,
     .timer_start               = host_start_timer,
-    .timer_stop                = host_stop_timer,
-    .timer_reset       = host_reset_timer,
-
-    .thread_start      = host_start_kernel_thread,
-    .thread_sleep      = host_kthread_sleep,
-    .thread_wakeup     = host_kthread_wakeup,
-    .thread_stop       = host_kthread_stop,
-    .thread_should_stop        = host_kthread_should_stop,
-    .udelay    = host_udelay,
-
-    .yield_cpu         = host_yield_cpu,
-    .mutex_alloc       = host_mutex_alloc,
-    .mutex_free        = host_mutex_free,
-    .mutex_lock        = host_mutex_lock, 
-    .mutex_unlock      = host_mutex_unlock,
-
-    .print                     = host_print,
-    .allocate_pages    = host_allocate_pages,
-    .free_pages        = host_free_pages,
-    .malloc            = host_alloc,
-    .free                      = host_free,
-    .vaddr_to_paddr            = host_vaddr_to_paddr,
-    .paddr_to_vaddr            = host_paddr_to_vaddr,
+    .timer_stop                        = host_stop_timer,
+    .timer_reset               = host_reset_timer,
+
+    .thread_start              = palacios_start_kernel_thread,
+    .thread_sleep              = host_kthread_sleep,
+    .thread_wakeup             = host_kthread_wakeup,
+    .thread_stop               = host_kthread_stop,
+    .thread_should_stop                = host_kthread_should_stop,
+    .udelay                    = host_udelay,
+
+    .yield_cpu                 = palacios_yield_cpu,
+    .mutex_alloc               = palacios_mutex_alloc,
+    .mutex_free                        = palacios_mutex_free,
+    .mutex_lock                        = palacios_mutex_lock, 
+    .mutex_unlock              = palacios_mutex_unlock,
+
+    .print                     = palacios_print,
+    .allocate_pages            = palacios_allocate_pages,
+    .free_pages                        = palacios_free_pages,
+    .malloc                    = palacios_alloc,
+    .free                      = palacios_free,
+    .vaddr_to_paddr            = palacios_vaddr_to_paddr,
+    .paddr_to_vaddr            = palacios_paddr_to_vaddr,
 };
 
 
@@ -319,11 +181,19 @@ static int vnet_init( void ) {
 
 
 static int vnet_deinit( void ) {
-    deinit_vnet();
 
-    vnet_bridge_deinit();
+    INFO("V3 Control Deinit Start\n");
+
     vnet_ctrl_deinit();
 
+    INFO("V3 Bridge Deinit Start\n");
+
+    vnet_bridge_deinit();
+
+    INFO("V3 VNET Deinit Start\n");
+
+    deinit_vnet();
+
     INFO("V3 VNET Deinited\n");
 
     return 0;