X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fvnet%2Fvnet_core.c;h=46690294165b5f50eac65f501ba7ef2f8c24bba1;hb=96c8702630006b04e8e8f762f059f5300660e612;hp=42c9d3bc571da1bc8776df776e6c2d9af1ba8956;hpb=f904d129c5f7f44493bba3c9a82c150bbb613217;p=palacios.git

diff --git a/palacios/src/vnet/vnet_core.c b/palacios/src/vnet/vnet_core.c
index 42c9d3b..4669029 100644
--- a/palacios/src/vnet/vnet_core.c
+++ b/palacios/src/vnet/vnet_core.c
@@ -10,7 +10,7 @@
  * Copyright (c) 2010, Lei Xia 
  * Copyright (c) 2009, Yuan Tang 
  * Copyright (c) 2009, The V3VEE Project 
- * All rights reserved.
+ * All rights reserved
  *
  * Author: Lei Xia 
  *         Yuan Tang 
@@ -24,12 +24,14 @@
 #include 
 #include 
 
+#include 
+
 #ifndef V3_CONFIG_DEBUG_VNET
 #undef Vnet_Debug
 #define Vnet_Debug(fmt, args...)
 #endif
 
-int vnet_debug = 0;
+int net_debug = 0;
 
 struct eth_hdr {
     uint8_t dst_mac[ETH_ALEN];
@@ -43,9 +45,15 @@ struct vnet_dev {
     uint8_t mac_addr[ETH_ALEN];
     struct v3_vm_info * vm;
     struct v3_vnet_dev_ops dev_ops;
+
+    int poll;
+
+#define VNET_MAX_QUOTE 64
+    int quote;
+
     void * private_data;
 
-    struct list_head node;
+    struct list_head node;
 } __attribute__((packed));
@@ -66,6 +74,8 @@ struct vnet_route_info {
     struct vnet_dev * dst_dev;
     struct vnet_dev * src_dev;
 
+    uint32_t idx;
+
     struct list_head node;
     struct list_head match_node; // used for route matching
 };
@@ -86,36 +96,34 @@ struct queue_entry{
     uint32_t size_alloc;
 };
 
-#define VNET_QUEUE_SIZE 1024
-struct vnet_queue {
-    struct queue_entry buf[VNET_QUEUE_SIZE];
-    int head, tail;
-    int count;
-    vnet_lock_t lock;
-};
 
 static struct {
     struct list_head routes;
     struct list_head devs;
-
-    int num_routes;
-    int num_devs;
+
+    uint8_t status;
+
+    uint32_t num_routes;
+    uint32_t route_idx;
+    uint32_t num_devs;
+    uint32_t dev_idx;
 
     struct vnet_brg_dev * bridge;
 
     vnet_lock_t lock;
     struct vnet_stat stats;
 
-    struct vnet_thread * pkt_flush_thread;
+    /* device queue that are waiting to be polled */
+    struct v3_queue * poll_devs;
 
-    struct vnet_queue pkt_q;
+    struct vnet_thread * pkt_flush_thread;
 
     struct hashtable * route_cache;
 } vnet_state;
 
 
 #ifdef V3_CONFIG_DEBUG_VNET
-static inline void mac_to_string(uint8_t * mac, char * buf) {
+static inline void mac2str(uint8_t * mac, char * buf) {
     snprintf(buf, 100, "%2x:%2x:%2x:%2x:%2x:%2x", 
 	     mac[0], mac[1], mac[2],
 	     mac[3], mac[4], mac[5]);
@@ -124,10 +132,10 @@ static inline void mac_to_string(uint8_t * mac, char * buf) {
 static void print_route(struct v3_vnet_route * route){
     char str[50];
 
-    mac_to_string(route->src_mac, str);
+    mac2str(route->src_mac, str);
     Vnet_Debug("Src Mac (%s), src_qual (%d)\n", 
 	       str, route->src_mac_qual);
-    mac_to_string(route->dst_mac, str);
+    mac2str(route->dst_mac, str);
     Vnet_Debug("Dst Mac (%s), dst_qual (%d)\n", 
 	       str, route->dst_mac_qual);
     Vnet_Debug("Src dev id (%d), src type (%d)", 
@@ -141,10 +149,9 @@ static void print_route(struct v3_vnet_route * route){
 static void dump_routes(){
     struct vnet_route_info *route;
-    int i = 0;
 
     Vnet_Debug("\n========Dump routes starts ============\n");
     list_for_each_entry(route, &(vnet_state.routes), node) {
-	Vnet_Debug("\nroute %d:\n", i++);
+	Vnet_Debug("\nroute %d:\n", route->idx);
 	
 	print_route(&(route->route_def));
 	if (route->route_def.dst_type == LINK_INTERFACE) {
@@ -274,16 +281,42 @@ int v3_vnet_add_route(struct v3_vnet_route route) {
 
     flags = vnet_lock_irqsave(vnet_state.lock);
 
     list_add(&(new_route->node), &(vnet_state.routes));
-    clear_hash_cache();
-
+    new_route->idx = ++ vnet_state.route_idx;
+    vnet_state.num_routes ++;
+	
     vnet_unlock_irqrestore(vnet_state.lock, flags);
-
+	
+    clear_hash_cache();
 
 #ifdef V3_CONFIG_DEBUG_VNET
     dump_routes();
 #endif
 
-    return 0;
+    return new_route->idx;
+}
+
+
+void v3_vnet_del_route(uint32_t route_idx){
+    struct vnet_route_info * route = NULL;
+    unsigned long flags;
+
+    flags = vnet_lock_irqsave(vnet_state.lock);
+
+    list_for_each_entry(route, &(vnet_state.routes), node) {
+	V3_Print("v3_vnet_del_route, route idx: %d\n", route->idx);
+	if(route->idx == route_idx){
+	    list_del(&(route->node));
+	    Vnet_Free(route);
+	    break;
+	}
+    }
+
+    vnet_unlock_irqrestore(vnet_state.lock, flags);
+    clear_hash_cache();
+
+#ifdef V3_CONFIG_DEBUG_VNET
+    dump_routes();
+#endif
 }
@@ -319,7 +352,7 @@ static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
     int max_rank = 0;
     struct list_head match_list;
     struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
-    //  uint8_t src_type = pkt->src_type;
+    //  uint8_t src_type = pkt->src_type;
  //  uint32_t src_link = pkt->src_id;
 
 #ifdef V3_CONFIG_DEBUG_VNET
@@ -327,8 +360,8 @@ static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
 	char dst_str[100];
 	char src_str[100];
 
-	mac_to_string(hdr->src_mac, src_str);
-	mac_to_string(hdr->dst_mac, dst_str);
+	mac2str(hdr->src_mac, src_str);
+	mac2str(hdr->dst_mac, dst_str);
 	Vnet_Debug("VNET/P Core: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
     }
 #endif
@@ -438,17 +471,17 @@ static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
 }
 
-int vnet_tx_one_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
+int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
     struct route_list * matched_routes = NULL;
     unsigned long flags;
     int i;
     int cpu = V3_Get_CPU();
 
     Vnet_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
-	      cpu, pkt->size, pkt->src_id,
-	      pkt->src_type, pkt->dst_id, pkt->dst_type);
-    if(vnet_debug >= 4){
-	v3_hexdump(pkt->data, pkt->size, NULL, 0);
+	       cpu, pkt->size, pkt->src_id,
+	       pkt->src_type, pkt->dst_id, pkt->dst_type);
+    if(net_debug >= 4){
+	v3_hexdump(pkt->data, pkt->size, NULL, 0);
     }
 
     flags = vnet_lock_irqsave(vnet_state.lock);
@@ -515,66 +548,8 @@ int vnet_tx_one_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
 }
 
 
-static int vnet_pkt_enqueue(struct v3_vnet_pkt * pkt){
-    unsigned long flags;
-    struct queue_entry * entry;
-    struct vnet_queue * q = &(vnet_state.pkt_q);
-    uint16_t num_pages;
-
-    flags = vnet_lock_irqsave(q->lock);
-
-    if (q->count >= VNET_QUEUE_SIZE){
-	Vnet_Print(1, "VNET Queue overflow!\n");
-	vnet_unlock_irqrestore(q->lock, flags);
-	return -1;
-    }
-
-    q->count ++;
-    entry = &(q->buf[q->tail++]);
-    q->tail %= VNET_QUEUE_SIZE;
-
-    vnet_unlock_irqrestore(q->lock, flags);
-
-    /* this is ugly, but should happen very unlikely */
-    while(entry->use);
-
-    if(entry->size_alloc < pkt->size){
-	if(entry->data != NULL){
-	    Vnet_FreePages(Vnet_PAddr(entry->data), (entry->size_alloc / PAGE_SIZE));
-	    entry->data = NULL;
-	}
-
-	num_pages = 1 + (pkt->size / PAGE_SIZE);
-	entry->data = Vnet_VAddr(Vnet_AllocPages(num_pages));
-	if(entry->data == NULL){
-	    return -1;
-	}
-	entry->size_alloc = PAGE_SIZE * num_pages;
-    }
-
-    entry->pkt.data = entry->data;
-    memcpy(&(entry->pkt), pkt, sizeof(struct v3_vnet_pkt));
-    memcpy(entry->data, pkt->data, pkt->size);
-
-    entry->use = 1;
-
-    return 0;
-}
-
-
-int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data, int synchronize) {
-    if(synchronize){
-	vnet_tx_one_pkt(pkt, NULL);
-    }else {
-	vnet_pkt_enqueue(pkt);
-	Vnet_Print(2, "VNET/P Core: Put pkt into Queue: pkt size %d\n", pkt->size);
-    }
-	
-    return 0;
-}
-
 int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac, 
-		    struct v3_vnet_dev_ops *ops,
+		    struct v3_vnet_dev_ops *ops, int quote, int poll_state,
 		    void * priv_data){
     struct vnet_dev * new_dev = NULL;
     unsigned long flags;
@@ -591,12 +566,15 @@ int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac, 
     new_dev->private_data = priv_data;
     new_dev->vm = vm;
     new_dev->dev_id = 0;
+    new_dev->quote = quote<VNET_MAX_QUOTE?quote:VNET_MAX_QUOTE;
+    new_dev->poll = poll_state;
 
     flags = vnet_lock_irqsave(vnet_state.lock);
 
     if (dev_by_mac(mac) == NULL) {
 	list_add(&(new_dev->node), &(vnet_state.devs));
-	new_dev->dev_id = ++vnet_state.num_devs;
+	new_dev->dev_id = ++ vnet_state.dev_idx;
+	vnet_state.num_devs ++;
     }
 
     vnet_unlock_irqrestore(vnet_state.lock, flags);
@@ -622,7 +600,8 @@ int v3_vnet_del_dev(int dev_id){
     dev = dev_by_id(dev_id);
     if (dev != NULL){
 	list_del(&(dev->node));
-	del_routes_by_dev(dev_id);
+	//del_routes_by_dev(dev_id);
+	vnet_state.num_devs --;
     }
 
     vnet_unlock_irqrestore(vnet_state.lock, flags);
@@ -636,7 +615,6 @@ int v3_vnet_del_dev(int dev_id){
 
 
 int v3_vnet_stat(struct vnet_stat * stats){
-	
     stats->rx_bytes = vnet_state.stats.rx_bytes;
     stats->rx_pkts = vnet_state.stats.rx_pkts;
     stats->tx_bytes = vnet_state.stats.tx_bytes;
@@ -645,7 +623,7 @@ int v3_vnet_stat(struct vnet_stat * stats){
     return 0;
 }
 
-static void free_devices(){
+static void deinit_devices_list(){
     struct vnet_dev * dev = NULL;
 
     list_for_each_entry(dev, &(vnet_state.devs), node) {
@@ -654,7 +632,7 @@ static void free_devices(){
     }
 }
 
-static void free_routes(){
+static void deinit_routes_list(){
     struct vnet_route_info * route = NULL;
 
     list_for_each_entry(route, &(vnet_state.routes), node) {
@@ -706,41 +684,59 @@ int v3_vnet_add_bridge(struct v3_vm_info * vm, 
     return 0;
 }
 
-static int vnet_tx_flush(void *args){
+
+void v3_vnet_del_bridge(uint8_t type) {
     unsigned long flags;
-    struct queue_entry * entry;
-    struct vnet_queue * q = &(vnet_state.pkt_q);
+    struct vnet_brg_dev * tmp_bridge = NULL;
+
+    flags = vnet_lock_irqsave(vnet_state.lock);
+
+    if (vnet_state.bridge != NULL && vnet_state.bridge->type == type) {
+	tmp_bridge = vnet_state.bridge;
+	vnet_state.bridge = NULL;
+    }
+
+    vnet_unlock_irqrestore(vnet_state.lock, flags);
 
-    Vnet_Print(0, "VNET/P Handing Pkt Thread Starting ....\n");
+    if (tmp_bridge) {
+	Vnet_Free(tmp_bridge);
+    }
+}
 
-    /* we need thread sleep/wakeup in Palacios */
-    while(!vnet_thread_should_stop()){
-	flags = vnet_lock_irqsave(q->lock);
-	if (q->count <= 0){
-	    vnet_unlock_irqrestore(q->lock, flags);
-	    Vnet_Yield();
-	}else {
-	    q->count --;
-	    entry = &(q->buf[q->head++]);
-	    q->head %= VNET_QUEUE_SIZE;
+/* can be instanieoued to multiple threads
+ * that runs on multiple cores
+ * or it could be running on a dedicated side core
+ */
+static int vnet_tx_flush(void *args){
+    struct vnet_dev * dev = NULL;
+    int ret;
 
-	    vnet_unlock_irqrestore(q->lock, flags);
+    Vnet_Print(0, "VNET/P Polling Thread Starting ....\n");
 
-	    /* this is ugly, but should happen very unlikely */
-	    while(!entry->use);
-	    vnet_tx_one_pkt(&(entry->pkt), NULL);
+    /* we need thread sleep/wakeup in Palacios */
+    while(!vnet_thread_should_stop()){
+	dev = (struct vnet_dev *)v3_dequeue(vnet_state.poll_devs);
+	if(dev != NULL){
+	    if(dev->poll && dev->dev_ops.poll != NULL){
+		ret = dev->dev_ops.poll(dev->vm, dev->quote, dev->private_data);
 
-	    /* asynchronizely release allocated memory for buffer entry here */
-	    entry->use = 0;
+		if (ret < 0){
+		    PrintDebug("VNET/P: poll from device %p error!\n", dev);
+		}
 
-	    Vnet_Print(2, "vnet_tx_flush: pkt (size %d)\n", entry->pkt.size);
+		v3_enqueue(vnet_state.poll_devs, (addr_t)dev);
+	    }
+	}else { /* no device needs to be polled */
+	    /* sleep here? */
+	    Vnet_Yield();
 	}
     }
 
     return 0;
 }
 
+
 int v3_init_vnet() {
     memset(&vnet_state, 0, sizeof(vnet_state));
@@ -751,20 +747,20 @@ int v3_init_vnet() {
     vnet_state.num_routes = 0;
 
     if (vnet_lock_init(&(vnet_state.lock)) == -1){
-	PrintError("VNET/P Core: Fails to initiate lock\n");
+	PrintError("VNET/P: Fails to initiate lock\n");
     }
 
     vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
     if (vnet_state.route_cache == NULL) {
-	PrintError("VNET/P Core: Fails to initiate route cache\n");
+	PrintError("VNET/P: Fails to initiate route cache\n");
 	return -1;
     }
 
-    vnet_lock_init(&(vnet_state.pkt_q.lock));
+    vnet_state.poll_devs = v3_create_queue();
 
-    vnet_state.pkt_flush_thread = vnet_start_thread(vnet_tx_flush, NULL, "VNET_Pkts");
+    vnet_state.pkt_flush_thread = vnet_start_thread(vnet_tx_flush, NULL, "vnetd");
 
-    Vnet_Debug("VNET/P Core is initiated\n");
+    Vnet_Debug("VNET/P is initiated\n");
 
     return 0;
 }
@@ -774,8 +770,8 @@ void v3_deinit_vnet(){
 
     vnet_lock_deinit(&(vnet_state.lock));
 
-    free_devices();
-    free_routes();
+    deinit_devices_list();
+    deinit_routes_list();
 
     vnet_free_htable(vnet_state.route_cache, 1, 1);
     Vnet_Free(vnet_state.bridge);