X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=blobdiff_plain;f=palacios%2Fsrc%2Fpalacios%2Fvmm_vnet_core.c;h=4b54d7188e705f3400a5a4b81c0466bf380faaee;hb=fdb16aa5e99a19a91264ae84beaf77d3036548d5;hp=a2d06f2ffeeab465ce9dc78b607f80864b56d1b0;hpb=0a7690f54cb5bda83780dac26ce433ad24b1d766;p=palacios.git

diff --git a/palacios/src/palacios/vmm_vnet_core.c b/palacios/src/palacios/vmm_vnet_core.c
index a2d06f2..4b54d71 100644
--- a/palacios/src/palacios/vmm_vnet_core.c
+++ b/palacios/src/palacios/vmm_vnet_core.c
@@ -31,6 +31,8 @@
 #define PrintDebug(fmt, args...)
 #endif
 
+int v3_net_debug = 0;
+
 struct eth_hdr {
     uint8_t dst_mac[ETH_ALEN];
     uint8_t src_mac[ETH_ALEN];
@@ -45,12 +47,6 @@ struct vnet_dev {
     struct v3_vnet_dev_ops dev_ops;
     void * private_data;
 
-    int active;
-    nic_poll_type_t mode;   /* vmm_drivern or guest_drivern */
-
-    uint64_t bytes_tx, bytes_rx;
-    uint32_t pkts_tx, pkt_rx;
-
     struct list_head node;
 } __attribute__((packed));
 
@@ -60,8 +56,7 @@ struct vnet_brg_dev {
     struct v3_vnet_bridge_ops brg_ops;
 
     uint8_t type;
-    nic_poll_type_t mode;
-    int active;
+
     void * private_data;
 } __attribute__((packed));
 
@@ -86,6 +81,20 @@ struct route_list {
 } __attribute__((packed));
 
 
+struct queue_entry {
+    uint8_t use;
+    struct v3_vnet_pkt pkt;
+    uint8_t data[ETHERNET_PACKET_LEN];
+};
+
+#define VNET_QUEUE_SIZE 10240
+struct vnet_queue {
+    struct queue_entry buf[VNET_QUEUE_SIZE];
+    int head, tail;
+    int count;
+    v3_lock_t lock;
+};
+
 static struct {
     struct list_head routes;
     struct list_head devs;
@@ -98,10 +107,13 @@ static struct {
     v3_lock_t lock;
     struct vnet_stat stats;
 
-    struct hashtable * route_cache;
-} vnet_state;
+    void * pkt_flush_thread;
 
+    struct vnet_queue pkt_q;
+
+    struct hashtable * route_cache;
+} vnet_state;
+
 #ifdef CONFIG_DEBUG_VNET
 
 static inline void mac_to_string(uint8_t * mac, char * buf) {
@@ -183,7 +195,8 @@ static int clear_hash_cache() {
     return 0;
 }
 
-static int look_into_cache(const struct v3_vnet_pkt * pkt, struct route_list ** routes) {
+static int look_into_cache(const struct v3_vnet_pkt * pkt,
+                           struct route_list ** routes) {
     *routes = (struct route_list *)v3_htable_search(vnet_state.route_cache,
                                                     (addr_t)(pkt->hash_buf));
     return 0;
@@ -307,8 +320,8 @@ static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
     int max_rank = 0;
     struct list_head match_list;
     struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
-    // uint8_t src_type = pkt->src_type;
-    // uint32_t src_link = pkt->src_id;
+    //  uint8_t src_type = pkt->src_type;
+    //  uint32_t src_link = pkt->src_id;
 
 #ifdef CONFIG_DEBUG_VNET
     {
@@ -426,20 +439,18 @@ static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
 }
 
 
-int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
+int vnet_tx_one_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
     struct route_list * matched_routes = NULL;
     unsigned long flags;
     int i;
 
-#ifdef CONFIG_DEBUG_VNET
-    {
-        int cpu = V3_Get_CPU();
-        PrintDebug("VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
+    int cpu = V3_Get_CPU();
+    V3_Net_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
                  cpu, pkt->size, pkt->src_id,
                  pkt->src_type, pkt->dst_id, pkt->dst_type);
-        //v3_hexdump(pkt->data, pkt->size, NULL, 0);
-    }
-#endif
+    if (v3_net_debug >= 4) {
+        v3_hexdump(pkt->data, pkt->size, NULL, 0);
+    }
 
     flags = v3_lock_irqsave(vnet_state.lock);
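The V3_Net_Print(level, ...) calls introduced in the hunk above (and used throughout the hunks below) are gated by the new v3_net_debug global. The macro itself is not part of this diff; a plausible definition, shown only for illustration and not taken from the Palacios source, would be:

    /* Hypothetical sketch: print only when the global debug level is at or
     * above `level`. v3_net_debug is the global added by this diff; V3_Print
     * is Palacios' unconditional console print, as used in vnet_tx_flush(). */
    #define V3_Net_Print(level, fmt, args...)       \
        do {                                        \
            if (v3_net_debug >= (level)) {          \
                V3_Print(fmt, ##args);              \
            }                                       \
        } while (0)

As used here, level 1 marks errors (queue overflow), level 2 per-packet tracing, and the payload hexdump above is gated directly on v3_net_debug >= 4.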
@@ -468,30 +479,30 @@ int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
 
     for (i = 0; i < matched_routes->num_routes; i++) {
         struct vnet_route_info * route = matched_routes->routes[i];
 
-        if (route->route_def.dst_type == LINK_EDGE) {
-            struct vnet_brg_dev *bridge = vnet_state.bridge;
-            pkt->dst_type = LINK_EDGE;
-            pkt->dst_id = route->route_def.dst_id;
+        if (route->route_def.dst_type == LINK_EDGE) {
+            struct vnet_brg_dev * bridge = vnet_state.bridge;
+            pkt->dst_type = LINK_EDGE;
+            pkt->dst_id = route->route_def.dst_id;
 
-            if (bridge == NULL || (bridge->active == 0)) {
-                PrintDebug("VNET/P Core: No active bridge to send data to\n");
+            if (bridge == NULL) {
+                V3_Net_Print(2, "VNET/P Core: No active bridge to send data to\n");
                 continue;
             }
 
             if (bridge->brg_ops.input(bridge->vm, pkt, bridge->private_data) < 0) {
-                PrintDebug("VNET/P Core: Packet not sent properly to bridge\n");
+                V3_Net_Print(2, "VNET/P Core: Packet not sent properly to bridge\n");
                 continue;
             }
             vnet_state.stats.tx_bytes += pkt->size;
             vnet_state.stats.tx_pkts ++;
         } else if (route->route_def.dst_type == LINK_INTERFACE) {
-            if (route->dst_dev == NULL || route->dst_dev->active == 0) {
-                PrintDebug("VNET/P Core: No active device to send data to\n");
+            if (route->dst_dev == NULL) {
+                V3_Net_Print(2, "VNET/P Core: No active device to send data to\n");
                 continue;
             }
 
             if (route->dst_dev->dev_ops.input(route->dst_dev->vm, pkt, route->dst_dev->private_data) < 0) {
-                PrintDebug("VNET/P Core: Packet not sent properly\n");
+                V3_Net_Print(2, "VNET/P Core: Packet not sent properly\n");
                 continue;
             }
             vnet_state.stats.tx_bytes += pkt->size;
@@ -504,6 +515,50 @@ int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
     return 0;
 }
 
+
+static int vnet_pkt_enqueue(struct v3_vnet_pkt * pkt) {
+    unsigned long flags;
+    struct queue_entry * entry;
+    struct vnet_queue * q = &(vnet_state.pkt_q);
+
+    flags = v3_lock_irqsave(q->lock);
+
+    if (q->count >= VNET_QUEUE_SIZE) {
+        V3_Net_Print(1, "VNET Queue overflow!\n");
+        v3_unlock_irqrestore(q->lock, flags);
+        return -1;
+    }
+
+    q->count ++;
+    entry = &(q->buf[q->tail++]);
+    q->tail %= VNET_QUEUE_SIZE;
+
+    v3_unlock_irqrestore(q->lock, flags);
+
+    /* ugly, but this spin should almost never be taken */
+    while (entry->use);
+
+    memcpy(&(entry->pkt), pkt, sizeof(struct v3_vnet_pkt));
+    /* repoint the copy's data pointer at the entry's own buffer *after*
+     * the struct copy, so it does not keep referencing the caller's
+     * (soon to be reused) buffer */
+    entry->pkt.data = entry->data;
+    memcpy(entry->data, pkt->data, pkt->size);
+
+    entry->use = 1;
+
+    return 0;
+}
+
+
+int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data, int synchronize) {
+    if (synchronize) {
+        vnet_tx_one_pkt(pkt, NULL);
+    } else {
+        vnet_pkt_enqueue(pkt);
+        V3_Net_Print(2, "VNET/P Core: Put pkt into Queue: pkt size %d\n", pkt->size);
+    }
+
+    return 0;
+}
+
 int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac,
                     struct v3_vnet_dev_ops *ops,
                     void * priv_data){
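The hunk above turns v3_vnet_send_pkt() into a thin wrapper that lets the caller choose, per packet, between inline routing and the new queued path. A minimal caller sketch (the frontend_tx name and in_irq flag are hypothetical, not part of this diff):

    /* Hand a frame to VNET. From an interrupt-like context, queue it
     * (synchronize == 0) so routing runs on the flush thread; from a
     * safe context, it can be routed inline (synchronize == 1). */
    static int frontend_tx(struct v3_vnet_pkt * pkt, int in_irq) {
        return v3_vnet_send_pkt(pkt, NULL, in_irq ? 0 : 1);
    }

Note that the private_data argument is currently unused on both paths: the synchronous branch forwards NULL to vnet_tx_one_pkt().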
@@ -519,12 +574,9 @@ int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac,
 
     memcpy(new_dev->mac_addr, mac, 6);
     new_dev->dev_ops.input = ops->input;
-    new_dev->dev_ops.poll = ops->poll;
     new_dev->private_data = priv_data;
     new_dev->vm = vm;
     new_dev->dev_id = 0;
-    new_dev->active = 1;
-    new_dev->mode = GUEST_DRIVERN;
 
     flags = v3_lock_irqsave(vnet_state.lock);
 
@@ -547,7 +599,6 @@ int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac,
 }
 
 
-
 int v3_vnet_del_dev(int dev_id){
     struct vnet_dev * dev = NULL;
     unsigned long flags;
@@ -569,6 +620,7 @@ int v3_vnet_del_dev(int dev_id){
     return 0;
 }
 
+
 int v3_vnet_stat(struct vnet_stat * stats){
 
     stats->rx_bytes = vnet_state.stats.rx_bytes;
@@ -607,12 +659,10 @@ int v3_vnet_add_bridge(struct v3_vm_info * vm,
     struct vnet_brg_dev * tmp_bridge = NULL;
 
     flags = v3_lock_irqsave(vnet_state.lock);
-
     if (vnet_state.bridge == NULL) {
         bridge_free = 1;
         vnet_state.bridge = (void *)1;
     }
-
     v3_unlock_irqrestore(vnet_state.lock, flags);
 
     if (bridge_free == 0) {
@@ -632,8 +682,6 @@ int v3_vnet_add_bridge(struct v3_vm_info * vm,
     tmp_bridge->brg_ops.input = ops->input;
     tmp_bridge->brg_ops.poll = ops->poll;
     tmp_bridge->private_data = priv_data;
-    tmp_bridge->active = 1;
-    tmp_bridge->mode = GUEST_DRIVERN;
     tmp_bridge->type = type;
 
     /* make this atomic to avoid possible race conditions */
@@ -645,20 +693,39 @@ int v3_vnet_add_bridge(struct v3_vm_info * vm,
 }
 
 
-void v3_vnet_do_poll(struct v3_vm_info * vm){
-    struct vnet_dev * dev = NULL;
+static int vnet_tx_flush(void * args) {
+    unsigned long flags;
+    struct queue_entry * entry;
+    struct vnet_queue * q = &(vnet_state.pkt_q);
 
-    /* TODO: run this on separate threads
-     * round-robin schedule, with maximal budget for each poll
-     */
-    list_for_each_entry(dev, &(vnet_state.devs), node) {
-        if(dev->mode == VMM_DRIVERN){
-            dev->dev_ops.poll(vm, -1, dev->private_data);
-        }
+    V3_Print("VNET/P Packet Handling Thread Starting ....\n");
+
+    //V3_THREAD_SLEEP();
+    /* we need thread sleep/wakeup in Palacios */
+    while (1) {
+        flags = v3_lock_irqsave(q->lock);
+
+        if (q->count <= 0) {
+            v3_unlock_irqrestore(q->lock, flags);
+            v3_yield(NULL);
+            //V3_THREAD_SLEEP();
+        } else {
+            q->count --;
+            entry = &(q->buf[q->head++]);
+            q->head %= VNET_QUEUE_SIZE;
+
+            v3_unlock_irqrestore(q->lock, flags);
+
+            /* ugly, but this spin should almost never be taken */
+            while (!entry->use);
+
+            vnet_tx_one_pkt(&(entry->pkt), NULL);
+            V3_Net_Print(2, "vnet_tx_flush: pkt (size %d)\n", entry->pkt.size);
+
+            /* release the slot only after we are done with its contents */
+            entry->use = 0;
+        }
     }
 }
 
-
 int v3_init_vnet() {
     memset(&vnet_state, 0, sizeof(vnet_state));
 
@@ -673,12 +740,15 @@ int v3_init_vnet() {
     }
 
     vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);
-
     if (vnet_state.route_cache == NULL) {
        PrintError("VNET/P Core: Failed to initialize the route cache\n");
        return -1;
     }
 
+    v3_lock_init(&(vnet_state.pkt_q.lock));
+
+    vnet_state.pkt_flush_thread = V3_CREATE_THREAD(vnet_tx_flush, NULL, "VNET_Pkts");
+
     PrintDebug("VNET/P Core is initialized\n");
 
     return 0;
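The queue added by this change pairs lock-protected head/tail/count updates with a per-slot use flag: the lock only reserves a slot index, while the use flag tells the other side when that slot's contents are valid (producer side) or fully consumed (consumer side). A standalone, simplified sketch of that handshake (plain C, single producer and single consumer, with the Palacios types, locking, and overflow check omitted):

    #include <string.h>

    #define QUEUE_SIZE 16

    struct slot {
        volatile int use;            /* 0: free for producer, 1: ready for consumer */
        int len;
        char data[1500];
    };

    static struct slot ring[QUEUE_SIZE];
    static int head = 0, tail = 0;   /* advanced under a lock in the real code */

    static void produce(const char * buf, int len) {
        struct slot * s = &ring[tail];
        tail = (tail + 1) % QUEUE_SIZE;
        while (s->use);              /* wait for the consumer to release the slot */
        memcpy(s->data, buf, len);
        s->len = len;
        s->use = 1;                  /* publish: contents are now valid */
    }

    static void consume(void (*tx)(const char *, int)) {
        struct slot * s = &ring[head];
        head = (head + 1) % QUEUE_SIZE;
        while (!s->use);             /* wait for the producer to finish filling */
        tx(s->data, s->len);
        s->use = 0;                  /* release the slot back to the producer */
    }

This is why vnet_pkt_enqueue() can drop the lock before copying the packet: ownership of the slot index has already been transferred by the locked tail update, and the use flag orders the payload copy against the flush thread's read.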