2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2010, Lei Xia <lxia@northwestern.edu>
11 * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>
12 * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org>
15 * Author: Lei Xia <lxia@northwestern.edu>
16 * Yuan Tang <ytang@northwestern.edu>
18 * This is free software. You are permitted to use,
19 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
22 #include <vnet/vnet.h>
23 #include <vnet/vnet_hashtable.h>
24 #include <vnet/vnet_host.h>
25 #include <vnet/vnet_vmm.h>
27 #include <palacios/vmm_queue.h>
29 #ifndef V3_CONFIG_DEBUG_VNET
31 #define PrintDebug(fmt, args...)
34 #define VNET_NUM_TX_KICK_THREADS 1
36 #define VNET_ADAPTIVE_TX_KICK 1 // set to 1 to try to sleep when there is nothing to do
37 #define VNET_NOPROGRESS_LIMIT 1000 // ... after this many tries
38 #define VNET_YIELD_USEC 1000 // ... and go to sleep for this long
/* Tail of the Ethernet header overlay: destination MAC, source MAC,
 * and the 16-bit EtherType.  Packed so it can be laid directly over
 * raw packet bytes (pkt->data is cast to this in match_route()).
 * NOTE(review): the opening "struct eth_hdr {" line is elided from
 * this chunk. */
44 uint8_t dst_mac[ETH_ALEN];
45 uint8_t src_mac[ETH_ALEN];
46 uint16_t type; /* indicates layer 3 protocol type */
47 } __attribute__((packed));
/* Per-device record (struct vnet_dev — declaration header and some
 * fields are elided from this view): the device's MAC address, the VM
 * that owns it, and the frontend input/poll ops table. */
52 uint8_t mac_addr[ETH_ALEN];
53 struct v3_vm_info * vm;
54 struct v3_vnet_dev_ops dev_ops;
/* Upper bound on the per-poll packet quota a device may request
 * (clamped in v3_vnet_add_dev()). */
58 #define VNET_MAX_QUOTE 64
/* Linkage on the global device list (vnet_state.devs). */
63 struct list_head node;
64 } __attribute__((packed));
/* Bridge device record (struct vnet_brg_dev — header and remaining
 * fields elided here): owning VM plus the bridge input/poll ops used
 * by v3_vnet_send_pkt() for LINK_EDGE routes. */
68 struct v3_vm_info * vm;
69 struct v3_vnet_bridge_ops brg_ops;
74 } __attribute__((packed));
/* A configured route: the user-supplied route definition plus cached
 * pointers to the resolved source/destination devices (valid only when
 * the corresponding link type is LINK_INTERFACE). */
78 struct vnet_route_info {
79 struct v3_vnet_route route_def;
81 struct vnet_dev * dst_dev;
82 struct vnet_dev * src_dev;
/* node: global route list; match_node: temporary linkage reused by
 * match_route() while collecting the best-priority matches. */
86 struct list_head node;
87 struct list_head match_node; // used for route matching
/* route_list: cache entry mapping a packet's hash bytes to the set of
 * routes it matched; routes[] is a trailing variable-length array
 * (declaration header elided from this chunk). */
92 uint8_t hash_buf[VNET_HASH_SIZE];
95 struct vnet_route_info * routes[0];
96 } __attribute__((packed));
/* Fields of the module-global VNET state (the enclosing declaration of
 * vnet_state is elided from this view): route and device lists, the
 * single attached bridge, traffic counters, the pollable-device queue
 * shared by the TX kick threads, and the route lookup cache. */
101 struct v3_vnet_pkt pkt;
108 struct list_head routes;
109 struct list_head devs;
118 struct vnet_brg_dev * bridge;
121 struct vnet_stat stats;
123 /* device queue that are waiting to be polled */
124 struct v3_queue * poll_devs;
126 struct vnet_thread * pkt_flush_thread[VNET_NUM_TX_KICK_THREADS];
/* Cache from packet hash bytes -> struct route_list (see hash_fn/hash_eq). */
128 struct hashtable * route_cache;
133 #ifdef V3_CONFIG_DEBUG_VNET
/* Debug-only: format a 6-byte MAC as "aa:bb:cc:dd:ee:ff" into buf.
 * NOTE(review): "%2x" space-pads instead of zero-padding — "%02x" is
 * the conventional form.  Also, the bound of 100 exceeds the 32-byte
 * buffers callers pass (see match_route's src_str[32]); output is only
 * 17+NUL bytes so it fits in practice, but the stated bound is wrong. */
134 static inline void mac2str(uint8_t * mac, char * buf) {
135 snprintf(buf, 100, "%2x:%2x:%2x:%2x:%2x:%2x",
136 mac[0], mac[1], mac[2],
137 mac[3], mac[4], mac[5]);
/* Debug-only: dump one route definition (src/dst MAC, qualifiers,
 * link ids and types).  The id/type argument lines are elided from
 * this chunk. */
140 static void print_route(struct v3_vnet_route * route){
143 mac2str(route->src_mac, str);
144 PrintDebug(VM_NONE, VCORE_NONE, "Src Mac (%s), src_qual (%d)\n",
145 str, route->src_mac_qual);
146 mac2str(route->dst_mac, str);
147 PrintDebug(VM_NONE, VCORE_NONE, "Dst Mac (%s), dst_qual (%d)\n",
148 str, route->dst_mac_qual);
149 PrintDebug(VM_NONE, VCORE_NONE, "Src dev id (%d), src type (%d)",
152 PrintDebug(VM_NONE, VCORE_NONE, "Dst dev id (%d), dst type (%d)\n",
/* Debug-only: walk the global route list and print every route; for
 * LINK_INTERFACE destinations also show the cached dst device record.
 * NOTE(review): iterates vnet_state.routes without taking
 * vnet_state.lock — presumably tolerated because this is debug-build
 * only; confirm callers hold the lock or accept the race. */
157 static void dump_routes(){
158 struct vnet_route_info *route;
160 PrintDebug(VM_NONE, VCORE_NONE, "\n========Dump routes starts ============\n");
161 list_for_each_entry(route, &(vnet_state.routes), node) {
162 PrintDebug(VM_NONE, VCORE_NONE, "\nroute %d:\n", route->idx);
164 print_route(&(route->route_def));
165 if (route->route_def.dst_type == LINK_INTERFACE) {
166 PrintDebug(VM_NONE, VCORE_NONE, "dst_dev (%p), dst_dev_id (%d), dst_dev_ops(%p), dst_dev_data (%p)\n",
168 route->dst_dev->dev_id,
169 (void *)&(route->dst_dev->dev_ops),
170 route->dst_dev->private_data);
174 PrintDebug(VM_NONE, VCORE_NONE, "\n========Dump routes end ============\n");
181 * A VNET packet is a packed struct with the hashed fields grouped together.
182 * This means we can generate the hash from an offset into the pkt struct
/* Hash function for the route cache: hashes the first VNET_HASH_SIZE
 * bytes at hdr_ptr (a pointer to a pkt's hash_buf, passed as addr_t). */
184 static inline uint_t hash_fn(addr_t hdr_ptr) {
185 uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
187 return vnet_hash_buffer(hdr_buf, VNET_HASH_SIZE);
/* Key-equality callback paired with hash_fn: byte-compare the two
 * VNET_HASH_SIZE-byte keys. */
190 static inline int hash_eq(addr_t key1, addr_t key2) {
191 return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
/* Record a resolved route list in the cache, keyed by the packet's
 * hash bytes (copied into routes->hash_buf so the key's lifetime
 * matches the cached entry).  Returns nonzero/negative on insert
 * failure (exact value elided from this view); caller must hold
 * vnet_state.lock. */
194 static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
195 memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);
197 if (vnet_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
198 PrintError(VM_NONE, VCORE_NONE, "VNET/P Core: Failed to insert new route entry to the cache\n");
/* Drop every cached route list by freeing the whole table (the two
 * trailing 1s ask vnet_free_htable to free keys and values too) and
 * recreating it empty. */
205 static int clear_hash_cache() {
206 vnet_free_htable(vnet_state.route_cache, 1, 1);
207 vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
/* Cache lookup: sets *routes to the cached route_list for this
 * packet's hash bytes, or NULL on a miss. */
212 static int look_into_cache(const struct v3_vnet_pkt * pkt,
213 struct route_list ** routes) {
214 *routes = (struct route_list *)vnet_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
/* Linear scan of the device list for a matching dev_id; returns the
 * device or (presumably, return path elided) NULL if absent.  Callers
 * are expected to hold vnet_state.lock. */
220 static struct vnet_dev * dev_by_id(int idx) {
221 struct vnet_dev * dev = NULL;
223 list_for_each_entry(dev, &(vnet_state.devs), node) {
224 if (dev->dev_id == idx) {
/* Same scan keyed by MAC address; compare_ethaddr() returns 0 on
 * equality (memcmp-style), hence the negation. */
232 static struct vnet_dev * dev_by_mac(uint8_t * mac) {
233 struct vnet_dev * dev = NULL;
235 list_for_each_entry(dev, &(vnet_state.devs), node) {
236 if (!compare_ethaddr(dev->mac_addr, mac)){
/* Forward declarations: the kick threads are stopped/restarted around
 * device-list mutations (see v3_vnet_add_dev / v3_vnet_del_dev). */
245 static int start_vnet_kick_threads(void);
246 static int stop_vnet_kick_threads(void);
/* Public lookup by MAC; presumably returns the dev_id or an error
 * sentinel (return path elided from this view).
 * NOTE(review): calls dev_by_mac() without visibly taking
 * vnet_state.lock — confirm this is safe for concurrent add/del. */
249 int v3_vnet_find_dev(uint8_t * mac) {
250 struct vnet_dev * dev = NULL;
252 dev = dev_by_mac(mac);
/* Install a new route (by value): copies the definition, resolves the
 * src/dst device pointers for LINK_INTERFACE endpoints, and appends it
 * to the global route list under the state lock.  Returns the new
 * route's positive index.
 * NOTE(review): no visible clear_hash_cache() call here — stale cached
 * route lists may persist after a route is added; confirm the cache is
 * flushed on an elided line. */
262 int v3_vnet_add_route(struct v3_vnet_route route) {
263 struct vnet_route_info * new_route = NULL;
264 vnet_intr_flags_t flags;
266 new_route = (struct vnet_route_info *)Vnet_Malloc(sizeof(struct vnet_route_info));
269 PrintError(VM_NONE, VCORE_NONE, "Cannot allocate new route\n");
273 memset(new_route, 0, sizeof(struct vnet_route_info));
275 #ifdef V3_CONFIG_DEBUG_VNET
276 PrintDebug(VM_NONE, VCORE_NONE, "VNET/P Core: add_route_entry:\n");
/* Copy the caller's definition field by field. */
280 memcpy(new_route->route_def.src_mac, route.src_mac, ETH_ALEN);
281 memcpy(new_route->route_def.dst_mac, route.dst_mac, ETH_ALEN);
282 new_route->route_def.src_mac_qual = route.src_mac_qual;
283 new_route->route_def.dst_mac_qual = route.dst_mac_qual;
284 new_route->route_def.dst_type = route.dst_type;
285 new_route->route_def.src_type = route.src_type;
286 new_route->route_def.src_id = route.src_id;
287 new_route->route_def.dst_id = route.dst_id;
/* Resolve device endpoints now so the hot path need not look them up.
 * dev_by_id may return NULL if the device isn't registered yet. */
289 if (new_route->route_def.dst_type == LINK_INTERFACE) {
290 new_route->dst_dev = dev_by_id(new_route->route_def.dst_id);
293 if (new_route->route_def.src_type == LINK_INTERFACE) {
294 new_route->src_dev = dev_by_id(new_route->route_def.src_id);
/* Publish the route under the global lock; idx counter only ever
 * increments, so indices are unique for the lifetime of the module. */
298 flags = vnet_lock_irqsave(vnet_state.lock);
300 list_add(&(new_route->node), &(vnet_state.routes));
301 new_route->idx = ++ vnet_state.route_idx;
302 vnet_state.num_routes ++;
304 vnet_unlock_irqrestore(vnet_state.lock, flags);
308 #ifdef V3_CONFIG_DEBUG_VNET
312 return new_route->idx;
/* Remove the route with the given index from the global list (and,
 * on elided lines, presumably free it and flush the route cache).
 * NOTE(review): uses list_for_each_entry (not the _safe variant) while
 * deleting — acceptable only if the loop breaks immediately after
 * list_del; the break is on an elided line, confirm it exists. */
316 void v3_vnet_del_route(uint32_t route_idx){
317 struct vnet_route_info * route = NULL;
318 vnet_intr_flags_t flags;
320 flags = vnet_lock_irqsave(vnet_state.lock);
322 list_for_each_entry(route, &(vnet_state.routes), node) {
323 Vnet_Print(0, "v3_vnet_del_route, route idx: %d\n", route->idx);
324 if(route->idx == route_idx){
325 list_del(&(route->node));
331 vnet_unlock_irqrestore(vnet_state.lock, flags);
334 #ifdef V3_CONFIG_DEBUG_VNET
340 /* delete all route entries with specfied src or dst device id */
/* Uses the _safe iterator since entries are unlinked mid-walk.  Also
 * unlinks match_node, which match_route() may have left threaded onto
 * a (stale) match list.  Freeing of each route presumably happens on
 * an elided line inside the if-body. */
341 static void inline del_routes_by_dev(int dev_id){
342 struct vnet_route_info * route, *tmp_route;
343 vnet_intr_flags_t flags;
345 flags = vnet_lock_irqsave(vnet_state.lock);
347 list_for_each_entry_safe(route, tmp_route, &(vnet_state.routes), node) {
348 if((route->route_def.dst_type == LINK_INTERFACE &&
349 route->route_def.dst_id == dev_id) ||
350 (route->route_def.src_type == LINK_INTERFACE &&
351 route->route_def.src_id == dev_id)){
353 list_del(&(route->node));
354 list_del(&(route->match_node));
359 vnet_unlock_irqrestore(vnet_state.lock, flags);
363 // Match classes, must be in order
364 #define NUM_MATCH_CLASSES 4
365 #define NUM_MATCH_CLASSES_BOUND 3
/* Classify how a packet MAC relates to a route's MAC under the route's
 * qualifier (MAC_NONE/ANY/NOT/ADDR...).  Returns one of the match
 * classes (NONE/NOT/ANY/DIRECT — enum itself elided from this chunk)
 * which index priority_map below.  Several case labels and returns are
 * elided from this view. */
372 static inline uint8_t match_mac(uint8_t test_mac[ETH_ALEN],
373 uint8_t route_mac[ETH_ALEN],
376 switch (route_qual) {
387 if (memcmp(test_mac,route_mac,ETH_ALEN)) {
394 if (memcmp(test_mac,route_mac,ETH_ALEN)) {
401 PrintError(VM_NONE, VCORE_NONE, "Unknown qualifier %u\n",route_qual);
/* Debug-string helpers for qualifiers and match classes. */
408 #define QUAL_TO_STR(q) ( \
409 (q)==MAC_NOSET ? "MAC_NOSET" : \
410 (q)==MAC_NONE ? "MAC_NONE" : \
411 (q)==MAC_ANY ? "MAC_ANY" : \
412 (q)==MAC_NOT ? "MAC_NOT" : \
413 (q)==MAC_ADDR ? "MAC_ADDR" : \
417 #define MATCH_CLASS_TO_STR(c) ( \
418 (c)==NONE ? "NONE" : \
421 (c)==DIRECT ? "DIRECT" : \
429 Original priority behavior...
431 priority src srcqual dst dstqual
446 Current priority order is given in the following table
449 // [src][dst] => priority
/* Priority table: -1 means "route does not apply"; larger numbers win.
 * Designated initializers leave unlisted cells 0 unless overridden on
 * elided lines — several entries are not visible in this chunk. */
450 static int priority_map[NUM_MATCH_CLASSES][NUM_MATCH_CLASSES] =
452 [NONE] = { [ 0 ... NUM_MATCH_CLASSES_BOUND ] = -1}, // ignore if it's not a source match
453 [NOT][NONE] = -1, // ignore it if there is no destination match
457 [ANY][NONE] = -1, // ignore if there is no destination match
461 [DIRECT][NONE] = -1, // ignore if there is no destination match
464 [DIRECT][DIRECT] = 8,
/* Combine the src and dst match classes into a single route priority
 * via the table above; -1 means no match. */
470 static inline int match_priority(uint8_t src_mac[ETH_ALEN],
471 uint8_t dst_mac[ETH_ALEN],
472 uint8_t route_src_mac[ETH_ALEN],
473 uint8_t route_src_qual,
474 uint8_t route_dst_mac[ETH_ALEN],
475 uint8_t route_dst_qual)
479 return priority_map[match_mac(src_mac,route_src_mac,route_src_qual)][match_mac(dst_mac,route_dst_mac,route_dst_qual)];
484 Route matching will return the list of the highest priority routes that
485 match. It's a list because it's possible to have multiple high priority routes
/* Two phases: (1) walk all routes, keeping a temporary list (threaded
 * through each route's match_node) of only the current-highest-priority
 * matches, resetting it whenever a better priority appears; (2) copy
 * the survivors into a freshly allocated route_list.  Caller is
 * expected to hold vnet_state.lock (see v3_vnet_send_pkt).  Returns
 * NULL when nothing matches or allocation fails. */
487 static struct route_list * match_route(const struct v3_vnet_pkt * pkt)
490 struct vnet_route_info * route = NULL;
491 struct route_list * matches = NULL;
493 int max_priority = -1;
494 struct list_head match_list;
495 struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
499 // NOTE: USING THE MATCH_NODE in the route list to record a match list
500 // IS A DISASTER WAITING TO HAPPEN
503 #ifdef V3_CONFIG_DEBUG_VNET
505 char dst_str[32], src_str[32];
506 mac2str(hdr->src_mac, src_str);
507 mac2str(hdr->dst_mac, dst_str);
508 PrintDebug(VM_NONE, VCORE_NONE, "VNET/P Core: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
512 INIT_LIST_HEAD(&match_list);
/* Phase 1: score every route against the packet's MAC pair. */
515 list_for_each_entry(route, &(vnet_state.routes), node) {
517 struct v3_vnet_route * route_def = &(route->route_def);
521 priority = match_priority(hdr->src_mac,
524 route_def->src_mac_qual,
526 route_def->dst_mac_qual);
530 #ifdef V3_CONFIG_DEBUG_VNET
535 mac2str(route_def->src_mac, src_str);
536 mac2str(route_def->dst_mac, dst_str);
538 PrintDebug(VM_NONE, VCORE_NONE, "Tested match against SRC(%s) SRC_QUAL(%s), DEST(%s) DST_QUAL(%s): "
539 "SRC_MATCH=%s DEST_MATCH=%s PRIORITY=%d\n",
540 src_str, QUAL_TO_STR(route_def->src_mac_qual),
541 dst_str, QUAL_TO_STR(route_def->dst_mac_qual),
542 MATCH_CLASS_TO_STR(match_mac(hdr->src_mac,route_def->src_mac,route_def->src_mac_qual)),
543 MATCH_CLASS_TO_STR(match_mac(hdr->dst_mac,route_def->dst_mac,route_def->dst_mac_qual)),
549 PrintDebug(VM_NONE, VCORE_NONE, "No match to this rule\n");
/* Strictly better priority: discard the old candidates (unlinking
 * their match_node) and start a fresh list with this route. */
553 if (priority > max_priority) {
554 PrintDebug(VM_NONE, VCORE_NONE, "New highest priority match, reseting list\n");
555 max_priority = priority;
557 struct vnet_route_info *my_route, *tmp_route;
559 list_for_each_entry_safe(my_route, tmp_route, &match_list,match_node) {
560 list_del(&(my_route->match_node));
563 list_add(&(route->match_node), &match_list);
566 } else if (priority == max_priority) {
567 PrintDebug(VM_NONE, VCORE_NONE, "Equal priority match, adding to list\n");
569 list_add(&(route->match_node), &match_list);
575 PrintDebug(VM_NONE, VCORE_NONE, "VNET/P Core: match_route: Matches=%d\n", num_matches);
577 if (num_matches <= 0) {
/* Phase 2: materialize the winners into a flexible-array route_list. */
581 matches = (struct route_list *)Vnet_Malloc(sizeof(struct route_list) +
582 (sizeof(struct vnet_route_info *) * num_matches));
586 PrintError(VM_NONE, VCORE_NONE, "VNET/P Core: Unable to allocate matches\n");
590 matches->num_routes = num_matches;
593 list_for_each_entry(route, &match_list, match_node) {
594 if (i==num_matches) {
595 // the list should never have more than num_matches on it...
596 PrintError(VM_NONE, VCORE_NONE, "Weird list behavior\n");
599 matches->routes[i++] = route;
/* Build the encapsulation header the caller should prepend for traffic
 * between src_mac and dest_mac.  Constructs a fake packet (dst|src|type
 * in p.header, matching the eth_hdr layout the matcher hashes), runs it
 * through the cache/matcher under the lock, then fills *header from the
 * first matched route.  Currently every supported dst link type yields
 * VNET_HEADER_NONE (no encapsulation); non-match paths set
 * VNET_HEADER_NOMATCH.  Return statements are elided from this view. */
607 int v3_vnet_query_header(uint8_t src_mac[ETH_ALEN],
608 uint8_t dest_mac[ETH_ALEN],
609 int recv, // 0 = send, 1=recv
610 struct v3_vnet_header *header)
612 struct route_list *routes;
613 struct vnet_route_info *r;
614 struct v3_vnet_pkt p;
/* Synthesize the Ethernet header bytes the hash/matcher expects:
 * 6 bytes dst, 6 bytes src, 2 zero bytes of EtherType. */
619 memcpy(p.header,dest_mac,ETH_ALEN);
620 memcpy(p.header+ETH_ALEN,src_mac,ETH_ALEN);
621 memset(p.header+12,0,2);
623 p.src_type = LINK_EDGE;
626 memcpy(header->src_mac,src_mac,ETH_ALEN);
627 memcpy(header->dst_mac,dest_mac,ETH_ALEN);
/* Same lookup protocol as v3_vnet_send_pkt: cache first, then full
 * match, caching any fresh result. */
630 flags = vnet_lock_irqsave(vnet_state.lock);
632 look_into_cache(&p,&routes);
635 routes = match_route(&p);
637 vnet_unlock_irqrestore(vnet_state.lock,flags);
638 PrintError(VM_NONE, VCORE_NONE, "Cannot match route\n");
639 header->header_type=VNET_HEADER_NOMATCH;
640 header->header_len=0;
643 add_route_to_cache(&p,routes);
647 vnet_unlock_irqrestore(vnet_state.lock,flags);
649 if (routes->num_routes<1) {
650 PrintError(VM_NONE, VCORE_NONE, "Less than one route\n");
651 header->header_type=VNET_HEADER_NOMATCH;
652 header->header_len=0;
/* Multiple equal-priority routes: only the first is used to build the
 * header (best-effort, warned loudly). */
656 if (routes->num_routes>1) {
657 PrintError(VM_NONE, VCORE_NONE, "More than one route, building header for the first one only\n");
662 switch (r->route_def.dst_type) {
664 // switch based on the link type
665 // for mac-in-udp, we would want to generate a mac, ip, and udp header
666 // direct transmission
668 // for now we will say we have no encapsulation
670 header->header_type=VNET_HEADER_NONE;
671 header->header_len=0;
672 header->src_mac_qual=r->route_def.src_mac_qual;
673 header->dst_mac_qual=r->route_def.dst_mac_qual;
682 // direct transmission
683 // let's guess that it goes to the same interface...
684 header->header_type=VNET_HEADER_NONE;
685 header->header_len=0;
686 header->src_mac_qual=r->route_def.src_mac_qual;
687 header->dst_mac_qual=r->route_def.dst_mac_qual;
693 PrintError(VM_NONE, VCORE_NONE, "Unknown destination type\n");
/* Hot path: route one packet.  Under the state lock, bump RX counters
 * and resolve the route list (cache hit, else full match + cache
 * insert); then, outside the lock, deliver the packet to each matched
 * destination — the bridge for LINK_EDGE routes, the device's input()
 * for LINK_INTERFACE routes — bumping TX counters per success.
 * NOTE(review): TX counter updates and the bridge/dev pointer reads
 * happen after the lock is dropped — presumably tolerated as stats
 * races; confirm device removal cannot free dst_dev concurrently. */
704 int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
705 struct route_list * matched_routes = NULL;
706 vnet_intr_flags_t flags;
709 int cpu = V3_Get_CPU();
711 Vnet_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
712 cpu, pkt->size, pkt->src_id,
713 pkt->src_type, pkt->dst_id, pkt->dst_type);
716 v3_hexdump(pkt->data, pkt->size, NULL, 0);
719 flags = vnet_lock_irqsave(vnet_state.lock);
721 vnet_state.stats.rx_bytes += pkt->size;
722 vnet_state.stats.rx_pkts++;
724 look_into_cache(pkt, &matched_routes);
726 if (matched_routes == NULL) {
727 PrintDebug(VM_NONE, VCORE_NONE, "VNET/P Core: sending pkt - matching route\n");
729 matched_routes = match_route(pkt);
731 if (matched_routes) {
732 add_route_to_cache(pkt, matched_routes);
/* No route at all: drop silently (returning 0, per the comment). */
734 PrintDebug(VM_NONE, VCORE_NONE, "VNET/P Core: Could not find route for packet... discarding packet\n");
735 vnet_unlock_irqrestore(vnet_state.lock, flags);
736 return 0; /* do we return -1 here?*/
740 vnet_unlock_irqrestore(vnet_state.lock, flags);
742 PrintDebug(VM_NONE, VCORE_NONE, "VNET/P Core: send pkt route matches %d\n", matched_routes->num_routes);
/* Fan the packet out to every matched destination. */
744 for (i = 0; i < matched_routes->num_routes; i++) {
745 struct vnet_route_info * route = matched_routes->routes[i];
747 if (route->route_def.dst_type == LINK_EDGE) {
748 struct vnet_brg_dev * bridge = vnet_state.bridge;
/* Rewrite the packet's destination so the bridge knows the target. */
749 pkt->dst_type = LINK_EDGE;
750 pkt->dst_id = route->route_def.dst_id;
752 if (bridge == NULL) {
753 Vnet_Print(2, "VNET/P Core: No active bridge to sent data to\n");
757 if(bridge->brg_ops.input(bridge->vm, pkt, bridge->private_data) < 0){
758 Vnet_Print(2, "VNET/P Core: Packet not sent properly to bridge\n");
761 vnet_state.stats.tx_bytes += pkt->size;
762 vnet_state.stats.tx_pkts ++;
763 } else if (route->route_def.dst_type == LINK_INTERFACE) {
764 if (route->dst_dev == NULL){
765 Vnet_Print(2, "VNET/P Core: No active device to sent data to\n");
769 if(route->dst_dev->dev_ops.input(route->dst_dev->vm, pkt, route->dst_dev->private_data) < 0) {
770 Vnet_Print(2, "VNET/P Core: Packet not sent properly\n");
773 vnet_state.stats.tx_bytes += pkt->size;
774 vnet_state.stats.tx_pkts ++;
776 Vnet_Print(0, "VNET/P Core: Wrong dst type\n");
/* Register a frontend device: allocate a vnet_dev, copy MAC/ops/quota,
 * and (if the MAC is new) link it into the device list and the pollable
 * queue under the lock.  Kick threads are stopped around the mutation
 * so none is mid-poll while the lists change.  Returns the new positive
 * dev_id, or (on the elided error path) an error when the MAC already
 * exists — detected via dev_id still being 0.
 * NOTE(review): no memset of new_dev is visible, so the dev_id==0 test
 * presumably relies on Vnet_Malloc zeroing or an elided init line; also
 * confirm new_dev is freed on the duplicate-MAC path (not visible here,
 * would otherwise leak). */
784 int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac,
785 struct v3_vnet_dev_ops * ops, int quote, int poll_state,
787 struct vnet_dev * new_dev = NULL;
788 vnet_intr_flags_t flags;
790 new_dev = (struct vnet_dev *)Vnet_Malloc(sizeof(struct vnet_dev));
792 if (new_dev == NULL) {
793 Vnet_Print(0, "VNET/P Core: Unable to allocate a new device\n");
797 memcpy(new_dev->mac_addr, mac, ETH_ALEN);
798 new_dev->dev_ops.input = ops->input;
799 new_dev->dev_ops.poll = ops->poll;
800 new_dev->private_data = priv_data;
/* Clamp the requested per-poll quota to the module maximum. */
803 new_dev->quote = quote<VNET_MAX_QUOTE ? quote : VNET_MAX_QUOTE;
804 new_dev->poll = poll_state;
806 stop_vnet_kick_threads();
808 flags = vnet_lock_irqsave(vnet_state.lock);
810 if (dev_by_mac(mac) == NULL) {
811 list_add(&(new_dev->node), &(vnet_state.devs));
812 new_dev->dev_id = ++ vnet_state.dev_idx;
813 vnet_state.num_devs ++;
816 v3_enqueue(vnet_state.poll_devs, (addr_t)new_dev);
819 PrintError(VM_NONE, VCORE_NONE,"VNET/P: Device with the same MAC has already been added\n");
822 vnet_unlock_irqrestore(vnet_state.lock, flags);
824 start_vnet_kick_threads();
826 /* if the device was found previosly the id should still be 0 */
827 if (new_dev->dev_id == 0) {
828 Vnet_Print(0, "VNET/P Core: Device Already exists\n");
832 PrintDebug(VM_NONE, VCORE_NONE, "VNET/P Core: Add Device: dev_id %d\n", new_dev->dev_id);
834 return new_dev->dev_id;
/* Unregister a device: stop the kick threads (so no poller holds the
 * dev), unlink it from the device list under the lock, then restart the
 * threads.  Freeing of the dev and its removal from poll_devs happen on
 * elided lines — confirm both.  Route cleanup is deliberately disabled
 * (del_routes_by_dev commented out), so routes may keep a dangling
 * dst_dev/src_dev pointer — NOTE(review). */
838 int v3_vnet_del_dev(int dev_id){
839 struct vnet_dev * dev = NULL;
840 vnet_intr_flags_t flags;
842 stop_vnet_kick_threads();
844 flags = vnet_lock_irqsave(vnet_state.lock);
846 dev = dev_by_id(dev_id);
848 list_del(&(dev->node));
849 //del_routes_by_dev(dev_id);
850 vnet_state.num_devs --;
853 vnet_unlock_irqrestore(vnet_state.lock, flags);
855 start_vnet_kick_threads();
859 PrintDebug(VM_NONE, VCORE_NONE, "VNET/P Core: Removed Device: dev_id %d\n", dev_id);
/* Copy the global traffic counters into the caller's struct.
 * NOTE(review): reads are not under vnet_state.lock, so values may be
 * mutually inconsistent under load — presumably acceptable for stats. */
865 int v3_vnet_stat(struct vnet_stat * stats){
866 stats->rx_bytes = vnet_state.stats.rx_bytes;
867 stats->rx_pkts = vnet_state.stats.rx_pkts;
868 stats->tx_bytes = vnet_state.stats.tx_bytes;
869 stats->tx_pkts = vnet_state.stats.tx_pkts;
/* Teardown helper: unlink (and, on elided lines, presumably free)
 * every registered device.  _safe iterator because entries are removed
 * during the walk.  Called from v3_deinit_vnet with no lock — assumed
 * single-threaded at teardown. */
874 static void deinit_devices_list(){
875 struct vnet_dev * dev, * tmp;
877 list_for_each_entry_safe(dev, tmp, &(vnet_state.devs), node) {
878 list_del(&(dev->node));
/* Same pattern for routes; also unlinks match_node in case a route is
 * still threaded onto a stale match list. */
883 static void deinit_routes_list(){
884 struct vnet_route_info * route, * tmp;
886 list_for_each_entry_safe(route, tmp, &(vnet_state.routes), node) {
887 list_del(&(route->node));
888 list_del(&(route->match_node));
/* Attach the single bridge.  Claims the slot under the lock by storing
 * the sentinel (void*)1 (so a concurrent caller sees "taken" before the
 * real struct exists), then allocates and fills the bridge outside the
 * lock, and finally publishes the real pointer under the lock again.
 * On allocation failure the slot is released (bridge = NULL).
 * NOTE(review): the resets of bridge to NULL, the bridge_free logic,
 * and the return statements are partially elided from this view. */
893 int v3_vnet_add_bridge(struct v3_vm_info * vm,
894 struct v3_vnet_bridge_ops * ops,
897 vnet_intr_flags_t flags;
899 struct vnet_brg_dev * tmp_bridge = NULL;
901 flags = vnet_lock_irqsave(vnet_state.lock);
902 if (vnet_state.bridge == NULL) {
/* Reserve the slot with a sentinel before the (unlocked) allocation. */
904 vnet_state.bridge = (void *)1;
906 vnet_unlock_irqrestore(vnet_state.lock, flags);
908 if (bridge_free == 0) {
909 PrintError(VM_NONE, VCORE_NONE, "VNET/P Core: Bridge already set\n");
913 tmp_bridge = (struct vnet_brg_dev *)Vnet_Malloc(sizeof(struct vnet_brg_dev));
915 if (tmp_bridge == NULL) {
916 PrintError(VM_NONE, VCORE_NONE, "VNET/P Core: Unable to allocate new bridge\n");
917 vnet_state.bridge = NULL;
922 tmp_bridge->brg_ops.input = ops->input;
923 tmp_bridge->brg_ops.poll = ops->poll;
924 tmp_bridge->private_data = priv_data;
925 tmp_bridge->type = type;
927 /* make this atomic to avoid possible race conditions */
928 flags = vnet_lock_irqsave(vnet_state.lock);
929 vnet_state.bridge = tmp_bridge;
930 vnet_unlock_irqrestore(vnet_state.lock, flags);
/* Detach the bridge of the given type, if it is the one attached:
 * clear the pointer under the lock, free the struct after releasing it
 * (the NULL-check guarding the free is on an elided line). */
936 void v3_vnet_del_bridge(uint8_t type) {
937 vnet_intr_flags_t flags;
938 struct vnet_brg_dev * tmp_bridge = NULL;
940 flags = vnet_lock_irqsave(vnet_state.lock);
942 if (vnet_state.bridge != NULL && vnet_state.bridge->type == type) {
943 tmp_bridge = vnet_state.bridge;
944 vnet_state.bridge = NULL;
947 vnet_unlock_irqrestore(vnet_state.lock, flags);
950 Vnet_Free(tmp_bridge);
955 /* can be instanieoued to multiple threads
956 * that runs on multiple cores
957 * or it could be running on a dedicated side core
/* Kick-thread body.  Each iteration: drain the shared pollable-device
 * queue onto a private queue tq (claiming exclusive access to each
 * device), call its non-blocking poll op, then return everything to
 * the shared queue.  The saturating noprogress counter drives the
 * adaptive sleep: after VNET_NOPROGRESS_LIMIT idle passes the thread
 * sleeps VNET_YIELD_USEC instead of spinning.  Several lines (rc
 * handling, "more" accumulation, tq teardown, return) are elided from
 * this view. */
959 static int vnet_tx_flush(void * args){
960 struct vnet_dev * dev = NULL;
963 uint64_t noprogress_count;
965 Vnet_Print(0, "VNET/P Polling Thread Starting ....\n");
967 // since there are multiple instances of this thread, and only
968 // one queue of pollable devices, our model here will be to synchronize
969 // on that queue, removing devices as we go, and keeping them
970 // then putting them back on the queue when we are done
971 // in this way, multiple instances of this function will never
972 // be polling the same device at the same time
974 struct v3_queue * tq = v3_create_queue();
977 PrintError(VM_NONE, VCORE_NONE, "VNET/P polling thread cannot allocate queue\n");
981 noprogress_count = 0;
983 while (!vnet_thread_should_stop()) {
985 more=0; // will indicate if any device has more work for us to do
987 while ((dev = (struct vnet_dev *)v3_dequeue(vnet_state.poll_devs))) {
988 // we are handling this device
989 v3_enqueue(tq,(addr_t)dev);
991 if (dev->poll && dev->dev_ops.poll) {
992 // The device's poll function MUST NOT BLOCK
993 rc = dev->dev_ops.poll(dev->vm, dev->quote, dev->private_data);
996 Vnet_Print(0, "VNET/P: poll from device %p error (ignoring) !\n", dev);
1003 while ((dev = (struct vnet_dev *)v3_dequeue(tq))) {
1004 // now someone else can handle it
1005 v3_enqueue(vnet_state.poll_devs, (addr_t)dev);
/* Increment only while the +1 would not wrap: a saturating counter. */
1012 if ( ! ((noprogress_count+1) < noprogress_count)) {
/* Spin while adaptive kick is off or we're under the idle limit;
 * otherwise sleep to yield the core. */
1018 if ((!VNET_ADAPTIVE_TX_KICK) || (noprogress_count < VNET_NOPROGRESS_LIMIT)) {
1021 V3_Sleep(VNET_YIELD_USEC);
1028 Vnet_Print(0, "VNET/P Polling Thread Done.\n");
/* Spawn VNET_NUM_TX_KICK_THREADS instances of vnet_tx_flush, each with
 * a distinct "vnetd-%d" name.  Failure handling for vnet_start_thread
 * is elided from this view — NOTE(review): confirm a NULL thread is
 * handled here and in stop below. */
1033 static int start_vnet_kick_threads()
1037 for (i=0; i<VNET_NUM_TX_KICK_THREADS;i++) {
1039 snprintf(name,32,"vnetd-%d",i);
1040 vnet_state.pkt_flush_thread[i] = vnet_start_thread(vnet_tx_flush, NULL, name);
/* Signal each kick thread to stop (vnet_thread_should_stop() turns
 * true) and presumably join it; must be called before mutating the
 * device lists. */
1045 static int stop_vnet_kick_threads()
1048 for (i=0; i<VNET_NUM_TX_KICK_THREADS;i++) {
1049 vnet_thread_stop(vnet_state.pkt_flush_thread[i]);
/* Body of v3_init_vnet (the signature line is elided from this chunk):
 * zero the module state, init the route/device lists and global lock,
 * create the route cache and pollable-device queue, then launch the
 * kick threads.  Error-return lines for the failure branches are
 * elided.  NOTE(review): the v3_create_queue() result for poll_devs is
 * not visibly checked. */
1057 memset(&vnet_state, 0, sizeof(vnet_state));
1059 INIT_LIST_HEAD(&(vnet_state.routes));
1060 INIT_LIST_HEAD(&(vnet_state.devs));
1062 vnet_state.num_devs = 0;
1063 vnet_state.num_routes = 0;
1065 if (vnet_lock_init(&(vnet_state.lock)) == -1){
1066 PrintError(VM_NONE, VCORE_NONE, "VNET/P: Fails to initiate lock\n");
1069 vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
1070 if (vnet_state.route_cache == NULL) {
1071 PrintError(VM_NONE, VCORE_NONE, "VNET/P: Fails to initiate route cache\n");
1075 vnet_state.poll_devs = v3_create_queue();
1077 start_vnet_kick_threads();
1079 PrintDebug(VM_NONE, VCORE_NONE, "VNET/P is initiated (%d tx kick threads active)\n",VNET_NUM_TX_KICK_THREADS);
/* Full teardown, in dependency order: stop kick threads first (so
 * nothing polls), then the poll queue, device list, route list, route
 * cache, bridge, and finally the lock. */
1085 void v3_deinit_vnet()
1088 PrintDebug(VM_NONE, VCORE_NONE, "Stopping kick threads\n");
1089 stop_vnet_kick_threads();
1092 PrintDebug(VM_NONE, VCORE_NONE, "Deiniting poll devices\n");
1093 v3_deinit_queue(vnet_state.poll_devs);
1094 Vnet_Free(vnet_state.poll_devs);
1097 // At this point there should be no lock-holder
/* NOTE(review): poll_devs appears to be freed a second time here
 * (already freed just above) — unless an elided line between these
 * guards or reassigns it, this is a double free; verify against the
 * full file. */
1099 Vnet_Free(vnet_state.poll_devs);
1102 PrintDebug(VM_NONE, VCORE_NONE, "Deiniting Device List\n");
1103 // close any devices we have open
1104 deinit_devices_list();
1106 PrintDebug(VM_NONE, VCORE_NONE, "Deiniting Route List\n");
1107 // remove any routes we have
1108 deinit_routes_list();
1110 PrintDebug(VM_NONE, VCORE_NONE, "Freeing hash table\n");
1111 // remove the hash table
1112 vnet_free_htable(vnet_state.route_cache, 1, 1);
1115 PrintDebug(VM_NONE, VCORE_NONE, "Removing Bridge\n");
1116 // remove bridge if it was added
1117 if (vnet_state.bridge) {
1118 Vnet_Free(vnet_state.bridge);
1121 PrintDebug(VM_NONE, VCORE_NONE, "Deleting lock\n");
1122 // eliminate the lock
1123 vnet_lock_deinit(&(vnet_state.lock));