2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2010, Lei Xia <lxia@northwestern.edu>
11 * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>
12 * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org>
15 * Author: Lei Xia <lxia@northwestern.edu>
16 * Yuan Tang <ytang@northwestern.edu>
18 * This is free software. You are permitted to use,
19 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
22 #include <vnet/vnet.h>
23 #include <vnet/vnet_hashtable.h>
24 #include <vnet/vnet_host.h>
25 #include <vnet/vnet_vmm.h>
27 #include <palacios/vmm_queue.h>
/* Compile out debug output unless VNET debugging is configured. */
29 #ifndef V3_CONFIG_DEBUG_VNET
31 #define PrintDebug(fmt, args...)
/* Tunables for the TX-kick (flush) worker threads. */
34 #define VNET_NUM_TX_KICK_THREADS 2
36 #define VNET_ADAPTIVE_TX_KICK 0 // set to 1 to try to sleep when there is nothing to do
37 #define VNET_NOPROGRESS_LIMIT 1000 // ... after this many tries
38 #define VNET_YIELD_USEC 1000 // ... and go to sleep for this long
/* Ethernet frame header; packed so it can overlay raw frame bytes.
 * NOTE(review): opening "struct eth_hdr {" line is not visible in this chunk. */
44 uint8_t dst_mac[ETH_ALEN];
45 uint8_t src_mac[ETH_ALEN];
46 uint16_t type; /* indicates layer 3 protocol type */
47 } __attribute__((packed));
/* Per-device record: MAC identity, owning VM, and the input/poll callbacks
 * supplied at registration. NOTE(review): struct opening line not visible here. */
52 uint8_t mac_addr[ETH_ALEN];
53 struct v3_vm_info * vm;
54 struct v3_vnet_dev_ops dev_ops;
/* Upper bound on the per-poll packet quote a device may request. */
58 #define VNET_MAX_QUOTE 64
/* Tail of the device struct (list linkage) followed by the bridge-device
 * record: owning VM plus the bridge input/poll callbacks.
 * NOTE(review): struct opening lines are not visible in this chunk. */
63 struct list_head node;
64 } __attribute__((packed));
68 struct v3_vm_info * vm;
69 struct v3_vnet_bridge_ops brg_ops;
74 } __attribute__((packed));
/* Internal route record: the user-supplied route definition plus resolved
 * device pointers and list linkage. */
78 struct vnet_route_info {
79 struct v3_vnet_route route_def;
/* Resolved at add time when src/dst type is LINK_INTERFACE; NULL otherwise. */
81 struct vnet_dev * dst_dev;
82 struct vnet_dev * src_dev;
86 struct list_head node;
87 struct list_head match_node; // used for route matching
/* Cached match result: hash key copied from the packet, then a trailing
 * variable-length array of matched routes.
 * NOTE(review): routes[0] is the pre-C99 zero-length-array idiom; a C99
 * flexible array member ("routes[]") would be the modern form. */
92 uint8_t hash_buf[VNET_HASH_SIZE];
95 struct vnet_route_info * routes[0];
96 } __attribute__((packed));
/* Fragment of the global VNET state: route and device lists, the single
 * optional bridge, statistics, the pollable-device queue shared by the
 * TX-kick threads, and the route-match cache. */
101 struct v3_vnet_pkt pkt;
108 struct list_head routes;
109 struct list_head devs;
118 struct vnet_brg_dev * bridge;
121 struct vnet_stat stats;
123 /* device queue holding devices that are waiting to be polled */
124 struct v3_queue * poll_devs;
126 struct vnet_thread * pkt_flush_thread[VNET_NUM_TX_KICK_THREADS];
128 struct hashtable * route_cache;
133 #ifdef V3_CONFIG_DEBUG_VNET
/* Format a 6-byte MAC into buf as colon-separated hex (debug builds only).
 * NOTE(review): the bound of 100 overstates caller buffers (callers pass
 * char[32]); the 17-char output fits, but the bound should match the buffer.
 * Also "%2x" space-pads — "%02x" was presumably intended; confirm. */
134 static inline void mac2str(uint8_t * mac, char * buf) {
135 snprintf(buf, 100, "%2x:%2x:%2x:%2x:%2x:%2x",
136 mac[0], mac[1], mac[2],
137 mac[3], mac[4], mac[5]);
/* Debug-dump one route definition: src/dst MACs, qualifiers, and link ids/types. */
140 static void print_route(struct v3_vnet_route * route){
143 mac2str(route->src_mac, str);
144 PrintDebug("Src Mac (%s), src_qual (%d)\n",
145 str, route->src_mac_qual);
146 mac2str(route->dst_mac, str);
147 PrintDebug("Dst Mac (%s), dst_qual (%d)\n",
148 str, route->dst_mac_qual);
149 PrintDebug("Src dev id (%d), src type (%d)",
152 PrintDebug("Dst dev id (%d), dst type (%d)\n",
/* Debug-dump every route in vnet_state.routes.
 * NOTE(review): walks the route list without taking vnet_state.lock in the
 * visible lines — callers appear to invoke it while holding the lock or at
 * debug time; confirm against the full file. */
157 static void dump_routes(){
158 struct vnet_route_info *route;
160 PrintDebug("\n========Dump routes starts ============\n");
161 list_for_each_entry(route, &(vnet_state.routes), node) {
162 PrintDebug("\nroute %d:\n", route->idx);
164 print_route(&(route->route_def));
165 if (route->route_def.dst_type == LINK_INTERFACE) {
/* Only interface destinations have a resolved dst_dev to report. */
166 PrintDebug("dst_dev (%p), dst_dev_id (%d), dst_dev_ops(%p), dst_dev_data (%p)\n",
168 route->dst_dev->dev_id,
169 (void *)&(route->dst_dev->dev_ops),
170 route->dst_dev->private_data);
174 PrintDebug("\n========Dump routes end ============\n");
181 * A VNET packet is a packed struct with the hashed fields grouped together.
182 * This means we can generate the hash from an offset into the pkt struct
/* Hash callback for the route cache: hashes the first VNET_HASH_SIZE bytes
 * at hdr_ptr (the packet's hash_buf region). */
184 static inline uint_t hash_fn(addr_t hdr_ptr) {
185 uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
187 return vnet_hash_buffer(hdr_buf, VNET_HASH_SIZE);
/* Key-equality callback for the route cache: byte-compare the two
 * VNET_HASH_SIZE-byte keys; returns nonzero when equal. */
190 static inline int hash_eq(addr_t key1, addr_t key2) {
191 return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
/* Insert a matched route list into the route cache, keyed by the packet's
 * hash bytes (copied into the list so the key's lifetime matches the value).
 * NOTE(review): callers appear to hold vnet_state.lock — confirm. */
194 static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
195 memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);
197 if (vnet_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
198 PrintError("VNET/P Core: Failed to insert new route entry to the cache\n");
/* Drop every cached route match by destroying and recreating the hashtable
 * (flags 1,1 presumably free both keys and values — confirm against the
 * vnet_hashtable API). Called when routes or devices change. */
205 static int clear_hash_cache() {
206 vnet_free_htable(vnet_state.route_cache, 1, 1);
207 vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
/* Look up a cached route list for this packet's hash bytes; *routes is set
 * to the cached list or NULL on a miss. */
212 static int look_into_cache(const struct v3_vnet_pkt * pkt,
213 struct route_list ** routes) {
214 *routes = (struct route_list *)vnet_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
/* Linear search of the device list by numeric id; returns the device or
 * (presumably, in the unseen tail) NULL when absent. Caller must hold the
 * lock or otherwise prevent concurrent list mutation. */
220 static struct vnet_dev * dev_by_id(int idx) {
221 struct vnet_dev * dev = NULL;
223 list_for_each_entry(dev, &(vnet_state.devs), node) {
224 if (dev->dev_id == idx) {
/* Linear search of the device list by MAC address (compare_ethaddr returns
 * 0 on equality, hence the negation). */
232 static struct vnet_dev * dev_by_mac(uint8_t * mac) {
233 struct vnet_dev * dev = NULL;
235 list_for_each_entry(dev, &(vnet_state.devs), node) {
236 if (!compare_ethaddr(dev->mac_addr, mac)){
/* Public lookup: map a MAC address to a device id (via dev_by_mac).
 * NOTE(review): the visible lines take no lock around the list walk. */
245 int v3_vnet_find_dev(uint8_t * mac) {
246 struct vnet_dev * dev = NULL;
248 dev = dev_by_mac(mac);
/* Register a new route (by value), resolving interface endpoints to device
 * pointers, and append it to the global route list under the lock.
 * Returns the new route's index (also clears the route cache in unseen lines,
 * presumably — confirm). */
258 int v3_vnet_add_route(struct v3_vnet_route route) {
259 struct vnet_route_info * new_route = NULL;
260 vnet_intr_flags_t flags;
262 new_route = (struct vnet_route_info *)Vnet_Malloc(sizeof(struct vnet_route_info));
265 PrintError("Cannot allocate new route\n");
269 memset(new_route, 0, sizeof(struct vnet_route_info));
271 #ifdef V3_CONFIG_DEBUG_VNET
272 PrintDebug("VNET/P Core: add_route_entry:\n");
/* Copy the caller's route definition field by field. */
276 memcpy(new_route->route_def.src_mac, route.src_mac, ETH_ALEN);
277 memcpy(new_route->route_def.dst_mac, route.dst_mac, ETH_ALEN);
278 new_route->route_def.src_mac_qual = route.src_mac_qual;
279 new_route->route_def.dst_mac_qual = route.dst_mac_qual;
280 new_route->route_def.dst_type = route.dst_type;
281 new_route->route_def.src_type = route.src_type;
282 new_route->route_def.src_id = route.src_id;
283 new_route->route_def.dst_id = route.dst_id;
/* Resolve interface endpoints to device pointers now, once. */
285 if (new_route->route_def.dst_type == LINK_INTERFACE) {
286 new_route->dst_dev = dev_by_id(new_route->route_def.dst_id);
289 if (new_route->route_def.src_type == LINK_INTERFACE) {
290 new_route->src_dev = dev_by_id(new_route->route_def.src_id);
/* Publish the route under the global lock; idx is a monotonically
 * increasing id, never reused. */
294 flags = vnet_lock_irqsave(vnet_state.lock);
296 list_add(&(new_route->node), &(vnet_state.routes));
297 new_route->idx = ++ vnet_state.route_idx;
298 vnet_state.num_routes ++;
300 vnet_unlock_irqrestore(vnet_state.lock, flags);
304 #ifdef V3_CONFIG_DEBUG_VNET
308 return new_route->idx;
/* Remove the route with the given index from the global list.
 * NOTE(review): list_del inside a plain list_for_each_entry is only safe if
 * the loop exits immediately after (the break is in unseen lines — confirm).
 * Also only 'node' is unlinked here, not 'match_node'; compare with
 * del_routes_by_dev which unlinks both. */
312 void v3_vnet_del_route(uint32_t route_idx){
313 struct vnet_route_info * route = NULL;
314 vnet_intr_flags_t flags;
316 flags = vnet_lock_irqsave(vnet_state.lock);
318 list_for_each_entry(route, &(vnet_state.routes), node) {
319 Vnet_Print(0, "v3_vnet_del_route, route idx: %d\n", route->idx);
320 if(route->idx == route_idx){
321 list_del(&(route->node));
327 vnet_unlock_irqrestore(vnet_state.lock, flags);
330 #ifdef V3_CONFIG_DEBUG_VNET
336 /* delete all route entries with specified src or dst device id */
/* Remove every route whose LINK_INTERFACE source or destination refers to
 * dev_id. Uses the _safe iterator so unlinking during traversal is legal.
 * NOTE(review): unlinks match_node too — assumes it is always initialized
 * for every route; confirm at route-creation time. */
337 static void inline del_routes_by_dev(int dev_id){
338 struct vnet_route_info * route, *tmp_route;
339 vnet_intr_flags_t flags;
341 flags = vnet_lock_irqsave(vnet_state.lock);
343 list_for_each_entry_safe(route, tmp_route, &(vnet_state.routes), node) {
344 if((route->route_def.dst_type == LINK_INTERFACE &&
345 route->route_def.dst_id == dev_id) ||
346 (route->route_def.src_type == LINK_INTERFACE &&
347 route->route_def.src_id == dev_id)){
349 list_del(&(route->node));
350 list_del(&(route->match_node));
355 vnet_unlock_irqrestore(vnet_state.lock, flags);
359 // Match classes, must be in order
360 #define NUM_MATCH_CLASSES 4
361 #define NUM_MATCH_CLASSES_BOUND 3
/* Classify how test_mac relates to a route's MAC given the route qualifier
 * (MAC_NONE/ANY/NOT/ADDR...); the returned class indexes priority_map.
 * Falls through to an error for unknown qualifiers. */
368 static inline uint8_t match_mac(uint8_t test_mac[ETH_ALEN],
369 uint8_t route_mac[ETH_ALEN],
372 switch (route_qual) {
383 if (memcmp(test_mac,route_mac,ETH_ALEN)) {
390 if (memcmp(test_mac,route_mac,ETH_ALEN)) {
397 PrintError("Unknown qualifier %u\n",route_qual);
/* Debug helpers: map a MAC qualifier / match class to a printable name. */
404 #define QUAL_TO_STR(q) ( \
405 (q)==MAC_NOSET ? "MAC_NOSET" : \
406 (q)==MAC_NONE ? "MAC_NONE" : \
407 (q)==MAC_ANY ? "MAC_ANY" : \
408 (q)==MAC_NOT ? "MAC_NOT" : \
409 (q)==MAC_ADDR ? "MAC_ADDR" : \
413 #define MATCH_CLASS_TO_STR(c) ( \
414 (c)==NONE ? "NONE" : \
417 (c)==DIRECT ? "DIRECT" : \
425 Original priority behavior...
427 priority src srcqual dst dstqual
442 Current priority order is given in the following table
445 // [src][dst] => priority
/* Priority lookup: indexed by [source match class][destination match class].
 * -1 means "no match"; larger numbers win. Uses C99 designated/range
 * initializers, so unlisted cells default to 0. */
446 static int priority_map[NUM_MATCH_CLASSES][NUM_MATCH_CLASSES] =
448 [NONE] = { [ 0 ... NUM_MATCH_CLASSES_BOUND ] = -1}, // ignore if it's not a source match
449 [NOT][NONE] = -1, // ignore it if there is no destination match
453 [ANY][NONE] = -1, // ignore if there is no destination match
457 [DIRECT][NONE] = -1, // ignore if there is no destination match
460 [DIRECT][DIRECT] = 8,
/* Compute the priority of a (packet, route) pairing by classifying the
 * source and destination MAC matches and indexing priority_map.
 * Returns -1 when the route does not apply. */
466 static inline int match_priority(uint8_t src_mac[ETH_ALEN],
467 uint8_t dst_mac[ETH_ALEN],
468 uint8_t route_src_mac[ETH_ALEN],
469 uint8_t route_src_qual,
470 uint8_t route_dst_mac[ETH_ALEN],
471 uint8_t route_dst_qual)
475 return priority_map[match_mac(src_mac,route_src_mac,route_src_qual)][match_mac(dst_mac,route_dst_mac,route_dst_qual)];
480 Route matching will return the list of the highest priority routes that
481 match. It's a list because it's possible to have multiple high priority routes
/* Scan all routes, keep only those tied for the highest priority, and return
 * them in a freshly allocated route_list (caller/cache owns it).
 * Caller must hold vnet_state.lock: the shared match_node fields are reused
 * as scratch list linkage, which is unsafe under concurrency (see note below). */
486 struct vnet_route_info * route = NULL;
487 struct route_list * matches = NULL;
489 int max_priority = -1;
490 struct list_head match_list;
491 struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
495 // NOTE: USING THE MATCH_NODE in the route list to record a match list
496 // IS A DISASTER WAITING TO HAPPEN
499 #ifdef V3_CONFIG_DEBUG_VNET
501 char dst_str[32], src_str[32];
502 mac2str(hdr->src_mac, src_str);
503 mac2str(hdr->dst_mac, dst_str);
504 PrintDebug("VNET/P Core: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
508 INIT_LIST_HEAD(&match_list);
/* Pass 1: find the highest priority and gather ties on match_list. */
511 list_for_each_entry(route, &(vnet_state.routes), node) {
513 struct v3_vnet_route * route_def = &(route->route_def);
517 priority = match_priority(hdr->src_mac,
520 route_def->src_mac_qual,
522 route_def->dst_mac_qual);
526 #ifdef V3_CONFIG_DEBUG_VNET
531 mac2str(route_def->src_mac, src_str);
532 mac2str(route_def->dst_mac, dst_str);
534 PrintDebug("Tested match against SRC(%s) SRC_QUAL(%s), DEST(%s) DST_QUAL(%s): "
535 "SRC_MATCH=%s DEST_MATCH=%s PRIORITY=%d\n",
536 src_str, QUAL_TO_STR(route_def->src_mac_qual),
537 dst_str, QUAL_TO_STR(route_def->dst_mac_qual),
538 MATCH_CLASS_TO_STR(match_mac(hdr->src_mac,route_def->src_mac,route_def->src_mac_qual)),
539 MATCH_CLASS_TO_STR(match_mac(hdr->dst_mac,route_def->dst_mac,route_def->dst_mac_qual)),
545 PrintDebug("No match to this rule\n");
/* A strictly better match invalidates everything collected so far. */
549 if (priority > max_priority) {
550 PrintDebug("New highest priority match, reseting list\n");
551 max_priority = priority;
553 struct vnet_route_info *my_route, *tmp_route;
555 list_for_each_entry_safe(my_route, tmp_route, &match_list,match_node) {
556 list_del(&(my_route->match_node));
559 list_add(&(route->match_node), &match_list);
562 } else if (priority == max_priority) {
563 PrintDebug("Equal priority match, adding to list\n");
565 list_add(&(route->match_node), &match_list);
571 PrintDebug("VNET/P Core: match_route: Matches=%d\n", num_matches);
573 if (num_matches <= 0) {
/* Pass 2: copy the winners into a flat, cacheable array. */
577 matches = (struct route_list *)Vnet_Malloc(sizeof(struct route_list) +
578 (sizeof(struct vnet_route_info *) * num_matches));
582 PrintError("VNET/P Core: Unable to allocate matches\n");
586 matches->num_routes = num_matches;
589 list_for_each_entry(route, &match_list, match_node) {
590 if (i==num_matches) {
591 // the list should never have more than num_matches on it...
592 PrintError("Weird list behavior\n");
595 matches->routes[i++] = route;
/* Answer "what encapsulation header would a packet src->dest need?" by
 * synthesizing a fake Ethernet frame, running it through the normal route
 * match (cache first), and describing the first matching route's link type.
 * Currently every link type reports VNET_HEADER_NONE (no encapsulation). */
603 int v3_vnet_query_header(uint8_t src_mac[ETH_ALEN],
604 uint8_t dest_mac[ETH_ALEN],
605 int recv, // 0 = send, 1=recv
606 struct v3_vnet_header *header)
608 struct route_list *routes;
609 struct vnet_route_info *r;
610 struct v3_vnet_pkt p;
/* Build a minimal Ethernet header: dst, src, then a zeroed type field. */
615 memcpy(p.header,dest_mac,ETH_ALEN);
616 memcpy(p.header+ETH_ALEN,src_mac,ETH_ALEN);
617 memset(p.header+12,0,2);
619 p.src_type = LINK_EDGE;
622 memcpy(header->src_mac,src_mac,ETH_ALEN);
623 memcpy(header->dst_mac,dest_mac,ETH_ALEN);
/* Route lookup under the lock: consult the cache, fall back to a full match. */
626 flags = vnet_lock_irqsave(vnet_state.lock);
628 look_into_cache(&p,&routes);
631 routes = match_route(&p);
633 vnet_unlock_irqrestore(vnet_state.lock,flags);
634 PrintError("Cannot match route\n");
635 header->header_type=VNET_HEADER_NOMATCH;
636 header->header_len=0;
639 add_route_to_cache(&p,routes);
643 vnet_unlock_irqrestore(vnet_state.lock,flags);
645 if (routes->num_routes<1) {
646 PrintError("Less than one route\n");
647 header->header_type=VNET_HEADER_NOMATCH;
648 header->header_len=0;
652 if (routes->num_routes>1) {
653 PrintError("More than one route, building header for the first one only\n");
658 switch (r->route_def.dst_type) {
660 // switch based on the link type
661 // for mac-in-udp, we would want to generate a mac, ip, and udp header
662 // direct transmission
664 // for now we will say we have no encapsulation
666 header->header_type=VNET_HEADER_NONE;
667 header->header_len=0;
668 header->src_mac_qual=r->route_def.src_mac_qual;
669 header->dst_mac_qual=r->route_def.dst_mac_qual;
678 // direct transmission
679 // let's guess that it goes to the same interface...
680 header->header_type=VNET_HEADER_NONE;
681 header->header_len=0;
682 header->src_mac_qual=r->route_def.src_mac_qual;
683 header->dst_mac_qual=r->route_def.dst_mac_qual;
689 PrintError("Unknown destination type\n");
/* Main data path: route a packet and deliver it to every matched destination
 * (the bridge for LINK_EDGE routes, a device's input() for LINK_INTERFACE).
 * Route lookup (cache, then full match) happens under the global lock;
 * delivery happens after the lock is dropped. */
700 int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
701 struct route_list * matched_routes = NULL;
702 vnet_intr_flags_t flags;
705 int cpu = V3_Get_CPU();
707 Vnet_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
708 cpu, pkt->size, pkt->src_id,
709 pkt->src_type, pkt->dst_id, pkt->dst_type);
712 v3_hexdump(pkt->data, pkt->size, NULL, 0);
715 flags = vnet_lock_irqsave(vnet_state.lock);
/* rx stats counted under the lock... */
717 vnet_state.stats.rx_bytes += pkt->size;
718 vnet_state.stats.rx_pkts++;
720 look_into_cache(pkt, &matched_routes);
722 if (matched_routes == NULL) {
723 PrintDebug("VNET/P Core: sending pkt - matching route\n");
725 matched_routes = match_route(pkt);
727 if (matched_routes) {
728 add_route_to_cache(pkt, matched_routes);
730 PrintDebug("VNET/P Core: Could not find route for packet... discarding packet\n");
731 vnet_unlock_irqrestore(vnet_state.lock, flags);
732 return 0; /* do we return -1 here?*/
736 vnet_unlock_irqrestore(vnet_state.lock, flags);
738 PrintDebug("VNET/P Core: send pkt route matches %d\n", matched_routes->num_routes);
/* Deliver to each matched route. NOTE(review): matched_routes and the tx
 * stats below are touched outside the lock — a concurrent route/cache flush
 * or sender could race here; confirm the locking model. */
740 for (i = 0; i < matched_routes->num_routes; i++) {
741 struct vnet_route_info * route = matched_routes->routes[i];
743 if (route->route_def.dst_type == LINK_EDGE) {
744 struct vnet_brg_dev * bridge = vnet_state.bridge;
745 pkt->dst_type = LINK_EDGE;
746 pkt->dst_id = route->route_def.dst_id;
748 if (bridge == NULL) {
749 Vnet_Print(2, "VNET/P Core: No active bridge to sent data to\n");
753 if(bridge->brg_ops.input(bridge->vm, pkt, bridge->private_data) < 0){
754 Vnet_Print(2, "VNET/P Core: Packet not sent properly to bridge\n");
757 vnet_state.stats.tx_bytes += pkt->size;
758 vnet_state.stats.tx_pkts ++;
759 } else if (route->route_def.dst_type == LINK_INTERFACE) {
760 if (route->dst_dev == NULL){
761 Vnet_Print(2, "VNET/P Core: No active device to sent data to\n");
765 if(route->dst_dev->dev_ops.input(route->dst_dev->vm, pkt, route->dst_dev->private_data) < 0) {
766 Vnet_Print(2, "VNET/P Core: Packet not sent properly\n");
769 vnet_state.stats.tx_bytes += pkt->size;
770 vnet_state.stats.tx_pkts ++;
772 Vnet_Print(0, "VNET/P Core: Wrong dst type\n");
/* Register a new virtual NIC with VNET: copy the MAC and callbacks, clamp
 * the poll quote, and (if the MAC is not already registered) add it to the
 * device list and the pollable queue. Returns the new device id, or an
 * error when the MAC already exists (dev_id stays 0 in that case).
 * NOTE(review): on the duplicate-MAC path new_dev appears to leak unless it
 * is freed in the unseen lines after the "Already exists" message — confirm. */
780 int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac,
781 struct v3_vnet_dev_ops * ops, int quote, int poll_state,
783 struct vnet_dev * new_dev = NULL;
784 vnet_intr_flags_t flags;
786 new_dev = (struct vnet_dev *)Vnet_Malloc(sizeof(struct vnet_dev));
788 if (new_dev == NULL) {
789 Vnet_Print(0, "VNET/P Core: Unable to allocate a new device\n");
793 memcpy(new_dev->mac_addr, mac, ETH_ALEN);
794 new_dev->dev_ops.input = ops->input;
795 new_dev->dev_ops.poll = ops->poll;
796 new_dev->private_data = priv_data;
/* Cap the per-poll packet quote at the global maximum. */
799 new_dev->quote = quote<VNET_MAX_QUOTE ? quote : VNET_MAX_QUOTE;
800 new_dev->poll = poll_state;
802 flags = vnet_lock_irqsave(vnet_state.lock);
804 if (dev_by_mac(mac) == NULL) {
805 list_add(&(new_dev->node), &(vnet_state.devs));
806 new_dev->dev_id = ++ vnet_state.dev_idx;
807 vnet_state.num_devs ++;
810 v3_enqueue(vnet_state.poll_devs, (addr_t)new_dev);
813 PrintError("VNET/P: Device with the same MAC has already been added\n");
816 vnet_unlock_irqrestore(vnet_state.lock, flags);
818 /* if the device was found previously, the id should still be 0 */
819 if (new_dev->dev_id == 0) {
820 Vnet_Print(0, "VNET/P Core: Device Already exists\n");
824 PrintDebug("VNET/P Core: Add Device: dev_id %d\n", new_dev->dev_id);
826 return new_dev->dev_id;
/* Unregister a device by id: unlink it from the device list under the lock
 * and decrement the count (freeing presumably happens in unseen lines).
 * NOTE(review): del_routes_by_dev is commented out, so routes referencing
 * this device keep dangling dst_dev/src_dev pointers — confirm intent. */
830 int v3_vnet_del_dev(int dev_id){
831 struct vnet_dev * dev = NULL;
832 vnet_intr_flags_t flags;
834 flags = vnet_lock_irqsave(vnet_state.lock);
836 dev = dev_by_id(dev_id);
838 list_del(&(dev->node));
839 //del_routes_by_dev(dev_id);
840 vnet_state.num_devs --;
843 vnet_unlock_irqrestore(vnet_state.lock, flags);
847 PrintDebug("VNET/P Core: Removed Device: dev_id %d\n", dev_id);
/* Snapshot the global traffic counters into the caller's struct.
 * NOTE(review): reads are unlocked, so counters may be mutually inconsistent
 * under concurrent traffic — acceptable for statistics, but worth noting. */
853 int v3_vnet_stat(struct vnet_stat * stats){
854 stats->rx_bytes = vnet_state.stats.rx_bytes;
855 stats->rx_pkts = vnet_state.stats.rx_pkts;
856 stats->tx_bytes = vnet_state.stats.tx_bytes;
857 stats->tx_pkts = vnet_state.stats.tx_pkts;
/* Teardown helper: unlink (and presumably free, in unseen lines) every
 * registered device. Called only from v3_deinit_vnet. */
862 static void deinit_devices_list(){
863 struct vnet_dev * dev, * tmp;
865 list_for_each_entry_safe(dev, tmp, &(vnet_state.devs), node) {
866 list_del(&(dev->node));
/* Teardown helper: unlink every route from both the main list and the
 * match scratch list. Called only from v3_deinit_vnet. */
871 static void deinit_routes_list(){
872 struct vnet_route_info * route, * tmp;
874 list_for_each_entry_safe(route, tmp, &(vnet_state.routes), node) {
875 list_del(&(route->node));
876 list_del(&(route->match_node));
/* Install the (single) bridge. The slot is claimed under the lock with the
 * sentinel value (void *)1 so the allocation can happen unlocked; the real
 * pointer is then published atomically. Fails if a bridge is already set. */
881 int v3_vnet_add_bridge(struct v3_vm_info * vm,
882 struct v3_vnet_bridge_ops * ops,
885 vnet_intr_flags_t flags;
887 struct vnet_brg_dev * tmp_bridge = NULL;
889 flags = vnet_lock_irqsave(vnet_state.lock);
890 if (vnet_state.bridge == NULL) {
/* Reserve the slot with a non-NULL placeholder before dropping the lock. */
892 vnet_state.bridge = (void *)1;
894 vnet_unlock_irqrestore(vnet_state.lock, flags);
896 if (bridge_free == 0) {
897 PrintError("VNET/P Core: Bridge already set\n");
901 tmp_bridge = (struct vnet_brg_dev *)Vnet_Malloc(sizeof(struct vnet_brg_dev));
903 if (tmp_bridge == NULL) {
904 PrintError("VNET/P Core: Unable to allocate new bridge\n");
/* Roll back the placeholder so a later add can succeed. */
905 vnet_state.bridge = NULL;
910 tmp_bridge->brg_ops.input = ops->input;
911 tmp_bridge->brg_ops.poll = ops->poll;
912 tmp_bridge->private_data = priv_data;
913 tmp_bridge->type = type;
915 /* make this atomic to avoid possible race conditions */
916 flags = vnet_lock_irqsave(vnet_state.lock);
917 vnet_state.bridge = tmp_bridge;
918 vnet_unlock_irqrestore(vnet_state.lock, flags);
/* Remove the bridge, but only if its type matches: detach the pointer under
 * the lock, then free it after the lock is dropped. */
924 void v3_vnet_del_bridge(uint8_t type) {
925 vnet_intr_flags_t flags;
926 struct vnet_brg_dev * tmp_bridge = NULL;
928 flags = vnet_lock_irqsave(vnet_state.lock);
930 if (vnet_state.bridge != NULL && vnet_state.bridge->type == type) {
931 tmp_bridge = vnet_state.bridge;
932 vnet_state.bridge = NULL;
935 vnet_unlock_irqrestore(vnet_state.lock, flags);
938 Vnet_Free(tmp_bridge);
943 /* can be instantiated as multiple threads
944 * that run on multiple cores,
945 * or it could be running on a dedicated side core
/* TX-kick worker thread body. Repeatedly drains the shared pollable-device
 * queue, polls each device, and re-enqueues it; moving devices to a private
 * queue while polling guarantees no two worker threads poll the same device
 * concurrently. Optionally (VNET_ADAPTIVE_TX_KICK) sleeps after a run of
 * no-progress iterations. */
947 static int vnet_tx_flush(void * args){
948 struct vnet_dev * dev = NULL;
951 uint64_t noprogress_count;
953 Vnet_Print(0, "VNET/P Polling Thread Starting ....\n");
955 // since there are multiple instances of this thread, and only
956 // one queue of pollable devices, our model here will be to synchronize
957 // on that queue, removing devices as we go, and keeping them
958 // then putting them back on the queue when we are done
959 // in this way, multiple instances of this function will never
960 // be polling the same device at the same time
962 struct v3_queue * tq = v3_create_queue();
965 PrintError("VNET/P polling thread cannot allocate queue\n");
969 noprogress_count = 0;
971 while (!vnet_thread_should_stop()) {
973 more=0; // will indicate if any device has more work for us to do
975 while ((dev = (struct vnet_dev *)v3_dequeue(vnet_state.poll_devs))) {
976 // we are handling this device
977 v3_enqueue(tq,(addr_t)dev);
979 if (dev->poll && dev->dev_ops.poll) {
980 // The device's poll function MUST NOT BLOCK
981 rc = dev->dev_ops.poll(dev->vm, dev->quote, dev->private_data);
984 Vnet_Print(0, "VNET/P: poll from device %p error (ignoring) !\n", dev);
991 while ((dev = (struct vnet_dev *)v3_dequeue(tq))) {
992 // now someone else can handle it
993 v3_enqueue(vnet_state.poll_devs, (addr_t)dev);
/* Saturating increment: only bump the no-progress counter when
 * (count+1) does not wrap around to a smaller value. */
1000 if ( ! ((noprogress_count+1) < noprogress_count)) {
1006 if ((!VNET_ADAPTIVE_TX_KICK) || (noprogress_count < VNET_NOPROGRESS_LIMIT)) {
1009 V3_Yield_Timed(VNET_YIELD_USEC);
1016 Vnet_Print(0, "VNET/P Polling Thread Done.\n");
/* Body of VNET initialization (signature not visible in this chunk):
 * zero the global state, init lists/lock/route-cache/poll queue, and spawn
 * the TX-kick worker threads. */
1025 memset(&vnet_state, 0, sizeof(vnet_state));
1027 INIT_LIST_HEAD(&(vnet_state.routes));
1028 INIT_LIST_HEAD(&(vnet_state.devs));
1030 vnet_state.num_devs = 0;
1031 vnet_state.num_routes = 0;
1033 if (vnet_lock_init(&(vnet_state.lock)) == -1){
1034 PrintError("VNET/P: Fails to initiate lock\n");
1037 vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
1038 if (vnet_state.route_cache == NULL) {
1039 PrintError("VNET/P: Fails to initiate route cache\n");
/* NOTE(review): v3_create_queue's return value is not checked here,
 * unlike the lock and route cache above — confirm in the full file. */
1043 vnet_state.poll_devs = v3_create_queue();
1045 for (i=0; i<VNET_NUM_TX_KICK_THREADS;i++) {
1047 snprintf(name,32,"vnetd-%d",i);
1048 vnet_state.pkt_flush_thread[i] = vnet_start_thread(vnet_tx_flush, NULL, name);
1051 PrintDebug("VNET/P is initiated (%d tx kick threads active)\n",VNET_NUM_TX_KICK_THREADS);
/* Tear down VNET: stop the worker threads, free the poll queue, devices,
 * routes, route cache, bridge, and finally the lock.
 * NOTE(review): Vnet_Free(vnet_state.poll_devs) appears TWICE (after
 * v3_deinit_queue and again after the thread-stop loop) — that looks like a
 * double free; confirm against the full file and remove one. */
1057 void v3_deinit_vnet()
1061 v3_deinit_queue(vnet_state.poll_devs);
1062 Vnet_Free(vnet_state.poll_devs);
1064 for (i=0; i<VNET_NUM_TX_KICK_THREADS;i++) {
1065 PrintDebug("Stopping tx kick thread %d\n",i);
1066 // This will pause until the flush thread is gone
1067 vnet_thread_stop(vnet_state.pkt_flush_thread[i]);
1070 // At this point there should be no lock-holder
/* NOTE(review): second free of poll_devs — see function header note. */
1072 Vnet_Free(vnet_state.poll_devs);
1075 PrintDebug("Deiniting Device List\n");
1076 // close any devices we have open
1077 deinit_devices_list();
1079 PrintDebug("Deiniting Route List\n");
1080 // remove any routes we have
1081 deinit_routes_list();
1083 PrintDebug("Freeing hash table\n");
1084 // remove the hash table
1085 vnet_free_htable(vnet_state.route_cache, 1, 1);
1088 PrintDebug("Removing Bridge\n");
1089 // remove bridge if it was added
1090 if (vnet_state.bridge) {
1091 Vnet_Free(vnet_state.bridge);
1094 PrintDebug("Deleting lock\n");
1095 // eliminate the lock
1096 vnet_lock_deinit(&(vnet_state.lock));