2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2010, Lei Xia <lxia@northwestern.edu>
11 * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>
12 * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org>
15 * Author: Lei Xia <lxia@northwestern.edu>
16 * Yuan Tang <ytang@northwestern.edu>
18 * This is free software. You are permitted to use,
19 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
22 #include <vnet/vnet.h>
23 #include <vnet/vnet_hashtable.h>
24 #include <vnet/vnet_host.h>
25 #include <vnet/vnet_vmm.h>
27 #include <palacios/vmm_queue.h>
29 #ifndef V3_CONFIG_DEBUG_VNET
31 #define PrintDebug(fmt, args...)
34 #define VNET_NOPROGRESS_LIMIT 1000
35 #define VNET_YIELD_USEC 1000
/* Fragment of the Ethernet header: destination MAC, source MAC, ethertype.
 * Packed so it can be overlaid directly on raw packet bytes. */
40 uint8_t dst_mac[ETH_ALEN];
41 uint8_t src_mac[ETH_ALEN];
42 uint16_t type; /* indicates layer 3 protocol type */
43 } __attribute__((packed));
/* Fragment of struct vnet_dev: one registered virtual NIC frontend.
 * mac_addr identifies the device; dev_ops holds its input/poll callbacks. */
48 uint8_t mac_addr[ETH_ALEN];
49 struct v3_vm_info * vm;
50 struct v3_vnet_dev_ops dev_ops;
/* Max packets a device may deliver per poll round (see v3_vnet_add_dev). */
54 #define VNET_MAX_QUOTE 64
59 struct list_head node;
60 } __attribute__((packed));
/* Fragment of struct vnet_brg_dev: the (single) external bridge endpoint. */
64 struct v3_vm_info * vm;
65 struct v3_vnet_bridge_ops brg_ops;
70 } __attribute__((packed));
/* One routing-table entry: the user-visible route definition plus resolved
 * device pointers and list linkage. */
74 struct vnet_route_info {
75 struct v3_vnet_route route_def;
77 struct vnet_dev * dst_dev;
78 struct vnet_dev * src_dev;
82 struct list_head node;
/* NOTE(review): match_node is reused transiently by match_route() to build
 * per-packet match lists — see the warning comment in match_route(). */
83 struct list_head match_node; // used for route matching
/* Cached route-lookup result: hash key followed by a variable-length array
 * of matching routes.
 * NOTE(review): routes[0] is the pre-C99 zero-length-array idiom; a C99
 * flexible array member (routes[]) would be the modern form — confirm the
 * supported compilers before changing. */
88 uint8_t hash_buf[VNET_HASH_SIZE];
91 struct vnet_route_info * routes[0];
92 } __attribute__((packed));
/* Fragment of the global vnet_state: route list, device list, bridge,
 * statistics, pollable-device queue, flush thread, and the route cache. */
97 struct v3_vnet_pkt pkt;
104 struct list_head routes;
105 struct list_head devs;
114 struct vnet_brg_dev * bridge;
117 struct vnet_stat stats;
119 /* device queue that are waiting to be polled */
120 struct v3_queue * poll_devs;
122 struct vnet_thread * pkt_flush_thread;
124 struct hashtable * route_cache;
128 #ifdef V3_CONFIG_DEBUG_VNET
/* Format a 6-byte MAC address as a human-readable string into buf.
 * Debug-build only (guarded by V3_CONFIG_DEBUG_VNET).
 * NOTE(review): "%2x" space-pads single-digit octets; "%02x" (zero-pad) is
 * almost certainly intended for MAC formatting.
 * NOTE(review): the hardcoded size 100 does not match the callers, which
 * pass 32-byte buffers (see match_route's dst_str/src_str) — the actual
 * output (18 bytes incl. NUL) fits, but the bound lies about the buffer.
 * The size should be passed in or set to the real buffer size. */
129 static inline void mac2str(uint8_t * mac, char * buf) {
130 snprintf(buf, 100, "%2x:%2x:%2x:%2x:%2x:%2x",
131 mac[0], mac[1], mac[2],
132 mac[3], mac[4], mac[5]);
/* Dump one route definition (src/dst MAC, qualifiers, link ids/types) via
 * PrintDebug. Debug-build only.
 * NOTE(review): the local buffer 'str' is declared on a line elided from
 * this view; it must be at least 18 bytes for mac2str's output. */
135 static void print_route(struct v3_vnet_route * route){
138 mac2str(route->src_mac, str);
139 PrintDebug("Src Mac (%s), src_qual (%d)\n",
140 str, route->src_mac_qual);
141 mac2str(route->dst_mac, str);
142 PrintDebug("Dst Mac (%s), dst_qual (%d)\n",
143 str, route->dst_mac_qual);
144 PrintDebug("Src dev id (%d), src type (%d)",
147 PrintDebug("Dst dev id (%d), dst type (%d)\n",
/* Walk the global route list and print every entry. Debug-build only.
 * NOTE(review): iterates vnet_state.routes without taking vnet_state.lock —
 * presumably callers hold it or tolerate a racy read; confirm at call sites. */
152 static void dump_routes(){
153 struct vnet_route_info *route;
155 PrintDebug("\n========Dump routes starts ============\n");
156 list_for_each_entry(route, &(vnet_state.routes), node) {
157 PrintDebug("\nroute %d:\n", route->idx);
159 print_route(&(route->route_def));
/* For interface destinations, also show the resolved device binding. */
160 if (route->route_def.dst_type == LINK_INTERFACE) {
161 PrintDebug("dst_dev (%p), dst_dev_id (%d), dst_dev_ops(%p), dst_dev_data (%p)\n",
163 route->dst_dev->dev_id,
164 (void *)&(route->dst_dev->dev_ops),
165 route->dst_dev->private_data);
169 PrintDebug("\n========Dump routes end ============\n");
176 * A VNET packet is a packed struct with the hashed fields grouped together.
177 * This means we can generate the hash from an offset into the pkt struct
/* Hash function for the route cache: hashes the first VNET_HASH_SIZE bytes
 * at hdr_ptr (the packed header fields of a v3_vnet_pkt — see the comment
 * above about field grouping). */
179 static inline uint_t hash_fn(addr_t hdr_ptr) {
180 uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
182 return vnet_hash_buffer(hdr_buf, VNET_HASH_SIZE);
/* Key-equality callback for the route cache: byte-compare two
 * VNET_HASH_SIZE-byte header keys; returns nonzero when equal. */
185 static inline int hash_eq(addr_t key1, addr_t key2) {
186 return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
/* Insert a matched route list into the route cache, keyed by the packet's
 * hash_buf (copied into the route_list so the key outlives the packet).
 * Returns an error on insertion failure (error path partly elided here).
 * Caller is expected to hold vnet_state.lock. */
189 static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
190 memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);
192 if (vnet_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
193 PrintError("VNET/P Core: Failed to insert new route entry to the cache\n");
/* Invalidate the entire route cache: free the table (and its values — the
 * two '1' flags free keys and values) and create a fresh empty one.
 * NOTE(review): if vnet_create_htable fails, route_cache becomes NULL;
 * the failure check may be on an elided line — verify. */
200 static int clear_hash_cache() {
201 vnet_free_htable(vnet_state.route_cache, 1, 1);
202 vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
/* Look up a packet's cached route list by its header hash. On miss, *routes
 * is set to NULL (vnet_htable_search returns 0). Caller should hold
 * vnet_state.lock. */
207 static int look_into_cache(const struct v3_vnet_pkt * pkt,
208 struct route_list ** routes) {
209 *routes = (struct route_list *)vnet_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
/* Linear-scan the device list for the device with the given id.
 * Returns NULL if not found (return path elided from this view).
 * NOTE(review): no locking here — callers appear responsible for holding
 * vnet_state.lock (v3_vnet_del_dev does; v3_vnet_add_route does not). */
215 static struct vnet_dev * dev_by_id(int idx) {
216 struct vnet_dev * dev = NULL;
218 list_for_each_entry(dev, &(vnet_state.devs), node) {
219 if (dev->dev_id == idx) {
/* Linear-scan the device list for the device with the given MAC address.
 * compare_ethaddr returning 0 means "equal". Returns NULL if not found.
 * Same locking caveat as dev_by_id: callers must serialize. */
227 static struct vnet_dev * dev_by_mac(uint8_t * mac) {
228 struct vnet_dev * dev = NULL;
230 list_for_each_entry(dev, &(vnet_state.devs), node) {
231 if (!compare_ethaddr(dev->mac_addr, mac)){
/* Public API: resolve a MAC address to a device id.
 * Return value construction is elided here; presumably dev->dev_id on hit
 * and a negative value on miss — confirm against the header. */
240 int v3_vnet_find_dev(uint8_t * mac) {
241 struct vnet_dev * dev = NULL;
243 dev = dev_by_mac(mac);
/* Public API: copy the caller's route definition into a newly allocated
 * vnet_route_info, resolve interface endpoints to device pointers, and
 * link it into the global route list under the lock.
 * Returns the new route's index (>0) on success. */
253 int v3_vnet_add_route(struct v3_vnet_route route) {
254 struct vnet_route_info * new_route = NULL;
255 vnet_intr_flags_t flags;
257 new_route = (struct vnet_route_info *)Vnet_Malloc(sizeof(struct vnet_route_info));
260 PrintError("Cannot allocate new route\n");
264 memset(new_route, 0, sizeof(struct vnet_route_info));
266 #ifdef V3_CONFIG_DEBUG_VNET
267 PrintDebug("VNET/P Core: add_route_entry:\n");
/* Field-by-field copy of the by-value route definition. */
271 memcpy(new_route->route_def.src_mac, route.src_mac, ETH_ALEN);
272 memcpy(new_route->route_def.dst_mac, route.dst_mac, ETH_ALEN);
273 new_route->route_def.src_mac_qual = route.src_mac_qual;
274 new_route->route_def.dst_mac_qual = route.dst_mac_qual;
275 new_route->route_def.dst_type = route.dst_type;
276 new_route->route_def.src_type = route.src_type;
277 new_route->route_def.src_id = route.src_id;
278 new_route->route_def.dst_id = route.dst_id;
/* NOTE(review): these dev_by_id lookups walk vnet_state.devs WITHOUT the
 * lock (taken only below) — racy against concurrent add/del of devices.
 * Also, a NULL result (unknown device) is not rejected here; the route is
 * still inserted with dst_dev/src_dev == NULL. Verify downstream guards. */
280 if (new_route->route_def.dst_type == LINK_INTERFACE) {
281 new_route->dst_dev = dev_by_id(new_route->route_def.dst_id);
284 if (new_route->route_def.src_type == LINK_INTERFACE) {
285 new_route->src_dev = dev_by_id(new_route->route_def.src_id);
289 flags = vnet_lock_irqsave(vnet_state.lock);
291 list_add(&(new_route->node), &(vnet_state.routes));
292 new_route->idx = ++ vnet_state.route_idx;
293 vnet_state.num_routes ++;
295 vnet_unlock_irqrestore(vnet_state.lock, flags);
/* Presumably clear_hash_cache() is called on an elided line here, since the
 * new route invalidates cached lookups — confirm. */
299 #ifdef V3_CONFIG_DEBUG_VNET
303 return new_route->idx;
/* Public API: remove the route with the given index from the route list.
 * NOTE(review): list_del() inside plain list_for_each_entry (not the _safe
 * variant) is only valid if iteration stops immediately after the delete;
 * the 'break' (and the Vnet_Free of the entry) are presumably on lines
 * elided from this view — confirm. */
307 void v3_vnet_del_route(uint32_t route_idx){
308 struct vnet_route_info * route = NULL;
309 vnet_intr_flags_t flags;
311 flags = vnet_lock_irqsave(vnet_state.lock);
313 list_for_each_entry(route, &(vnet_state.routes), node) {
314 Vnet_Print(0, "v3_vnet_del_route, route idx: %d\n", route->idx);
315 if(route->idx == route_idx){
316 list_del(&(route->node));
322 vnet_unlock_irqrestore(vnet_state.lock, flags);
325 #ifdef V3_CONFIG_DEBUG_VNET
331 /* delete all route entries with specfied src or dst device id */
/* Uses the _safe iterator correctly, since entries are unlinked during the
 * walk. Removes the entry from both the main list and any stale match list
 * linkage. The Vnet_Free of each removed entry is presumably on an elided
 * line inside the if-block — confirm. */
332 static void inline del_routes_by_dev(int dev_id){
333 struct vnet_route_info * route, *tmp_route;
334 vnet_intr_flags_t flags;
336 flags = vnet_lock_irqsave(vnet_state.lock);
338 list_for_each_entry_safe(route, tmp_route, &(vnet_state.routes), node) {
339 if((route->route_def.dst_type == LINK_INTERFACE &&
340 route->route_def.dst_id == dev_id) ||
341 (route->route_def.src_type == LINK_INTERFACE &&
342 route->route_def.src_id == dev_id)){
344 list_del(&(route->node));
345 list_del(&(route->match_node));
350 vnet_unlock_irqrestore(vnet_state.lock, flags);
354 // Match classes, must be in order
355 #define NUM_MATCH_CLASSES 4
356 #define NUM_MATCH_CLASSES_BOUND 3
/* Classify how test_mac relates to a route's MAC spec under the given
 * qualifier. Returns one of the match classes (NONE/NOT/ANY/DIRECT — the
 * enum itself is elided from this view) used to index priority_map.
 * Several case labels and return statements are elided; only the memcmp
 * comparisons and the unknown-qualifier error path are visible. */
363 static inline uint8_t match_mac(uint8_t test_mac[ETH_ALEN],
364 uint8_t route_mac[ETH_ALEN],
367 switch (route_qual) {
378 if (memcmp(test_mac,route_mac,ETH_ALEN)) {
385 if (memcmp(test_mac,route_mac,ETH_ALEN)) {
392 PrintError("Unknown qualifier %u\n",route_qual);
/* Debug helpers: map a MAC qualifier / match class to its name for log
 * output. Both are chained conditional expressions; the trailing fallback
 * arms ("UNKNOWN" or similar) are on elided lines. */
399 #define QUAL_TO_STR(q) ( \
400 (q)==MAC_NOSET ? "MAC_NOSET" : \
401 (q)==MAC_NONE ? "MAC_NONE" : \
402 (q)==MAC_ANY ? "MAC_ANY" : \
403 (q)==MAC_NOT ? "MAC_NOT" : \
404 (q)==MAC_ADDR ? "MAC_ADDR" : \
408 #define MATCH_CLASS_TO_STR(c) ( \
409 (c)==NONE ? "NONE" : \
412 (c)==DIRECT ? "DIRECT" : \
420 Original priority behavior...
422 priority src srcqual dst dstqual
437 Current priority order is given in the following table
/* Route-match priority table, indexed [src match class][dst match class].
 * -1 means "no match"; larger values win. Uses designated initializers with
 * GNU range syntax ([0 ... N]) — several rows/cells are elided from this
 * view. DIRECT/DIRECT (exact src and dst) is the highest visible priority. */
440 // [src][dst] => priority
441 static int priority_map[NUM_MATCH_CLASSES][NUM_MATCH_CLASSES] =
443 [NONE] = { [ 0 ... NUM_MATCH_CLASSES_BOUND ] = -1}, // ignore if it's not a source match
444 [NOT][NONE] = -1, // ignore it if there is no destination match
448 [ANY][NONE] = -1, // ignore if there is no destination match
452 [DIRECT][NONE] = -1, // ignore if there is no destination match
455 [DIRECT][DIRECT] = 8,
/* Combine the src-side and dst-side match classes of a packet against one
 * route into a single priority via priority_map. Returns -1 for no match. */
461 static inline int match_priority(uint8_t src_mac[ETH_ALEN],
462 uint8_t dst_mac[ETH_ALEN],
463 uint8_t route_src_mac[ETH_ALEN],
464 uint8_t route_src_qual,
465 uint8_t route_dst_mac[ETH_ALEN],
466 uint8_t route_dst_qual)
470 return priority_map[match_mac(src_mac,route_src_mac,route_src_qual)][match_mac(dst_mac,route_dst_mac,route_dst_qual)];
475 Route matching will return the list of the highest priority routes that
476 match. It's a list because it's possible to have multiple high priority routes
/* Find all routes matching a packet at the highest priority.
 * Walks the whole route table computing a priority per route; keeps a
 * transient list (threaded through route->match_node) of all routes tied
 * at the current maximum, then copies them into a freshly allocated
 * route_list for the caller (who owns/frees it, typically via the cache).
 * Returns NULL when nothing matches or allocation fails.
 * Caller must hold vnet_state.lock (see v3_vnet_send_pkt). */
478 static struct route_list * match_route(const struct v3_vnet_pkt * pkt)
481 struct vnet_route_info * route = NULL;
482 struct route_list * matches = NULL;
484 int max_priority = -1;
485 struct list_head match_list;
486 struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
/* NOTE(review): as the original comment below says, reusing match_node
 * inside the shared route entries means two concurrent match_route calls
 * would corrupt each other's lists — the global lock is load-bearing. */
490 // NOTE: USING THE MATCH_NODE in the route list to record a match list
491 // IS A DISASTER WAITING TO HAPPEN
494 #ifdef V3_CONFIG_DEBUG_VNET
496 char dst_str[32], src_str[32];
497 mac2str(hdr->src_mac, src_str);
498 mac2str(hdr->dst_mac, dst_str);
499 PrintDebug("VNET/P Core: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
503 INIT_LIST_HEAD(&match_list);
506 list_for_each_entry(route, &(vnet_state.routes), node) {
508 struct v3_vnet_route * route_def = &(route->route_def);
512 priority = match_priority(hdr->src_mac,
515 route_def->src_mac_qual,
517 route_def->dst_mac_qual);
521 #ifdef V3_CONFIG_DEBUG_VNET
526 mac2str(route_def->src_mac, src_str);
527 mac2str(route_def->dst_mac, dst_str);
529 PrintDebug("Tested match against SRC(%s) SRC_QUAL(%s), DEST(%s) DST_QUAL(%s): "
530 "SRC_MATCH=%s DEST_MATCH=%s PRIORITY=%d\n",
531 src_str, QUAL_TO_STR(route_def->src_mac_qual),
532 dst_str, QUAL_TO_STR(route_def->dst_mac_qual),
533 MATCH_CLASS_TO_STR(match_mac(hdr->src_mac,route_def->src_mac,route_def->src_mac_qual)),
534 MATCH_CLASS_TO_STR(match_mac(hdr->dst_mac,route_def->dst_mac,route_def->dst_mac_qual)),
540 PrintDebug("No match to this rule\n");
/* A strictly higher priority resets the tie list to just this route. */
544 if (priority > max_priority) {
545 PrintDebug("New highest priority match, reseting list\n");
546 max_priority = priority;
548 struct vnet_route_info *my_route, *tmp_route;
550 list_for_each_entry_safe(my_route, tmp_route, &match_list,match_node) {
551 list_del(&(my_route->match_node));
554 list_add(&(route->match_node), &match_list);
/* Equal priority joins the tie list. */
557 } else if (priority == max_priority) {
558 PrintDebug("Equal priority match, adding to list\n");
560 list_add(&(route->match_node), &match_list);
566 PrintDebug("VNET/P Core: match_route: Matches=%d\n", num_matches);
568 if (num_matches <= 0) {
/* route_list carries a variable-length tail of route pointers. */
572 matches = (struct route_list *)Vnet_Malloc(sizeof(struct route_list) +
573 (sizeof(struct vnet_route_info *) * num_matches));
577 PrintError("VNET/P Core: Unable to allocate matches\n");
581 matches->num_routes = num_matches;
/* Defensive copy with an overrun guard; 'i' is declared on an elided line. */
584 list_for_each_entry(route, &match_list, match_node) {
585 if (i==num_matches) {
586 // the list should never have more than num_matches on it...
587 PrintError("Weird list behavior\n");
590 matches->routes[i++] = route;
/* Public API: given a (src,dst) MAC pair, determine what encapsulation
 * header (if any) a packet between them would need, by running a synthetic
 * packet through the routing machinery. Fills in *header; on no-match sets
 * header_type=VNET_HEADER_NOMATCH.
 * NOTE(review): uses magic constant 6 instead of ETH_ALEN throughout the
 * memcpys — same value, but inconsistent with the rest of the file.
 * NOTE(review): 'recv' is accepted but not used in the visible lines —
 * confirm whether it matters on elided lines. */
598 int v3_vnet_query_header(uint8_t src_mac[ETH_ALEN],
599 uint8_t dest_mac[ETH_ALEN],
600 int recv, // 0 = send, 1=recv
601 struct v3_vnet_header *header)
603 struct route_list *routes;
604 struct vnet_route_info *r;
605 struct v3_vnet_pkt p;
/* Build a minimal Ethernet header: dst, src, zeroed ethertype. */
610 memcpy(p.header,dest_mac,6);
611 memcpy(p.header+6,src_mac,6);
612 memset(p.header+12,0,2);
614 p.src_type = LINK_EDGE;
617 memcpy(header->src_mac,src_mac,6);
618 memcpy(header->dst_mac,dest_mac,6);
/* Same lookup pattern as v3_vnet_send_pkt: cache first, then full match,
 * all under the global lock. */
621 flags = vnet_lock_irqsave(vnet_state.lock);
623 look_into_cache(&p,&routes);
626 routes = match_route(&p);
628 vnet_unlock_irqrestore(vnet_state.lock,flags);
629 PrintError("Cannot match route\n");
630 header->header_type=VNET_HEADER_NOMATCH;
631 header->header_len=0;
634 add_route_to_cache(&p,routes);
638 vnet_unlock_irqrestore(vnet_state.lock,flags);
640 if (routes->num_routes<1) {
641 PrintError("Less than one route\n");
642 header->header_type=VNET_HEADER_NOMATCH;
643 header->header_len=0;
/* Multiple equal-priority routes: only the first is used for the header. */
647 if (routes->num_routes>1) {
648 PrintError("More than one route, building header for the first one only\n");
653 switch (r->route_def.dst_type) {
655 // switch based on the link type
656 // for mac-in-udp, we would want to generate a mac, ip, and udp header
657 // direct transmission
659 // for now we will say we have no encapsulation
661 header->header_type=VNET_HEADER_NONE;
662 header->header_len=0;
663 header->src_mac_qual=r->route_def.src_mac_qual;
664 header->dst_mac_qual=r->route_def.dst_mac_qual;
673 // direct transmission
674 // let's guess that it goes to the same interface...
675 header->header_type=VNET_HEADER_NONE;
676 header->header_len=0;
677 header->src_mac_qual=r->route_def.src_mac_qual;
678 header->dst_mac_qual=r->route_def.dst_mac_qual;
684 PrintError("Unknown destination type\n");
/* Public API: route one packet. Looks up (or computes and caches) the
 * matching route list under the lock, then — with the lock released —
 * delivers the packet to each matched destination: the edge bridge for
 * LINK_EDGE routes, or the destination device's input() callback for
 * LINK_INTERFACE routes. Updates rx/tx statistics.
 * NOTE(review): stats and pkt->dst_* fields are written while iterating
 * without the lock; presumably tolerated — confirm intended semantics. */
695 int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
696 struct route_list * matched_routes = NULL;
697 vnet_intr_flags_t flags;
700 int cpu = V3_Get_CPU();
702 Vnet_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
703 cpu, pkt->size, pkt->src_id,
704 pkt->src_type, pkt->dst_id, pkt->dst_type);
707 v3_hexdump(pkt->data, pkt->size, NULL, 0);
710 flags = vnet_lock_irqsave(vnet_state.lock);
712 vnet_state.stats.rx_bytes += pkt->size;
713 vnet_state.stats.rx_pkts++;
/* Fast path: cached route list keyed by the packet's header hash. */
715 look_into_cache(pkt, &matched_routes);
717 if (matched_routes == NULL) {
718 PrintDebug("VNET/P Core: sending pkt - matching route\n");
720 matched_routes = match_route(pkt);
722 if (matched_routes) {
723 add_route_to_cache(pkt, matched_routes);
725 PrintDebug("VNET/P Core: Could not find route for packet... discarding packet\n");
726 vnet_unlock_irqrestore(vnet_state.lock, flags);
727 return 0; /* do we return -1 here?*/
731 vnet_unlock_irqrestore(vnet_state.lock, flags);
733 PrintDebug("VNET/P Core: send pkt route matches %d\n", matched_routes->num_routes);
735 for (i = 0; i < matched_routes->num_routes; i++) {
736 struct vnet_route_info * route = matched_routes->routes[i];
738 if (route->route_def.dst_type == LINK_EDGE) {
739 struct vnet_brg_dev * bridge = vnet_state.bridge;
740 pkt->dst_type = LINK_EDGE;
741 pkt->dst_id = route->route_def.dst_id;
743 if (bridge == NULL) {
744 Vnet_Print(2, "VNET/P Core: No active bridge to sent data to\n");
748 if(bridge->brg_ops.input(bridge->vm, pkt, bridge->private_data) < 0){
749 Vnet_Print(2, "VNET/P Core: Packet not sent properly to bridge\n");
752 vnet_state.stats.tx_bytes += pkt->size;
753 vnet_state.stats.tx_pkts ++;
754 } else if (route->route_def.dst_type == LINK_INTERFACE) {
755 if (route->dst_dev == NULL){
756 Vnet_Print(2, "VNET/P Core: No active device to sent data to\n");
760 if(route->dst_dev->dev_ops.input(route->dst_dev->vm, pkt, route->dst_dev->private_data) < 0) {
761 Vnet_Print(2, "VNET/P Core: Packet not sent properly\n");
764 vnet_state.stats.tx_bytes += pkt->size;
765 vnet_state.stats.tx_pkts ++;
767 Vnet_Print(0, "VNET/P Core: Wrong dst type\n");
/* Public API: register a new virtual NIC with VNET. Allocates a vnet_dev,
 * copies the MAC and ops, caps the poll quote at VNET_MAX_QUOTE, and — if
 * no device with this MAC already exists — links it into the device list
 * and the pollable-device queue under the lock.
 * Returns the new device id (>0), or an error when the MAC is a duplicate.
 * NOTE(review): memcpy uses magic 6 rather than ETH_ALEN — same value but
 * inconsistent with the rest of the file. */
775 int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac,
776 struct v3_vnet_dev_ops * ops, int quote, int poll_state,
778 struct vnet_dev * new_dev = NULL;
779 vnet_intr_flags_t flags;
781 new_dev = (struct vnet_dev *)Vnet_Malloc(sizeof(struct vnet_dev));
783 if (new_dev == NULL) {
784 Vnet_Print(0, "VNET/P Core: Unable to allocate a new device\n");
788 memcpy(new_dev->mac_addr, mac, 6);
789 new_dev->dev_ops.input = ops->input;
790 new_dev->dev_ops.poll = ops->poll;
791 new_dev->private_data = priv_data;
794 new_dev->quote = quote<VNET_MAX_QUOTE ? quote : VNET_MAX_QUOTE;
795 new_dev->poll = poll_state;
797 flags = vnet_lock_irqsave(vnet_state.lock);
799 if (dev_by_mac(mac) == NULL) {
800 list_add(&(new_dev->node), &(vnet_state.devs));
801 new_dev->dev_id = ++ vnet_state.dev_idx;
802 vnet_state.num_devs ++;
805 v3_enqueue(vnet_state.poll_devs, (addr_t)new_dev);
808 PrintError("VNET/P: Device with the same MAC has already been added\n");
811 vnet_unlock_irqrestore(vnet_state.lock, flags);
/* dev_id stays 0 (from the calloc-like init path) on duplicate MAC;
 * the error return and Vnet_Free of new_dev are presumably on elided
 * lines in this branch — confirm, otherwise new_dev leaks. */
813 /* if the device was found previosly the id should still be 0 */
814 if (new_dev->dev_id == 0) {
815 Vnet_Print(0, "VNET/P Core: Device Already exists\n");
819 PrintDebug("VNET/P Core: Add Device: dev_id %d\n", new_dev->dev_id);
821 return new_dev->dev_id;
/* Public API: unregister a device by id. Unlinks it from the device list
 * under the lock; the NULL-check on dev and the Vnet_Free are presumably
 * on elided lines — confirm.
 * NOTE(review): the del_routes_by_dev call is commented out, so routes
 * referencing this device keep a dangling dst_dev/src_dev pointer after
 * deletion — verify this is handled elsewhere. */
825 int v3_vnet_del_dev(int dev_id){
826 struct vnet_dev * dev = NULL;
827 vnet_intr_flags_t flags;
829 flags = vnet_lock_irqsave(vnet_state.lock);
831 dev = dev_by_id(dev_id);
833 list_del(&(dev->node));
834 //del_routes_by_dev(dev_id);
835 vnet_state.num_devs --;
838 vnet_unlock_irqrestore(vnet_state.lock, flags);
842 PrintDebug("VNET/P Core: Removed Device: dev_id %d\n", dev_id);
/* Public API: snapshot the global traffic counters into *stats.
 * NOTE(review): reads are unlocked, so the four fields may be mutually
 * inconsistent under concurrent traffic — presumably acceptable for
 * statistics; confirm. */
848 int v3_vnet_stat(struct vnet_stat * stats){
849 stats->rx_bytes = vnet_state.stats.rx_bytes;
850 stats->rx_pkts = vnet_state.stats.rx_pkts;
851 stats->tx_bytes = vnet_state.stats.tx_bytes;
852 stats->tx_pkts = vnet_state.stats.tx_pkts;
/* Teardown helper: unlink (and, presumably on an elided line, free) every
 * registered device. Called from v3_deinit_vnet after the flush thread has
 * stopped, so no locking is visible here. */
857 static void deinit_devices_list(){
858 struct vnet_dev * dev, * tmp;
860 list_for_each_entry_safe(dev, tmp, &(vnet_state.devs), node) {
861 list_del(&(dev->node));
/* Teardown helper: unlink every route from both the main list and any
 * residual match-list linkage (and presumably free each entry on an elided
 * line). Called from v3_deinit_vnet. */
866 static void deinit_routes_list(){
867 struct vnet_route_info * route, * tmp;
869 list_for_each_entry_safe(route, tmp, &(vnet_state.routes), node) {
870 list_del(&(route->node));
871 list_del(&(route->match_node));
/* Public API: attach the single external bridge endpoint.
 * Uses a two-phase reservation: under the lock, the bridge slot is claimed
 * with the sentinel (void *)1 so concurrent callers fail fast; the real
 * bridge struct is then allocated and filled outside the lock and swapped
 * in atomically at the end.
 * NOTE(review): while the sentinel (void *)1 is installed, any reader that
 * dereferences vnet_state.bridge (e.g. send path checking only for NULL)
 * would crash — verify readers are excluded during this window. */
876 int v3_vnet_add_bridge(struct v3_vm_info * vm,
877 struct v3_vnet_bridge_ops * ops,
880 vnet_intr_flags_t flags;
882 struct vnet_brg_dev * tmp_bridge = NULL;
884 flags = vnet_lock_irqsave(vnet_state.lock);
885 if (vnet_state.bridge == NULL) {
887 vnet_state.bridge = (void *)1;
889 vnet_unlock_irqrestore(vnet_state.lock, flags);
891 if (bridge_free == 0) {
892 PrintError("VNET/P Core: Bridge already set\n");
896 tmp_bridge = (struct vnet_brg_dev *)Vnet_Malloc(sizeof(struct vnet_brg_dev));
898 if (tmp_bridge == NULL) {
899 PrintError("VNET/P Core: Unable to allocate new bridge\n");
/* Roll back the sentinel reservation on allocation failure. */
900 vnet_state.bridge = NULL;
905 tmp_bridge->brg_ops.input = ops->input;
906 tmp_bridge->brg_ops.poll = ops->poll;
907 tmp_bridge->private_data = priv_data;
908 tmp_bridge->type = type;
910 /* make this atomic to avoid possible race conditions */
911 flags = vnet_lock_irqsave(vnet_state.lock);
912 vnet_state.bridge = tmp_bridge;
913 vnet_unlock_irqrestore(vnet_state.lock, flags);
/* Public API: detach the bridge, but only if its type matches. The pointer
 * is cleared under the lock and freed outside it, so the lock is never
 * held across Vnet_Free. */
919 void v3_vnet_del_bridge(uint8_t type) {
920 vnet_intr_flags_t flags;
921 struct vnet_brg_dev * tmp_bridge = NULL;
923 flags = vnet_lock_irqsave(vnet_state.lock);
925 if (vnet_state.bridge != NULL && vnet_state.bridge->type == type) {
926 tmp_bridge = vnet_state.bridge;
927 vnet_state.bridge = NULL;
930 vnet_unlock_irqrestore(vnet_state.lock, flags);
933 Vnet_Free(tmp_bridge);
938 /* can be instanieoued to multiple threads
939 * that runs on multiple cores
940 * or it could be running on a dedicated side core
/* Polling thread body. Repeatedly drains the shared poll_devs queue onto a
 * private queue (so concurrent instances never poll the same device),
 * polls each device, then returns the devices to the shared queue.
 * Yields after VNET_NOPROGRESS_LIMIT consecutive idle rounds to avoid
 * burning a core. Runs until vnet_thread_should_stop(). */
942 static int vnet_tx_flush(void * args){
943 struct vnet_dev * dev = NULL;
946 uint64_t noprogress_count;
948 Vnet_Print(0, "VNET/P Polling Thread Starting ....\n");
950 // since there are multiple instances of this thread, and only
951 // one queue of pollable devices, our model here will be to synchronize
952 // on that queue, removing devices as we go, and keeping them
953 // then putting them back on the queue when we are done
954 // in this way, multiple instances of this function will never
955 // be polling the same device at the same time
957 struct v3_queue * tq = v3_create_queue();
960 PrintError("VNET/P polling thread cannot allocate queue\n");
966 while (!vnet_thread_should_stop()) {
968 more=0; // will indicate if any device has more work for us to do
970 while ((dev = (struct vnet_dev *)v3_dequeue(vnet_state.poll_devs))) {
971 // we are handling this device
972 v3_enqueue(tq,(addr_t)dev);
974 if (dev->poll && dev->dev_ops.poll) {
975 // The device's poll function MUST NOT BLOCK
976 rc = dev->dev_ops.poll(dev->vm, dev->quote, dev->private_data);
979 Vnet_Print(0, "VNET/P: poll from device %p error (ignoring) !\n", dev);
986 while ((dev = (struct vnet_dev *)v3_dequeue(tq))) {
987 // now someone else can handle it
988 v3_enqueue(vnet_state.poll_devs, (addr_t)dev);
/* NOTE(review): this condition reads as "if incrementing would NOT wrap",
 * i.e. a saturation guard before noprogress_count++ (the increment itself
 * appears to be on an elided line) — confirm the intent; the double
 * negation makes it easy to misread. */
995 if ( ! ((noprogress_count+1) < noprogress_count)) {
1001 if (noprogress_count < VNET_NOPROGRESS_LIMIT) {
1004 V3_Yield_Timed(VNET_YIELD_USEC);
1011 Vnet_Print(0, "VNET/P Polling Thread Done.\n");
/* Public API: one-time initialization of the VNET core — zero the global
 * state, init the route/device lists and lock, create the route cache and
 * pollable-device queue, and start the flush/polling thread.
 * NOTE(review): the return values of v3_create_queue() and
 * vnet_start_thread() are not checked in the visible lines — verify the
 * checks exist on elided lines, otherwise a NULL poll_devs or thread
 * handle will fault later. */
1016 int v3_init_vnet() {
1017 memset(&vnet_state, 0, sizeof(vnet_state));
1019 INIT_LIST_HEAD(&(vnet_state.routes));
1020 INIT_LIST_HEAD(&(vnet_state.devs));
1022 vnet_state.num_devs = 0;
1023 vnet_state.num_routes = 0;
1025 if (vnet_lock_init(&(vnet_state.lock)) == -1){
1026 PrintError("VNET/P: Fails to initiate lock\n");
1029 vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
1030 if (vnet_state.route_cache == NULL) {
1031 PrintError("VNET/P: Fails to initiate route cache\n");
1035 vnet_state.poll_devs = v3_create_queue();
1037 vnet_state.pkt_flush_thread = vnet_start_thread(vnet_tx_flush, NULL, "vnetd-1");
1039 PrintDebug("VNET/P is initiated\n");
/* Public API: full teardown, mirroring v3_init_vnet — stop the flush
 * thread, free the poll queue, device list, route list, route cache,
 * bridge, and finally the lock.
 * BUG(review): vnet_state.poll_devs is freed TWICE in the visible lines —
 * once right after v3_deinit_queue() below, and again after
 * vnet_thread_stop(). One of the two Vnet_Free(vnet_state.poll_devs)
 * calls must be removed (and freeing the queue BEFORE the polling thread
 * has stopped is itself a use-after-free window — the second, post-stop
 * free is the correctly placed one). */
1045 void v3_deinit_vnet() {
1047 v3_deinit_queue(vnet_state.poll_devs);
1048 Vnet_Free(vnet_state.poll_devs);
1050 PrintDebug("Stopping flush thread\n");
1051 // This will pause until the flush thread is gone
1052 vnet_thread_stop(vnet_state.pkt_flush_thread);
1053 // At this point there should be no lock-holder
/* BUG(review): second free of the same pointer — see note above. */
1055 Vnet_Free(vnet_state.poll_devs);
1058 PrintDebug("Deiniting Device List\n");
1059 // close any devices we have open
1060 deinit_devices_list();
1062 PrintDebug("Deiniting Route List\n");
1063 // remove any routes we have
1064 deinit_routes_list();
1066 PrintDebug("Freeing hash table\n");
1067 // remove the hash table
1068 vnet_free_htable(vnet_state.route_cache, 1, 1);
1071 PrintDebug("Removing Bridge\n");
1072 // remove bridge if it was added
1073 if (vnet_state.bridge) {
1074 Vnet_Free(vnet_state.bridge);
1077 PrintDebug("Deleting lock\n");
1078 // eliminate the lock
1079 vnet_lock_deinit(&(vnet_state.lock));