2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2010, Lei Xia <lxia@northwestern.edu>
11 * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>
12 * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org>
15 * Author: Lei Xia <lxia@northwestern.edu>
16 * Yuan Tang <ytang@northwestern.edu>
18 * This is free software. You are permitted to use,
19 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
22 #include <vnet/vnet.h>
23 #include <vnet/vnet_hashtable.h>
24 #include <vnet/vnet_host.h>
25 #include <vnet/vnet_vmm.h>
27 #include <palacios/vmm_queue.h>
29 #ifndef V3_CONFIG_DEBUG_VNET
31 #define PrintDebug(fmt, args...)
34 #define VNET_YIELD_USEC 1000
/* Tail of the Ethernet frame header struct (the "struct eth_hdr {" opener
 * is elided from this chunk).  Standard 14-byte layout: dst MAC, src MAC,
 * EtherType.  Packed so it can be overlaid directly on raw packet bytes. */
39 uint8_t dst_mac[ETH_ALEN];  /* destination MAC address */
40 uint8_t src_mac[ETH_ALEN];  /* source MAC address */
41 uint16_t type; /* indicates layer 3 protocol type */
42 } __attribute__((packed));
/* Fragment of struct vnet_dev: a guest NIC endpoint registered with VNET/P
 * (see v3_vnet_add_dev).  Several fields (dev_id, quote, poll, private_data)
 * are referenced by the code below but their declarations are elided here. */
47 uint8_t mac_addr[ETH_ALEN];   /* device MAC; must be unique (checked in v3_vnet_add_dev) */
48 struct v3_vm_info * vm;       /* owning VM, passed back into dev_ops callbacks */
49 struct v3_vnet_dev_ops dev_ops; /* input/poll callbacks supplied at registration */
/* Cap on per-poll packet quota handed to a device (see v3_vnet_add_dev). */
53 #define VNET_MAX_QUOTE 64
58 struct list_head node;        /* linkage on vnet_state.devs */
59 } __attribute__((packed));
/* Fragment of struct vnet_brg_dev: the (single) bridge endpoint that links
 * this VNET instance to the outside (see v3_vnet_add_bridge).  The struct
 * opener and some fields (type, private_data) are elided in this chunk. */
63 struct v3_vm_info * vm;            /* VM context handed to bridge callbacks */
64 struct v3_vnet_bridge_ops brg_ops; /* input/poll callbacks for the bridge */
69 } __attribute__((packed));
/* A routing-table entry: the user-supplied route definition plus resolved
 * device pointers and list linkage.  (Closing brace and idx field elided.) */
73 struct vnet_route_info {
74 struct v3_vnet_route route_def;  /* the match criteria / destination spec */
76 struct vnet_dev * dst_dev;  /* resolved when dst_type == LINK_INTERFACE; else NULL */
77 struct vnet_dev * src_dev;  /* resolved when src_type == LINK_INTERFACE; else NULL */
81 struct list_head node;        /* linkage on vnet_state.routes */
82 struct list_head match_node; // used for route matching (temporary list in match_route)
/* Fragment of struct route_list: a cached set of routes matched for one
 * packet header hash (struct opener and num_routes field elided).
 * NOTE(review): routes[0] is the old GCC zero-length-array idiom for a
 * trailing variable-length array; C99 would spell this routes[]. */
87 uint8_t hash_buf[VNET_HASH_SIZE];   /* the header bytes this entry was keyed on */
90 struct vnet_route_info * routes[0]; /* num_routes pointers follow the struct */
91 } __attribute__((packed));
/* Fragments of internal state structs (openers elided).  The pkt field
 * presumably belongs to a queued-packet struct; the fields below it are the
 * file-global vnet_state singleton: route/device lists, the single bridge,
 * counters, the pollable-device queue, the flush thread, and the route
 * cache keyed by packet-header hash.  Lock and index fields are elided. */
96 struct v3_vnet_pkt pkt;
103 struct list_head routes;  /* all vnet_route_info entries */
104 struct list_head devs;    /* all registered vnet_dev entries */
113 struct vnet_brg_dev * bridge;  /* NULL until v3_vnet_add_bridge succeeds */
116 struct vnet_stat stats;   /* rx/tx byte and packet counters */
118 /* queue of devices that are waiting to be polled */
119 struct v3_queue * poll_devs;
121 struct vnet_thread * pkt_flush_thread;  /* runs vnet_tx_flush */
123 struct hashtable * route_cache;  /* hash(pkt header) -> struct route_list */
127 #ifdef V3_CONFIG_DEBUG_VNET
/* Debug helper: format a 6-byte MAC address as "aa:bb:cc:dd:ee:ff" into buf.
 * Caller must supply a buffer of at least 100 bytes (the hard-coded bound).
 * NOTE(review): "%2x" space-pads to width 2; "%02x" (zero-padded) was almost
 * certainly intended.  (Closing brace elided from this chunk.) */
128 static inline void mac2str(uint8_t * mac, char * buf) {
129 snprintf(buf, 100, "%2x:%2x:%2x:%2x:%2x:%2x",
130 mac[0], mac[1], mac[2],
131 mac[3], mac[4], mac[5]);
/* Debug helper: dump one route definition (MACs, qualifiers, endpoint ids
 * and types).  The local "str" buffer declaration and the dev-id/type
 * argument lines are elided from this chunk; str must be >= 100 bytes to
 * satisfy mac2str. */
134 static void print_route(struct v3_vnet_route * route){
137 mac2str(route->src_mac, str);
138 PrintDebug("Src Mac (%s), src_qual (%d)\n",
139 str, route->src_mac_qual);
140 mac2str(route->dst_mac, str);
141 PrintDebug("Dst Mac (%s), dst_qual (%d)\n",
142 str, route->dst_mac_qual);
143 PrintDebug("Src dev id (%d), src type (%d)",
146 PrintDebug("Dst dev id (%d), dst type (%d)\n",
/* Debug helper: walk vnet_state.routes and print every entry, including the
 * resolved destination device when the route targets a local interface.
 * NOTE(review): iterates the route list without taking vnet_state.lock in
 * the visible lines -- callers appear to hold the lock; verify against the
 * elided code. */
151 static void dump_routes(){
152 struct vnet_route_info *route;
154 PrintDebug("\n========Dump routes starts ============\n");
155 list_for_each_entry(route, &(vnet_state.routes), node) {
156 PrintDebug("\nroute %d:\n", route->idx);
158 print_route(&(route->route_def));
159 if (route->route_def.dst_type == LINK_INTERFACE) {
160 PrintDebug("dst_dev (%p), dst_dev_id (%d), dst_dev_ops(%p), dst_dev_data (%p)\n",
162 route->dst_dev->dev_id,
163 (void *)&(route->dst_dev->dev_ops),
164 route->dst_dev->private_data);
168 PrintDebug("\n========Dump routes end ============\n");
175 * A VNET packet is a packed struct with the hashed fields grouped together.
176 * This means we can generate the hash from an offset into the pkt struct
/* Hash function for the route cache: hashes the first VNET_HASH_SIZE bytes
 * at hdr_ptr (the packed packet-header fields -- see the comment above). */
178 static inline uint_t hash_fn(addr_t hdr_ptr) {
179 uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
181 return vnet_hash_buffer(hdr_buf, VNET_HASH_SIZE);
/* Key-equality predicate for the route cache: byte-compare the two
 * VNET_HASH_SIZE header buffers; returns nonzero when equal. */
184 static inline int hash_eq(addr_t key1, addr_t key2) {
185 return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
/* Insert a matched route_list into the route cache, keyed by the packet's
 * header-hash bytes (copied into the entry so the key outlives the packet).
 * Returns 0 on success; error/return lines are elided from this chunk. */
188 static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
189 memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);
191 if (vnet_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
192 PrintError("VNET/P Core: Failed to insert new route entry to the cache\n");
/* Invalidate the route cache (used when routes/devices change): free the
 * whole table -- including keys and cached route_lists (the two 1 flags) --
 * and replace it with a fresh empty one.
 * NOTE(review): re-creation could fail; the visible lines do not check the
 * vnet_create_htable result -- verify against the elided code. */
199 static int clear_hash_cache() {
200 vnet_free_htable(vnet_state.route_cache, 1, 1);
201 vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
/* Look the packet's header hash up in the route cache.  On miss, *routes is
 * set to NULL (vnet_htable_search returns 0); callers test for that. */
206 static int look_into_cache(const struct v3_vnet_pkt * pkt,
207 struct route_list ** routes) {
208 *routes = (struct route_list *)vnet_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
/* Linear scan of vnet_state.devs for the device with the given id.
 * Returns the device or (per the elided tail) presumably NULL when absent.
 * Callers appear responsible for holding vnet_state.lock -- TODO confirm. */
214 static struct vnet_dev * dev_by_id(int idx) {
215 struct vnet_dev * dev = NULL;
217 list_for_each_entry(dev, &(vnet_state.devs), node) {
218 if (dev->dev_id == idx) {
/* Linear scan of vnet_state.devs for the device with the given MAC address
 * (compare_ethaddr returns 0 on a match).  Returns the device or, per the
 * elided tail, presumably NULL when absent. */
226 static struct vnet_dev * dev_by_mac(uint8_t * mac) {
227 struct vnet_dev * dev = NULL;
229 list_for_each_entry(dev, &(vnet_state.devs), node) {
230 if (!compare_ethaddr(dev->mac_addr, mac)){
/* Public lookup: map a MAC address to a device id.  The return statement is
 * elided here; presumably it returns dev->dev_id on success and an error
 * value (-1) when no device carries this MAC -- TODO confirm. */
239 int v3_vnet_find_dev(uint8_t * mac) {
240 struct vnet_dev * dev = NULL;
242 dev = dev_by_mac(mac);
/* Install a new route.  Copies the caller's route definition, resolves any
 * LINK_INTERFACE endpoints to device pointers, then links the entry into
 * vnet_state.routes under the global lock.  Returns the new route's index
 * (a monotonically increasing id, also used by v3_vnet_del_route).
 * NOTE(review): the route cache is not visibly invalidated here; a
 * clear_hash_cache() call may live in the elided lines -- verify. */
252 int v3_vnet_add_route(struct v3_vnet_route route) {
253 struct vnet_route_info * new_route = NULL;
254 vnet_intr_flags_t flags;
256 new_route = (struct vnet_route_info *)Vnet_Malloc(sizeof(struct vnet_route_info));
/* Allocation-failure path (the surrounding if is elided). */
259 PrintError("Cannot allocate new route\n");
263 memset(new_route, 0, sizeof(struct vnet_route_info));
265 #ifdef V3_CONFIG_DEBUG_VNET
266 PrintDebug("VNET/P Core: add_route_entry:\n");
/* Copy the caller's route definition field by field. */
270 memcpy(new_route->route_def.src_mac, route.src_mac, ETH_ALEN);
271 memcpy(new_route->route_def.dst_mac, route.dst_mac, ETH_ALEN);
272 new_route->route_def.src_mac_qual = route.src_mac_qual;
273 new_route->route_def.dst_mac_qual = route.dst_mac_qual;
274 new_route->route_def.dst_type = route.dst_type;
275 new_route->route_def.src_type = route.src_type;
276 new_route->route_def.src_id = route.src_id;
277 new_route->route_def.dst_id = route.dst_id;
/* Resolve interface endpoints to device pointers now, so the hot send
 * path does not have to search the device list per packet. */
279 if (new_route->route_def.dst_type == LINK_INTERFACE) {
280 new_route->dst_dev = dev_by_id(new_route->route_def.dst_id);
283 if (new_route->route_def.src_type == LINK_INTERFACE) {
284 new_route->src_dev = dev_by_id(new_route->route_def.src_id);
/* Publish the route under the global lock. */
288 flags = vnet_lock_irqsave(vnet_state.lock);
290 list_add(&(new_route->node), &(vnet_state.routes));
291 new_route->idx = ++ vnet_state.route_idx;
292 vnet_state.num_routes ++;
294 vnet_unlock_irqrestore(vnet_state.lock, flags);
298 #ifdef V3_CONFIG_DEBUG_VNET
302 return new_route->idx;
/* Delete the route whose idx matches route_idx: unlink and (per the elided
 * lines, presumably) free it, all under the global lock.
 * NOTE(review): the loop uses list_for_each_entry while deleting; the
 * elided lines presumably break out immediately after list_del, otherwise
 * continuing the iteration after deletion would be unsafe -- verify. */
306 void v3_vnet_del_route(uint32_t route_idx){
307 struct vnet_route_info * route = NULL;
308 vnet_intr_flags_t flags;
310 flags = vnet_lock_irqsave(vnet_state.lock);
312 list_for_each_entry(route, &(vnet_state.routes), node) {
313 Vnet_Print(0, "v3_vnet_del_route, route idx: %d\n", route->idx);
314 if(route->idx == route_idx){
315 list_del(&(route->node));
321 vnet_unlock_irqrestore(vnet_state.lock, flags);
324 #ifdef V3_CONFIG_DEBUG_VNET
/* Delete all route entries with the specified src or dst device id.
 * Uses the _safe list iterator because entries are unlinked mid-walk;
 * freeing of each removed entry is in the elided lines.  Runs entirely
 * under the global lock. */
331 static void inline del_routes_by_dev(int dev_id){
332 struct vnet_route_info * route, *tmp_route;
333 vnet_intr_flags_t flags;
335 flags = vnet_lock_irqsave(vnet_state.lock);
337 list_for_each_entry_safe(route, tmp_route, &(vnet_state.routes), node) {
/* Match if either endpoint is a local interface with this device id. */
338 if((route->route_def.dst_type == LINK_INTERFACE &&
339 route->route_def.dst_id == dev_id) ||
340 (route->route_def.src_type == LINK_INTERFACE &&
341 route->route_def.src_id == dev_id)){
343 list_del(&(route->node));
344 list_del(&(route->match_node));
349 vnet_unlock_irqrestore(vnet_state.lock, flags);
/* Find every route that applies to this packet, keeping only the
 * highest-ranked matches (more specific MAC matches outrank wildcards).
 * At the end allocate a route_list holding pointers to the winners.
 * This list will be inserted into the cache so we don't need to free it
 * here; ownership passes to the route cache.
 * Several rank constants, UPDATE_MATCHES invocations, and early-exit lines
 * are elided from this chunk. */
358 static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
359 struct vnet_route_info * route = NULL;
360 struct route_list * matches = NULL;
363 struct list_head match_list;
/* Overlay the Ethernet header on the raw packet bytes. */
364 struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
365 // uint8_t src_type = pkt->src_type;
366 // uint32_t src_link = pkt->src_id;
368 #ifdef V3_CONFIG_DEBUG_VNET
373 mac2str(hdr->src_mac, src_str);
374 mac2str(hdr->dst_mac, dst_str);
375 PrintDebug("VNET/P Core: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
379 INIT_LIST_HEAD(&match_list);
/* UPDATE_MATCHES: if this route outranks all matches so far, restart the
 * match list with it; if it ties the best rank, append it.  (Do not insert
 * anything between the backslash-continued lines below.) */
381 #define UPDATE_MATCHES(rank) do { \
382 if (max_rank < (rank)) { \
384 INIT_LIST_HEAD(&match_list); \
386 list_add(&(route->match_node), &match_list); \
388 } else if (max_rank == (rank)) { \
389 list_add(&(route->match_node), &match_list); \
395 list_for_each_entry(route, &(vnet_state.routes), node) {
396 struct v3_vnet_route * route_def = &(route->route_def);
399 // CHECK SOURCE TYPE HERE
/* Skip routes whose source endpoint doesn't match this packet's origin
 * (src_id == -1 acts as a wildcard link id). */
400 if ( (route_def->src_type != LINK_ANY) &&
401 ( (route_def->src_type != src_type) ||
402 ( (route_def->src_id != src_link) &&
403 (route_def->src_id != -1)))) {
/* Fully wildcarded route: lowest-ranked kind of match. */
408 if ((route_def->dst_mac_qual == MAC_ANY) &&
409 (route_def->src_mac_qual == MAC_ANY)) {
/* Positive match on source MAC, possibly combined with dest MAC. */
413 if (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) {
414 if (route_def->src_mac_qual != MAC_NOT) {
415 if (route_def->dst_mac_qual == MAC_ANY) {
417 } else if (route_def->dst_mac_qual != MAC_NOT &&
418 memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
/* Positive match on dest MAC, possibly combined with source MAC. */
424 if (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
425 if (route_def->dst_mac_qual != MAC_NOT) {
426 if (route_def->src_mac_qual == MAC_ANY) {
428 } else if ((route_def->src_mac_qual != MAC_NOT) &&
429 (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
/* Negated dest-MAC routes (MAC_NOT = "anything but this MAC"). */
435 if ((route_def->dst_mac_qual == MAC_NOT) &&
436 (memcmp(route_def->dst_mac, hdr->dst_mac, 6) != 0)) {
437 if (route_def->src_mac_qual == MAC_ANY) {
439 } else if ((route_def->src_mac_qual != MAC_NOT) &&
440 (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
/* Negated source-MAC routes. */
445 if ((route_def->src_mac_qual == MAC_NOT) &&
446 (memcmp(route_def->src_mac, hdr->src_mac, 6) != 0)) {
447 if (route_def->dst_mac_qual == MAC_ANY) {
449 } else if ((route_def->dst_mac_qual != MAC_NOT) &&
450 (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0)) {
/* Source-MAC match with no destination constraint at all. */
456 if ( (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) &&
457 (route_def->dst_mac_qual == MAC_NONE)) {
462 PrintDebug("VNET/P Core: match_route: Matches=%d\n", num_matches);
464 if (num_matches <= 0) {
/* Allocate the route_list plus its trailing pointer array in one block
 * (routes[] is a zero-length trailing array -- see struct route_list). */
468 matches = (struct route_list *)Vnet_Malloc(sizeof(struct route_list) +
469 (sizeof(struct vnet_route_info *) * num_matches));
473 PrintError("VNET/P Core: Unable to allocate matches\n");
477 matches->num_routes = num_matches;
/* Copy the winners out of the temporary match_list. */
481 list_for_each_entry(route, &match_list, match_node) {
482 matches->routes[i++] = route;
/* Ask VNET what encapsulation header (if any) a packet between these two
 * MACs would receive, without actually sending anything.  Builds a dummy
 * packet header, runs the same cache/match path as v3_vnet_send_pkt, and
 * fills *header with the result.  Currently always reports
 * VNET_HEADER_NONE (no encapsulation) for known link types.
 * NOTE(review): hash computation, locking, and several control-flow lines
 * (if/else, returns) are elided from this chunk -- in particular whether
 * this runs under vnet_state.lock is not visible here. */
489 int v3_vnet_query_header(uint8_t src_mac[6],
491 int recv, // 0 = send, 1=recv
492 struct v3_vnet_header *header)
494 struct route_list *routes;
495 struct vnet_route_info *r;
496 struct v3_vnet_pkt p;
/* Fake up just enough of a packet header for route matching:
 * dst MAC | src MAC | zeroed EtherType. */
500 memcpy(p.header,dest_mac,6);
501 memcpy(p.header+6,src_mac,6);
502 memset(p.header+12,0,2);
504 p.src_type = LINK_EDGE;
/* Echo the queried endpoints back to the caller. */
507 memcpy(header->src_mac,src_mac,6);
508 memcpy(header->dst_mac,dest_mac,6);
/* Same fast path as the send path: try the cache, then full matching. */
511 look_into_cache(&p,&routes);
514 routes = match_route(&p);
516 PrintError("Cannot match route\n");
517 header->header_type=VNET_HEADER_NOMATCH;
518 header->header_len=0;
521 add_route_to_cache(&p,routes);
525 if (routes->num_routes<1) {
526 PrintError("Less than one route\n");
527 header->header_type=VNET_HEADER_NOMATCH;
528 header->header_len=0;
532 if (routes->num_routes>1) {
533 PrintError("More than one route, building header for the first one only\n");
538 switch (r->route_def.dst_type) {
540 // switch based on the link type
541 // for mac-in-udp, we would want to generate a mac, ip, and udp header
542 // direct transmission
544 // for now we will say we have no encapsulation
546 header->header_type=VNET_HEADER_NONE;
547 header->header_len=0;
548 header->src_mac_qual=r->route_def.src_mac_qual;
549 header->dst_mac_qual=r->route_def.dst_mac_qual;
558 // direct transmission
559 // let's guess that it goes to the same interface...
560 header->header_type=VNET_HEADER_NONE;
561 header->header_len=0;
562 header->src_mac_qual=r->route_def.src_mac_qual;
563 header->dst_mac_qual=r->route_def.dst_mac_qual;
569 PrintError("Unknown destination type\n");
/* Core transmit path: route one packet and hand it to every matching
 * destination (bridge and/or local device input callbacks).  Route lookup
 * goes through the cache first, falling back to a full match_route scan.
 * Returns 0 (including the no-route case -- see the inline question).
 * Stats/cache access runs under the global lock; delivery does not. */
580 int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
581 struct route_list * matched_routes = NULL;
582 vnet_intr_flags_t flags;
585 int cpu = V3_Get_CPU();
587 Vnet_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
588 cpu, pkt->size, pkt->src_id,
589 pkt->src_type, pkt->dst_id, pkt->dst_type);
592 v3_hexdump(pkt->data, pkt->size, NULL, 0);
595 flags = vnet_lock_irqsave(vnet_state.lock);
/* Account the packet as received by the VNET core. */
597 vnet_state.stats.rx_bytes += pkt->size;
598 vnet_state.stats.rx_pkts++;
600 look_into_cache(pkt, &matched_routes);
602 if (matched_routes == NULL) {
603 PrintDebug("VNET/P Core: sending pkt - matching route\n");
/* Cache miss: run the full matcher and memoize the result.  The
 * returned route_list is owned by the cache from here on. */
605 matched_routes = match_route(pkt);
607 if (matched_routes) {
608 add_route_to_cache(pkt, matched_routes);
610 PrintDebug("VNET/P Core: Could not find route for packet... discarding packet\n");
611 vnet_unlock_irqrestore(vnet_state.lock, flags);
612 return 0; /* do we return -1 here?*/
616 vnet_unlock_irqrestore(vnet_state.lock, flags);
618 PrintDebug("VNET/P Core: send pkt route matches %d\n", matched_routes->num_routes);
/* Deliver to every matched destination.
 * NOTE(review): matched_routes is dereferenced after the lock is dropped;
 * a concurrent clear_hash_cache() could free it -- verify the elided code
 * guards against this. */
620 for (i = 0; i < matched_routes->num_routes; i++) {
621 struct vnet_route_info * route = matched_routes->routes[i];
623 if (route->route_def.dst_type == LINK_EDGE) {
624 struct vnet_brg_dev * bridge = vnet_state.bridge;
625 pkt->dst_type = LINK_EDGE;
626 pkt->dst_id = route->route_def.dst_id;
628 if (bridge == NULL) {
629 Vnet_Print(2, "VNET/P Core: No active bridge to sent data to\n");
633 if(bridge->brg_ops.input(bridge->vm, pkt, bridge->private_data) < 0){
634 Vnet_Print(2, "VNET/P Core: Packet not sent properly to bridge\n");
637 vnet_state.stats.tx_bytes += pkt->size;
638 vnet_state.stats.tx_pkts ++;
639 } else if (route->route_def.dst_type == LINK_INTERFACE) {
640 if (route->dst_dev == NULL){
641 Vnet_Print(2, "VNET/P Core: No active device to sent data to\n");
645 if(route->dst_dev->dev_ops.input(route->dst_dev->vm, pkt, route->dst_dev->private_data) < 0) {
646 Vnet_Print(2, "VNET/P Core: Packet not sent properly\n");
649 vnet_state.stats.tx_bytes += pkt->size;
650 vnet_state.stats.tx_pkts ++;
652 Vnet_Print(0, "VNET/P Core: Wrong dst type\n");
/* Register a guest NIC with VNET/P.  Copies the caller's ops, clamps the
 * per-poll quota to VNET_MAX_QUOTE, and -- if no device with this MAC
 * exists -- assigns a fresh nonzero dev_id, links it into vnet_state.devs,
 * and makes it pollable.  Returns the new dev_id, or (per the elided
 * lines) an error value when the MAC is already taken.
 * NOTE(review): new_dev is not visibly freed on the duplicate-MAC path --
 * verify the elided lines; as shown this would leak. */
660 int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac,
661 struct v3_vnet_dev_ops * ops, int quote, int poll_state,
663 struct vnet_dev * new_dev = NULL;
664 vnet_intr_flags_t flags;
666 new_dev = (struct vnet_dev *)Vnet_Malloc(sizeof(struct vnet_dev));
668 if (new_dev == NULL) {
669 Vnet_Print(0, "VNET/P Core: Unable to allocate a new device\n");
673 memcpy(new_dev->mac_addr, mac, 6);
674 new_dev->dev_ops.input = ops->input;
675 new_dev->dev_ops.poll = ops->poll;
676 new_dev->private_data = priv_data;
/* Clamp the device's per-poll packet quota to the global cap. */
679 new_dev->quote = quote<VNET_MAX_QUOTE ? quote : VNET_MAX_QUOTE;
680 new_dev->poll = poll_state;
682 flags = vnet_lock_irqsave(vnet_state.lock);
/* Only register if this MAC is not already present. */
684 if (dev_by_mac(mac) == NULL) {
685 list_add(&(new_dev->node), &(vnet_state.devs));
686 new_dev->dev_id = ++ vnet_state.dev_idx;
687 vnet_state.num_devs ++;
/* Make the device visible to the vnet_tx_flush polling thread. */
690 v3_enqueue(vnet_state.poll_devs, (addr_t)new_dev);
693 PrintError("VNET/P: Device with the same MAC has already been added\n");
696 vnet_unlock_irqrestore(vnet_state.lock, flags);
698 /* if the device was found previously the id should still be 0 */
699 if (new_dev->dev_id == 0) {
700 Vnet_Print(0, "VNET/P Core: Device Already exists\n");
704 PrintDebug("VNET/P Core: Add Device: dev_id %d\n", new_dev->dev_id);
706 return new_dev->dev_id;
/* Unregister the device with the given id: unlink it from vnet_state.devs
 * and (per the elided lines, presumably) free it.
 * NOTE(review): dev is dereferenced by list_del without a visible NULL
 * check after dev_by_id -- the guard is presumably in the elided lines;
 * verify.  The del_routes_by_dev call is commented out, so routes that
 * reference this device keep a dangling dst_dev/src_dev pointer. */
710 int v3_vnet_del_dev(int dev_id){
711 struct vnet_dev * dev = NULL;
712 vnet_intr_flags_t flags;
714 flags = vnet_lock_irqsave(vnet_state.lock);
716 dev = dev_by_id(dev_id);
718 list_del(&(dev->node));
719 //del_routes_by_dev(dev_id);
720 vnet_state.num_devs --;
723 vnet_unlock_irqrestore(vnet_state.lock, flags);
727 PrintDebug("VNET/P Core: Removed Device: dev_id %d\n", dev_id);
/* Snapshot the global traffic counters into the caller's struct.
 * NOTE(review): read without taking vnet_state.lock in the visible lines,
 * so a snapshot can mix values from different instants (tolerable for
 * statistics, but worth confirming that's intended). */
733 int v3_vnet_stat(struct vnet_stat * stats){
734 stats->rx_bytes = vnet_state.stats.rx_bytes;
735 stats->rx_pkts = vnet_state.stats.rx_pkts;
736 stats->tx_bytes = vnet_state.stats.tx_bytes;
737 stats->tx_pkts = vnet_state.stats.tx_pkts;
/* Teardown helper: unlink (and, per the elided lines, free) every
 * registered device.  Called from v3_deinit_vnet after the flush thread
 * has stopped, so no locking is visible here. */
742 static void deinit_devices_list(){
743 struct vnet_dev * dev, * tmp;
745 list_for_each_entry_safe(dev, tmp, &(vnet_state.devs), node) {
746 list_del(&(dev->node));
/* Teardown helper: unlink (and, per the elided lines, free) every route,
 * removing each from both the main list and any match-list linkage.
 * Called from v3_deinit_vnet; no locking is visible here. */
751 static void deinit_routes_list(){
752 struct vnet_route_info * route, * tmp;
754 list_for_each_entry_safe(route, tmp, &(vnet_state.routes), node) {
755 list_del(&(route->node));
756 list_del(&(route->match_node));
/* Install the (single) bridge endpoint.  Uses a two-phase claim: under the
 * lock, the bridge slot is reserved with the sentinel (void *)1 so that
 * concurrent callers fail fast; the real struct is then allocated and
 * filled outside the lock and swapped in atomically at the end.
 * NOTE(review): the bridge_free computation and the return statements are
 * elided from this chunk. */
761 int v3_vnet_add_bridge(struct v3_vm_info * vm,
762 struct v3_vnet_bridge_ops * ops,
765 vnet_intr_flags_t flags;
767 struct vnet_brg_dev * tmp_bridge = NULL;
769 flags = vnet_lock_irqsave(vnet_state.lock);
770 if (vnet_state.bridge == NULL) {
/* Sentinel: reserve the slot before allocating outside the lock. */
772 vnet_state.bridge = (void *)1;
774 vnet_unlock_irqrestore(vnet_state.lock, flags);
776 if (bridge_free == 0) {
777 PrintError("VNET/P Core: Bridge already set\n");
781 tmp_bridge = (struct vnet_brg_dev *)Vnet_Malloc(sizeof(struct vnet_brg_dev));
783 if (tmp_bridge == NULL) {
784 PrintError("VNET/P Core: Unable to allocate new bridge\n");
/* Roll back the sentinel reservation on allocation failure. */
785 vnet_state.bridge = NULL;
790 tmp_bridge->brg_ops.input = ops->input;
791 tmp_bridge->brg_ops.poll = ops->poll;
792 tmp_bridge->private_data = priv_data;
793 tmp_bridge->type = type;
795 /* make this atomic to avoid possible race conditions */
796 flags = vnet_lock_irqsave(vnet_state.lock);
797 vnet_state.bridge = tmp_bridge;
798 vnet_unlock_irqrestore(vnet_state.lock, flags);
/* Remove the bridge, but only if its type matches the caller's.  The slot
 * is cleared under the lock; the struct itself is freed afterwards, once
 * no other thread can reach it through vnet_state.bridge.  (The NULL
 * check guarding Vnet_Free is in the elided lines.) */
804 void v3_vnet_del_bridge(uint8_t type) {
805 vnet_intr_flags_t flags;
806 struct vnet_brg_dev * tmp_bridge = NULL;
808 flags = vnet_lock_irqsave(vnet_state.lock);
810 if (vnet_state.bridge != NULL && vnet_state.bridge->type == type) {
811 tmp_bridge = vnet_state.bridge;
812 vnet_state.bridge = NULL;
815 vnet_unlock_irqrestore(vnet_state.lock, flags);
818 Vnet_Free(tmp_bridge);
/* Device-polling thread body.  Can be instantiated as multiple threads
 * that run on multiple cores,
 * or it could be running on a dedicated side core.
 * Loops until asked to stop, polling each registered device for pending
 * packets and yielding between rounds.  Declarations of rc/more and
 * several closing braces are elided from this chunk. */
827 static int vnet_tx_flush(void * args){
828 struct vnet_dev * dev = NULL;
832 Vnet_Print(0, "VNET/P Polling Thread Starting ....\n");
834 // since there are multiple instances of this thread, and only
835 // one queue of pollable devices, our model here will be to synchronize
836 // on that queue, removing devices as we go, and keeping them
837 // then putting them back on the queue when we are done
838 // in this way, multiple instances of this function will never
839 // be polling the same device at the same time
/* tq is this thread's private holding queue for devices it has claimed. */
841 struct v3_queue * tq = v3_create_queue();
844 PrintError("VNET/P polling thread cannot allocate queue\n");
849 while (!vnet_thread_should_stop()) {
851 more=0; // will indicate if any device has more work for us to do
/* Claim devices one at a time off the shared pollable queue. */
853 while ((dev = (struct vnet_dev *)v3_dequeue(vnet_state.poll_devs))) {
854 // we are handling this device
855 v3_enqueue(tq,(addr_t)dev);
857 if (dev->poll && dev->dev_ops.poll) {
858 // The device's poll function MUST NOT BLOCK
859 rc = dev->dev_ops.poll(dev->vm, dev->quote, dev->private_data);
862 Vnet_Print(0, "VNET/P: poll from device %p error (ignoring) !\n", dev);
/* Return all claimed devices to the shared queue for the next round. */
869 while ((dev = (struct vnet_dev *)v3_dequeue(tq))) {
870 // now someone else can handle it
871 v3_enqueue(vnet_state.poll_devs, (addr_t)dev);
874 // Yield regardless of whether we handled any devices - need
875 // to allow other threads to run
877 // we have more to do, so we want to get back asap
880 // put ourselves briefly to sleep if we don't have more
881 V3_Yield_Timed(VNET_YIELD_USEC);
/* Body of the VNET initialization routine (the function signature line --
 * presumably "int v3_init_vnet()" -- is elided from this chunk).  Zeroes
 * the global state, initializes lists/lock/route cache, and starts the
 * polling thread.
 * NOTE(review): the v3_create_queue and vnet_start_thread results are not
 * visibly checked -- verify the elided lines handle those failures. */
894 memset(&vnet_state, 0, sizeof(vnet_state));
896 INIT_LIST_HEAD(&(vnet_state.routes));
897 INIT_LIST_HEAD(&(vnet_state.devs));
/* Redundant after the memset, but harmless and explicit. */
899 vnet_state.num_devs = 0;
900 vnet_state.num_routes = 0;
902 if (vnet_lock_init(&(vnet_state.lock)) == -1){
903 PrintError("VNET/P: Fails to initiate lock\n");
906 vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
907 if (vnet_state.route_cache == NULL) {
908 PrintError("VNET/P: Fails to initiate route cache\n");
912 vnet_state.poll_devs = v3_create_queue();
914 vnet_state.pkt_flush_thread = vnet_start_thread(vnet_tx_flush, NULL, "vnetd-1");
916 PrintDebug("VNET/P is initiated\n");
/* Tear down the whole VNET subsystem, in dependency order: stop the
 * polling thread first (so nothing concurrently touches the state), then
 * free the queues, devices, routes, route cache, bridge, and finally the
 * lock.  (The closing brace is elided from this chunk.) */
922 void v3_deinit_vnet(){
924 PrintDebug("Stopping flush thread\n");
925 // This will pause until the flush thread is gone
926 vnet_thread_stop(vnet_state.pkt_flush_thread);
927 // At this point there should be no lock-holder
929 Vnet_Free(vnet_state.poll_devs);
932 PrintDebug("Deiniting Device List\n");
933 // close any devices we have open
934 deinit_devices_list();
936 PrintDebug("Deiniting Route List\n");
937 // remove any routes we have
938 deinit_routes_list();
940 PrintDebug("Freeing hash table\n");
941 // remove the hash table; the two 1 flags also free keys and cached
941 // route_lists (the cache owns them -- see match_route)
942 vnet_free_htable(vnet_state.route_cache, 1, 1);
945 PrintDebug("Removing Bridge\n");
946 // remove bridge if it was added
947 if (vnet_state.bridge) {
948 Vnet_Free(vnet_state.bridge);
951 PrintDebug("Deleting lock\n");
952 // eliminate the lock
953 vnet_lock_deinit(&(vnet_state.lock));