2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2009, Lei Xia <lxia@northwestern.edu>
11 * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>
12 * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org>
13 * All rights reserved.
15 * Author: Lei Xia <lxia@northwestern.edu>
16 * Yuan Tang <ytang@northwestern.edu>
18 * This is free software. You are permitted to use,
19 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
22 #include <palacios/vmm_vnet.h>
23 #include <palacios/vm_guest_mem.h>
24 #include <palacios/vmm_lock.h>
25 #include <palacios/vmm_queue.h>
26 #include <palacios/vmm_sprintf.h>
28 #ifndef CONFIG_DEBUG_VNET
30 #define PrintDebug(fmt, args...)
38 uint16_t type; // indicates layer 3 protocol type
39 } __attribute__((packed));
48 struct v3_vm_info * vm;
50 int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data);
54 struct list_head node;
55 } __attribute__((packed));
58 #define BRIDGE_BUF_SIZE 1024
59 struct bridge_pkts_buf {
    /* Ring of packet descriptors; each pkts[i].data is wired (at bridge
     * creation time) to point into the datas[] slab below. */
63     struct v3_vnet_pkt pkts[BRIDGE_BUF_SIZE];
    /* Backing payload storage: one ETHERNET_PACKET_LEN-byte slab per ring slot. */
64     uint8_t datas[ETHERNET_PACKET_LEN*BRIDGE_BUF_SIZE];
68 struct v3_vm_info * vm;
70 int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data);
71 void (*xcall_input)(void *data);
73 struct bridge_pkts_buf recv_buf; //packets from Vnet to vnet_bridge device
75 struct bridge_pkts_buf send_buf; //packets from vnet_bridge device to Vnet
79 uint16_t max_delayed_pkts;
80 long max_latency; //in cycles
82 } __attribute__((packed));
88 struct vnet_route_info {
    /* The user-supplied route definition (MACs, qualifiers, link ids/types). */
89     struct v3_vnet_route route_def;
    /* Resolved device endpoints, filled in by v3_vnet_add_route() when the
     * corresponding link type is LINK_INTERFACE; NULL otherwise. */
91     struct vnet_dev * dst_dev;
92     struct vnet_dev * src_dev;
    /* Membership in the global vnet_state.routes list. */
94     struct list_head node;
95     struct list_head match_node; // used for route matching
100 uint8_t hash_buf[VNET_HASH_SIZE];
103 struct vnet_route_info * routes[0];
104 } __attribute__((packed));
109 struct list_head routes;
110 struct list_head devs;
115 struct vnet_brg_dev *bridge;
119 struct gen_queue * inpkt_q;
120 struct hashtable * route_cache;
127 #ifdef CONFIG_DEBUG_VNET
/*
 * Format a 6-byte MAC address into buf as colon-separated hex octets.
 * NOTE(review): assumes buf is at least 100 bytes (the bound is hard-coded
 * here) -- verify against every caller's buffer size.
 * FIX: cast each byte to unsigned char before formatting; plain 'char' may
 * be signed, so the old "%d" printed octets >= 0x80 as negative numbers.
 * Hex is also the conventional MAC representation.
 */
static inline void mac_to_string(char mac[6], char * buf) {
    snprintf(buf, 100, "%x:%x:%x:%x:%x:%x",
             (unsigned char)mac[0], (unsigned char)mac[1], (unsigned char)mac[2],
             (unsigned char)mac[3], (unsigned char)mac[4], (unsigned char)mac[5]);
}
/* Debug helper: dump one routing entry (source/dest MACs and qualifiers,
 * link endpoints) via PrintDebug.  The MAC string buffer must be at least
 * 100 bytes (see mac_to_string). */
134 static void print_route(struct vnet_route_info *route){
137     mac_to_string(route->route_def.src_mac, str);
138     PrintDebug("Src Mac (%s),  src_qual (%d)\n",
139 	       str, route->route_def.src_mac_qual);
140     mac_to_string(route->route_def.dst_mac, str);
141     PrintDebug("Dst Mac (%s),  dst_qual (%d)\n",
142 	       str, route->route_def.dst_mac_qual);
143     PrintDebug("Src dev id (%d), src type (%d)",
144 	       route->route_def.src_id,
145 	       route->route_def.src_type);
146     PrintDebug("Dst dev id (%d), dst type (%d)\n",
147 	       route->route_def.dst_id,
148 	       route->route_def.dst_type);
    /* Only INTERFACE destinations carry a resolved device pointer.
     * NOTE(review): dst_dev is dereferenced without a NULL check -- confirm
     * routes are never added before their destination device registers. */
149     if (route->route_def.dst_type == LINK_INTERFACE) {
150 	PrintDebug("dst_dev (%p), dst_dev_id (%d), dst_dev_input (%p), dst_dev_data (%p)\n",
152 		   route->dst_dev->dev_id,
153 		   route->dst_dev->input,
154 		   route->dst_dev->private_data);
/* Debug helper: walk the global route list and print every entry.
 * Caller should hold vnet_state.lock if routes can change concurrently. */
158 static void dump_routes(){
159     struct vnet_route_info *route;
162     PrintDebug("\n========Dump routes starts ============\n");
    /* i numbers the routes 1..N in the debug output. */
163     list_for_each_entry(route, &(vnet_state.routes), node) {
164 	PrintDebug("\nroute %d:\n", ++i);
168     PrintDebug("\n========Dump routes end ============\n");
175 * A VNET packet is a packed struct with the hashed fields grouped together.
176 * This means we can generate the hash from an offset into the pkt struct
178 static inline uint_t hash_fn(addr_t hdr_ptr) {
179 uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
181 return v3_hash_buffer(hdr_buf, VNET_HASH_SIZE);
184 static inline int hash_eq(addr_t key1, addr_t key2) {
185 return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
188 static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
189 memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);
191 if (v3_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
192 PrintError("Vnet: Failed to insert new route entry to the cache\n");
199 static int clear_hash_cache() {
201 v3_free_htable(vnet_state.route_cache, 1, 1);
202 vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);
207 static int look_into_cache(const struct v3_vnet_pkt * pkt, struct route_list ** routes) {
209 *routes = (struct route_list *)v3_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
215 static struct vnet_dev * find_dev_by_id(int idx) {
216 struct vnet_dev * dev = NULL;
218 list_for_each_entry(dev, &(vnet_state.devs), node) {
219 int dev_id = dev->dev_id;
228 static struct vnet_dev * find_dev_by_mac(char mac[6]) {
229 struct vnet_dev * dev = NULL;
231 list_for_each_entry(dev, &(vnet_state.devs), node) {
232 if (!memcmp(dev->mac_addr, mac, 6))
239 int get_device_id_by_mac(char mac[6]){
241 struct vnet_dev *dev = find_dev_by_mac(mac);
250 int v3_vnet_add_route(struct v3_vnet_route route) {
251 struct vnet_route_info * new_route = NULL;
254 new_route = (struct vnet_route_info *)V3_Malloc(sizeof(struct vnet_route_info));
255 memset(new_route, 0, sizeof(struct vnet_route_info));
257 PrintDebug("Vnet: vnet_add_route_entry: dst_id: %d, dst_type: %d\n",
258 route.dst_id, route.dst_type);
260 memcpy(new_route->route_def.src_mac, route.src_mac, 6);
261 memcpy(new_route->route_def.dst_mac, route.dst_mac, 6);
262 new_route->route_def.src_mac_qual = route.src_mac_qual;
263 new_route->route_def.dst_mac_qual = route.dst_mac_qual;
264 new_route->route_def.dst_id = route.dst_id;
265 new_route->route_def.dst_type = route.dst_type;
266 new_route->route_def.src_id = route.src_id;
267 new_route->route_def.src_type = route.src_type;
269 if (new_route->route_def.dst_type == LINK_INTERFACE) {
270 new_route->dst_dev = find_dev_by_id(new_route->route_def.dst_id);
271 PrintDebug("Vnet: Add route, get device: dev_id %d, input : %p, private_data %p\n",
272 new_route->dst_dev->dev_id, new_route->dst_dev->input, new_route->dst_dev->private_data);
275 if (new_route->route_def.src_type == LINK_INTERFACE) {
276 new_route->src_dev = find_dev_by_id(new_route->route_def.src_id);
279 flags = v3_lock_irqsave(vnet_state.lock);
281 list_add(&(new_route->node), &(vnet_state.routes));
284 v3_unlock_irqrestore(vnet_state.lock, flags);
287 #ifdef CONFIG_DEBUG_VNET
296 // At the end allocate a route_list
297 // This list will be inserted into the cache so we don't need to free it
/*
 * Walk the global route table and collect every route matching this packet,
 * keeping only the highest-ranked matches (rank reflects match specificity).
 * Returns a heap-allocated route_list (handed to the cache, per the comment
 * above), or NULL when nothing matches.
 * Called from v3_vnet_send_pkt() with vnet_state.lock held.
 */
298 static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
299     struct vnet_route_info * route = NULL;
300     struct route_list * matches = NULL;
303     struct list_head match_list;
    /* NOTE(review): the ethernet header is read from pkt->data here, while
     * the debug path in v3_vnet_send_pkt reads it from pkt->header --
     * confirm both always hold the same bytes. */
304     struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
305     uint8_t src_type = pkt->src_type;
306     uint32_t src_link = pkt->src_id;
308 #ifdef CONFIG_DEBUG_VNET
313     mac_to_string(hdr->src_mac, src_str);
314     mac_to_string(hdr->dst_mac, dst_str);
315     PrintDebug("Vnet: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
319     INIT_LIST_HEAD(&match_list);
    /* Keep only the best-ranked candidates: a strictly higher rank resets
     * the candidate list; an equal rank appends to it. */
321 #define UPDATE_MATCHES(rank) do { \
322 if (max_rank < (rank)) { \
324 INIT_LIST_HEAD(&match_list); \
326 list_add(&(route->match_node), &match_list); \
328 } else if (max_rank == (rank)) { \
329 list_add(&(route->match_node), &match_list); \
335     list_for_each_entry(route, &(vnet_state.routes), node) {
336 	struct v3_vnet_route * route_def = &(route->route_def);
    /* A route only applies if its source link constraint (type and id,
     * with LINK_ANY / id -1 as wildcards) accepts this packet's origin. */
338 	// CHECK SOURCE TYPE HERE
339 	if ( (route_def->src_type != LINK_ANY) &&
340 	     ( (route_def->src_type != src_type) ||
341 	       ( (route_def->src_id != src_link) &&
342 		 (route_def->src_id != (uint32_t)-1)))) {
    /* Rank the MAC match: ANY/ANY is the weakest; exact src and/or dst
     * matches rank higher; MAC_NOT qualifiers invert the comparison. */
347 	if ((route_def->dst_mac_qual == MAC_ANY) &&
348 	    (route_def->src_mac_qual == MAC_ANY)) {
352 	if (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) {
353 	    if (route_def->src_mac_qual != MAC_NOT) {
354 		if (route_def->dst_mac_qual == MAC_ANY) {
356 		} else if (route_def->dst_mac_qual != MAC_NOT &&
357 			   memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
363 	if (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
364 	    if (route_def->dst_mac_qual != MAC_NOT) {
365 		if (route_def->src_mac_qual == MAC_ANY) {
367 		} else if ((route_def->src_mac_qual != MAC_NOT) &&
368 			   (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
371 	if ((route_def->dst_mac_qual == MAC_NOT) &&
375 	    (memcmp(route_def->dst_mac, hdr->dst_mac, 6) != 0)) {
376 	    if (route_def->src_mac_qual == MAC_ANY) {
378 	    } else if ((route_def->src_mac_qual != MAC_NOT) &&
379 		       (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
384 	if ((route_def->src_mac_qual == MAC_NOT) &&
385 	    (memcmp(route_def->src_mac, hdr->src_mac, 6) != 0)) {
386 	    if (route_def->dst_mac_qual == MAC_ANY) {
388 	    } else if ((route_def->dst_mac_qual != MAC_NOT) &&
389 		       (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0)) {
395 	if ( (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) &&
396 	     (route_def->dst_mac_qual == MAC_NONE)) {
401     PrintDebug("Vnet: match_route: Matches=%d\n", num_matches);
403     if (num_matches == 0) {
    /* Flexible-array-style allocation: header plus one pointer per match.
     * NOTE(review): the V3_Malloc result is used without a NULL check --
     * consider bailing out (returning NULL) on allocation failure. */
407     matches = (struct route_list *)V3_Malloc(sizeof(struct route_list) +
408 					     (sizeof(struct vnet_route_info *) * num_matches));
410     matches->num_routes = num_matches;
414     list_for_each_entry(route, &match_list, match_node) {
415 	matches->routes[i++] = route;
423 static int flush_bridge_pkts(struct vnet_brg_dev *bridge){
425 int num, start, send;
426 struct v3_vnet_bridge_input_args args;
427 int cpu_id = bridge->vm->cores[0].cpu_id;
428 int current_core = V3_Get_CPU();
430 if (bridge == NULL) {
431 PrintDebug("VNET: No bridge to sent data to links\n");
435 flags = v3_lock_irqsave(bridge->recv_buf.lock);
437 num = bridge->recv_buf.num;
438 start = bridge->recv_buf.start;
440 bridge->recv_buf.num -= num;
441 bridge->recv_buf.start += num;
442 bridge->recv_buf.start %= BRIDGE_BUF_SIZE;
444 v3_unlock_irqrestore(bridge->recv_buf.lock, flags);
447 if(bridge->disabled){
448 PrintDebug("VNET: In flush bridge pkts: Bridge is disabled\n");
452 if(num <= 2 && num > 0){
453 PrintDebug("VNET: In flush bridge pkts: %d\n", num);
457 PrintDebug("VNET: In flush bridge pkts to bridge, cur_cpu %d, brige_core: %d\n", current_core, cpu_id);
458 if (current_core == cpu_id){
459 if ((start + num) < BRIDGE_BUF_SIZE){
460 bridge->input(bridge->vm, &(bridge->recv_buf.pkts[start]), num, bridge->private_data);
462 bridge->input(bridge->vm, &(bridge->recv_buf.pkts[start]), (BRIDGE_BUF_SIZE - start), bridge->private_data);
463 send = num - (BRIDGE_BUF_SIZE - start);
464 bridge->input(bridge->vm, &(bridge->recv_buf.pkts[0]), send, bridge->private_data);
467 args.vm = bridge->vm;
468 args.private_data = bridge->private_data;
470 if ((start + num) < BRIDGE_BUF_SIZE){
472 args.vnet_pkts = &(bridge->recv_buf.pkts[start]);
473 V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
475 args.pkt_num = BRIDGE_BUF_SIZE - start;
476 args.vnet_pkts = &(bridge->recv_buf.pkts[start]);
477 V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
479 send = num - (BRIDGE_BUF_SIZE - start);
481 args.vnet_pkts = &(bridge->recv_buf.pkts[0]);
482 V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
486 PrintDebug("VNET: flush bridge pkts %d\n", num);
493 static int send_to_bridge(struct v3_vnet_pkt * pkt){
494 struct vnet_brg_dev *bridge = vnet_state.bridge;
495 int cpu_id = bridge->vm->cores[0].cpu_id;
496 struct v3_vnet_bridge_input_args args;
498 if (bridge == NULL) {
499 PrintDebug("VNET: No bridge to sent data to links\n");
503 if(bridge->max_delayed_pkts <= 1){
504 if(bridge->disabled){
505 PrintDebug("VNET: Bridge diabled\n");
510 args.vm = bridge->vm;
511 args.vnet_pkts = pkt;
512 args.private_data = bridge->private_data;
514 V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
515 PrintDebug("VNET: sent one packet to the bridge\n");
521 struct v3_vnet_pkt *buf;
523 PrintDebug("VNET: send_to_bridge\n");
525 flags = v3_lock_irqsave(bridge->recv_buf.lock);
527 if(bridge->disabled && bridge->recv_buf.num >= BRIDGE_BUF_SIZE){
528 PrintDebug("Bridge diabled and bridge receive buffer full\n");
529 v3_unlock_irqrestore(bridge->recv_buf.lock, flags);//maybe should move this after copy
530 num = bridge->recv_buf.num;
534 end = bridge->recv_buf.end;
535 buf = &(bridge->recv_buf.pkts[end]);
537 bridge->recv_buf.num ++;
538 bridge->recv_buf.end ++;
539 bridge->recv_buf.end %= BRIDGE_BUF_SIZE;
541 num = bridge->recv_buf.num;
543 v3_unlock_irqrestore(bridge->recv_buf.lock, flags);//maybe should move this after copy
546 buf->size = pkt->size;
547 buf->dst_id = pkt->dst_id;
548 buf->src_id = pkt->src_id;
549 buf->src_type = pkt->src_type;
550 buf->dst_type = pkt->dst_type;
551 memcpy(buf->header, pkt->header, ETHERNET_HEADER_LEN);
552 memcpy(buf->data, pkt->data, pkt->size);
556 if (num >= bridge->max_delayed_pkts){
557 flush_bridge_pkts(bridge);
/*
 * Core routing entry point: find (or compute and cache) the set of routes
 * matching 'pkt' and forward a copy of the packet along each one -- either
 * to the bridge (LINK_EDGE) or to a registered device's input callback
 * (LINK_INTERFACE).  Returns non-negative on success.
 */
563 int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
564     struct route_list * matched_routes = NULL;
568 #ifdef CONFIG_DEBUG_VNET
570 	struct eth_hdr * hdr = (struct eth_hdr *)(pkt->header);
574 	mac_to_string(hdr->src_mac, src_str);
575 	mac_to_string(hdr->dst_mac, dest_str);
576 	int cpu = V3_Get_CPU();
577 	PrintDebug("Vnet: on cpu %d, HandleDataOverLink. SRC(%s), DEST(%s), pkt size: %d\n", cpu, src_str, dest_str, pkt->size);
    /* The cache lookup, table scan, and cache insert all happen under the
     * global vnet lock so the route set cannot change underneath us. */
581     flags = v3_lock_irqsave(vnet_state.lock);
583     look_into_cache(pkt, &matched_routes);
585     if (matched_routes == NULL) {
586 	PrintDebug("Vnet: send pkt Looking into routing table\n");
588 	matched_routes = match_route(pkt);
    /* Cache the computed list; the cache owns it from here on. */
590 	if (matched_routes) {
591 	    add_route_to_cache(pkt, matched_routes);
    /* No route at all: drop the packet. */
593 	    PrintDebug("Could not find route for packet... discards packet\n");
594 	    v3_unlock_irqrestore(vnet_state.lock, flags);
599     v3_unlock_irqrestore(vnet_state.lock, flags);
601     PrintDebug("Vnet: send pkt route matches %d\n", matched_routes->num_routes);
603     for (i = 0; i < matched_routes->num_routes; i++) {
604 	struct vnet_route_info * route = matched_routes->routes[i];
    /* NOTE(review): pkt->dst_type/dst_id are mutated per matched route --
     * delivery order matters if a callee inspects them later. */
606 	if (route->route_def.dst_type == LINK_EDGE) {
607 	    pkt->dst_type = LINK_EDGE;
608 	    pkt->dst_id = route->route_def.dst_id;
610 	    if (send_to_bridge(pkt) == -1) {
611 		PrintDebug("VNET: Packet not sent properly to bridge\n");
615 	} else if (route->route_def.dst_type == LINK_INTERFACE) {
616 	    if (route->dst_dev->input(route->dst_dev->vm, pkt, route->dst_dev->private_data) == -1) {
617 		PrintDebug("VNET: Packet not sent properly\n");
621 	    PrintDebug("Vnet: Wrong Edge type\n");
625 	PrintDebug("Vnet: v3_vnet_send_pkt: Forward packet according to Route %d\n", i);
631 void v3_vnet_send_pkt_xcall(void * data){
632 struct v3_vnet_pkt * pkt = (struct v3_vnet_pkt *)data;
633 v3_vnet_send_pkt(pkt, NULL);
/*
 * Drain the bridge's send buffer (frames received from the bridge edge via
 * v3_vnet_bridge_rx) and route each packet through v3_vnet_send_pkt().
 * NOTE(review): send_buf.lock is held across v3_vnet_send_pkt(), which takes
 * vnet_state.lock and may take recv_buf.lock via send_to_bridge() -- verify
 * this lock ordering is consistent everywhere to rule out deadlock.
 */
637 void v3_vnet_bridge_polling()
641     struct v3_vnet_pkt *buf;
642     struct vnet_brg_dev *bridge = vnet_state.bridge;
644     PrintDebug("In vnet bridge pollling: cpu %d\n", V3_Get_CPU());
    /* Nothing to do when no bridge has been registered. */
647 	PrintDebug("VNET: Bridge is not set\n");
651     flags = v3_lock_irqsave(bridge->send_buf.lock);
653     num = bridge->send_buf.num;
654     start = bridge->send_buf.start;
656     PrintDebug("VNET: bridge polling pkts %d\n", num);
    /* Route each queued packet, consuming ring slots one at a time. */
659 	buf = &(bridge->send_buf.pkts[bridge->send_buf.start]);
661 	v3_vnet_send_pkt(buf, NULL);
663 	bridge->send_buf.num --;
664 	bridge->send_buf.start ++;
665 	bridge->send_buf.start %= BRIDGE_BUF_SIZE;
669     v3_unlock_irqrestore(bridge->send_buf.lock, flags);
675 int v3_vnet_bridge_rx(uchar_t *buf, uint16_t size, uint16_t src_link){
676 struct vnet_brg_dev *bridge = vnet_state.bridge;
679 struct v3_vnet_pkt *pkt;
681 if (bridge == NULL) {
682 PrintDebug("VNET: No bridge is set\n");
686 flags = v3_lock_irqsave(bridge->send_buf.lock);
688 end = bridge->send_buf.end;
689 pkt = &(bridge->send_buf.pkts[end]);
691 if(bridge->send_buf.num > BRIDGE_BUF_SIZE){
692 PrintDebug("VNET: bridge rx: buffer full\n");
696 bridge->send_buf.num ++;
697 bridge->send_buf.end ++;
698 bridge->send_buf.end %= BRIDGE_BUF_SIZE;
701 pkt->src_id = src_link;
702 pkt->src_type = LINK_EDGE;
703 memcpy(pkt->header, buf, ETHERNET_HEADER_LEN);
704 memcpy(pkt->data, buf, size);
708 v3_unlock_irqrestore(bridge->send_buf.lock, flags);
714 int v3_vnet_add_dev(struct v3_vm_info *vm, uint8_t mac[6],
715 int (*netif_input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data),
717 struct vnet_dev * new_dev = NULL;
720 new_dev = (struct vnet_dev *)V3_Malloc(sizeof(struct vnet_dev));
722 if (new_dev == NULL) {
723 PrintError("VNET: Malloc fails\n");
727 memcpy(new_dev->mac_addr, mac, 6);
728 new_dev->input = netif_input;
729 new_dev->private_data = priv_data;
733 flags = v3_lock_irqsave(vnet_state.lock);
735 if (!find_dev_by_mac(mac)) {
736 list_add(&(new_dev->node), &(vnet_state.devs));
737 new_dev->dev_id = ++vnet_state.num_devs;
740 v3_unlock_irqrestore(vnet_state.lock, flags);
742 // if the device was found previosly the id should still be 0
743 if (new_dev->dev_id == 0) {
744 PrintError("Device Alrady exists\n");
748 PrintDebug("Vnet: Add Device: dev_id %d, input : %p, private_data %p\n",
749 new_dev->dev_id, new_dev->input, new_dev->private_data);
751 return new_dev->dev_id;
/*
 * Periodic callback (per guest core): when batching is active, flush the
 * bridge receive ring once max_latency cycles have elapsed since the last
 * flush.  No-op when there is no bridge or batching is disabled.
 * NOTE(review): last_time/cur_time are function-static and thus shared by
 * every core calling this -- confirm only the bridge core effectively
 * drives the flush, or make these per-core.
 */
755 void v3_vnet_heartbeat(struct guest_info *core){
756     static long last_time, cur_time;
758     if(vnet_state.bridge == NULL)
    /* Batching disabled: packets are sent immediately, nothing to flush. */
761     if(vnet_state.bridge->max_delayed_pkts <= 1)
764     if(V3_Get_CPU() != vnet_state.bridge->vm->cores[0].cpu_id){
768 	if ((cur_time - last_time) >= vnet_state.bridge->max_latency) {
769 	    last_time = cur_time;
770 	    flush_bridge_pkts(vnet_state.bridge);
/*
 * Register THE bridge device for this VNET instance (only one bridge may
 * exist).  'input' delivers batched packets to the bridge on its own core;
 * 'xcall_input' is the cross-CPU wrapper; max_delayed_pkts/max_latency
 * configure batching (clamped to BRIDGE_BUF_SIZE).
 */
774 int v3_vnet_add_bridge(struct v3_vm_info * vm,
775 		       int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data),
776 		       void (*xcall_input)(void *data),
777 		       uint16_t max_delayed_pkts,
782     struct vnet_brg_dev * tmp_bridge = NULL;
    /* Atomically claim the bridge slot with a placeholder so two
     * concurrent registrations cannot both proceed.
     * NOTE(review): if a later step fails, the slot appears to stay set to
     * the (void *)1 placeholder -- verify the error paths restore NULL. */
786     flags = v3_lock_irqsave(vnet_state.lock);
788     if (vnet_state.bridge == NULL) {
790 	vnet_state.bridge = (void *)1;
793     v3_unlock_irqrestore(vnet_state.lock, flags);
795     if (bridge_free == 0) {
796 	PrintError("Bridge already set\n");
800     tmp_bridge = (struct vnet_brg_dev *)V3_Malloc(sizeof(struct vnet_brg_dev));
802     if (tmp_bridge == NULL) {
803 	PrintError("Malloc Fails\n");
808     tmp_bridge->input = input;
809     tmp_bridge->xcall_input = xcall_input;
810     tmp_bridge->private_data = priv_data;
811     tmp_bridge->disabled = 0;
813     //initial receving buffer
814     tmp_bridge->recv_buf.start = 0;
815     tmp_bridge->recv_buf.end = 0;
816     tmp_bridge->recv_buf.num = 0;
    /* NOTE(review): a lock-init failure is only logged here -- confirm the
     * missing lines bail out rather than continue with an unusable lock. */
817     if(v3_lock_init(&(tmp_bridge->recv_buf.lock)) == -1){
818 	PrintError("VNET: add bridge, error to initiate recv buf lock\n");
    /* Clamp the batch threshold so it can never exceed the ring capacity. */
820     tmp_bridge->max_delayed_pkts = (max_delayed_pkts<BRIDGE_BUF_SIZE)?max_delayed_pkts : BRIDGE_BUF_SIZE;
821     tmp_bridge->max_latency = max_latency;
    /* Wire every recv ring slot's data pointer into the backing slab. */
822     for(i = 0; i<BRIDGE_BUF_SIZE; i++){
823 	tmp_bridge->recv_buf.pkts[i].data = &(tmp_bridge->recv_buf.datas[i*ETHERNET_PACKET_LEN]);
826     //initial sending buffer
827     tmp_bridge->send_buf.start = 0;
828     tmp_bridge->send_buf.end = 0;
829     tmp_bridge->send_buf.num = 0;
830     if(v3_lock_init(&(tmp_bridge->send_buf.lock)) == -1){
831 	PrintError("VNET: add bridge, error to initiate send buf lock\n");
    /* Wire every send ring slot's data pointer into its backing slab. */
833     for(i = 0; i<BRIDGE_BUF_SIZE; i++){
834 	tmp_bridge->send_buf.pkts[i].data = &(tmp_bridge->send_buf.datas[i*ETHERNET_PACKET_LEN]);
    /* Publish the fully initialized bridge, replacing the placeholder. */
837     // make this atomic to avoid possible race conditions
838     flags = v3_lock_irqsave(vnet_state.lock);
839     vnet_state.bridge = tmp_bridge;
840     v3_unlock_irqrestore(vnet_state.lock, flags);
846 int v3_vnet_disable_bridge() {
849 flags = v3_lock_irqsave(vnet_state.lock);
851 if (vnet_state.bridge != NULL) {
852 vnet_state.bridge->disabled = 1;
855 v3_unlock_irqrestore(vnet_state.lock, flags);
861 int v3_vnet_enable_bridge() {
864 flags = v3_lock_irqsave(vnet_state.lock);
866 if (vnet_state.bridge != NULL) {
867 vnet_state.bridge->disabled = 0;
870 v3_unlock_irqrestore(vnet_state.lock, flags);
/* Global VNET initialization (function header above this view -- presumably
 * v3_init_vnet): set up the route/device lists, the global lock, the
 * incoming-packet queue, and the route cache. */
879     INIT_LIST_HEAD(&(vnet_state.routes));
880     INIT_LIST_HEAD(&(vnet_state.devs));
882     vnet_state.num_devs = 0;
883     vnet_state.num_routes = 0;
885     PrintDebug("VNET: Links and Routes tables initiated\n");
887     if (v3_lock_init(&(vnet_state.lock)) == -1){
888         PrintError("VNET: Failure to init lock for routes table\n");
891     PrintDebug("VNET: Locks initiated\n");
893     vnet_state.inpkt_q = v3_create_queue();
894     v3_init_queue(vnet_state.inpkt_q);
895     PrintDebug("VNET: Receiving queue initiated\n");
    /* Route cache: keyed by the hashed packet-header bytes (hash_fn/hash_eq). */
897     vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);
899     if (vnet_state.route_cache == NULL) {
900         PrintError("Vnet: Route Cache Init Fails\n");
904     PrintDebug("VNET: initiated\n");