2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2009, Lei Xia <lxia@northwestern.edu>
11 * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>
12 * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org>
13 * All rights reserved.
15 * Author: Lei Xia <lxia@northwestern.edu>
16 * Yuan Tang <ytang@northwestern.edu>
18 * This is free software. You are permitted to use,
19 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
22 #include <palacios/vmm_vnet.h>
23 #include <palacios/vm_guest_mem.h>
24 #include <palacios/vmm_lock.h>
25 #include <palacios/vmm_queue.h>
26 #include <palacios/vmm_sprintf.h>
28 #ifndef CONFIG_DEBUG_VNET
30 #define PrintDebug(fmt, args...)
38 uint16_t type; // indicates layer 3 protocol type
39 } __attribute__((packed));
48 struct v3_vm_info * vm;
50 int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data);
54 struct list_head node;
55 } __attribute__((packed));
// Fixed-size ring buffer used to batch packets: both the global incoming
// queue (vnet_state.in_buf) and the bridge receive queue use this layout.
// NOTE(review): the lock/start/end/num members referenced elsewhere in this
// file sit on lines dropped from this excerpt -- confirm against upstream.
58 #define BRIDGE_BUF_SIZE 512
59 struct bridge_pkts_buf {
// Packet descriptors; each pkts[i].data is aimed at its private slot in
// datas[] during initialization (see v3_vnet_add_bridge / init code below).
63 struct v3_vnet_pkt pkts[BRIDGE_BUF_SIZE];
64 uint8_t datas[ETHERNET_PACKET_LEN*BRIDGE_BUF_SIZE];
68 struct v3_vm_info * vm;
70 int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data);
71 void (*xcall_input)(void *data);
72 int (*polling_pkt)(struct v3_vm_info * vm, void *private_data);
76 uint16_t max_delayed_pkts;
77 long max_latency; //in cycles
79 } __attribute__((packed));
// One routing-table entry: the caller-supplied route definition plus device
// pointers resolved from dst_id/src_id at add time (only meaningful when the
// corresponding endpoint type is LINK_INTERFACE).
85 struct vnet_route_info {
86 struct v3_vnet_route route_def;
// Cached endpoint devices; NULL for edge (bridge) endpoints.
88 struct vnet_dev * dst_dev;
89 struct vnet_dev * src_dev;
// node: linkage in vnet_state.routes; match_node: scratch linkage used by
// match_route() while collecting equally-ranked candidates.
91 struct list_head node;
92 struct list_head match_node; // used for route matching
97 uint8_t hash_buf[VNET_HASH_SIZE];
100 struct vnet_route_info * routes[0];
101 } __attribute__((packed));
106 struct list_head routes;
107 struct list_head devs;
112 struct vnet_brg_dev *bridge;
116 struct hashtable * route_cache;
118 struct bridge_pkts_buf in_buf; //incoming packets buffer
124 #ifdef CONFIG_DEBUG_VNET
125 static inline void mac_to_string(char mac[6], char * buf) {
126 snprintf(buf, 100, "%d:%d:%d:%d:%d:%d",
127 mac[0], mac[1], mac[2],
128 mac[3], mac[4], mac[5]);
// Debug-only: dump one route -- source/destination MACs with their
// qualifiers, the link ids/types, and (for interface routes) the cached
// destination device.  All output goes through PrintDebug, a no-op unless
// CONFIG_DEBUG_VNET is set.
// NOTE(review): the local string buffer declaration ("str") and the closing
// braces are on lines missing from this excerpt -- confirm upstream.
131 static void print_route(struct vnet_route_info *route){
134 mac_to_string(route->route_def.src_mac, str);
135 PrintDebug("Src Mac (%s),  src_qual (%d)\n",
136 str, route->route_def.src_mac_qual);
137 mac_to_string(route->route_def.dst_mac, str);
138 PrintDebug("Dst Mac (%s),  dst_qual (%d)\n",
139 str, route->route_def.dst_mac_qual);
140 PrintDebug("Src dev id (%d), src type (%d)",
141 route->route_def.src_id,
142 route->route_def.src_type);
143 PrintDebug("Dst dev id (%d), dst type (%d)\n",
144 route->route_def.dst_id,
145 route->route_def.dst_type);
// Only interface routes carry a resolved dst_dev worth printing.
// NOTE(review): dst_dev may be NULL if the device was not registered when
// the route was added -- this would fault in debug builds; verify.
146 if (route->route_def.dst_type == LINK_INTERFACE) {
147 PrintDebug("dst_dev (%p), dst_dev_id (%d), dst_dev_input (%p), dst_dev_data (%p)\n",
149 route->dst_dev->dev_id,
150 route->dst_dev->input,
151 route->dst_dev->private_data);
// Debug-only: walk vnet_state.routes and print every entry via
// print_route().  Caller is expected to hold (or not race with) the routes
// lock; the existing call site invokes this right after list insertion.
// NOTE(review): the counter "i" and the print_route(route) call are on
// lines missing from this excerpt -- confirm upstream.
155 static void dump_routes(){
156 struct vnet_route_info *route;
159 PrintDebug("\n========Dump routes starts ============\n");
160 list_for_each_entry(route, &(vnet_state.routes), node) {
161 PrintDebug("\nroute %d:\n", ++i);
165 PrintDebug("\n========Dump routes end ============\n");
172 * A VNET packet is a packed struct with the hashed fields grouped together.
173 * This means we can generate the hash from an offset into the pkt struct
175 static inline uint_t hash_fn(addr_t hdr_ptr) {
176 uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
178 return v3_hash_buffer(hdr_buf, VNET_HASH_SIZE);
181 static inline int hash_eq(addr_t key1, addr_t key2) {
182 return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
// Insert a pre-computed route list into the route cache, keyed by the
// packet's hash bytes.  The key storage lives inside the cached value
// (routes->hash_buf), so the entry owns its own key memory.
// Returns 0 on success, -1 on insert failure (return lines missing from
// this excerpt).  Caller is expected to hold vnet_state.lock.
185 static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
186 memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);
// v3_htable_insert returns 0 on failure.
188 if (v3_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
189 PrintError("Vnet: Failed to insert new route entry to the cache\n");
// Drop the whole route cache and start fresh: free the table (both keys
// and values -- the 1,1 flags) and recreate it empty.  Called whenever the
// routing table changes so stale matches cannot be served.
196 static int clear_hash_cache() {
198 v3_free_htable(vnet_state.route_cache, 1, 1);
199 vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);
// Route-cache lookup: keyed by the packet's hash bytes; *routes is set to
// the cached list, or NULL when there is no entry (v3_htable_search
// returns 0 on miss).
204 static int look_into_cache(const struct v3_vnet_pkt * pkt, struct route_list ** routes) {
206 *routes = (struct route_list *)v3_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
// Linear scan of the registered-device list for a matching dev_id.
// Returns the device, or NULL when not found (the comparison/return lines
// are missing from this excerpt -- confirm upstream).  Callers hold
// vnet_state.lock or run during setup.
212 static struct vnet_dev * find_dev_by_id(int idx) {
213 struct vnet_dev * dev = NULL;
215 list_for_each_entry(dev, &(vnet_state.devs), node) {
216 int dev_id = dev->dev_id;
// Linear scan of the registered-device list for a matching 6-byte MAC.
// Returns the device, or NULL when not found (the return lines are missing
// from this excerpt).
225 static struct vnet_dev * find_dev_by_mac(char mac[6]) {
226 struct vnet_dev * dev = NULL;
228 list_for_each_entry(dev, &(vnet_state.devs), node) {
229 if (!memcmp(dev->mac_addr, mac, 6))
// Public helper: map a MAC address to its vnet device id.
// NOTE(review): the NULL check on dev and the return statements are on
// lines missing from this excerpt; presumably returns -1 when the MAC is
// unknown -- confirm upstream.
236 int get_device_id_by_mac(char mac[6]){
238 struct vnet_dev *dev = find_dev_by_mac(mac);
// Install a new route: copy the by-value definition into a heap entry,
// resolve interface endpoints to device pointers, and link it into the
// global route list under the vnet lock.
// NOTE(review): several lines are missing from this excerpt (the "flags"
// declaration, route counter update, cache flush, and return) -- confirm
// upstream before editing.
247 int v3_vnet_add_route(struct v3_vnet_route route) {
248 struct vnet_route_info * new_route = NULL;
// NOTE(review): V3_Malloc result is not checked before the memset below.
251 new_route = (struct vnet_route_info *)V3_Malloc(sizeof(struct vnet_route_info));
252 memset(new_route, 0, sizeof(struct vnet_route_info));
254 PrintDebug("Vnet: vnet_add_route_entry: dst_id: %d, dst_type: %d\n",
255 route.dst_id, route.dst_type);
// Deep-copy the definition field by field (route is passed by value).
257 memcpy(new_route->route_def.src_mac, route.src_mac, 6);
258 memcpy(new_route->route_def.dst_mac, route.dst_mac, 6);
259 new_route->route_def.src_mac_qual = route.src_mac_qual;
260 new_route->route_def.dst_mac_qual = route.dst_mac_qual;
261 new_route->route_def.dst_id = route.dst_id;
262 new_route->route_def.dst_type = route.dst_type;
263 new_route->route_def.src_id = route.src_id;
264 new_route->route_def.src_type = route.src_type;
// Resolve LINK_INTERFACE endpoints to cached device pointers now.
// NOTE(review): find_dev_by_id may return NULL; the PrintDebug below
// dereferences dst_dev unconditionally -- faults in debug builds; verify.
266 if (new_route->route_def.dst_type == LINK_INTERFACE) {
267 new_route->dst_dev = find_dev_by_id(new_route->route_def.dst_id);
268 PrintDebug("Vnet: Add route, get device: dev_id %d, input : %p, private_data %p\n",
269 new_route->dst_dev->dev_id, new_route->dst_dev->input, new_route->dst_dev->private_data);
272 if (new_route->route_def.src_type == LINK_INTERFACE) {
273 new_route->src_dev = find_dev_by_id(new_route->route_def.src_id);
// Publish the route under the global lock.
276 flags = v3_lock_irqsave(vnet_state.lock);
278 list_add(&(new_route->node), &(vnet_state.routes));
281 v3_unlock_irqrestore(vnet_state.lock, flags);
// Debug builds dump the table after insertion (body on missing lines).
284 #ifdef CONFIG_DEBUG_VNET
293 // At the end allocate a route_list
294 // This list will be inserted into the cache so we don't need to free it
// Find every route that applies to this packet.  Routes are ranked by MAC
// match specificity; only the highest-ranked matches survive.  The result
// is a freshly malloc'ed route_list that the caller hands to the cache
// (which then owns it).  Returns NULL when nothing matches.
// NOTE(review): the rank constants, "max_rank"/"num_matches" locals and
// several macro/brace lines are missing from this excerpt -- confirm
// upstream before editing.
295 static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
296 struct vnet_route_info * route = NULL;
297 struct route_list * matches = NULL;
300 struct list_head match_list;
// The Ethernet header lives at the start of the packet payload.
301 struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
302 uint8_t src_type = pkt->src_type;
303 uint32_t src_link = pkt->src_id;
305 #ifdef CONFIG_DEBUG_VNET
310 mac_to_string(hdr->src_mac, src_str);
311 mac_to_string(hdr->dst_mac, dst_str);
312 PrintDebug("Vnet: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
316 INIT_LIST_HEAD(&match_list);
// UPDATE_MATCHES: keep only the best-ranked candidates.  A strictly higher
// rank resets the list; an equal rank appends.  (Counter updates are on
// missing lines.)
318 #define UPDATE_MATCHES(rank) do { \
319 if (max_rank < (rank)) { \
321 INIT_LIST_HEAD(&match_list); \
323 list_add(&(route->match_node), &match_list); \
325 } else if (max_rank == (rank)) { \
326 list_add(&(route->match_node), &match_list); \
// Scan the full routing table; first filter by source link, then rank by
// MAC qualifier combinations.
332 list_for_each_entry(route, &(vnet_state.routes), node) {
333 struct v3_vnet_route * route_def = &(route->route_def);
335 // CHECK SOURCE TYPE HERE
// Skip routes bound to a different source type/link; (uint32_t)-1 acts as
// a wildcard link id.
336 if ( (route_def->src_type != LINK_ANY) &&
337 ( (route_def->src_type != src_type) ||
338 ( (route_def->src_id != src_link) &&
339 (route_def->src_id != (uint32_t)-1)))) {
// Case 1: fully wildcarded route -- lowest-specificity match.
344 if ((route_def->dst_mac_qual == MAC_ANY) &&
345 (route_def->src_mac_qual == MAC_ANY)) {
// Case 2: source MAC matches exactly.
349 if (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) {
350 if (route_def->src_mac_qual != MAC_NOT) {
351 if (route_def->dst_mac_qual == MAC_ANY) {
353 } else if (route_def->dst_mac_qual != MAC_NOT &&
354 memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
// Case 3: destination MAC matches exactly.
360 if (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
361 if (route_def->dst_mac_qual != MAC_NOT) {
362 if (route_def->src_mac_qual == MAC_ANY) {
364 } else if ((route_def->src_mac_qual != MAC_NOT) &&
365 (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
// Case 4: negated destination qualifier (route matches all OTHER dsts).
371 if ((route_def->dst_mac_qual == MAC_NOT) &&
372 (memcmp(route_def->dst_mac, hdr->dst_mac, 6) != 0)) {
373 if (route_def->src_mac_qual == MAC_ANY) {
375 } else if ((route_def->src_mac_qual != MAC_NOT) &&
376 (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
// Case 5: negated source qualifier.
381 if ((route_def->src_mac_qual == MAC_NOT) &&
382 (memcmp(route_def->src_mac, hdr->src_mac, 6) != 0)) {
383 if (route_def->dst_mac_qual == MAC_ANY) {
385 } else if ((route_def->dst_mac_qual != MAC_NOT) &&
386 (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0)) {
// Case 6: source match with MAC_NONE destination (drop/monitor route).
392 if ( (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) &&
393 (route_def->dst_mac_qual == MAC_NONE)) {
398 PrintDebug("Vnet: match_route: Matches=%d\n", num_matches);
400 if (num_matches == 0) {
// Allocate the result with a trailing pointer array sized to the match
// count (routes[0] flexible-array idiom in struct route_list).
// NOTE(review): V3_Malloc result is not checked.
404 matches = (struct route_list *)V3_Malloc(sizeof(struct route_list) +
405 (sizeof(struct vnet_route_info *) * num_matches));
407 matches->num_routes = num_matches;
// Copy the surviving candidates out of the temporary match list.
411 list_for_each_entry(route, &match_list, match_node) {
412 matches->routes[i++] = route;
// Drain the bridge's receive ring and hand the batched packets to the
// bridge input callback.  If we are already on the bridge VM's core the
// callback is invoked directly; otherwise the work is shipped over via
// V3_Call_On_CPU.  A ring wrap-around is delivered as two calls.
// NOTE(review): several lines (flags declaration, early returns, closing
// braces) are missing from this excerpt -- confirm upstream.
420 static int flush_bridge_pkts(struct vnet_brg_dev *bridge){
422 int num, start, send;
423 struct v3_vnet_bridge_input_args args;
// BUG(review): bridge->vm is dereferenced here, BEFORE the NULL check
// below -- a NULL bridge faults before the guard can fire.  The check
// should precede this line.
424 int cpu_id = bridge->vm->cores[0].cpu_id;
425 int current_core = V3_Get_CPU();
427 if (bridge == NULL) {
428 PrintDebug("VNET: No bridge to sent data to links\n");
// Snapshot and consume the ring indices under the buffer lock.
432 flags = v3_lock_irqsave(bridge->recv_buf.lock);
434 num = bridge->recv_buf.num;
435 start = bridge->recv_buf.start;
437 bridge->recv_buf.num -= num;
438 bridge->recv_buf.start += num;
439 bridge->recv_buf.start %= BRIDGE_BUF_SIZE;
441 v3_unlock_irqrestore(bridge->recv_buf.lock, flags);
// NOTE(review): the packet slots are read below AFTER the lock is
// released, so producers may reuse them concurrently -- verify intended.
444 if(bridge->disabled){
445 PrintDebug("VNET: In flush bridge pkts: Bridge is disabled\n");
449 if(num <= 2 && num > 0){
450 PrintDebug("VNET: In flush bridge pkts: %d\n", num);
454 PrintDebug("VNET: In flush bridge pkts to bridge, cur_cpu %d, brige_core: %d\n", current_core, cpu_id);
// Same-core: call the input handler directly, splitting on ring wrap.
455 if (current_core == cpu_id){
456 if ((start + num) < BRIDGE_BUF_SIZE){
457 bridge->input(bridge->vm, &(bridge->recv_buf.pkts[start]), num, bridge->private_data);
459 bridge->input(bridge->vm, &(bridge->recv_buf.pkts[start]), (BRIDGE_BUF_SIZE - start), bridge->private_data);
460 send = num - (BRIDGE_BUF_SIZE - start);
461 bridge->input(bridge->vm, &(bridge->recv_buf.pkts[0]), send, bridge->private_data);
// Cross-core: marshal the batch into args and run the xcall handler on
// the bridge VM's core, again splitting on ring wrap.
464 args.vm = bridge->vm;
465 args.private_data = bridge->private_data;
467 if ((start + num) < BRIDGE_BUF_SIZE){
469 args.vnet_pkts = &(bridge->recv_buf.pkts[start]);
470 V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
472 args.pkt_num = BRIDGE_BUF_SIZE - start;
473 args.vnet_pkts = &(bridge->recv_buf.pkts[start]);
474 V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
476 send = num - (BRIDGE_BUF_SIZE - start);
478 args.vnet_pkts = &(bridge->recv_buf.pkts[0]);
479 V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
483 PrintDebug("VNET: flush bridge pkts %d\n", num);
// Deliver one packet to the registered bridge.  Two modes:
//  - max_delayed_pkts <= 1: deliver immediately (direct call if already on
//    the bridge core, otherwise a cross-CPU call);
//  - batching mode: copy the packet into the bridge receive ring and flush
//    once the batch threshold is reached.
// NOTE(review): branch/return/closing-brace lines are missing from this
// excerpt -- confirm upstream before editing.
490 static int send_to_bridge(struct v3_vnet_pkt * pkt){
491 struct vnet_brg_dev *bridge = vnet_state.bridge;
493 if (bridge == NULL) {
494 PrintDebug("VNET: No bridge to sent data to links\n");
// --- Immediate-delivery mode ---
498 if(bridge->max_delayed_pkts <= 1){
499 if(bridge->disabled){
500 PrintDebug("VNET: Bridge diabled\n");
505 //avoid the cross-core call here
506 int cpu_id = bridge->vm->cores[0].cpu_id;
507 struct v3_vnet_bridge_input_args args;
510 args.vm = bridge->vm;
511 args.vnet_pkts = pkt;
512 args.private_data = bridge->private_data;
514 V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
516 bridge->input(bridge->vm, pkt, 1, bridge->private_data);
518 PrintDebug("VNET: sent one packet to the bridge\n");
// --- Batching mode: enqueue into the bridge receive ring ---
525 struct v3_vnet_pkt *buf;
527 PrintDebug("VNET: send_to_bridge\n");
529 flags = v3_lock_irqsave(bridge->recv_buf.lock);
// Refuse when the bridge is down and its ring is already full.
531 if(bridge->disabled && bridge->recv_buf.num >= BRIDGE_BUF_SIZE){
532 PrintDebug("Bridge diabled and bridge receive buffer full\n");
533 v3_unlock_irqrestore(bridge->recv_buf.lock, flags);//maybe should move this after copy
534 num = bridge->recv_buf.num;
// Claim the next ring slot and advance the end index (mod ring size).
538 end = bridge->recv_buf.end;
539 buf = &(bridge->recv_buf.pkts[end]);
541 bridge->recv_buf.num ++;
542 bridge->recv_buf.end ++;
543 bridge->recv_buf.end %= BRIDGE_BUF_SIZE;
545 num = bridge->recv_buf.num;
// NOTE(review): the slot is filled below AFTER the lock is dropped (the
// original author flagged this too); a racing flush could ship a
// half-copied packet -- verify intended.
547 v3_unlock_irqrestore(bridge->recv_buf.lock, flags);//maybe should move this after copy
// Copy the descriptor fields and payload into the claimed slot (buf->data
// already points at the slot's backing storage).
550 buf->size = pkt->size;
551 buf->dst_id = pkt->dst_id;
552 buf->src_id = pkt->src_id;
553 buf->src_type = pkt->src_type;
554 buf->dst_type = pkt->dst_type;
555 memcpy(buf->header, pkt->header, ETHERNET_HEADER_LEN);
556 memcpy(buf->data, pkt->data, pkt->size);
// Flush once the batch threshold is reached.
560 if (num >= bridge->max_delayed_pkts){
561 flush_bridge_pkts(bridge);
// Core transmit path: resolve the packet's route set (route cache first,
// full table scan on a miss) and forward the packet to every matched
// destination -- the bridge for LINK_EDGE routes, the device input
// callback for LINK_INTERFACE routes.
// NOTE(review): the "flags"/"i" declarations, a return on the discard
// path, and closing braces are on lines missing from this excerpt.
567 int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
568 struct route_list * matched_routes = NULL;
572 #ifdef CONFIG_DEBUG_VNET
574 struct eth_hdr * hdr = (struct eth_hdr *)(pkt->header);
578 mac_to_string(hdr->src_mac, src_str);
579 mac_to_string(hdr->dst_mac, dest_str);
580 int cpu = V3_Get_CPU();
581 PrintDebug("Vnet: on cpu %d, HandleDataOverLink. SRC(%s), DEST(%s), pkt size: %d\n", cpu, src_str, dest_str, pkt->size);
// Route resolution runs under the global vnet lock.
585 flags = v3_lock_irqsave(vnet_state.lock);
587 look_into_cache(pkt, &matched_routes);
589 if (matched_routes == NULL) {
590 PrintDebug("Vnet: send pkt Looking into routing table\n");
// Cache miss: do the full table scan and cache the result (the cache then
// owns the allocated list).
592 matched_routes = match_route(pkt);
594 if (matched_routes) {
595 add_route_to_cache(pkt, matched_routes);
// No route at all: drop the packet.
597 PrintDebug("Could not find route for packet... discards packet\n");
598 v3_unlock_irqrestore(vnet_state.lock, flags);
603 v3_unlock_irqrestore(vnet_state.lock, flags);
605 PrintDebug("Vnet: send pkt route matches %d\n", matched_routes->num_routes);
// Fan the packet out to every matched route.
607 for (i = 0; i < matched_routes->num_routes; i++) {
608 struct vnet_route_info * route = matched_routes->routes[i];
// Edge routes go to the bridge; pkt's dst fields are stamped first.
610 if (route->route_def.dst_type == LINK_EDGE) {
611 pkt->dst_type = LINK_EDGE;
612 pkt->dst_id = route->route_def.dst_id;
614 if (send_to_bridge(pkt) == -1) {
615 PrintDebug("VNET: Packet not sent properly to bridge\n");
// Interface routes go straight to the destination device's callback.
619 } else if (route->route_def.dst_type == LINK_INTERFACE) {
620 if (route->dst_dev->input(route->dst_dev->vm, pkt, route->dst_dev->private_data) == -1) {
621 PrintDebug("VNET: Packet not sent properly\n");
625 PrintDebug("Vnet: Wrong Edge type\n");
629 PrintDebug("Vnet: v3_vnet_send_pkt: Forward packet according to Route %d\n", i);
635 void v3_vnet_send_pkt_xcall(void * data){
636 struct v3_vnet_pkt * pkt = (struct v3_vnet_pkt *)data;
637 v3_vnet_send_pkt(pkt, NULL);
// Drain the global incoming-packet ring (vnet_state.in_buf): pop each
// queued packet and push it through v3_vnet_send_pkt.
// NOTE(review): the flags/num/start declarations, the loop construct
// around the send, and closing braces are on lines missing from this
// excerpt.  The send appears to run with in_buf.lock held -- verify the
// lock ordering against send_to_bridge's locks.
641 void v3_vnet_polling()
645 struct v3_vnet_pkt *buf;
647 PrintDebug("In vnet pollling: cpu %d\n", V3_Get_CPU());
649 flags = v3_lock_irqsave(vnet_state.in_buf.lock);
651 num = vnet_state.in_buf.num;
652 start = vnet_state.in_buf.start;
654 PrintDebug("VNET: polling pkts %d\n", num);
// Pop from the head of the ring and forward.
657 buf = &(vnet_state.in_buf.pkts[vnet_state.in_buf.start]);
659 v3_vnet_send_pkt(buf, NULL);
661 vnet_state.in_buf.num --;
662 vnet_state.in_buf.start ++;
663 vnet_state.in_buf.start %= BRIDGE_BUF_SIZE;
667 v3_unlock_irqrestore(vnet_state.in_buf.lock, flags);
// Receive entry point: copy an incoming frame into the next slot of the
// global incoming ring for later processing by v3_vnet_polling().
// NOTE(review): the flags/end declarations, pkt->size assignment, and
// return lines are missing from this excerpt -- confirm upstream.
673 int v3_vnet_rx(uchar_t *buf, uint16_t size, uint16_t src_id, uint8_t src_type){
676 struct v3_vnet_pkt *pkt;
678 flags = v3_lock_irqsave(vnet_state.in_buf.lock);
680 end = vnet_state.in_buf.end;
681 pkt = &(vnet_state.in_buf.pkts[end]);
// BUG(review): full check uses ">" -- when num == BRIDGE_BUF_SIZE the ring
// is already full, so this should be ">=" to avoid overwriting an
// unconsumed slot.  Verify against upstream.
683 if(vnet_state.in_buf.num > BRIDGE_BUF_SIZE){
684 PrintDebug("VNET: bridge rx: buffer full\n");
688 vnet_state.in_buf.num ++;
689 vnet_state.in_buf.end ++;
690 vnet_state.in_buf.end %= BRIDGE_BUF_SIZE;
693 pkt->src_id = src_id;
694 pkt->src_type = src_type;
// The full frame (header included) is copied into pkt->data; the first
// ETHERNET_HEADER_LEN bytes are duplicated into pkt->header.
695 memcpy(pkt->header, buf, ETHERNET_HEADER_LEN);
696 memcpy(pkt->data, buf, size);
700 v3_unlock_irqrestore(vnet_state.in_buf.lock, flags);
// Register a network endpoint with the VNET core.  Allocates a device
// record, links it into vnet_state.devs (if the MAC is not already
// registered) and returns the assigned positive dev_id, or -1 on error.
// dev_id 0 is reserved as the "not registered" sentinel.
// NOTE(review): the return -1 after the malloc-failure message, the
// vm-field assignment, and the duplicate-device return are on lines
// missing from this excerpt.
706 int v3_vnet_add_dev(struct v3_vm_info *vm, uint8_t mac[6],
707 int (*netif_input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data),
709 struct vnet_dev * new_dev = NULL;
712 new_dev = (struct vnet_dev *)V3_Malloc(sizeof(struct vnet_dev));
714 if (new_dev == NULL) {
715 PrintError("VNET: Malloc fails\n");
719 memcpy(new_dev->mac_addr, mac, 6);
720 new_dev->input = netif_input;
721 new_dev->private_data = priv_data;
// Only register if no device with this MAC exists; ids start at 1.
725 flags = v3_lock_irqsave(vnet_state.lock);
727 if (!find_dev_by_mac(mac)) {
728 list_add(&(new_dev->node), &(vnet_state.devs));
729 new_dev->dev_id = ++vnet_state.num_devs;
732 v3_unlock_irqrestore(vnet_state.lock, flags);
734 // if the device was found previosly the id should still be 0
// BUG(review): on the duplicate-MAC path new_dev is never freed -- this
// leaks the allocation.  Verify and add V3_Free before returning.
735 if (new_dev->dev_id == 0) {
736 PrintError("Device Alrady exists\n");
740 PrintDebug("Vnet: Add Device: dev_id %d, input : %p, private_data %p\n",
741 new_dev->dev_id, new_dev->input, new_dev->private_data);
743 return new_dev->dev_id;
// Periodic hook called from the guest's core loop.  In batching mode
// (max_delayed_pkts > 1) it flushes the bridge ring once max_latency
// cycles have elapsed; it always lets the bridge poll for pending packets.
// NOTE(review): cur_time/last_time appear to be declared (and cur_time
// sampled, presumably via rdtsc) on lines missing from this excerpt --
// confirm upstream.  Only the bridge VM's core 0 performs timed flushes.
747 void v3_vnet_heartbeat(struct guest_info *core){
748 //static long last_time, cur_time;
750 if(vnet_state.bridge == NULL)
753 if(vnet_state.bridge->max_delayed_pkts > 1){
754 if(V3_Get_CPU() != vnet_state.bridge->vm->cores[0].cpu_id){
758 if ((cur_time - last_time) >= vnet_state.bridge->max_latency) {
759 last_time = cur_time;
760 flush_bridge_pkts(vnet_state.bridge);
764 vnet_state.bridge->polling_pkt(vnet_state.bridge->vm, vnet_state.bridge->private_data);
// Register the (single) bridge device.  The bridge slot is reserved with a
// (void *)1 placeholder under the lock so concurrent registrations fail
// fast, then the real structure is allocated, initialized (callbacks,
// receive ring, batching parameters) and atomically published.
// Returns 0 on success, -1 when a bridge already exists or on failure.
// NOTE(review): the bridge_free flag handling, the vm-field assignment,
// error returns, and closing braces sit on lines missing from this
// excerpt -- confirm upstream.
767 int v3_vnet_add_bridge(struct v3_vm_info * vm,
768 int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data),
769 void (*xcall_input)(void *data),
770 int (*poll_pkt)(struct v3_vm_info * vm, void * private_data),
771 uint16_t max_delayed_pkts,
776 struct vnet_brg_dev * tmp_bridge = NULL;
// Reserve the slot: (void *)1 marks "registration in progress".
778 flags = v3_lock_irqsave(vnet_state.lock);
780 if (vnet_state.bridge == NULL) {
782 vnet_state.bridge = (void *)1;
785 v3_unlock_irqrestore(vnet_state.lock, flags);
787 if (bridge_free == 0) {
788 PrintError("Bridge already set\n");
792 tmp_bridge = (struct vnet_brg_dev *)V3_Malloc(sizeof(struct vnet_brg_dev));
// On allocation failure the placeholder is rolled back.
794 if (tmp_bridge == NULL) {
795 PrintError("Malloc Fails\n");
796 vnet_state.bridge = NULL;
801 tmp_bridge->input = input;
802 tmp_bridge->xcall_input = xcall_input;
803 tmp_bridge->polling_pkt = poll_pkt;
804 tmp_bridge->private_data = priv_data;
805 tmp_bridge->disabled = 0;
808 //initial receving buffer
809 tmp_bridge->recv_buf.start = 0;
810 tmp_bridge->recv_buf.end = 0;
811 tmp_bridge->recv_buf.num = 0;
// NOTE(review): on lock-init failure no free/rollback is visible here --
// tmp_bridge would leak and the placeholder stay set; verify upstream.
812 if(v3_lock_init(&(tmp_bridge->recv_buf.lock)) == -1){
813 PrintError("VNET: add bridge, error to initiate recv buf lock\n");
// Point each ring descriptor at its private payload slot in datas[].
816 for(i = 0; i<BRIDGE_BUF_SIZE; i++){
817 tmp_bridge->recv_buf.pkts[i].data = &(tmp_bridge->recv_buf.datas[i*ETHERNET_PACKET_LEN]);
// Clamp the batch threshold to the ring capacity.
822 tmp_bridge->max_delayed_pkts = (max_delayed_pkts<BRIDGE_BUF_SIZE)?max_delayed_pkts : BRIDGE_BUF_SIZE;
823 tmp_bridge->max_latency = max_latency;
825 // make this atomic to avoid possible race conditions
826 flags = v3_lock_irqsave(vnet_state.lock);
827 vnet_state.bridge = tmp_bridge;
828 v3_unlock_irqrestore(vnet_state.lock, flags);
// Mark the bridge disabled (packet paths then drop/queue instead of
// delivering).  No-op when no bridge is registered.
// NOTE(review): the flags declaration and return are on missing lines.
834 int v3_vnet_disable_bridge() {
837 flags = v3_lock_irqsave(vnet_state.lock);
839 if (vnet_state.bridge != NULL) {
840 vnet_state.bridge->disabled = 1;
843 v3_unlock_irqrestore(vnet_state.lock, flags);
// Re-enable a previously disabled bridge.  No-op when no bridge is
// registered.  Mirrors v3_vnet_disable_bridge().
// NOTE(review): the flags declaration and return are on missing lines.
849 int v3_vnet_enable_bridge() {
852 flags = v3_lock_irqsave(vnet_state.lock);
854 if (vnet_state.bridge != NULL) {
855 vnet_state.bridge->disabled = 0;
858 v3_unlock_irqrestore(vnet_state.lock, flags);
868 memset(&vnet_state, 0, sizeof(vnet_state));
870 INIT_LIST_HEAD(&(vnet_state.routes));
871 INIT_LIST_HEAD(&(vnet_state.devs));
873 vnet_state.num_devs = 0;
874 vnet_state.num_routes = 0;
876 PrintDebug("VNET: Links and Routes tables initiated\n");
878 if (v3_lock_init(&(vnet_state.lock)) == -1){
879 PrintError("VNET: Failure to init lock for routes table\n");
882 PrintDebug("VNET: Locks initiated\n");
884 //initial incoming pkt buffer
885 vnet_state.in_buf.start = 0;
886 vnet_state.in_buf.end = 0;
887 vnet_state.in_buf.num = 0;
888 if(v3_lock_init(&(vnet_state.in_buf.lock)) == -1){
889 PrintError("VNET: add bridge, error to initiate send buf lock\n");
891 for(i = 0; i<BRIDGE_BUF_SIZE; i++){
892 vnet_state.in_buf.pkts[i].data = &(vnet_state.in_buf.datas[i*ETHERNET_PACKET_LEN]);
894 PrintDebug("VNET: Receiving buffer initiated\n");
896 vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);
898 if (vnet_state.route_cache == NULL) {
899 PrintError("Vnet: Route Cache Init Fails\n");
903 PrintDebug("VNET: initiated\n");