Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


e5c17bc5f6e4b6c5104487d4aed5e49478f3d6a4
[palacios.git] / palacios / src / palacios / vmm_vnet.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2009, Lei Xia <lxia@northwestern.edu> 
11  * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>  
12  * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Lei Xia <lxia@northwestern.edu>
16  *         Yuan Tang <ytang@northwestern.edu>
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21  
22 #include <palacios/vmm_vnet.h>
23 #include <palacios/vm_guest_mem.h>
24 #include <palacios/vmm_lock.h>
25 #include <palacios/vmm_queue.h>
26 #include <palacios/vmm_sprintf.h>
27
28 #ifndef CONFIG_DEBUG_VNET
29 #undef PrintDebug
30 #define PrintDebug(fmt, args...)
31 #endif
32
33
34
/* Ethernet frame header (wire format, hence packed). */
struct eth_hdr {
    uint8_t dst_mac[6];   // destination MAC address
    uint8_t src_mac[6];   // source MAC address
    uint16_t type; // indicates layer 3 protocol type (EtherType; presumably network byte order -- TODO confirm)
} __attribute__((packed));
40
41
42
43
44
/* A VNET endpoint: a virtual NIC registered via v3_vnet_add_dev().
 * NOTE(review): packed on a struct of pointers/ints is unusual (this is
 * not a wire format) -- confirm it is intentional before removing. */
struct vnet_dev {

    uint8_t mac_addr[6];       // device MAC, used for duplicate detection and routing
    struct v3_vm_info * vm;    // owning VM
    
    // Callback that delivers a routed packet into this device.
    int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data);
    void * private_data;       // opaque argument passed back to input()
    
    int dev_id;                // unique id (>= 1), assigned in v3_vnet_add_dev
    struct list_head node;     // linkage in vnet_state.devs
} __attribute__((packed));
56
57
#define BRIDGE_BUF_SIZE 1024

/* Fixed-size ring buffer of packets exchanged with the bridge device.
 * pkts[i].data points into the matching slot of 'datas' (wired up in
 * v3_vnet_add_bridge). All fields are protected by 'lock'. */
struct bridge_pkts_buf {
    int start, end;   // ring indices: start = oldest entry, end = next free slot
    int num;          // number of queued packets
    v3_lock_t lock;   // protects all ring state
    struct v3_vnet_pkt pkts[BRIDGE_BUF_SIZE];
    uint8_t datas[ETHERNET_PACKET_LEN*BRIDGE_BUF_SIZE];  // payload backing store
};
66
/* The (single) bridge device connecting VNET to the outside world. */
struct vnet_brg_dev {
    struct v3_vm_info * vm;   // VM hosting the bridge; its core 0 runs bridge input
    
    // Delivers a batch of packets to the bridge (invoked on the bridge core).
    int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data);
    void (*xcall_input)(void *data);  // cross-CPU wrapper around input()

    struct bridge_pkts_buf recv_buf;  //packets from Vnet to vnet_bridge device

    struct bridge_pkts_buf send_buf;  //packets from vnet_bridge device to Vnet

    int disabled;             // nonzero => packets are dropped instead of delivered
        
    uint16_t max_delayed_pkts;  // batch size; <= 1 means no batching
    long max_latency; //in cycles -- max time a batched packet may wait (see v3_vnet_heartbeat)
    void * private_data;      // opaque argument passed back to input()
} __attribute__((packed));
83
84
85
86
87
/* A route table entry: the user-supplied route definition plus cached
 * pointers to the resolved source/destination devices. */
struct vnet_route_info {
    struct v3_vnet_route route_def;

    struct vnet_dev * dst_dev;   // resolved when dst_type == LINK_INTERFACE
    struct vnet_dev * src_dev;   // resolved when src_type == LINK_INTERFACE

    struct list_head node;       // linkage in vnet_state.routes
    struct list_head match_node; // used for route matching
};
97
98
99 struct route_list {
100     uint8_t hash_buf[VNET_HASH_SIZE];
101
102     uint32_t num_routes;
103     struct vnet_route_info * routes[0];
104 } __attribute__((packed));
105
106
107
/* Global VNET state: the route table, the registered devices, the
 * (single) bridge, and the route-lookup cache. Guarded by 'lock'. */
static struct {
    struct list_head routes;   // list of struct vnet_route_info
    struct list_head devs;     // list of struct vnet_dev
    
    int num_routes;
    int num_devs;              // also used to hand out device ids (see v3_vnet_add_dev)

    struct vnet_brg_dev *bridge;  // NULL until v3_vnet_add_bridge(); (void *)1 while reserving

    v3_lock_t lock;            // protects routes/devs/bridge/route_cache

    struct gen_queue * inpkt_q;      // input packet queue, created in V3_init_vnet
    struct hashtable * route_cache;  // packet hash -> struct route_list

} vnet_state;
123
124
125
126
127 #ifdef CONFIG_DEBUG_VNET
/* Format a 6-byte MAC address into buf as "xx:xx:xx:xx:xx:xx".
 * Fixes: the original printed bytes with %d through (possibly signed)
 * char, so values >= 0x80 came out negative; it also told snprintf the
 * buffer was 100 bytes while one caller passes a 50-byte buffer.
 * Output is at most 18 bytes including the terminator; callers must
 * supply at least that much. */
static inline void mac_to_string(char mac[6], char * buf) {
    snprintf(buf, 18, "%02x:%02x:%02x:%02x:%02x:%02x", 
             (uint8_t)mac[0], (uint8_t)mac[1], (uint8_t)mac[2],
             (uint8_t)mac[3], (uint8_t)mac[4], (uint8_t)mac[5]);
}
133
134 static void print_route(struct vnet_route_info *route){
135     char str[50];
136
137     mac_to_string(route->route_def.src_mac, str);
138     PrintDebug("Src Mac (%s),  src_qual (%d)\n", 
139                         str, route->route_def.src_mac_qual);
140     mac_to_string(route->route_def.dst_mac, str);
141     PrintDebug("Dst Mac (%s),  dst_qual (%d)\n", 
142                         str, route->route_def.dst_mac_qual);
143     PrintDebug("Src dev id (%d), src type (%d)", 
144                         route->route_def.src_id, 
145                         route->route_def.src_type);
146     PrintDebug("Dst dev id (%d), dst type (%d)\n", 
147                         route->route_def.dst_id, 
148                         route->route_def.dst_type);
149     if (route->route_def.dst_type == LINK_INTERFACE) {
150         PrintDebug("dst_dev (%p), dst_dev_id (%d), dst_dev_input (%p), dst_dev_data (%p)\n",
151                                         route->dst_dev,
152                                         route->dst_dev->dev_id,
153                                         route->dst_dev->input,
154                                         route->dst_dev->private_data);
155     }
156 }
157
158 static void dump_routes(){
159         struct vnet_route_info *route;
160
161         int i = 0;
162         PrintDebug("\n========Dump routes starts ============\n");
163         list_for_each_entry(route, &(vnet_state.routes), node) {
164                 PrintDebug("\nroute %d:\n", ++i);
165                 
166                 print_route(route);
167         }
168         PrintDebug("\n========Dump routes end ============\n");
169 }
170
171 #endif
172
173
174 /* 
175  * A VNET packet is a packed struct with the hashed fields grouped together.
176  * This means we can generate the hash from an offset into the pkt struct
177  */
178 static inline uint_t hash_fn(addr_t hdr_ptr) {    
179     uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
180
181     return v3_hash_buffer(hdr_buf, VNET_HASH_SIZE);
182 }
183
184 static inline int hash_eq(addr_t key1, addr_t key2) {   
185     return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
186 }
187
188 static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
189     memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);    
190
191     if (v3_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
192         PrintError("Vnet: Failed to insert new route entry to the cache\n");
193         return -1;
194     }
195     
196     return 0;
197 }
198
199 static int clear_hash_cache() {
200
201     v3_free_htable(vnet_state.route_cache, 1, 1);
202     vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);
203
204     return 0;
205 }
206
207 static int look_into_cache(const struct v3_vnet_pkt * pkt, struct route_list ** routes) {
208     
209     *routes = (struct route_list *)v3_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
210    
211     return 0;
212 }
213
214
215 static struct vnet_dev * find_dev_by_id(int idx) {
216     struct vnet_dev * dev = NULL; 
217     
218     list_for_each_entry(dev, &(vnet_state.devs), node) {
219         int dev_id = dev->dev_id;
220
221         if (dev_id == idx)
222             return dev;
223     }
224
225     return NULL;
226 }
227
228 static struct vnet_dev * find_dev_by_mac(char mac[6]) {
229     struct vnet_dev * dev = NULL; 
230     
231     list_for_each_entry(dev, &(vnet_state.devs), node) {
232         if (!memcmp(dev->mac_addr, mac, 6))
233             return dev;
234     }
235
236     return NULL;
237 }
238
239 int get_device_id_by_mac(char mac[6]){
240
241     struct vnet_dev *dev = find_dev_by_mac(mac);
242
243     if (dev == NULL)
244         return -1;
245
246     return dev->dev_id;
247 }
248
249
250 int v3_vnet_add_route(struct v3_vnet_route route) {
251     struct vnet_route_info * new_route = NULL;
252     unsigned long flags; 
253
254     new_route = (struct vnet_route_info *)V3_Malloc(sizeof(struct vnet_route_info));
255     memset(new_route, 0, sizeof(struct vnet_route_info));
256
257     PrintDebug("Vnet: vnet_add_route_entry: dst_id: %d, dst_type: %d\n",
258                         route.dst_id, route.dst_type);  
259     
260     memcpy(new_route->route_def.src_mac, route.src_mac, 6);
261     memcpy(new_route->route_def.dst_mac, route.dst_mac, 6);
262     new_route->route_def.src_mac_qual = route.src_mac_qual;
263     new_route->route_def.dst_mac_qual = route.dst_mac_qual;
264     new_route->route_def.dst_id = route.dst_id;
265     new_route->route_def.dst_type = route.dst_type;
266     new_route->route_def.src_id = route.src_id;
267     new_route->route_def.src_type = route.src_type;
268
269     if (new_route->route_def.dst_type == LINK_INTERFACE) {
270         new_route->dst_dev = find_dev_by_id(new_route->route_def.dst_id);
271         PrintDebug("Vnet: Add route, get device: dev_id %d, input : %p, private_data %p\n",
272                         new_route->dst_dev->dev_id, new_route->dst_dev->input, new_route->dst_dev->private_data);
273     }
274
275     if (new_route->route_def.src_type == LINK_INTERFACE) {
276         new_route->src_dev = find_dev_by_id(new_route->route_def.src_id);
277     }
278
279     flags = v3_lock_irqsave(vnet_state.lock);
280
281     list_add(&(new_route->node), &(vnet_state.routes));
282     clear_hash_cache();
283
284     v3_unlock_irqrestore(vnet_state.lock, flags);
285    
286
287 #ifdef CONFIG_DEBUG_VNET
288     dump_routes();
289 #endif
290
291     return 0;
292 }
293
294
295
296 // At the end allocate a route_list
297 // This list will be inserted into the cache so we don't need to free it
298 static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
299     struct vnet_route_info * route = NULL; 
300     struct route_list * matches = NULL;
301     int num_matches = 0;
302     int max_rank = 0;
303     struct list_head match_list;
304     struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
305     uint8_t src_type = pkt->src_type;
306     uint32_t src_link = pkt->src_id;
307
308 #ifdef CONFIG_DEBUG_VNET
309     {
310         char dst_str[100];
311         char src_str[100];
312
313         mac_to_string(hdr->src_mac, src_str);  
314         mac_to_string(hdr->dst_mac, dst_str);
315         PrintDebug("Vnet: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
316     }
317 #endif
318
319     INIT_LIST_HEAD(&match_list);
320     
321 #define UPDATE_MATCHES(rank) do {                               \
322         if (max_rank < (rank)) {                                \
323             max_rank = (rank);                                  \
324             INIT_LIST_HEAD(&match_list);                        \
325                                                                 \
326             list_add(&(route->match_node), &match_list);        \
327             num_matches = 1;                                    \
328         } else if (max_rank == (rank)) {                        \
329             list_add(&(route->match_node), &match_list);        \
330             num_matches++;                                      \
331         }                                                       \
332     } while (0)
333     
334
335     list_for_each_entry(route, &(vnet_state.routes), node) {
336         struct v3_vnet_route * route_def = &(route->route_def);
337
338         // CHECK SOURCE TYPE HERE
339         if ( (route_def->src_type != LINK_ANY) && 
340              ( (route_def->src_type != src_type) || 
341                ( (route_def->src_id != src_link) &&
342                  (route_def->src_id != (uint32_t)-1)))) {
343             continue;
344         }
345
346
347         if ((route_def->dst_mac_qual == MAC_ANY) &&
348             (route_def->src_mac_qual == MAC_ANY)) {      
349             UPDATE_MATCHES(3);
350         }
351         
352         if (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) {
353             if (route_def->src_mac_qual != MAC_NOT) {
354                 if (route_def->dst_mac_qual == MAC_ANY) {
355                     UPDATE_MATCHES(6);
356                 } else if (route_def->dst_mac_qual != MAC_NOT &&
357                            memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
358                     UPDATE_MATCHES(8);
359                 }
360             }
361         }
362             
363         if (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
364             if (route_def->dst_mac_qual != MAC_NOT) {
365                 if (route_def->src_mac_qual == MAC_ANY) {
366                     UPDATE_MATCHES(6);
367                 } else if ((route_def->src_mac_qual != MAC_NOT) && 
368                            (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
369                     UPDATE_MATCHES(8);
370                 }
371             }
372         }
373             
374         if ((route_def->dst_mac_qual == MAC_NOT) &&
375             (memcmp(route_def->dst_mac, hdr->dst_mac, 6) != 0)) {
376             if (route_def->src_mac_qual == MAC_ANY) {
377                 UPDATE_MATCHES(5);
378             } else if ((route_def->src_mac_qual != MAC_NOT) && 
379                        (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {     
380                 UPDATE_MATCHES(7);
381             }
382         }
383         
384         if ((route_def->src_mac_qual == MAC_NOT) &&
385             (memcmp(route_def->src_mac, hdr->src_mac, 6) != 0)) {
386             if (route_def->dst_mac_qual == MAC_ANY) {
387                 UPDATE_MATCHES(5);
388             } else if ((route_def->dst_mac_qual != MAC_NOT) &&
389                        (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0)) {
390                 UPDATE_MATCHES(7);
391             }
392         }
393         
394         // Default route
395         if ( (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) &&
396              (route_def->dst_mac_qual == MAC_NONE)) {
397             UPDATE_MATCHES(4);
398         }
399     }
400
401     PrintDebug("Vnet: match_route: Matches=%d\n", num_matches);
402
403     if (num_matches == 0) {
404         return NULL;
405     }
406
407     matches = (struct route_list *)V3_Malloc(sizeof(struct route_list) + 
408                                 (sizeof(struct vnet_route_info *) * num_matches));
409
410     matches->num_routes = num_matches;
411
412     {
413         int i = 0;
414         list_for_each_entry(route, &match_list, match_node) {
415             matches->routes[i++] = route;
416         }
417     }
418
419     return matches;
420 }
421
422
423 static int flush_bridge_pkts(struct vnet_brg_dev *bridge){
424     unsigned long flags;
425     int num, start, send;
426     struct v3_vnet_bridge_input_args args;
427     int cpu_id = bridge->vm->cores[0].cpu_id;
428     int current_core = V3_Get_CPU();
429         
430     if (bridge == NULL) {
431         PrintDebug("VNET: No bridge to sent data to links\n");
432         return -1;
433     }
434
435     flags = v3_lock_irqsave(bridge->recv_buf.lock);
436                 
437     num = bridge->recv_buf.num;
438     start = bridge->recv_buf.start;
439
440     bridge->recv_buf.num -= num;
441     bridge->recv_buf.start += num;
442     bridge->recv_buf.start %= BRIDGE_BUF_SIZE;
443         
444     v3_unlock_irqrestore(bridge->recv_buf.lock, flags);
445
446
447     if(bridge->disabled){
448         PrintDebug("VNET: In flush bridge pkts: Bridge is disabled\n");
449         return -1;
450     }
451
452     if(num <= 2 && num > 0){
453         PrintDebug("VNET: In flush bridge pkts: %d\n", num);
454     }
455
456     if(num > 0) {
457         PrintDebug("VNET: In flush bridge pkts to bridge, cur_cpu %d, brige_core: %d\n", current_core, cpu_id);
458         if (current_core == cpu_id){
459             if ((start + num) < BRIDGE_BUF_SIZE){
460                 bridge->input(bridge->vm, &(bridge->recv_buf.pkts[start]), num, bridge->private_data);
461             }else {
462                 bridge->input(bridge->vm, &(bridge->recv_buf.pkts[start]), (BRIDGE_BUF_SIZE - start), bridge->private_data);                            
463                 send = num - (BRIDGE_BUF_SIZE - start);
464                 bridge->input(bridge->vm, &(bridge->recv_buf.pkts[0]), send, bridge->private_data);
465             }   
466         }else {
467             args.vm = bridge->vm;
468             args.private_data = bridge->private_data;
469         
470             if ((start + num) < BRIDGE_BUF_SIZE){
471                 args.pkt_num = num;
472                 args.vnet_pkts = &(bridge->recv_buf.pkts[start]);
473                 V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
474             }else {
475                 args.pkt_num = BRIDGE_BUF_SIZE - start;
476                 args.vnet_pkts = &(bridge->recv_buf.pkts[start]);
477                 V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
478                                 
479                 send = num - (BRIDGE_BUF_SIZE - start);
480                 args.pkt_num = send;
481                 args.vnet_pkts = &(bridge->recv_buf.pkts[0]);                   
482                 V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
483             }
484         }
485         
486         PrintDebug("VNET: flush bridge pkts %d\n", num);
487     }
488                         
489     return 0;
490 }
491
492
493 static int send_to_bridge(struct v3_vnet_pkt * pkt){
494     struct vnet_brg_dev *bridge = vnet_state.bridge;
495     int cpu_id = bridge->vm->cores[0].cpu_id;
496     struct v3_vnet_bridge_input_args args;
497
498     if (bridge == NULL) {
499         PrintDebug("VNET: No bridge to sent data to links\n");
500         return -1;
501     }
502
503     if(bridge->max_delayed_pkts <= 1){
504         if(bridge->disabled){
505             PrintDebug("VNET: Bridge diabled\n");
506             return -1;
507       }
508
509         args.pkt_num = 1;
510         args.vm = bridge->vm;
511         args.vnet_pkts = pkt;
512         args.private_data = bridge->private_data;
513
514         V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
515         PrintDebug("VNET: sent one packet to the bridge\n");
516         return 0;
517     }
518
519     unsigned long flags;
520     int end, num=0;
521     struct v3_vnet_pkt *buf;
522
523     PrintDebug("VNET: send_to_bridge\n");
524
525     flags = v3_lock_irqsave(bridge->recv_buf.lock);
526
527     if(bridge->disabled && bridge->recv_buf.num >= BRIDGE_BUF_SIZE){
528         PrintDebug("Bridge diabled and bridge receive buffer full\n");
529         v3_unlock_irqrestore(bridge->recv_buf.lock, flags);//maybe should move this after copy
530         num = bridge->recv_buf.num;
531         goto exit;
532     }
533             
534     end =       bridge->recv_buf.end;
535     buf = &(bridge->recv_buf.pkts[end]);
536
537     bridge->recv_buf.num ++;
538     bridge->recv_buf.end ++;
539     bridge->recv_buf.end %= BRIDGE_BUF_SIZE;
540
541     num = bridge->recv_buf.num;
542
543     v3_unlock_irqrestore(bridge->recv_buf.lock, flags);//maybe should move this after copy
544
545
546     buf->size = pkt->size;
547     buf->dst_id = pkt->dst_id;
548     buf->src_id = pkt->src_id;
549     buf->src_type = pkt->src_type;
550     buf->dst_type = pkt->dst_type;
551     memcpy(buf->header, pkt->header, ETHERNET_HEADER_LEN);
552     memcpy(buf->data, pkt->data, pkt->size);
553
554 exit:   
555
556     if (num >= bridge->max_delayed_pkts){
557         flush_bridge_pkts(bridge);
558     }
559
560     return 0;
561 }
562
/* Route one packet through VNET: look the route list up in the cache
 * (populating it from the route table on a miss), then deliver the
 * packet to every matched destination -- either the bridge (LINK_EDGE)
 * or a registered device's input callback (LINK_INTERFACE).
 * Returns 0 if a route was found (even if some deliveries failed),
 * -1 if the packet matched no route and was dropped.
 * private_data is currently unused. */
int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
    struct route_list * matched_routes = NULL;
    unsigned long flags;
    int i;

#ifdef CONFIG_DEBUG_VNET
   {
        struct eth_hdr * hdr = (struct eth_hdr *)(pkt->header);
        char dest_str[100];
        char src_str[100];

        mac_to_string(hdr->src_mac, src_str);  
        mac_to_string(hdr->dst_mac, dest_str);
        int cpu = V3_Get_CPU();
        PrintDebug("Vnet: on cpu %d, HandleDataOverLink. SRC(%s), DEST(%s), pkt size: %d\n", cpu, src_str, dest_str, pkt->size);
   }
#endif

    // Cache lookup and (on miss) table match + cache insert happen under
    // the global lock; delivery below happens after it is released.
    flags = v3_lock_irqsave(vnet_state.lock);

    look_into_cache(pkt, &matched_routes);
        
    if (matched_routes == NULL) {  
        PrintDebug("Vnet: send pkt Looking into routing table\n");
        
        matched_routes = match_route(pkt);
        
        if (matched_routes) {
            // Ownership of matched_routes passes to the cache here.
            add_route_to_cache(pkt, matched_routes);
        } else {
            // No route: drop the packet.
            PrintDebug("Could not find route for packet... discards packet\n");
            v3_unlock_irqrestore(vnet_state.lock, flags);
            return -1;
        }
    }

    v3_unlock_irqrestore(vnet_state.lock, flags);

    PrintDebug("Vnet: send pkt route matches %d\n", matched_routes->num_routes);

    // Fan the packet out to every matched destination; a failed delivery
    // is logged and skipped, not treated as fatal.
    for (i = 0; i < matched_routes->num_routes; i++) {
         struct vnet_route_info * route = matched_routes->routes[i];
        
        if (route->route_def.dst_type == LINK_EDGE) {
            // NOTE(review): pkt->dst_type/dst_id are mutated per route,
            // so the same pkt object is rewritten on each iteration.
            pkt->dst_type = LINK_EDGE;
            pkt->dst_id = route->route_def.dst_id;

            if (send_to_bridge(pkt) == -1) {
                PrintDebug("VNET: Packet not sent properly to bridge\n");
                continue;
             }
            
        } else if (route->route_def.dst_type == LINK_INTERFACE) {
            if (route->dst_dev->input(route->dst_dev->vm, pkt, route->dst_dev->private_data) == -1) {
                PrintDebug("VNET: Packet not sent properly\n");
                continue;
             }
        } else {
            PrintDebug("Vnet: Wrong Edge type\n");
            continue;
        }

        PrintDebug("Vnet: v3_vnet_send_pkt: Forward packet according to Route %d\n", i);
    }
    
    return 0;
}
630
631 void v3_vnet_send_pkt_xcall(void * data){
632     struct v3_vnet_pkt * pkt = (struct v3_vnet_pkt *)data;
633     v3_vnet_send_pkt(pkt, NULL);
634 }
635
636
637 void v3_vnet_bridge_polling()
638 {
639     unsigned long flags;
640     int num, start;
641     struct v3_vnet_pkt *buf;
642     struct vnet_brg_dev *bridge = vnet_state.bridge;
643
644     PrintDebug("In vnet bridge pollling: cpu %d\n", V3_Get_CPU());
645
646     if(bridge == NULL){
647         PrintDebug("VNET: Bridge is not set\n");
648         return;
649     }
650
651     flags = v3_lock_irqsave(bridge->send_buf.lock);
652                 
653     num = bridge->send_buf.num;
654     start = bridge->send_buf.start;
655
656     PrintDebug("VNET: bridge polling pkts %d\n", num);
657
658     while(num > 0) {
659         buf = &(bridge->send_buf.pkts[bridge->send_buf.start]);
660
661         v3_vnet_send_pkt(buf, NULL);
662
663         bridge->send_buf.num --;
664         bridge->send_buf.start ++;
665         bridge->send_buf.start %= BRIDGE_BUF_SIZE;
666         num --;
667     }
668
669     v3_unlock_irqrestore(bridge->send_buf.lock, flags);
670
671     return;
672 }
673
674
675 int v3_vnet_bridge_rx(uchar_t *buf, uint16_t size, uint16_t src_link){
676     struct vnet_brg_dev *bridge = vnet_state.bridge;
677     unsigned long flags;
678     int end;
679     struct v3_vnet_pkt *pkt;
680    
681     if (bridge == NULL) {
682         PrintDebug("VNET: No bridge is set\n");
683         return -1;
684     }
685
686     flags = v3_lock_irqsave(bridge->send_buf.lock);
687             
688     end =       bridge->send_buf.end;
689     pkt = &(bridge->send_buf.pkts[end]);
690
691     if(bridge->send_buf.num > BRIDGE_BUF_SIZE){
692         PrintDebug("VNET: bridge rx: buffer full\n");
693         goto exit;
694     }
695
696     bridge->send_buf.num ++;
697     bridge->send_buf.end ++;
698     bridge->send_buf.end %= BRIDGE_BUF_SIZE;
699
700     pkt->size = size;
701     pkt->src_id = src_link;
702     pkt->src_type = LINK_EDGE;
703     memcpy(pkt->header, buf, ETHERNET_HEADER_LEN);
704     memcpy(pkt->data, buf, size);
705
706 exit:
707         
708     v3_unlock_irqrestore(bridge->send_buf.lock, flags);
709
710     return 0;
711 }
712         
713
714 int v3_vnet_add_dev(struct v3_vm_info *vm, uint8_t mac[6], 
715                     int (*netif_input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data), 
716                     void * priv_data){
717     struct vnet_dev * new_dev = NULL;
718     unsigned long flags;
719
720     new_dev = (struct vnet_dev *)V3_Malloc(sizeof(struct vnet_dev)); 
721
722     if (new_dev == NULL) {
723         PrintError("VNET: Malloc fails\n");
724         return -1;
725     }
726    
727     memcpy(new_dev->mac_addr, mac, 6);
728     new_dev->input = netif_input;
729     new_dev->private_data = priv_data;
730     new_dev->vm = vm;
731     new_dev->dev_id = 0;        
732
733     flags = v3_lock_irqsave(vnet_state.lock);
734
735     if (!find_dev_by_mac(mac)) {
736         list_add(&(new_dev->node), &(vnet_state.devs));
737         new_dev->dev_id = ++vnet_state.num_devs;
738     }
739
740     v3_unlock_irqrestore(vnet_state.lock, flags);
741
742     // if the device was found previosly the id should still be 0
743     if (new_dev->dev_id == 0) {
744         PrintError("Device Alrady exists\n");
745         return -1;
746     }
747
748     PrintDebug("Vnet: Add Device: dev_id %d, input : %p, private_data %p\n",
749                         new_dev->dev_id, new_dev->input, new_dev->private_data);
750
751     return new_dev->dev_id;
752 }
753
754
/* Periodic tick, invoked from a guest core. When packet batching is
 * enabled (max_delayed_pkts > 1), flushes the bridge's receive ring
 * once max_latency cycles have elapsed since the last flush. */
void  v3_vnet_heartbeat(struct guest_info *core){
    static long last_time, cur_time;  // statics: zero-initialized before first call

    if(vnet_state.bridge == NULL)
        return;
        
    if(vnet_state.bridge->max_delayed_pkts <= 1)
        return;  // no batching in effect, so no timed flush is needed

    // NOTE(review): cur_time is refreshed only when we are NOT on the
    // bridge VM's core 0, yet the comparison below runs regardless, so
    // on that core it uses a stale (or zero) timestamp -- confirm this
    // asymmetry is intentional.
    if(V3_Get_CPU() != vnet_state.bridge->vm->cores[0].cpu_id){
        rdtscll(cur_time);
    }

    if ((cur_time - last_time) >= vnet_state.bridge->max_latency) {
        last_time = cur_time;
        flush_bridge_pkts(vnet_state.bridge);
    }
}
773
774 int v3_vnet_add_bridge(struct v3_vm_info * vm,
775                        int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data),
776                        void (*xcall_input)(void *data),
777                        uint16_t max_delayed_pkts,
778                        long max_latency,
779                        void * priv_data) {
780     unsigned long flags;
781     int bridge_free = 0;
782     struct vnet_brg_dev * tmp_bridge = NULL;
783     int i;
784     
785     
786     flags = v3_lock_irqsave(vnet_state.lock);
787
788     if (vnet_state.bridge == NULL) {
789         bridge_free = 1;
790         vnet_state.bridge = (void *)1;
791     }
792
793     v3_unlock_irqrestore(vnet_state.lock, flags);
794
795     if (bridge_free == 0) {
796         PrintError("Bridge already set\n");
797         return -1;
798     }
799
800     tmp_bridge = (struct vnet_brg_dev *)V3_Malloc(sizeof(struct vnet_brg_dev));
801
802     if (tmp_bridge == NULL) {
803         PrintError("Malloc Fails\n");
804         return -1;
805     }
806     
807     tmp_bridge->vm = vm;
808     tmp_bridge->input = input;
809     tmp_bridge->xcall_input = xcall_input;
810     tmp_bridge->private_data = priv_data;
811     tmp_bridge->disabled = 0;
812
813     //initial receving buffer
814     tmp_bridge->recv_buf.start = 0;
815     tmp_bridge->recv_buf.end = 0;
816     tmp_bridge->recv_buf.num = 0;
817     if(v3_lock_init(&(tmp_bridge->recv_buf.lock)) == -1){
818         PrintError("VNET: add bridge, error to initiate recv buf lock\n");
819     }
820     tmp_bridge->max_delayed_pkts = (max_delayed_pkts<BRIDGE_BUF_SIZE)?max_delayed_pkts : BRIDGE_BUF_SIZE;
821     tmp_bridge->max_latency = max_latency;
822     for(i = 0; i<BRIDGE_BUF_SIZE; i++){
823         tmp_bridge->recv_buf.pkts[i].data = &(tmp_bridge->recv_buf.datas[i*ETHERNET_PACKET_LEN]);
824     }
825
826     //initial sending buffer
827     tmp_bridge->send_buf.start = 0;
828     tmp_bridge->send_buf.end = 0;
829     tmp_bridge->send_buf.num = 0;
830     if(v3_lock_init(&(tmp_bridge->send_buf.lock)) == -1){
831         PrintError("VNET: add bridge, error to initiate send buf lock\n");
832     }
833     for(i = 0; i<BRIDGE_BUF_SIZE; i++){
834         tmp_bridge->send_buf.pkts[i].data = &(tmp_bridge->send_buf.datas[i*ETHERNET_PACKET_LEN]);
835     }
836         
837     // make this atomic to avoid possible race conditions
838     flags = v3_lock_irqsave(vnet_state.lock);
839     vnet_state.bridge = tmp_bridge;
840     v3_unlock_irqrestore(vnet_state.lock, flags);
841
842     return 0;
843 }
844
845
846 int v3_vnet_disable_bridge() {
847     unsigned long flags; 
848     
849     flags = v3_lock_irqsave(vnet_state.lock);
850
851     if (vnet_state.bridge != NULL) {
852         vnet_state.bridge->disabled = 1;
853     }
854
855     v3_unlock_irqrestore(vnet_state.lock, flags);
856
857     return 0;
858 }
859
860
861 int v3_vnet_enable_bridge() {
862     unsigned long flags; 
863     
864     flags = v3_lock_irqsave(vnet_state.lock);
865
866     if (vnet_state.bridge != NULL) {
867         vnet_state.bridge->disabled = 0;
868     }
869
870     v3_unlock_irqrestore(vnet_state.lock, flags);
871
872     return 0;
873 }
874
875
876
877 int V3_init_vnet() {
878         
879     INIT_LIST_HEAD(&(vnet_state.routes));
880     INIT_LIST_HEAD(&(vnet_state.devs));
881
882     vnet_state.num_devs = 0;
883     vnet_state.num_routes = 0;
884
885     PrintDebug("VNET: Links and Routes tables initiated\n");
886
887     if (v3_lock_init(&(vnet_state.lock)) == -1){
888         PrintError("VNET: Failure to init lock for routes table\n");
889     }
890
891     PrintDebug("VNET: Locks initiated\n");
892
893     vnet_state.inpkt_q = v3_create_queue();
894     v3_init_queue(vnet_state.inpkt_q);
895     PrintDebug("VNET: Receiving queue initiated\n");
896
897     vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);
898
899     if (vnet_state.route_cache == NULL) {
900         PrintError("Vnet: Route Cache Init Fails\n");
901         return -1;
902     }
903
904     PrintDebug("VNET: initiated\n");
905
906     return 0;
907 }