Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches can be checked out in the same way, substituting the branch name for "devel".


Implemented SMP, still not functional
[palacios.git] / palacios / src / palacios / vmm_vnet.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2009, Lei Xia <lxia@northwestern.edu> 
11  * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>  
12  * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Lei Xia <lxia@northwestern.edu>
16  *         Yuan Tang <ytang@northwestern.edu>
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21  
22 #include <palacios/vmm_vnet.h>
23 #include <palacios/vm_guest_mem.h>
24 #include <palacios/vmm_lock.h>
25 #include <palacios/vmm_queue.h>
26 #include <palacios/vmm_sprintf.h>
27
28 #ifndef CONFIG_DEBUG_VNET
29 #undef PrintDebug
30 #define PrintDebug(fmt, args...)
31 #endif
32
33
34
/* Ethernet frame header, laid out exactly as on the wire (hence packed). */
struct eth_hdr {
    uint8_t dst_mac[6];   // destination MAC address
    uint8_t src_mac[6];   // source MAC address
    uint16_t type; // indicates layer 3 protocol type
} __attribute__((packed));
40
41
42
43
44
/*
 * One VNET endpoint: a virtual NIC registered by a guest VM via
 * v3_vnet_add_dev() and linked into the global vnet_state.devs list.
 */
struct vnet_dev {

    uint8_t mac_addr[6];       // MAC address this device answers to
    struct v3_vm_info * vm;    // owning VM, passed back to input()
    
    /* Delivery callback: hands a routed packet to the device; returns -1 on failure. */
    int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data);
    void * private_data;       // opaque cookie passed back to input()
    
    int dev_id;                // 1-based id assigned at registration; 0 means unassigned
    struct list_head node;     // entry in vnet_state.devs
} __attribute__((packed));     // NOTE(review): packing a struct containing pointers and a
                               // list_head looks unnecessary and risks misaligned access -- confirm
56
57
#define BRIDGE_BUF_SIZE 512   // packet slots per ring buffer

/* Fixed-size ring buffer of packets (used for vnet_state.in_buf). */
struct bridge_pkts_buf {
    int start, end;   // ring indices: start = oldest queued packet, end = next free slot
    int num;          // number of packets currently queued
    v3_lock_t lock;   // protects every field of this buffer
    struct v3_vnet_pkt pkts[BRIDGE_BUF_SIZE];
    /* Backing storage: pkts[i].data is pointed at datas[i * ETHERNET_PACKET_LEN]
     * during initialization (see V3_init_vnet). */
    uint8_t datas[ETHERNET_PACKET_LEN*BRIDGE_BUF_SIZE];
};
66
/*
 * The (single) bridge device: VNET's link to the outside world,
 * registered via v3_vnet_add_bridge().
 */
struct vnet_brg_dev {
    struct v3_vm_info * vm;   // VM hosting the bridge
    
    /* Deliver a batch of packets to the bridge; returns -1 on failure. */
    int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data);
    /* Cross-core wrapper used to invoke input() on the bridge's core. */
    void (*xcall_input)(void *data);
    /* Ask the bridge to poll for pending packets (called from v3_vnet_heartbeat). */
    int (*polling_pkt)(struct v3_vm_info * vm,  void *private_data);

    int disabled;             // nonzero -> bridge temporarily refuses packets
        
    uint16_t max_delayed_pkts; // batching threshold (capped at BRIDGE_BUF_SIZE)
    long max_latency; //in cycles
    void * private_data;      // opaque cookie passed back to the callbacks
} __attribute__((packed));    // NOTE(review): packed struct full of pointers -- confirm necessity
80
81
82
83
84
/* A routing-table entry plus cached device lookups for its endpoints. */
struct vnet_route_info {
    struct v3_vnet_route route_def;   // the user-supplied route definition

    struct vnet_dev * dst_dev;   // resolved at add time when dst_type == LINK_INTERFACE; may be NULL
    struct vnet_dev * src_dev;   // resolved at add time when src_type == LINK_INTERFACE; may be NULL

    struct list_head node;       // entry in the global vnet_state.routes list
    struct list_head match_node; // used for route matching
};
94
95
/*
 * Cached result of a route lookup: the packet's hash key plus the set
 * of matched routes.  Allocated by match_route() with trailing space
 * for num_routes pointers; ownership passes to the route cache.
 */
struct route_list {
    uint8_t hash_buf[VNET_HASH_SIZE];   // key: copy of the packet's hashed header bytes

    uint32_t num_routes;
    struct vnet_route_info * routes[0]; // trailing variable-length array of matches
} __attribute__((packed));
102
103
104
/* Global VNET singleton; all fields initialized by V3_init_vnet(). */
static struct {
    struct list_head routes;   // all vnet_route_info entries
    struct list_head devs;     // all registered vnet_dev endpoints
    
    int num_routes;            // count of routes (also used to derive ids)
    int num_devs;              // count of devices; last value is the newest dev_id

    struct vnet_brg_dev *bridge;   // the single bridge, or NULL if none registered

    v3_lock_t lock;            // protects routes, devs, bridge pointer, and the cache

    struct hashtable * route_cache;   // packet-hash -> route_list lookup cache

    struct bridge_pkts_buf in_buf;  //incoming packets buffer
} vnet_state;
120
121
122
123
124 #ifdef CONFIG_DEBUG_VNET
/*
 * Format a 6-byte MAC address into buf as colon-separated hex
 * ("xx:xx:xx:xx:xx:xx").  buf must hold at least 18 bytes.
 *
 * Fix: the old code passed a bound of 100 while print_route() supplies
 * a 50-byte buffer (a lie to snprintf), and printed the bytes as signed
 * decimal; use an exact bound and the conventional %02x hex form.
 */
static inline void mac_to_string(char mac[6], char * buf) {
    snprintf(buf, 18, "%02x:%02x:%02x:%02x:%02x:%02x",
             (unsigned char)mac[0], (unsigned char)mac[1], (unsigned char)mac[2],
             (unsigned char)mac[3], (unsigned char)mac[4], (unsigned char)mac[5]);
}
130
131 static void print_route(struct vnet_route_info *route){
132     char str[50];
133
134     mac_to_string(route->route_def.src_mac, str);
135     PrintDebug("Src Mac (%s),  src_qual (%d)\n", 
136                         str, route->route_def.src_mac_qual);
137     mac_to_string(route->route_def.dst_mac, str);
138     PrintDebug("Dst Mac (%s),  dst_qual (%d)\n", 
139                         str, route->route_def.dst_mac_qual);
140     PrintDebug("Src dev id (%d), src type (%d)", 
141                         route->route_def.src_id, 
142                         route->route_def.src_type);
143     PrintDebug("Dst dev id (%d), dst type (%d)\n", 
144                         route->route_def.dst_id, 
145                         route->route_def.dst_type);
146     if (route->route_def.dst_type == LINK_INTERFACE) {
147         PrintDebug("dst_dev (%p), dst_dev_id (%d), dst_dev_input (%p), dst_dev_data (%p)\n",
148                                         route->dst_dev,
149                                         route->dst_dev->dev_id,
150                                         route->dst_dev->input,
151                                         route->dst_dev->private_data);
152     }
153 }
154
155 static void dump_routes(){
156         struct vnet_route_info *route;
157
158         int i = 0;
159         PrintDebug("\n========Dump routes starts ============\n");
160         list_for_each_entry(route, &(vnet_state.routes), node) {
161                 PrintDebug("\nroute %d:\n", ++i);
162                 
163                 print_route(route);
164         }
165         PrintDebug("\n========Dump routes end ============\n");
166 }
167
168 #endif
169
170
171 /* 
172  * A VNET packet is a packed struct with the hashed fields grouped together.
173  * This means we can generate the hash from an offset into the pkt struct
174  */
175 static inline uint_t hash_fn(addr_t hdr_ptr) {    
176     uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
177
178     return v3_hash_buffer(hdr_buf, VNET_HASH_SIZE);
179 }
180
181 static inline int hash_eq(addr_t key1, addr_t key2) {   
182     return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
183 }
184
185 static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
186     memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);    
187
188     if (v3_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
189         PrintError("Vnet: Failed to insert new route entry to the cache\n");
190         return -1;
191     }
192     
193     return 0;
194 }
195
196 static int clear_hash_cache() {
197
198     v3_free_htable(vnet_state.route_cache, 1, 1);
199     vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);
200
201     return 0;
202 }
203
204 static int look_into_cache(const struct v3_vnet_pkt * pkt, struct route_list ** routes) {
205     
206     *routes = (struct route_list *)v3_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
207    
208     return 0;
209 }
210
211
212 static struct vnet_dev * find_dev_by_id(int idx) {
213     struct vnet_dev * dev = NULL; 
214     
215     list_for_each_entry(dev, &(vnet_state.devs), node) {
216         int dev_id = dev->dev_id;
217
218         if (dev_id == idx)
219             return dev;
220     }
221
222     return NULL;
223 }
224
225 static struct vnet_dev * find_dev_by_mac(char mac[6]) {
226     struct vnet_dev * dev = NULL; 
227     
228     list_for_each_entry(dev, &(vnet_state.devs), node) {
229         if (!memcmp(dev->mac_addr, mac, 6))
230             return dev;
231     }
232
233     return NULL;
234 }
235
236 int get_device_id_by_mac(char mac[6]){
237
238     struct vnet_dev *dev = find_dev_by_mac(mac);
239
240     if (dev == NULL)
241         return -1;
242
243     return dev->dev_id;
244 }
245
246
247 int v3_vnet_add_route(struct v3_vnet_route route) {
248     struct vnet_route_info * new_route = NULL;
249     unsigned long flags; 
250
251     new_route = (struct vnet_route_info *)V3_Malloc(sizeof(struct vnet_route_info));
252     memset(new_route, 0, sizeof(struct vnet_route_info));
253
254     PrintDebug("Vnet: vnet_add_route_entry: dst_id: %d, dst_type: %d\n",
255                         route.dst_id, route.dst_type);  
256     
257     memcpy(new_route->route_def.src_mac, route.src_mac, 6);
258     memcpy(new_route->route_def.dst_mac, route.dst_mac, 6);
259     new_route->route_def.src_mac_qual = route.src_mac_qual;
260     new_route->route_def.dst_mac_qual = route.dst_mac_qual;
261     new_route->route_def.dst_id = route.dst_id;
262     new_route->route_def.dst_type = route.dst_type;
263     new_route->route_def.src_id = route.src_id;
264     new_route->route_def.src_type = route.src_type;
265
266     if (new_route->route_def.dst_type == LINK_INTERFACE) {
267         new_route->dst_dev = find_dev_by_id(new_route->route_def.dst_id);
268         PrintDebug("Vnet: Add route, get device: dev_id %d, input : %p, private_data %p\n",
269                         new_route->dst_dev->dev_id, new_route->dst_dev->input, new_route->dst_dev->private_data);
270     }
271
272     if (new_route->route_def.src_type == LINK_INTERFACE) {
273         new_route->src_dev = find_dev_by_id(new_route->route_def.src_id);
274     }
275
276     flags = v3_lock_irqsave(vnet_state.lock);
277
278     list_add(&(new_route->node), &(vnet_state.routes));
279     clear_hash_cache();
280
281     v3_unlock_irqrestore(vnet_state.lock, flags);
282    
283
284 #ifdef CONFIG_DEBUG_VNET
285     dump_routes();
286 #endif
287
288     return 0;
289 }
290
291
292
293 // At the end allocate a route_list
294 // This list will be inserted into the cache so we don't need to free it
295 static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
296     struct vnet_route_info * route = NULL; 
297     struct route_list * matches = NULL;
298     int num_matches = 0;
299     int max_rank = 0;
300     struct list_head match_list;
301     struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
302     uint8_t src_type = pkt->src_type;
303     uint32_t src_link = pkt->src_id;
304
305 #ifdef CONFIG_DEBUG_VNET
306     {
307         char dst_str[100];
308         char src_str[100];
309
310         mac_to_string(hdr->src_mac, src_str);  
311         mac_to_string(hdr->dst_mac, dst_str);
312         PrintDebug("Vnet: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
313     }
314 #endif
315
316     INIT_LIST_HEAD(&match_list);
317     
318 #define UPDATE_MATCHES(rank) do {                               \
319         if (max_rank < (rank)) {                                \
320             max_rank = (rank);                                  \
321             INIT_LIST_HEAD(&match_list);                        \
322                                                                 \
323             list_add(&(route->match_node), &match_list);        \
324             num_matches = 1;                                    \
325         } else if (max_rank == (rank)) {                        \
326             list_add(&(route->match_node), &match_list);        \
327             num_matches++;                                      \
328         }                                                       \
329     } while (0)
330     
331
332     list_for_each_entry(route, &(vnet_state.routes), node) {
333         struct v3_vnet_route * route_def = &(route->route_def);
334
335         // CHECK SOURCE TYPE HERE
336         if ( (route_def->src_type != LINK_ANY) && 
337              ( (route_def->src_type != src_type) || 
338                ( (route_def->src_id != src_link) &&
339                  (route_def->src_id != (uint32_t)-1)))) {
340             continue;
341         }
342
343
344         if ((route_def->dst_mac_qual == MAC_ANY) &&
345             (route_def->src_mac_qual == MAC_ANY)) {      
346             UPDATE_MATCHES(3);
347         }
348         
349         if (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) {
350             if (route_def->src_mac_qual != MAC_NOT) {
351                 if (route_def->dst_mac_qual == MAC_ANY) {
352                     UPDATE_MATCHES(6);
353                 } else if (route_def->dst_mac_qual != MAC_NOT &&
354                            memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
355                     UPDATE_MATCHES(8);
356                 }
357             }
358         }
359             
360         if (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
361             if (route_def->dst_mac_qual != MAC_NOT) {
362                 if (route_def->src_mac_qual == MAC_ANY) {
363                     UPDATE_MATCHES(6);
364                 } else if ((route_def->src_mac_qual != MAC_NOT) && 
365                            (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
366                     UPDATE_MATCHES(8);
367                 }
368             }
369         }
370             
371         if ((route_def->dst_mac_qual == MAC_NOT) &&
372             (memcmp(route_def->dst_mac, hdr->dst_mac, 6) != 0)) {
373             if (route_def->src_mac_qual == MAC_ANY) {
374                 UPDATE_MATCHES(5);
375             } else if ((route_def->src_mac_qual != MAC_NOT) && 
376                        (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {     
377                 UPDATE_MATCHES(7);
378             }
379         }
380         
381         if ((route_def->src_mac_qual == MAC_NOT) &&
382             (memcmp(route_def->src_mac, hdr->src_mac, 6) != 0)) {
383             if (route_def->dst_mac_qual == MAC_ANY) {
384                 UPDATE_MATCHES(5);
385             } else if ((route_def->dst_mac_qual != MAC_NOT) &&
386                        (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0)) {
387                 UPDATE_MATCHES(7);
388             }
389         }
390         
391         // Default route
392         if ( (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) &&
393              (route_def->dst_mac_qual == MAC_NONE)) {
394             UPDATE_MATCHES(4);
395         }
396     }
397
398     PrintDebug("Vnet: match_route: Matches=%d\n", num_matches);
399
400     if (num_matches == 0) {
401         return NULL;
402     }
403
404     matches = (struct route_list *)V3_Malloc(sizeof(struct route_list) + 
405                                 (sizeof(struct vnet_route_info *) * num_matches));
406
407     matches->num_routes = num_matches;
408
409     {
410         int i = 0;
411         list_for_each_entry(route, &match_list, match_node) {
412             matches->routes[i++] = route;
413         }
414     }
415
416     return matches;
417 }
418
#if 0
/*
 * DISABLED: batched delivery of the bridge's queued receive buffer.
 * Drains recv_buf under its lock, then hands the packets to the
 * bridge's input() -- directly when already on the bridge's core,
 * otherwise via a cross-core call (xcall_input), splitting the batch
 * when it wraps around the ring.
 *
 * NOTE(review): 'bridge' is dereferenced (bridge->vm->cores[0].cpu_id)
 * before the NULL check below -- fix before re-enabling this code.
 */
static int flush_bridge_pkts(struct vnet_brg_dev *bridge){
    unsigned long flags;
    int num, start, send;
    struct v3_vnet_bridge_input_args args;
    int cpu_id = bridge->vm->cores[0].cpu_id;
    int current_core = V3_Get_CPU();
        
    if (bridge == NULL) {
        PrintDebug("VNET: No bridge to sent data to links\n");
        return -1;
    }

    /* snapshot and consume the queued packets under the buffer lock */
    flags = v3_lock_irqsave(bridge->recv_buf.lock);
                
    num = bridge->recv_buf.num;
    start = bridge->recv_buf.start;

    bridge->recv_buf.num -= num;
    bridge->recv_buf.start += num;
    bridge->recv_buf.start %= BRIDGE_BUF_SIZE;
        
    v3_unlock_irqrestore(bridge->recv_buf.lock, flags);


    if(bridge->disabled){
        PrintDebug("VNET: In flush bridge pkts: Bridge is disabled\n");
        return -1;
    }

    if(num <= 2 && num > 0){
        PrintDebug("VNET: In flush bridge pkts: %d\n", num);
    }

    if(num > 0) {
        PrintDebug("VNET: In flush bridge pkts to bridge, cur_cpu %d, brige_core: %d\n", current_core, cpu_id);
        if (current_core == cpu_id){
            /* already on the bridge core: call input() directly,
             * in two pieces if the batch wraps the ring */
            if ((start + num) < BRIDGE_BUF_SIZE){
                bridge->input(bridge->vm, &(bridge->recv_buf.pkts[start]), num, bridge->private_data);
            }else {
                bridge->input(bridge->vm, &(bridge->recv_buf.pkts[start]), (BRIDGE_BUF_SIZE - start), bridge->private_data);                            
                send = num - (BRIDGE_BUF_SIZE - start);
                bridge->input(bridge->vm, &(bridge->recv_buf.pkts[0]), send, bridge->private_data);
            }   
        }else {
            /* on another core: marshal the batch through a cross-core call */
            args.vm = bridge->vm;
            args.private_data = bridge->private_data;
        
            if ((start + num) < BRIDGE_BUF_SIZE){
                args.pkt_num = num;
                args.vnet_pkts = &(bridge->recv_buf.pkts[start]);
                V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
            }else {
                args.pkt_num = BRIDGE_BUF_SIZE - start;
                args.vnet_pkts = &(bridge->recv_buf.pkts[start]);
                V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
                                
                send = num - (BRIDGE_BUF_SIZE - start);
                args.pkt_num = send;
                args.vnet_pkts = &(bridge->recv_buf.pkts[0]);                   
                V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
            }
        }
        
        PrintDebug("VNET: flush bridge pkts %d\n", num);
    }
                        
    return 0;
}
#endif
489
/*
 * Hand one packet to the registered bridge device.
 *
 * Only the immediate-delivery path (max_delayed_pkts <= 1) is active;
 * the batching path further down is commented out, so when
 * max_delayed_pkts > 1 the packet is currently dropped silently and 0
 * is returned.
 *
 * Returns 0 on success, -1 when there is no bridge or it is disabled.
 */
static int send_to_bridge(struct v3_vnet_pkt * pkt){
    struct vnet_brg_dev *bridge = vnet_state.bridge;

    if (bridge == NULL) {
        PrintDebug("VNET: No bridge to sent data to links\n");
        return -1;
    }

    if(bridge->max_delayed_pkts <= 1){
        if(bridge->disabled){
            PrintDebug("VNET: Bridge diabled\n");
            return -1;
      }

/*
        //avoid the cross-core call here
        int cpu_id = bridge->vm->cores[0].cpu_id;
        struct v3_vnet_bridge_input_args args;

        args.pkt_num = 1;
        args.vm = bridge->vm;
        args.vnet_pkts = pkt;
        args.private_data = bridge->private_data;
        
        V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
*/
        // deliver directly on the current core (cross-core variant above is disabled)
        bridge->input(bridge->vm, pkt, 1, bridge->private_data);

        PrintDebug("VNET: sent one packet to the bridge\n");
        return 0;
    }

/*
    unsigned long flags;
    int end, num=0;
    struct v3_vnet_pkt *buf;

    PrintDebug("VNET: send_to_bridge\n");

    flags = v3_lock_irqsave(bridge->recv_buf.lock);

    if(bridge->disabled && bridge->recv_buf.num >= BRIDGE_BUF_SIZE){
        PrintDebug("Bridge diabled and bridge receive buffer full\n");
        v3_unlock_irqrestore(bridge->recv_buf.lock, flags);//maybe should move this after copy
        num = bridge->recv_buf.num;
        goto exit;
    }
            
    end =       bridge->recv_buf.end;
    buf = &(bridge->recv_buf.pkts[end]);

    bridge->recv_buf.num ++;
    bridge->recv_buf.end ++;
    bridge->recv_buf.end %= BRIDGE_BUF_SIZE;

    num = bridge->recv_buf.num;

    v3_unlock_irqrestore(bridge->recv_buf.lock, flags);//maybe should move this after copy


    buf->size = pkt->size;
    buf->dst_id = pkt->dst_id;
    buf->src_id = pkt->src_id;
    buf->src_type = pkt->src_type;
    buf->dst_type = pkt->dst_type;
    memcpy(buf->header, pkt->header, ETHERNET_HEADER_LEN);
    memcpy(buf->data, pkt->data, pkt->size);

exit:   

    if (num >= bridge->max_delayed_pkts){
        flush_bridge_pkts(bridge);
    }
*/
    // NOTE(review): reached only when max_delayed_pkts > 1 -- the packet
    // is not queued anywhere (code above is disabled) yet we report success
    return 0;
}
566
567 int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
568     struct route_list * matched_routes = NULL;
569     unsigned long flags;
570     int i;
571
572 #ifdef CONFIG_DEBUG_VNET
573    {
574         struct eth_hdr * hdr = (struct eth_hdr *)(pkt->header);
575         char dest_str[100];
576         char src_str[100];
577
578         mac_to_string(hdr->src_mac, src_str);  
579         mac_to_string(hdr->dst_mac, dest_str);
580         int cpu = V3_Get_CPU();
581         PrintDebug("Vnet: on cpu %d, HandleDataOverLink. SRC(%s), DEST(%s), pkt size: %d\n", cpu, src_str, dest_str, pkt->size);
582    }
583 #endif
584
585     flags = v3_lock_irqsave(vnet_state.lock);
586
587     look_into_cache(pkt, &matched_routes);
588         
589     if (matched_routes == NULL) {  
590         PrintDebug("Vnet: send pkt Looking into routing table\n");
591         
592         matched_routes = match_route(pkt);
593         
594         if (matched_routes) {
595             add_route_to_cache(pkt, matched_routes);
596         } else {
597             PrintDebug("Could not find route for packet... discards packet\n");
598             v3_unlock_irqrestore(vnet_state.lock, flags);
599             return -1;
600         }
601     }
602
603     v3_unlock_irqrestore(vnet_state.lock, flags);
604
605     PrintDebug("Vnet: send pkt route matches %d\n", matched_routes->num_routes);
606
607     for (i = 0; i < matched_routes->num_routes; i++) {
608          struct vnet_route_info * route = matched_routes->routes[i];
609         
610         if (route->route_def.dst_type == LINK_EDGE) {                   
611             pkt->dst_type = LINK_EDGE;
612             pkt->dst_id = route->route_def.dst_id;
613
614             if (send_to_bridge(pkt) == -1) {
615                 PrintDebug("VNET: Packet not sent properly to bridge\n");
616                 continue;
617              }
618             
619         } else if (route->route_def.dst_type == LINK_INTERFACE) {
620             if (route->dst_dev->input(route->dst_dev->vm, pkt, route->dst_dev->private_data) == -1) {
621                 PrintDebug("VNET: Packet not sent properly\n");
622                 continue;
623              }
624         } else {
625             PrintDebug("Vnet: Wrong Edge type\n");
626             continue;
627         }
628
629         PrintDebug("Vnet: v3_vnet_send_pkt: Forward packet according to Route %d\n", i);
630     }
631     
632     return 0;
633 }
634
635 void v3_vnet_send_pkt_xcall(void * data){
636     struct v3_vnet_pkt * pkt = (struct v3_vnet_pkt *)data;
637     v3_vnet_send_pkt(pkt, NULL);
638 }
639
640
641 void v3_vnet_polling()
642 {
643     unsigned long flags;
644     int num, start;
645     struct v3_vnet_pkt *buf;
646
647     PrintDebug("In vnet pollling: cpu %d\n", V3_Get_CPU());
648
649     flags = v3_lock_irqsave(vnet_state.in_buf.lock);
650                 
651     num = vnet_state.in_buf.num;
652     start = vnet_state.in_buf.start;
653
654     PrintDebug("VNET: polling pkts %d\n", num);
655
656     while(num > 0) {
657         buf = &(vnet_state.in_buf.pkts[vnet_state.in_buf.start]);
658
659         v3_vnet_send_pkt(buf, NULL);
660
661         vnet_state.in_buf.num --;
662         vnet_state.in_buf.start ++;
663         vnet_state.in_buf.start %= BRIDGE_BUF_SIZE;
664         num --;
665     }
666
667     v3_unlock_irqrestore(vnet_state.in_buf.lock, flags);
668
669     return;
670 }
671
672
673 int v3_vnet_rx(uchar_t *buf, uint16_t size, uint16_t src_id, uint8_t src_type){
674     unsigned long flags;
675     int end;
676     struct v3_vnet_pkt *pkt;
677    
678     flags = v3_lock_irqsave(vnet_state.in_buf.lock);
679             
680     end = vnet_state.in_buf.end;
681     pkt = &(vnet_state.in_buf.pkts[end]);
682
683     if(vnet_state.in_buf.num > BRIDGE_BUF_SIZE){
684         PrintDebug("VNET: bridge rx: buffer full\n");
685         goto exit;
686     }
687
688     vnet_state.in_buf.num ++;
689     vnet_state.in_buf.end ++;
690     vnet_state.in_buf.end %= BRIDGE_BUF_SIZE;
691
692     pkt->size = size;
693     pkt->src_id = src_id;
694     pkt->src_type = src_type;
695     memcpy(pkt->header, buf, ETHERNET_HEADER_LEN);
696     memcpy(pkt->data, buf, size);
697
698 exit:
699         
700     v3_unlock_irqrestore(vnet_state.in_buf.lock, flags);
701
702     return 0;
703 }
704         
705
706 int v3_vnet_add_dev(struct v3_vm_info *vm, uint8_t mac[6], 
707                     int (*netif_input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data), 
708                     void * priv_data){
709     struct vnet_dev * new_dev = NULL;
710     unsigned long flags;
711
712     new_dev = (struct vnet_dev *)V3_Malloc(sizeof(struct vnet_dev)); 
713
714     if (new_dev == NULL) {
715         PrintError("VNET: Malloc fails\n");
716         return -1;
717     }
718    
719     memcpy(new_dev->mac_addr, mac, 6);
720     new_dev->input = netif_input;
721     new_dev->private_data = priv_data;
722     new_dev->vm = vm;
723     new_dev->dev_id = 0;        
724
725     flags = v3_lock_irqsave(vnet_state.lock);
726
727     if (!find_dev_by_mac(mac)) {
728         list_add(&(new_dev->node), &(vnet_state.devs));
729         new_dev->dev_id = ++vnet_state.num_devs;
730     }
731
732     v3_unlock_irqrestore(vnet_state.lock, flags);
733
734     // if the device was found previosly the id should still be 0
735     if (new_dev->dev_id == 0) {
736         PrintError("Device Alrady exists\n");
737         return -1;
738     }
739
740     PrintDebug("Vnet: Add Device: dev_id %d, input : %p, private_data %p\n",
741                         new_dev->dev_id, new_dev->input, new_dev->private_data);
742
743     return new_dev->dev_id;
744 }
745
746
/*
 * Periodic hook invoked from a guest core: ask the bridge to poll for
 * pending packets.  No-op when no bridge is registered.
 *
 * NOTE(review): v3_vnet_add_bridge briefly publishes the sentinel
 * (void *)1 as vnet_state.bridge before the real struct is installed;
 * if this runs during that window the dereference below would fault --
 * confirm whether callers make that impossible.
 */
void  v3_vnet_heartbeat(struct guest_info *core){
    //static long last_time, cur_time;

    if(vnet_state.bridge == NULL)
        return;
/*      
    if(vnet_state.bridge->max_delayed_pkts > 1){
        if(V3_Get_CPU() != vnet_state.bridge->vm->cores[0].cpu_id){
            rdtscll(cur_time);
        }

        if ((cur_time - last_time) >= vnet_state.bridge->max_latency) {
            last_time = cur_time;
            flush_bridge_pkts(vnet_state.bridge);
        }
    }
*/
    vnet_state.bridge->polling_pkt(vnet_state.bridge->vm, vnet_state.bridge->private_data);
}
766
/*
 * Register the (single) external bridge with VNET.
 *
 * The slot is claimed under the lock by storing the sentinel (void *)1
 * so a concurrent add fails fast; the fully initialized struct is then
 * published under the lock at the end.
 * NOTE(review): while the sentinel is in place, readers that only
 * NULL-check vnet_state.bridge (heartbeat, disable/enable) would
 * dereference (void *)1 -- confirm this window cannot be hit.
 *
 * Returns 0 on success, -1 if a bridge is already set or on allocation
 * failure.
 */
int v3_vnet_add_bridge(struct v3_vm_info * vm,
                       int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data),
                       void (*xcall_input)(void *data),
                       int (*poll_pkt)(struct v3_vm_info * vm, void * private_data),
                       uint16_t max_delayed_pkts,
                       long max_latency,
                       void * priv_data) {
    unsigned long flags;
    int bridge_free = 0;
    struct vnet_brg_dev * tmp_bridge = NULL;    
    
    // claim the bridge slot atomically with a sentinel value
    flags = v3_lock_irqsave(vnet_state.lock);

    if (vnet_state.bridge == NULL) {
        bridge_free = 1;
        vnet_state.bridge = (void *)1;
    }

    v3_unlock_irqrestore(vnet_state.lock, flags);

    if (bridge_free == 0) {
        PrintError("Bridge already set\n");
        return -1;
    }

    tmp_bridge = (struct vnet_brg_dev *)V3_Malloc(sizeof(struct vnet_brg_dev));

    if (tmp_bridge == NULL) {
        PrintError("Malloc Fails\n");
        vnet_state.bridge = NULL;   // release the claimed slot
        return -1;
    }
    
    tmp_bridge->vm = vm;
    tmp_bridge->input = input;
    tmp_bridge->xcall_input = xcall_input;
    tmp_bridge->polling_pkt = poll_pkt;
    tmp_bridge->private_data = priv_data;
    tmp_bridge->disabled = 0;

/*
    //initial receving buffer
    tmp_bridge->recv_buf.start = 0;
    tmp_bridge->recv_buf.end = 0;
    tmp_bridge->recv_buf.num = 0;
    if(v3_lock_init(&(tmp_bridge->recv_buf.lock)) == -1){
        PrintError("VNET: add bridge, error to initiate recv buf lock\n");
    }
    int i;
    for(i = 0; i<BRIDGE_BUF_SIZE; i++){
        tmp_bridge->recv_buf.pkts[i].data = &(tmp_bridge->recv_buf.datas[i*ETHERNET_PACKET_LEN]);
    }

*/
    
    // batching threshold is capped at the ring capacity
    tmp_bridge->max_delayed_pkts = (max_delayed_pkts<BRIDGE_BUF_SIZE)?max_delayed_pkts : BRIDGE_BUF_SIZE;
    tmp_bridge->max_latency = max_latency;
        
    // make this atomic to avoid possible race conditions
    flags = v3_lock_irqsave(vnet_state.lock);
    vnet_state.bridge = tmp_bridge;
    v3_unlock_irqrestore(vnet_state.lock, flags);

    return 0;
}
832
833
834 int v3_vnet_disable_bridge() {
835     unsigned long flags; 
836     
837     flags = v3_lock_irqsave(vnet_state.lock);
838
839     if (vnet_state.bridge != NULL) {
840         vnet_state.bridge->disabled = 1;
841     }
842
843     v3_unlock_irqrestore(vnet_state.lock, flags);
844
845     return 0;
846 }
847
848
849 int v3_vnet_enable_bridge() {
850     unsigned long flags; 
851     
852     flags = v3_lock_irqsave(vnet_state.lock);
853
854     if (vnet_state.bridge != NULL) {
855         vnet_state.bridge->disabled = 0;
856     }
857
858     v3_unlock_irqrestore(vnet_state.lock, flags);
859
860     return 0;
861 }
862
863
864
865 int V3_init_vnet() {
866     int i;
867
868     memset(&vnet_state, 0, sizeof(vnet_state));
869         
870     INIT_LIST_HEAD(&(vnet_state.routes));
871     INIT_LIST_HEAD(&(vnet_state.devs));
872
873     vnet_state.num_devs = 0;
874     vnet_state.num_routes = 0;
875
876     PrintDebug("VNET: Links and Routes tables initiated\n");
877
878     if (v3_lock_init(&(vnet_state.lock)) == -1){
879         PrintError("VNET: Failure to init lock for routes table\n");
880     }
881
882     PrintDebug("VNET: Locks initiated\n");
883     
884     //initial incoming pkt buffer
885     vnet_state.in_buf.start = 0;
886     vnet_state.in_buf.end = 0;
887     vnet_state.in_buf.num = 0;
888     if(v3_lock_init(&(vnet_state.in_buf.lock)) == -1){
889         PrintError("VNET: add bridge, error to initiate send buf lock\n");
890     }
891     for(i = 0; i<BRIDGE_BUF_SIZE; i++){
892         vnet_state.in_buf.pkts[i].data = &(vnet_state.in_buf.datas[i*ETHERNET_PACKET_LEN]);
893     }
894     PrintDebug("VNET: Receiving buffer initiated\n");
895
896     vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);
897
898     if (vnet_state.route_cache == NULL) {
899         PrintError("Vnet: Route Cache Init Fails\n");
900         return -1;
901     }
902
903     PrintDebug("VNET: initiated\n");
904
905     return 0;
906 }