Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


8cf2364c992532852f91759aae1f881f1c735fc6
[palacios.git] / palacios / src / vnet / vnet_core.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2010, Lei Xia <lxia@northwestern.edu> 
11  * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>  
12  * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Lei Xia <lxia@northwestern.edu>
16  *         Yuan Tang <ytang@northwestern.edu>
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21  
22 #include <vnet/vnet.h>
23 #include <vnet/vnet_hashtable.h>
24 #include <vnet/vnet_host.h>
25 #include <vnet/vnet_vmm.h>
26
/* When VNET debugging is compiled out, Vnet_Debug() expands to nothing. */
#ifndef V3_CONFIG_DEBUG_VNET
#undef Vnet_Debug
#define Vnet_Debug(fmt, args...)
#endif

/* Runtime verbosity level; >= 4 also hexdumps packets in vnet_tx_one_pkt. */
int net_debug = 0;
33
/* Ethernet frame header; packed so it overlays raw packet bytes exactly. */
struct eth_hdr {
    uint8_t dst_mac[ETH_ALEN];
    uint8_t src_mac[ETH_ALEN];
    uint16_t type; /* indicates layer 3 protocol type */
} __attribute__((packed));
39
40
/* A registered VNET endpoint (guest/host NIC backend).
 * dev_ops.input delivers a packet into the device; private_data is the
 * backend's opaque handle passed back on every callback. */
struct vnet_dev {
    int dev_id;                     /* unique id from vnet_state.dev_idx; 0 = unassigned */
    uint8_t mac_addr[ETH_ALEN];
    struct v3_vm_info * vm;
    struct v3_vnet_dev_ops dev_ops;
    void * private_data;

    struct list_head node;          /* membership in vnet_state.devs */
} __attribute__((packed));          /* NOTE(review): packing a struct holding
                                       pointers/list_heads looks unnecessary — confirm */
50
51
/* The single external bridge. brg_ops.input forwards a packet out of VNET;
 * see v3_vnet_add_bridge / v3_vnet_del_bridge for lifecycle. */
struct vnet_brg_dev {
    struct v3_vm_info * vm;
    struct v3_vnet_bridge_ops brg_ops;

    uint8_t type;                   /* bridge type tag, matched in v3_vnet_del_bridge */

    void * private_data;
} __attribute__((packed));
60
61
62
/* One routing-table entry wrapping the user-supplied route definition
 * plus resolved device pointers and list linkage. */
struct vnet_route_info {
    struct v3_vnet_route route_def;

    struct vnet_dev * dst_dev;      /* resolved when dst_type == LINK_INTERFACE */
    struct vnet_dev * src_dev;      /* resolved when src_type == LINK_INTERFACE */

    uint32_t idx;                   /* unique id from vnet_state.route_idx */

    struct list_head node;          /* membership in vnet_state.routes */
    struct list_head match_node;    // used for route matching (transient, see match_route)
};
74
75
/* Cached result of a route lookup: the packet-hash key followed by a
 * flexible array of matching routes.  Owned by the route cache once
 * inserted (freed when the hashtable is destroyed/cleared). */
struct route_list {
    uint8_t hash_buf[VNET_HASH_SIZE];   /* cache key: copy of pkt->hash_buf */

    uint32_t num_routes;
    struct vnet_route_info * routes[0]; /* trailing variable-length array */
} __attribute__((packed));
82
83
/* One slot of the async packet queue.  'use' is the producer/consumer
 * handshake flag: producer sets it after filling, consumer clears it. */
struct queue_entry{
    uint8_t use;            /* 1 = filled and owned by consumer, 0 = free */
    struct v3_vnet_pkt pkt;
    uint8_t * data;         /* per-slot buffer; pkt.data should point here */
    uint32_t size_alloc;    /* current capacity of 'data' in bytes */
};
90
#define VNET_QUEUE_SIZE 1024
/* Fixed-size ring buffer of packets awaiting transmission by the
 * flush thread; lock guards head/tail/count only (slot contents are
 * protected by each entry's 'use' flag). */
struct vnet_queue {
    struct queue_entry buf[VNET_QUEUE_SIZE];
    int head, tail;         /* consumer reads at head, producer writes at tail */
    int count;
    vnet_lock_t lock;
};
98
/* Global VNET singleton: device/route registries, the (single) bridge,
 * statistics, the async packet queue, and the route-lookup cache. */
static struct {
    struct list_head routes;        /* all vnet_route_info entries */
    struct list_head devs;          /* all registered vnet_dev endpoints */
    
    uint32_t num_routes;
    uint32_t route_idx;             /* monotonic id counter for routes */
    uint32_t num_devs;
    uint32_t dev_idx;               /* monotonic id counter for devices */

    struct vnet_brg_dev * bridge;   /* at most one; (void *)1 while being set up */

    vnet_lock_t lock;               /* guards routes, devs, bridge, stats */
    struct vnet_stat stats;

    struct vnet_thread * pkt_flush_thread;  /* runs vnet_tx_flush, drains pkt_q */

    struct vnet_queue pkt_q;        /* queue for asynchronous sends */

    struct hashtable * route_cache; /* pkt hash_buf -> struct route_list * */
} vnet_state;
119         
120
121 #ifdef V3_CONFIG_DEBUG_VNET
/* Format a 6-byte MAC address into buf as "xx:xx:xx:xx:xx:xx".
 * buf must hold at least 18 bytes (callers pass 50/100-byte buffers).
 * Fixed: use "%02x" so single-digit octets are zero-padded; the original
 * "%2x" space-pads, producing " 1:ab:..." instead of "01:ab:...". */
static inline void mac2str(uint8_t * mac, char * buf) {
    snprintf(buf, 100, "%02x:%02x:%02x:%02x:%02x:%02x", 
             mac[0], mac[1], mac[2],
             mac[3], mac[4], mac[5]);
}
127
128 static void print_route(struct v3_vnet_route * route){
129     char str[50];
130
131     mac2str(route->src_mac, str);
132     Vnet_Debug("Src Mac (%s),  src_qual (%d)\n", 
133                str, route->src_mac_qual);
134     mac2str(route->dst_mac, str);
135     Vnet_Debug("Dst Mac (%s),  dst_qual (%d)\n", 
136                str, route->dst_mac_qual);
137     Vnet_Debug("Src dev id (%d), src type (%d)", 
138                route->src_id, 
139                route->src_type);
140     Vnet_Debug("Dst dev id (%d), dst type (%d)\n", 
141                route->dst_id, 
142                route->dst_type);
143 }
144
145 static void dump_routes(){
146     struct vnet_route_info *route;
147
148     int i = 0;
149     Vnet_Debug("\n========Dump routes starts ============\n");
150     list_for_each_entry(route, &(vnet_state.routes), node) {
151         Vnet_Debug("\nroute %d:\n", i++);
152                 
153         print_route(&(route->route_def));
154         if (route->route_def.dst_type == LINK_INTERFACE) {
155             Vnet_Debug("dst_dev (%p), dst_dev_id (%d), dst_dev_ops(%p), dst_dev_data (%p)\n",
156                 route->dst_dev,
157                 route->dst_dev->dev_id,
158                 (void *)&(route->dst_dev->dev_ops),
159                 route->dst_dev->private_data);
160         }
161     }
162
163     Vnet_Debug("\n========Dump routes end ============\n");
164 }
165
166 #endif
167
168
169 /* 
170  * A VNET packet is a packed struct with the hashed fields grouped together.
171  * This means we can generate the hash from an offset into the pkt struct
172  */
173 static inline uint_t hash_fn(addr_t hdr_ptr) {    
174     uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
175
176     return vnet_hash_buffer(hdr_buf, VNET_HASH_SIZE);
177 }
178
179 static inline int hash_eq(addr_t key1, addr_t key2) {   
180     return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
181 }
182
183 static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
184     memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);    
185
186     if (vnet_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
187         PrintError("VNET/P Core: Failed to insert new route entry to the cache\n");
188         return -1;
189     }
190     
191     return 0;
192 }
193
194 static int clear_hash_cache() {
195     vnet_free_htable(vnet_state.route_cache, 1, 1);
196     vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
197
198     return 0;
199 }
200
201 static int look_into_cache(const struct v3_vnet_pkt * pkt, 
202                            struct route_list ** routes) {
203     *routes = (struct route_list *)vnet_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
204    
205     return 0;
206 }
207
208
209 static struct vnet_dev * dev_by_id(int idx) {
210     struct vnet_dev * dev = NULL; 
211
212     list_for_each_entry(dev, &(vnet_state.devs), node) {
213         int dev_id = dev->dev_id;
214
215         if (dev_id == idx)
216             return dev;
217     }
218
219     return NULL;
220 }
221
222 static struct vnet_dev * dev_by_mac(uint8_t * mac) {
223     struct vnet_dev * dev = NULL; 
224     
225     list_for_each_entry(dev, &(vnet_state.devs), node) {
226         if (!compare_ethaddr(dev->mac_addr, mac)){
227             return dev;
228         }
229     }
230
231     return NULL;
232 }
233
234
235 int v3_vnet_find_dev(uint8_t  * mac) {
236     struct vnet_dev * dev = NULL;
237
238     dev = dev_by_mac(mac);
239
240     if(dev != NULL) {
241         return dev->dev_id;
242     }
243
244     return -1;
245 }
246
247
248 int v3_vnet_add_route(struct v3_vnet_route route) {
249     struct vnet_route_info * new_route = NULL;
250     unsigned long flags; 
251
252     new_route = (struct vnet_route_info *)Vnet_Malloc(sizeof(struct vnet_route_info));
253     memset(new_route, 0, sizeof(struct vnet_route_info));
254
255 #ifdef V3_CONFIG_DEBUG_VNET
256     Vnet_Debug("VNET/P Core: add_route_entry:\n");
257     print_route(&route);
258 #endif
259     
260     memcpy(new_route->route_def.src_mac, route.src_mac, ETH_ALEN);
261     memcpy(new_route->route_def.dst_mac, route.dst_mac, ETH_ALEN);
262     new_route->route_def.src_mac_qual = route.src_mac_qual;
263     new_route->route_def.dst_mac_qual = route.dst_mac_qual;
264     new_route->route_def.dst_type = route.dst_type;
265     new_route->route_def.src_type = route.src_type;
266     new_route->route_def.src_id = route.src_id;
267     new_route->route_def.dst_id = route.dst_id;
268
269     if (new_route->route_def.dst_type == LINK_INTERFACE) {
270         new_route->dst_dev = dev_by_id(new_route->route_def.dst_id);
271     }
272
273     if (new_route->route_def.src_type == LINK_INTERFACE) {
274         new_route->src_dev = dev_by_id(new_route->route_def.src_id);
275     }
276
277
278     flags = vnet_lock_irqsave(vnet_state.lock);
279
280     list_add(&(new_route->node), &(vnet_state.routes));
281     new_route->idx = ++ vnet_state.route_idx;
282     vnet_state.num_routes ++;
283         
284     vnet_unlock_irqrestore(vnet_state.lock, flags);
285
286     clear_hash_cache();
287
288 #ifdef V3_CONFIG_DEBUG_VNET
289     dump_routes();
290 #endif
291
292     return new_route->idx;
293 }
294
295
296 void v3_vnet_del_route(uint32_t route_idx){
297     struct vnet_route_info * route = NULL;
298     unsigned long flags; 
299
300     flags = vnet_lock_irqsave(vnet_state.lock);
301
302     list_for_each_entry(route, &(vnet_state.routes), node) {
303         if(route->idx == route_idx){
304             list_del(&(route->node));
305             list_del(&(route->match_node));
306             Vnet_Free(route);    
307         }
308     }
309
310     vnet_unlock_irqrestore(vnet_state.lock, flags);
311 }
312
313
314 /* delete all route entries with specfied src or dst device id */ 
315 static void inline del_routes_by_dev(int dev_id){
316     struct vnet_route_info * route = NULL;
317     unsigned long flags; 
318
319     flags = vnet_lock_irqsave(vnet_state.lock);
320
321     list_for_each_entry(route, &(vnet_state.routes), node) {
322         if((route->route_def.dst_type == LINK_INTERFACE &&
323              route->route_def.dst_id == dev_id) ||
324              (route->route_def.src_type == LINK_INTERFACE &&
325               route->route_def.src_id == dev_id)){
326               
327             list_del(&(route->node));
328             list_del(&(route->match_node));
329             Vnet_Free(route);    
330         }
331     }
332
333     vnet_unlock_irqrestore(vnet_state.lock, flags);
334 }
335
336 /* At the end allocate a route_list
337  * This list will be inserted into the cache so we don't need to free it
338  */
339 static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
340     struct vnet_route_info * route = NULL; 
341     struct route_list * matches = NULL;
342     int num_matches = 0;
343     int max_rank = 0;
344     struct list_head match_list;
345     struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
346     //  uint8_t src_type = pkt->src_type;
347     //  uint32_t src_link = pkt->src_id;
348
349 #ifdef V3_CONFIG_DEBUG_VNET
350     {
351         char dst_str[100];
352         char src_str[100];
353
354         mac2str(hdr->src_mac, src_str);  
355         mac2str(hdr->dst_mac, dst_str);
356         Vnet_Debug("VNET/P Core: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
357     }
358 #endif
359
360     INIT_LIST_HEAD(&match_list);
361     
362 #define UPDATE_MATCHES(rank) do {                               \
363         if (max_rank < (rank)) {                                \
364             max_rank = (rank);                                  \
365             INIT_LIST_HEAD(&match_list);                        \
366                                                                 \
367             list_add(&(route->match_node), &match_list);        \
368             num_matches = 1;                                    \
369         } else if (max_rank == (rank)) {                        \
370             list_add(&(route->match_node), &match_list);        \
371             num_matches++;                                      \
372         }                                                       \
373     } while (0)
374     
375
376     list_for_each_entry(route, &(vnet_state.routes), node) {
377         struct v3_vnet_route * route_def = &(route->route_def);
378
379 /*
380         // CHECK SOURCE TYPE HERE
381         if ( (route_def->src_type != LINK_ANY) && 
382              ( (route_def->src_type != src_type) || 
383                ( (route_def->src_id != src_link) &&
384                  (route_def->src_id != -1)))) {
385             continue;
386         }
387 */
388
389         if ((route_def->dst_mac_qual == MAC_ANY) &&
390             (route_def->src_mac_qual == MAC_ANY)) {      
391             UPDATE_MATCHES(3);
392         }
393         
394         if (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) {
395             if (route_def->src_mac_qual != MAC_NOT) {
396                 if (route_def->dst_mac_qual == MAC_ANY) {
397                     UPDATE_MATCHES(6);
398                 } else if (route_def->dst_mac_qual != MAC_NOT &&
399                            memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
400                     UPDATE_MATCHES(8);
401                 }
402             }
403         }
404             
405         if (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
406             if (route_def->dst_mac_qual != MAC_NOT) {
407                 if (route_def->src_mac_qual == MAC_ANY) {
408                     UPDATE_MATCHES(6);
409                 } else if ((route_def->src_mac_qual != MAC_NOT) && 
410                            (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
411                     UPDATE_MATCHES(8);
412                 }
413             }
414         }
415             
416         if ((route_def->dst_mac_qual == MAC_NOT) &&
417             (memcmp(route_def->dst_mac, hdr->dst_mac, 6) != 0)) {
418             if (route_def->src_mac_qual == MAC_ANY) {
419                 UPDATE_MATCHES(5);
420             } else if ((route_def->src_mac_qual != MAC_NOT) && 
421                        (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {     
422                 UPDATE_MATCHES(7);
423             }
424         }
425         
426         if ((route_def->src_mac_qual == MAC_NOT) &&
427             (memcmp(route_def->src_mac, hdr->src_mac, 6) != 0)) {
428             if (route_def->dst_mac_qual == MAC_ANY) {
429                 UPDATE_MATCHES(5);
430             } else if ((route_def->dst_mac_qual != MAC_NOT) &&
431                        (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0)) {
432                 UPDATE_MATCHES(7);
433             }
434         }
435         
436         // Default route
437         if ( (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) &&
438              (route_def->dst_mac_qual == MAC_NONE)) {
439             UPDATE_MATCHES(4);
440         }
441     }
442
443     Vnet_Debug("VNET/P Core: match_route: Matches=%d\n", num_matches);
444
445     if (num_matches == 0) {
446         return NULL;
447     }
448
449     matches = (struct route_list *)Vnet_Malloc(sizeof(struct route_list) + 
450                                                (sizeof(struct vnet_route_info *) * num_matches));
451
452     matches->num_routes = num_matches;
453
454     {
455         int i = 0;
456         list_for_each_entry(route, &match_list, match_node) {
457             matches->routes[i++] = route;
458         }
459     }
460
461     return matches;
462 }
463
464
/* Transmit one packet synchronously: find its matching routes (cache
 * first, full table scan on a miss) and hand it to each destination —
 * either the bridge (LINK_EDGE) or a registered device (LINK_INTERFACE).
 * Returns 0 even when the packet is dropped (no route / failed delivery).
 * private_data is unused.
 * NOTE(review): matched_routes is dereferenced after the lock is
 * released; a concurrent route deletion that clears the cache could
 * free it — confirm the intended locking contract. */
int vnet_tx_one_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
    struct route_list * matched_routes = NULL;
    unsigned long flags;
    int i;

    int cpu = V3_Get_CPU();
    Vnet_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
                  cpu, pkt->size, pkt->src_id, 
                  pkt->src_type, pkt->dst_id, pkt->dst_type);
    if(net_debug >= 4){
            v3_hexdump(pkt->data, pkt->size, NULL, 0);
    }

    flags = vnet_lock_irqsave(vnet_state.lock);

    /* Everything entering VNET counts as "rx" regardless of outcome. */
    vnet_state.stats.rx_bytes += pkt->size;
    vnet_state.stats.rx_pkts++;

    look_into_cache(pkt, &matched_routes);
    if (matched_routes == NULL) {  
        Vnet_Debug("VNET/P Core: send pkt Looking into routing table\n");
        
        /* Cache miss: do the full table scan (requires the lock held). */
        matched_routes = match_route(pkt);
        
        if (matched_routes) {
            add_route_to_cache(pkt, matched_routes);
        } else {
            Vnet_Debug("VNET/P Core: Could not find route for packet... discards packet\n");
            vnet_unlock_irqrestore(vnet_state.lock, flags);
            return 0; /* do we return -1 here?*/
        }
    }

    vnet_unlock_irqrestore(vnet_state.lock, flags);

    Vnet_Debug("VNET/P Core: send pkt route matches %d\n", matched_routes->num_routes);

    /* Deliver to every matched destination; failures skip to the next. */
    for (i = 0; i < matched_routes->num_routes; i++) {
        struct vnet_route_info * route = matched_routes->routes[i];
        
        if (route->route_def.dst_type == LINK_EDGE) {
            struct vnet_brg_dev * bridge = vnet_state.bridge;
            /* Rewrite the packet's destination before handing it off. */
            pkt->dst_type = LINK_EDGE;
            pkt->dst_id = route->route_def.dst_id;

            if (bridge == NULL) {
                Vnet_Print(2, "VNET/P Core: No active bridge to sent data to\n");
                continue;
            }

            if(bridge->brg_ops.input(bridge->vm, pkt, bridge->private_data) < 0){
                Vnet_Print(2, "VNET/P Core: Packet not sent properly to bridge\n");
                continue;
            }         
            vnet_state.stats.tx_bytes += pkt->size;
            vnet_state.stats.tx_pkts ++;
        } else if (route->route_def.dst_type == LINK_INTERFACE) {
            /* dst_dev is resolved at route-add time and may be NULL. */
            if (route->dst_dev == NULL){
                  Vnet_Print(2, "VNET/P Core: No active device to sent data to\n");
                continue;
            }

            if(route->dst_dev->dev_ops.input(route->dst_dev->vm, pkt, route->dst_dev->private_data) < 0) {
                Vnet_Print(2, "VNET/P Core: Packet not sent properly\n");
                continue;
            }
            vnet_state.stats.tx_bytes += pkt->size;
            vnet_state.stats.tx_pkts ++;
        } else {
            Vnet_Print(0, "VNET/P Core: Wrong dst type\n");
        }
    }
    
    return 0;
}
540
541
542 static int vnet_pkt_enqueue(struct v3_vnet_pkt * pkt){
543     unsigned long flags;
544     struct queue_entry * entry;
545     struct vnet_queue * q = &(vnet_state.pkt_q);
546     uint16_t num_pages;
547
548     flags = vnet_lock_irqsave(q->lock);
549
550     if (q->count >= VNET_QUEUE_SIZE){
551         Vnet_Print(1, "VNET Queue overflow!\n");
552         vnet_unlock_irqrestore(q->lock, flags);
553         return -1;
554     }
555         
556     q->count ++;
557     entry = &(q->buf[q->tail++]);
558     q->tail %= VNET_QUEUE_SIZE;
559         
560     vnet_unlock_irqrestore(q->lock, flags);
561
562     /* this is ugly, but should happen very unlikely */
563     while(entry->use);
564
565     if(entry->size_alloc < pkt->size){
566         if(entry->data != NULL){
567             Vnet_FreePages(Vnet_PAddr(entry->data), (entry->size_alloc / PAGE_SIZE));
568             entry->data = NULL;
569         }
570
571         num_pages = 1 + (pkt->size / PAGE_SIZE);
572         entry->data = Vnet_VAddr(Vnet_AllocPages(num_pages));
573         if(entry->data == NULL){
574             return -1;
575         }
576         entry->size_alloc = PAGE_SIZE * num_pages;
577     }
578
579     entry->pkt.data = entry->data;
580     memcpy(&(entry->pkt), pkt, sizeof(struct v3_vnet_pkt));
581     memcpy(entry->data, pkt->data, pkt->size);
582
583     entry->use = 1;
584
585     return 0;
586 }
587
588
589 int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data, int synchronize) {
590     if(synchronize){
591         vnet_tx_one_pkt(pkt, NULL);
592     }else {
593        vnet_pkt_enqueue(pkt);
594        Vnet_Print(2, "VNET/P Core: Put pkt into Queue: pkt size %d\n", pkt->size);
595     }
596         
597     return 0;
598 }
599
600 int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac, 
601                     struct v3_vnet_dev_ops *ops,
602                     void * priv_data){
603     struct vnet_dev * new_dev = NULL;
604     unsigned long flags;
605
606     new_dev = (struct vnet_dev *)Vnet_Malloc(sizeof(struct vnet_dev)); 
607
608     if (new_dev == NULL) {
609         Vnet_Print(0, "Malloc fails\n");
610         return -1;
611     }
612    
613     memcpy(new_dev->mac_addr, mac, 6);
614     new_dev->dev_ops.input = ops->input;
615     new_dev->private_data = priv_data;
616     new_dev->vm = vm;
617     new_dev->dev_id = 0;
618
619     flags = vnet_lock_irqsave(vnet_state.lock);
620
621     if (dev_by_mac(mac) == NULL) {
622         list_add(&(new_dev->node), &(vnet_state.devs));
623         new_dev->dev_id = ++ vnet_state.dev_idx;
624         vnet_state.num_devs ++;
625     }
626
627     vnet_unlock_irqrestore(vnet_state.lock, flags);
628
629     /* if the device was found previosly the id should still be 0 */
630     if (new_dev->dev_id == 0) {
631         Vnet_Print(0, "VNET/P Core: Device Already exists\n");
632         return -1;
633     }
634
635     Vnet_Debug("VNET/P Core: Add Device: dev_id %d\n", new_dev->dev_id);
636
637     return new_dev->dev_id;
638 }
639
640
641 int v3_vnet_del_dev(int dev_id){
642     struct vnet_dev * dev = NULL;
643     unsigned long flags;
644
645     flags = vnet_lock_irqsave(vnet_state.lock);
646         
647     dev = dev_by_id(dev_id);
648     if (dev != NULL){
649         list_del(&(dev->node));
650         //del_routes_by_dev(dev_id);
651         vnet_state.num_devs --;
652     }
653         
654     vnet_unlock_irqrestore(vnet_state.lock, flags);
655
656     Vnet_Free(dev);
657
658     Vnet_Debug("VNET/P Core: Remove Device: dev_id %d\n", dev_id);
659
660     return 0;
661 }
662
663
664 int v3_vnet_stat(struct vnet_stat * stats){
665     stats->rx_bytes = vnet_state.stats.rx_bytes;
666     stats->rx_pkts = vnet_state.stats.rx_pkts;
667     stats->tx_bytes = vnet_state.stats.tx_bytes;
668     stats->tx_pkts = vnet_state.stats.tx_pkts;
669
670     return 0;
671 }
672
673 static void deinit_devices_list(){
674     struct vnet_dev * dev = NULL; 
675
676     list_for_each_entry(dev, &(vnet_state.devs), node) {
677         list_del(&(dev->node));
678         Vnet_Free(dev);
679     }
680 }
681
682 static void deinit_routes_list(){
683     struct vnet_route_info * route = NULL; 
684
685     list_for_each_entry(route, &(vnet_state.routes), node) {
686         list_del(&(route->node));
687         list_del(&(route->match_node));
688         Vnet_Free(route);
689     }
690 }
691
/* Register the single global bridge (input/poll callbacks from ops).
 * Returns 0 on success, -1 if a bridge is already set or malloc fails. */
int v3_vnet_add_bridge(struct v3_vm_info * vm,
                       struct v3_vnet_bridge_ops * ops,
                       uint8_t type,
                       void * priv_data) {
    unsigned long flags;
    int bridge_free = 0;
    struct vnet_brg_dev * tmp_bridge = NULL;    
    
    /* Reserve the bridge slot under the lock with a (void *)1 placeholder
     * so a racing add_bridge fails fast while we allocate outside the lock. */
    flags = vnet_lock_irqsave(vnet_state.lock);
    if (vnet_state.bridge == NULL) {
        bridge_free = 1;
        vnet_state.bridge = (void *)1;
    }
    vnet_unlock_irqrestore(vnet_state.lock, flags);

    if (bridge_free == 0) {
        PrintError("VNET/P Core: Bridge already set\n");
        return -1;
    }

    tmp_bridge = (struct vnet_brg_dev *)Vnet_Malloc(sizeof(struct vnet_brg_dev));

    if (tmp_bridge == NULL) {
        PrintError("Malloc Fails\n");
        /* Release the placeholder so a later add_bridge can succeed. */
        vnet_state.bridge = NULL;
        return -1;
    }
    
    tmp_bridge->vm = vm;
    tmp_bridge->brg_ops.input = ops->input;
    tmp_bridge->brg_ops.poll = ops->poll;
    tmp_bridge->private_data = priv_data;
    tmp_bridge->type = type;
        
    /* make this atomic to avoid possible race conditions */
    flags = vnet_lock_irqsave(vnet_state.lock);
    vnet_state.bridge = tmp_bridge;
    vnet_unlock_irqrestore(vnet_state.lock, flags);

    return 0;
}
733
734
735 void v3_vnet_del_bridge(uint8_t type) {
736     unsigned long flags;
737     struct vnet_brg_dev * tmp_bridge = NULL;    
738     
739     flags = vnet_lock_irqsave(vnet_state.lock);
740         
741     if (vnet_state.bridge != NULL && vnet_state.bridge->type == type) {
742         tmp_bridge = vnet_state.bridge;
743        vnet_state.bridge = NULL;
744     }
745         
746     vnet_unlock_irqrestore(vnet_state.lock, flags);
747
748     if (tmp_bridge) {
749         Vnet_Free(tmp_bridge);
750     }
751 }
752
753
/* Consumer loop run by vnet_state.pkt_flush_thread: busy-polls the async
 * queue and transmits each queued packet via vnet_tx_one_pkt.
 * Pairs with vnet_pkt_enqueue: the slot index is claimed under q->lock,
 * then the entry's 'use' flag hands the contents from producer to
 * consumer.  args is unused; returns 0 when asked to stop. */
static int vnet_tx_flush(void *args){
    unsigned long flags;
    struct queue_entry * entry;
    struct vnet_queue * q = &(vnet_state.pkt_q);

    Vnet_Print(0, "VNET/P Handing Pkt Thread Starting ....\n");

    /* we need thread sleep/wakeup in Palacios */
    while(!vnet_thread_should_stop()){
        flags = vnet_lock_irqsave(q->lock);

        if (q->count <= 0){
            /* Queue empty: yield instead of spinning hot. */
            vnet_unlock_irqrestore(q->lock, flags);
            Vnet_Yield();
        }else {
            /* Claim the head slot; the producer may still be filling it. */
            q->count --;
            entry = &(q->buf[q->head++]);
            q->head %= VNET_QUEUE_SIZE;

            vnet_unlock_irqrestore(q->lock, flags);

            /* this is ugly, but should happen very unlikely:
             * wait for the producer to publish the entry (use == 1). */
            while(!entry->use);
            vnet_tx_one_pkt(&(entry->pkt), NULL);

            /* asynchronizely release allocated memory for buffer entry here */     
            entry->use = 0;

            Vnet_Print(2, "vnet_tx_flush: pkt (size %d)\n", entry->pkt.size);   
        }
    }

    return 0;
}
788
789 int v3_init_vnet() {
790     memset(&vnet_state, 0, sizeof(vnet_state));
791         
792     INIT_LIST_HEAD(&(vnet_state.routes));
793     INIT_LIST_HEAD(&(vnet_state.devs));
794
795     vnet_state.num_devs = 0;
796     vnet_state.num_routes = 0;
797
798     if (vnet_lock_init(&(vnet_state.lock)) == -1){
799         PrintError("VNET/P Core: Fails to initiate lock\n");
800     }
801
802     vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
803     if (vnet_state.route_cache == NULL) {
804         PrintError("VNET/P Core: Fails to initiate route cache\n");
805         return -1;
806     }
807
808     vnet_lock_init(&(vnet_state.pkt_q.lock));
809
810     vnet_state.pkt_flush_thread = vnet_start_thread(vnet_tx_flush, NULL, "VNET_Pkts");
811
812     Vnet_Debug("VNET/P Core is initiated\n");
813
814     return 0;
815 }
816
817
/* Tear down global VNET state: release the state lock, free all devices
 * and routes, destroy the route cache, and free the bridge.
 * NOTE(review): the pkt-flush thread is not stopped here and the
 * pkt_q lock is never deinitialized — verify callers stop the thread
 * first (vnet_thread_should_stop implies a stop mechanism exists). */
void v3_deinit_vnet(){

    vnet_lock_deinit(&(vnet_state.lock));

    deinit_devices_list();
    deinit_routes_list();

    /* (1,1) presumably frees both keys and cached route_list values —
     * matches clear_hash_cache's usage; TODO confirm. */
    vnet_free_htable(vnet_state.route_cache, 1, 1);
    /* bridge may be NULL — assumes Vnet_Free(NULL) is a no-op; TODO confirm */
    Vnet_Free(vnet_state.bridge);
}
828
829