Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


3a1ad38378b037ebf36ef85d8b35c4f20eaadfc9
[palacios.git] / palacios / src / vnet / vnet_core.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2010, Lei Xia <lxia@northwestern.edu> 
11  * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>  
12  * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Lei Xia <lxia@northwestern.edu>
16  *         Yuan Tang <ytang@northwestern.edu>
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21  
22 #include <vnet/vnet.h>
23 #include <vnet/vnet_hashtable.h>
24 #include <vnet/vnet_host.h>
25 #include <vnet/vnet_vmm.h>
26
/* When VNET debugging is compiled out, turn Vnet_Debug into a no-op so the
 * debug format strings in this file cost nothing at runtime. */
#ifndef V3_CONFIG_DEBUG_VNET
#undef Vnet_Debug
#define Vnet_Debug(fmt, args...)
#endif

/* Runtime verbosity knob consulted before expensive tracing (e.g. the
 * hexdump in vnet_tx_one_pkt fires only when net_debug >= 4). */
int net_debug = 0;
33
/* Ethernet frame header, overlaid on the first bytes of pkt->data by
 * match_route() to extract the source/destination MACs. */
struct eth_hdr {
    uint8_t dst_mac[ETH_ALEN];  /* destination MAC */
    uint8_t src_mac[ETH_ALEN];  /* source MAC */
    uint16_t type; /* indicates layer 3 protocol type */
} __attribute__((packed));
39
40
/* A VNET endpoint device, registered via v3_vnet_add_dev() and linked into
 * vnet_state.devs. */
struct vnet_dev {
    int dev_id;                      /* unique id assigned from vnet_state.dev_idx */
    uint8_t mac_addr[ETH_ALEN];      /* MAC used for duplicate detection and lookup */
    struct v3_vm_info * vm;          /* owning VM, passed back to dev_ops.input */
    struct v3_vnet_dev_ops dev_ops;  /* only .input is copied by v3_vnet_add_dev */
    void * private_data;             /* opaque cookie handed back to dev_ops.input */

    struct list_head node;           /* membership in vnet_state.devs */
} __attribute__((packed)); /* NOTE(review): packing a struct of pointers/list heads
                            * brings no wire-format benefit and may cause misaligned
                            * access on some platforms — confirm why this is packed. */
50
51
/* The single bridge to the outside world, installed by v3_vnet_add_bridge().
 * vnet_state.bridge points at one of these (or NULL when no bridge is set). */
struct vnet_brg_dev {
    struct v3_vm_info * vm;           /* VM context passed back to brg_ops.input */
    struct v3_vnet_bridge_ops brg_ops;/* .input and .poll are copied at registration */

    uint8_t type;                     /* bridge type tag, matched by v3_vnet_del_bridge */

    void * private_data;              /* opaque cookie handed back to brg_ops callbacks */
} __attribute__((packed));
60
61
62
/* One entry of the route table: the caller-supplied rule plus resolved
 * device pointers and list linkage. */
struct vnet_route_info {
    struct v3_vnet_route route_def;  /* the matching rule as supplied by the caller */

    struct vnet_dev * dst_dev;  /* resolved when dst_type == LINK_INTERFACE (may be NULL) */
    struct vnet_dev * src_dev;  /* resolved when src_type == LINK_INTERFACE (may be NULL) */

    uint32_t idx;               /* unique id assigned from vnet_state.route_idx */

    struct list_head node;       /* membership in vnet_state.routes */
    struct list_head match_node; // used for route matching
};
74
75
76 struct route_list {
77     uint8_t hash_buf[VNET_HASH_SIZE];
78
79     uint32_t num_routes;
80     struct vnet_route_info * routes[0];
81 } __attribute__((packed));
82
83
/* One slot of the transmit queue.  The payload buffer (data) is allocated
 * lazily in vnet_pkt_enqueue() and reused across packets. */
struct queue_entry{
    uint8_t use;             /* 1 while the slot holds a packet awaiting flush, 0 when free */
    struct v3_vnet_pkt pkt;  /* copy of the queued packet descriptor */
    uint8_t * data;          /* queue-owned copy of the packet payload */
    uint32_t size_alloc;     /* bytes currently allocated behind data */
};
90
/* Fixed-capacity ring buffer of packets awaiting transmission by the
 * flush thread (vnet_tx_flush). */
#define VNET_QUEUE_SIZE 1024
struct vnet_queue {
    struct queue_entry buf[VNET_QUEUE_SIZE];
    int head, tail;    /* consumer / producer indices, wrapped modulo VNET_QUEUE_SIZE */
    int count;         /* number of reserved slots */
    vnet_lock_t lock;  /* protects head, tail, and count */
};
98
/* Global VNET singleton: route table, device list, bridge, stats, the
 * transmit queue, and the route-match cache.  Initialized by v3_init_vnet(). */
static struct {
    struct list_head routes;  /* all vnet_route_info entries */
    struct list_head devs;    /* all vnet_dev entries */

    uint8_t status;           /* NOTE(review): never read or written in this file — confirm use */
   
    uint32_t num_routes;      /* current route count */
    uint32_t route_idx;       /* monotonically increasing route id generator */
    uint32_t num_devs;        /* current device count */
    uint32_t dev_idx;         /* monotonically increasing device id generator */

    struct vnet_brg_dev * bridge; /* the single external bridge, or NULL */

    vnet_lock_t lock;         /* protects routes/devs lists, counters, and bridge */
    struct vnet_stat stats;   /* rx/tx byte and packet counters */

    struct vnet_thread * pkt_flush_thread; /* runs vnet_tx_flush() */

    struct vnet_queue pkt_q;  /* asynchronous transmit queue */

    struct hashtable * route_cache; /* pkt hash bytes -> struct route_list * */
} vnet_state;
121         
122
123 #ifdef V3_CONFIG_DEBUG_VNET
/* Format a 6-byte MAC address into buf as "xx:xx:xx:xx:xx:xx".
 * buf must hold at least 18 bytes (17 characters + NUL); the snprintf bound
 * now states that requirement instead of the previous hard-coded 100, which
 * overstated the smallest caller buffer (print_route passes a char[50]).
 * "%02x" zero-pads single-digit octets; the original "%2x" space-padded
 * them, producing e.g. " a" instead of "0a". */
static inline void mac2str(uint8_t * mac, char * buf) {
    snprintf(buf, 18, "%02x:%02x:%02x:%02x:%02x:%02x",
             mac[0], mac[1], mac[2],
             mac[3], mac[4], mac[5]);
}
129
/* Dump one route rule via Vnet_Debug (debug builds only). */
static void print_route(struct v3_vnet_route * route){
    char str[50]; /* mac2str emits at most 17 chars + NUL for uint8_t octets */

    mac2str(route->src_mac, str);
    Vnet_Debug("Src Mac (%s),  src_qual (%d)\n", 
               str, route->src_mac_qual);
    mac2str(route->dst_mac, str);
    Vnet_Debug("Dst Mac (%s),  dst_qual (%d)\n", 
               str, route->dst_mac_qual);
    Vnet_Debug("Src dev id (%d), src type (%d)", 
               route->src_id, 
               route->src_type);
    Vnet_Debug("Dst dev id (%d), dst type (%d)\n", 
               route->dst_id, 
               route->dst_type);
}
146
/* Walk the whole route table and print every rule (debug builds only).
 * Caller must ensure the routes list is stable while iterating. */
static void dump_routes(){
    struct vnet_route_info *route;

    int i = 0;
    Vnet_Debug("\n========Dump routes starts ============\n");
    list_for_each_entry(route, &(vnet_state.routes), node) {
        Vnet_Debug("\nroute %d:\n", i++);
                
        print_route(&(route->route_def));
        /* Interface routes also show the resolved destination device. */
        if (route->route_def.dst_type == LINK_INTERFACE) {
            Vnet_Debug("dst_dev (%p), dst_dev_id (%d), dst_dev_ops(%p), dst_dev_data (%p)\n",
                route->dst_dev,
                route->dst_dev->dev_id,
                (void *)&(route->dst_dev->dev_ops),
                route->dst_dev->private_data);
        }
    }

    Vnet_Debug("\n========Dump routes end ============\n");
}
167
168 #endif
169
170
171 /* 
172  * A VNET packet is a packed struct with the hashed fields grouped together.
173  * This means we can generate the hash from an offset into the pkt struct
174  */
175 static inline uint_t hash_fn(addr_t hdr_ptr) {    
176     uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
177
178     return vnet_hash_buffer(hdr_buf, VNET_HASH_SIZE);
179 }
180
181 static inline int hash_eq(addr_t key1, addr_t key2) {   
182     return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
183 }
184
185 static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
186     memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);    
187
188     if (vnet_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
189         PrintError("VNET/P Core: Failed to insert new route entry to the cache\n");
190         return -1;
191     }
192     
193     return 0;
194 }
195
/* Invalidate the route cache: destroy the hashtable and replace it with a
 * fresh empty one.  Called after any route-table change so stale matches
 * are recomputed.  NOTE(review): the (1, 1) flags presumably mean
 * "free keys, free values" — confirm against vnet_hashtable.h, since the
 * keys live inside the route_list values here. */
static int clear_hash_cache() {
    vnet_free_htable(vnet_state.route_cache, 1, 1);
    vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);

    return 0;
}
202
/* Look up a previously computed route list for this packet's hash bytes.
 * On a cache miss *routes is set to NULL (vnet_htable_search returns 0).
 * Always returns 0. */
static int look_into_cache(const struct v3_vnet_pkt * pkt, 
                           struct route_list ** routes) {
    *routes = (struct route_list *)vnet_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
   
    return 0;
}
209
210
211 static struct vnet_dev * dev_by_id(int idx) {
212     struct vnet_dev * dev = NULL; 
213
214     list_for_each_entry(dev, &(vnet_state.devs), node) {
215         int dev_id = dev->dev_id;
216
217         if (dev_id == idx)
218             return dev;
219     }
220
221     return NULL;
222 }
223
224 static struct vnet_dev * dev_by_mac(uint8_t * mac) {
225     struct vnet_dev * dev = NULL; 
226     
227     list_for_each_entry(dev, &(vnet_state.devs), node) {
228         if (!compare_ethaddr(dev->mac_addr, mac)){
229             return dev;
230         }
231     }
232
233     return NULL;
234 }
235
236
237 int v3_vnet_find_dev(uint8_t  * mac) {
238     struct vnet_dev * dev = NULL;
239
240     dev = dev_by_mac(mac);
241
242     if(dev != NULL) {
243         return dev->dev_id;
244     }
245
246     return -1;
247 }
248
249
250 int v3_vnet_add_route(struct v3_vnet_route route) {
251     struct vnet_route_info * new_route = NULL;
252     unsigned long flags; 
253
254     new_route = (struct vnet_route_info *)Vnet_Malloc(sizeof(struct vnet_route_info));
255     memset(new_route, 0, sizeof(struct vnet_route_info));
256
257 #ifdef V3_CONFIG_DEBUG_VNET
258     Vnet_Debug("VNET/P Core: add_route_entry:\n");
259     print_route(&route);
260 #endif
261     
262     memcpy(new_route->route_def.src_mac, route.src_mac, ETH_ALEN);
263     memcpy(new_route->route_def.dst_mac, route.dst_mac, ETH_ALEN);
264     new_route->route_def.src_mac_qual = route.src_mac_qual;
265     new_route->route_def.dst_mac_qual = route.dst_mac_qual;
266     new_route->route_def.dst_type = route.dst_type;
267     new_route->route_def.src_type = route.src_type;
268     new_route->route_def.src_id = route.src_id;
269     new_route->route_def.dst_id = route.dst_id;
270
271     if (new_route->route_def.dst_type == LINK_INTERFACE) {
272         new_route->dst_dev = dev_by_id(new_route->route_def.dst_id);
273     }
274
275     if (new_route->route_def.src_type == LINK_INTERFACE) {
276         new_route->src_dev = dev_by_id(new_route->route_def.src_id);
277     }
278
279
280     flags = vnet_lock_irqsave(vnet_state.lock);
281
282     list_add(&(new_route->node), &(vnet_state.routes));
283     new_route->idx = ++ vnet_state.route_idx;
284     vnet_state.num_routes ++;
285         
286     vnet_unlock_irqrestore(vnet_state.lock, flags);
287
288     clear_hash_cache();
289
290 #ifdef V3_CONFIG_DEBUG_VNET
291     dump_routes();
292 #endif
293
294     return new_route->idx;
295 }
296
297
298 void v3_vnet_del_route(uint32_t route_idx){
299     struct vnet_route_info * route = NULL;
300     unsigned long flags; 
301
302     flags = vnet_lock_irqsave(vnet_state.lock);
303
304     list_for_each_entry(route, &(vnet_state.routes), node) {
305         if(route->idx == route_idx){
306             list_del(&(route->node));
307             list_del(&(route->match_node));
308             Vnet_Free(route);    
309         }
310     }
311
312     vnet_unlock_irqrestore(vnet_state.lock, flags);
313 }
314
315
316 /* delete all route entries with specfied src or dst device id */ 
317 static void inline del_routes_by_dev(int dev_id){
318     struct vnet_route_info * route = NULL;
319     unsigned long flags; 
320
321     flags = vnet_lock_irqsave(vnet_state.lock);
322
323     list_for_each_entry(route, &(vnet_state.routes), node) {
324         if((route->route_def.dst_type == LINK_INTERFACE &&
325              route->route_def.dst_id == dev_id) ||
326              (route->route_def.src_type == LINK_INTERFACE &&
327               route->route_def.src_id == dev_id)){
328               
329             list_del(&(route->node));
330             list_del(&(route->match_node));
331             Vnet_Free(route);    
332         }
333     }
334
335     vnet_unlock_irqrestore(vnet_state.lock, flags);
336 }
337
338 /* At the end allocate a route_list
339  * This list will be inserted into the cache so we don't need to free it
340  */
341 static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
342     struct vnet_route_info * route = NULL; 
343     struct route_list * matches = NULL;
344     int num_matches = 0;
345     int max_rank = 0;
346     struct list_head match_list;
347     struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
348     //  uint8_t src_type = pkt->src_type;
349     //  uint32_t src_link = pkt->src_id;
350
351 #ifdef V3_CONFIG_DEBUG_VNET
352     {
353         char dst_str[100];
354         char src_str[100];
355
356         mac2str(hdr->src_mac, src_str);  
357         mac2str(hdr->dst_mac, dst_str);
358         Vnet_Debug("VNET/P Core: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
359     }
360 #endif
361
362     INIT_LIST_HEAD(&match_list);
363     
364 #define UPDATE_MATCHES(rank) do {                               \
365         if (max_rank < (rank)) {                                \
366             max_rank = (rank);                                  \
367             INIT_LIST_HEAD(&match_list);                        \
368                                                                 \
369             list_add(&(route->match_node), &match_list);        \
370             num_matches = 1;                                    \
371         } else if (max_rank == (rank)) {                        \
372             list_add(&(route->match_node), &match_list);        \
373             num_matches++;                                      \
374         }                                                       \
375     } while (0)
376     
377
378     list_for_each_entry(route, &(vnet_state.routes), node) {
379         struct v3_vnet_route * route_def = &(route->route_def);
380
381 /*
382         // CHECK SOURCE TYPE HERE
383         if ( (route_def->src_type != LINK_ANY) && 
384              ( (route_def->src_type != src_type) || 
385                ( (route_def->src_id != src_link) &&
386                  (route_def->src_id != -1)))) {
387             continue;
388         }
389 */
390
391         if ((route_def->dst_mac_qual == MAC_ANY) &&
392             (route_def->src_mac_qual == MAC_ANY)) {      
393             UPDATE_MATCHES(3);
394         }
395         
396         if (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) {
397             if (route_def->src_mac_qual != MAC_NOT) {
398                 if (route_def->dst_mac_qual == MAC_ANY) {
399                     UPDATE_MATCHES(6);
400                 } else if (route_def->dst_mac_qual != MAC_NOT &&
401                            memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
402                     UPDATE_MATCHES(8);
403                 }
404             }
405         }
406             
407         if (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
408             if (route_def->dst_mac_qual != MAC_NOT) {
409                 if (route_def->src_mac_qual == MAC_ANY) {
410                     UPDATE_MATCHES(6);
411                 } else if ((route_def->src_mac_qual != MAC_NOT) && 
412                            (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
413                     UPDATE_MATCHES(8);
414                 }
415             }
416         }
417             
418         if ((route_def->dst_mac_qual == MAC_NOT) &&
419             (memcmp(route_def->dst_mac, hdr->dst_mac, 6) != 0)) {
420             if (route_def->src_mac_qual == MAC_ANY) {
421                 UPDATE_MATCHES(5);
422             } else if ((route_def->src_mac_qual != MAC_NOT) && 
423                        (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {     
424                 UPDATE_MATCHES(7);
425             }
426         }
427         
428         if ((route_def->src_mac_qual == MAC_NOT) &&
429             (memcmp(route_def->src_mac, hdr->src_mac, 6) != 0)) {
430             if (route_def->dst_mac_qual == MAC_ANY) {
431                 UPDATE_MATCHES(5);
432             } else if ((route_def->dst_mac_qual != MAC_NOT) &&
433                        (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0)) {
434                 UPDATE_MATCHES(7);
435             }
436         }
437         
438         // Default route
439         if ( (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) &&
440              (route_def->dst_mac_qual == MAC_NONE)) {
441             UPDATE_MATCHES(4);
442         }
443     }
444
445     Vnet_Debug("VNET/P Core: match_route: Matches=%d\n", num_matches);
446
447     if (num_matches == 0) {
448         return NULL;
449     }
450
451     matches = (struct route_list *)Vnet_Malloc(sizeof(struct route_list) + 
452                                                (sizeof(struct vnet_route_info *) * num_matches));
453
454     matches->num_routes = num_matches;
455
456     {
457         int i = 0;
458         list_for_each_entry(route, &match_list, match_node) {
459             matches->routes[i++] = route;
460         }
461     }
462
463     return matches;
464 }
465
466
/* Route one packet and deliver it to every matching destination.
 * Looks up the route-cache first, falling back to a full match_route()
 * scan (whose result is then cached).  Updates the global rx/tx counters.
 * Returns 0 in all cases, including "no route found" (packet dropped).
 * NOTE(review): matched_routes is dereferenced after the lock is dropped;
 * a concurrent clear_hash_cache() could free it — confirm the intended
 * locking discipline. */
int vnet_tx_one_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
    struct route_list * matched_routes = NULL;
    unsigned long flags;
    int i;

    int cpu = V3_Get_CPU();
    Vnet_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
                  cpu, pkt->size, pkt->src_id, 
                  pkt->src_type, pkt->dst_id, pkt->dst_type);
    if(net_debug >= 4){
            v3_hexdump(pkt->data, pkt->size, NULL, 0);
    }

    flags = vnet_lock_irqsave(vnet_state.lock);

    /* Count the packet as received even if it is eventually dropped. */
    vnet_state.stats.rx_bytes += pkt->size;
    vnet_state.stats.rx_pkts++;

    /* Fast path: reuse a cached match for this header hash. */
    look_into_cache(pkt, &matched_routes);
    if (matched_routes == NULL) {  
        Vnet_Debug("VNET/P Core: send pkt Looking into routing table\n");
        
        matched_routes = match_route(pkt);
        
        if (matched_routes) {
            add_route_to_cache(pkt, matched_routes);
        } else {
            Vnet_Debug("VNET/P Core: Could not find route for packet... discards packet\n");
            vnet_unlock_irqrestore(vnet_state.lock, flags);
            return 0; /* do we return -1 here?*/
        }
    }

    vnet_unlock_irqrestore(vnet_state.lock, flags);

    Vnet_Debug("VNET/P Core: send pkt route matches %d\n", matched_routes->num_routes);

    /* Deliver to each matched destination; failures on one route do not
     * prevent delivery on the others. */
    for (i = 0; i < matched_routes->num_routes; i++) {
        struct vnet_route_info * route = matched_routes->routes[i];
        
        if (route->route_def.dst_type == LINK_EDGE) {
            /* External destination: rewrite the pkt header fields and hand
             * it to the bridge. */
            struct vnet_brg_dev * bridge = vnet_state.bridge;
            pkt->dst_type = LINK_EDGE;
            pkt->dst_id = route->route_def.dst_id;

            if (bridge == NULL) {
                Vnet_Print(2, "VNET/P Core: No active bridge to sent data to\n");
                continue;
            }

            if(bridge->brg_ops.input(bridge->vm, pkt, bridge->private_data) < 0){
                Vnet_Print(2, "VNET/P Core: Packet not sent properly to bridge\n");
                continue;
            }         
            vnet_state.stats.tx_bytes += pkt->size;
            vnet_state.stats.tx_pkts ++;
        } else if (route->route_def.dst_type == LINK_INTERFACE) {
            /* Guest-device destination: call the device's input handler. */
            if (route->dst_dev == NULL){
                  Vnet_Print(2, "VNET/P Core: No active device to sent data to\n");
                continue;
            }

            if(route->dst_dev->dev_ops.input(route->dst_dev->vm, pkt, route->dst_dev->private_data) < 0) {
                Vnet_Print(2, "VNET/P Core: Packet not sent properly\n");
                continue;
            }
            vnet_state.stats.tx_bytes += pkt->size;
            vnet_state.stats.tx_pkts ++;
        } else {
            Vnet_Print(0, "VNET/P Core: Wrong dst type\n");
        }
    }
    
    return 0;
}
542
543
544 static int vnet_pkt_enqueue(struct v3_vnet_pkt * pkt){
545     unsigned long flags;
546     struct queue_entry * entry;
547     struct vnet_queue * q = &(vnet_state.pkt_q);
548     uint16_t num_pages;
549
550     flags = vnet_lock_irqsave(q->lock);
551
552     if (q->count >= VNET_QUEUE_SIZE){
553         Vnet_Print(1, "VNET Queue overflow!\n");
554         vnet_unlock_irqrestore(q->lock, flags);
555         return -1;
556     }
557         
558     q->count ++;
559     entry = &(q->buf[q->tail++]);
560     q->tail %= VNET_QUEUE_SIZE;
561         
562     vnet_unlock_irqrestore(q->lock, flags);
563
564     /* this is ugly, but should happen very unlikely */
565     while(entry->use);
566
567     if(entry->size_alloc < pkt->size){
568         if(entry->data != NULL){
569             Vnet_FreePages(Vnet_PAddr(entry->data), (entry->size_alloc / PAGE_SIZE));
570             entry->data = NULL;
571         }
572
573         num_pages = 1 + (pkt->size / PAGE_SIZE);
574         entry->data = Vnet_VAddr(Vnet_AllocPages(num_pages));
575         if(entry->data == NULL){
576             return -1;
577         }
578         entry->size_alloc = PAGE_SIZE * num_pages;
579     }
580
581     entry->pkt.data = entry->data;
582     memcpy(&(entry->pkt), pkt, sizeof(struct v3_vnet_pkt));
583     memcpy(entry->data, pkt->data, pkt->size);
584
585     entry->use = 1;
586
587     return 0;
588 }
589
590
591 int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data, int synchronize) {
592     if(synchronize){
593         vnet_tx_one_pkt(pkt, NULL);
594     }else {
595        vnet_pkt_enqueue(pkt);
596        Vnet_Print(2, "VNET/P Core: Put pkt into Queue: pkt size %d\n", pkt->size);
597     }
598         
599     return 0;
600 }
601
602 int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac, 
603                     struct v3_vnet_dev_ops *ops,
604                     void * priv_data){
605     struct vnet_dev * new_dev = NULL;
606     unsigned long flags;
607
608     new_dev = (struct vnet_dev *)Vnet_Malloc(sizeof(struct vnet_dev)); 
609
610     if (new_dev == NULL) {
611         Vnet_Print(0, "Malloc fails\n");
612         return -1;
613     }
614    
615     memcpy(new_dev->mac_addr, mac, 6);
616     new_dev->dev_ops.input = ops->input;
617     new_dev->private_data = priv_data;
618     new_dev->vm = vm;
619     new_dev->dev_id = 0;
620
621     flags = vnet_lock_irqsave(vnet_state.lock);
622
623     if (dev_by_mac(mac) == NULL) {
624         list_add(&(new_dev->node), &(vnet_state.devs));
625         new_dev->dev_id = ++ vnet_state.dev_idx;
626         vnet_state.num_devs ++;
627     }
628
629     vnet_unlock_irqrestore(vnet_state.lock, flags);
630
631     /* if the device was found previosly the id should still be 0 */
632     if (new_dev->dev_id == 0) {
633         Vnet_Print(0, "VNET/P Core: Device Already exists\n");
634         return -1;
635     }
636
637     Vnet_Debug("VNET/P Core: Add Device: dev_id %d\n", new_dev->dev_id);
638
639     return new_dev->dev_id;
640 }
641
642
643 int v3_vnet_del_dev(int dev_id){
644     struct vnet_dev * dev = NULL;
645     unsigned long flags;
646
647     flags = vnet_lock_irqsave(vnet_state.lock);
648         
649     dev = dev_by_id(dev_id);
650     if (dev != NULL){
651         list_del(&(dev->node));
652         //del_routes_by_dev(dev_id);
653         vnet_state.num_devs --;
654     }
655         
656     vnet_unlock_irqrestore(vnet_state.lock, flags);
657
658     Vnet_Free(dev);
659
660     Vnet_Debug("VNET/P Core: Remove Device: dev_id %d\n", dev_id);
661
662     return 0;
663 }
664
665
666 int v3_vnet_stat(struct vnet_stat * stats){
667     stats->rx_bytes = vnet_state.stats.rx_bytes;
668     stats->rx_pkts = vnet_state.stats.rx_pkts;
669     stats->tx_bytes = vnet_state.stats.tx_bytes;
670     stats->tx_pkts = vnet_state.stats.tx_pkts;
671
672     return 0;
673 }
674
675 static void deinit_devices_list(){
676     struct vnet_dev * dev = NULL; 
677
678     list_for_each_entry(dev, &(vnet_state.devs), node) {
679         list_del(&(dev->node));
680         Vnet_Free(dev);
681     }
682 }
683
684 static void deinit_routes_list(){
685     struct vnet_route_info * route = NULL; 
686
687     list_for_each_entry(route, &(vnet_state.routes), node) {
688         list_del(&(route->node));
689         list_del(&(route->match_node));
690         Vnet_Free(route);
691     }
692 }
693
/* Install the single bridge to the outside world.
 * Returns 0 on success, -1 if a bridge is already set or allocation fails. */
int v3_vnet_add_bridge(struct v3_vm_info * vm,
                       struct v3_vnet_bridge_ops * ops,
                       uint8_t type,
                       void * priv_data) {
    unsigned long flags;
    int bridge_free = 0;
    struct vnet_brg_dev * tmp_bridge = NULL;    
    
    /* Reserve the bridge slot under the lock.  The sentinel (void *)1 marks
     * "claimed but not yet initialized" so a concurrent caller sees the slot
     * as taken while we allocate outside the lock. */
    flags = vnet_lock_irqsave(vnet_state.lock);
    if (vnet_state.bridge == NULL) {
        bridge_free = 1;
        vnet_state.bridge = (void *)1;
    }
    vnet_unlock_irqrestore(vnet_state.lock, flags);

    if (bridge_free == 0) {
        PrintError("VNET/P Core: Bridge already set\n");
        return -1;
    }

    tmp_bridge = (struct vnet_brg_dev *)Vnet_Malloc(sizeof(struct vnet_brg_dev));

    if (tmp_bridge == NULL) {
        PrintError("Malloc Fails\n");
        vnet_state.bridge = NULL;  /* release the reservation */
        return -1;
    }
    
    tmp_bridge->vm = vm;
    tmp_bridge->brg_ops.input = ops->input;
    tmp_bridge->brg_ops.poll = ops->poll;
    tmp_bridge->private_data = priv_data;
    tmp_bridge->type = type;
        
    /* make this atomic to avoid possible race conditions */
    flags = vnet_lock_irqsave(vnet_state.lock);
    vnet_state.bridge = tmp_bridge;
    vnet_unlock_irqrestore(vnet_state.lock, flags);

    return 0;
}
735
736
737 void v3_vnet_del_bridge(uint8_t type) {
738     unsigned long flags;
739     struct vnet_brg_dev * tmp_bridge = NULL;    
740     
741     flags = vnet_lock_irqsave(vnet_state.lock);
742         
743     if (vnet_state.bridge != NULL && vnet_state.bridge->type == type) {
744         tmp_bridge = vnet_state.bridge;
745        vnet_state.bridge = NULL;
746     }
747         
748     vnet_unlock_irqrestore(vnet_state.lock, flags);
749
750     if (tmp_bridge) {
751         Vnet_Free(tmp_bridge);
752     }
753 }
754
755
/* Body of the packet-flush thread: drain the transmit queue and hand each
 * packet to vnet_tx_one_pkt().  Busy-polls with Vnet_Yield() when the queue
 * is empty; runs until vnet_thread_should_stop() reports shutdown. */
static int vnet_tx_flush(void *args){
    unsigned long flags;
    struct queue_entry * entry;
    struct vnet_queue * q = &(vnet_state.pkt_q);

    Vnet_Print(0, "VNET/P Handing Pkt Thread Starting ....\n");

    /* we need thread sleep/wakeup in Palacios */
    while(!vnet_thread_should_stop()){
        flags = vnet_lock_irqsave(q->lock);

        if (q->count <= 0){
            /* Queue empty: drop the lock and yield the CPU. */
            vnet_unlock_irqrestore(q->lock, flags);
            Vnet_Yield();
        }else {
            /* Claim the slot at head under the lock; the payload itself is
             * handled outside the lock. */
            q->count --;
            entry = &(q->buf[q->head++]);
            q->head %= VNET_QUEUE_SIZE;

            vnet_unlock_irqrestore(q->lock, flags);

            /* this is ugly, but should happen very unlikely */
            while(!entry->use);
            vnet_tx_one_pkt(&(entry->pkt), NULL);

            /* asynchronizely release allocated memory for buffer entry here */     
            entry->use = 0;

            Vnet_Print(2, "vnet_tx_flush: pkt (size %d)\n", entry->pkt.size);   
        }
    }

    return 0;
}
790
791 int v3_init_vnet() {
792     memset(&vnet_state, 0, sizeof(vnet_state));
793         
794     INIT_LIST_HEAD(&(vnet_state.routes));
795     INIT_LIST_HEAD(&(vnet_state.devs));
796
797     vnet_state.num_devs = 0;
798     vnet_state.num_routes = 0;
799
800     if (vnet_lock_init(&(vnet_state.lock)) == -1){
801         PrintError("VNET/P Core: Fails to initiate lock\n");
802     }
803
804     vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
805     if (vnet_state.route_cache == NULL) {
806         PrintError("VNET/P Core: Fails to initiate route cache\n");
807         return -1;
808     }
809
810     vnet_lock_init(&(vnet_state.pkt_q.lock));
811
812     vnet_state.pkt_flush_thread = vnet_start_thread(vnet_tx_flush, NULL, "VNET_Pkts");
813
814     Vnet_Debug("VNET/P Core is initiated\n");
815
816     return 0;
817 }
818
819
820 void v3_deinit_vnet(){
821
822     vnet_lock_deinit(&(vnet_state.lock));
823
824     deinit_devices_list();
825     deinit_routes_list();
826
827     vnet_free_htable(vnet_state.route_cache, 1, 1);
828     Vnet_Free(vnet_state.bridge);
829 }
830
831