Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


42c9d3bc571da1bc8776df776e6c2d9af1ba8956
[palacios.git] / palacios / src / vnet / vnet_core.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2010, Lei Xia <lxia@northwestern.edu> 
11  * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>  
12  * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Lei Xia <lxia@northwestern.edu>
16  *         Yuan Tang <ytang@northwestern.edu>
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21  
22 #include <vnet/vnet.h>
23 #include <vnet/vnet_hashtable.h>
24 #include <vnet/vnet_host.h>
25 #include <vnet/vnet_vmm.h>
26
27 #ifndef V3_CONFIG_DEBUG_VNET
28 #undef Vnet_Debug
29 #define Vnet_Debug(fmt, args...)
30 #endif
31
32 int vnet_debug = 0;
33
/* Ethernet frame header, in wire format (hence packed). */
struct eth_hdr {
    uint8_t dst_mac[ETH_ALEN];  /* destination MAC address */
    uint8_t src_mac[ETH_ALEN];  /* source MAC address */
    uint16_t type; /* indicates layer 3 protocol type */
} __attribute__((packed));
39
40
/* A guest network interface registered with the VNET core.
 * NOTE(review): packed attribute on a pointer-bearing bookkeeping
 * struct looks unnecessary -- confirm nothing relies on this layout. */
struct vnet_dev {
    int dev_id;                      /* unique id handed out at registration (first id is 1) */
    uint8_t mac_addr[ETH_ALEN];      /* MAC address used for lookups (dev_by_mac) */
    struct v3_vm_info * vm;          /* owning VM, passed back to dev_ops.input */
    struct v3_vnet_dev_ops dev_ops;  /* input() callback used to deliver packets */
    void * private_data;             /* opaque cookie passed back to dev_ops.input */

    struct list_head node;           /* membership in vnet_state.devs */
} __attribute__((packed));
50
51
/* The single bridge endpoint used for LINK_EDGE destinations. */
struct vnet_brg_dev {
    struct v3_vm_info * vm;            /* VM hosting the bridge */
    struct v3_vnet_bridge_ops brg_ops; /* input()/poll() callbacks */

    uint8_t type;                      /* bridge type tag supplied by the caller */

    void * private_data;               /* opaque cookie passed back to brg_ops */
} __attribute__((packed));
60
61
62
/* Internal wrapper around a user-supplied route definition. */
struct vnet_route_info {
    struct v3_vnet_route route_def;  /* the rule itself, copied at add time */

    struct vnet_dev * dst_dev;  /* resolved endpoint when dst_type == LINK_INTERFACE */
    struct vnet_dev * src_dev;  /* resolved endpoint when src_type == LINK_INTERFACE */

    struct list_head node;       /* membership in vnet_state.routes */
    struct list_head match_node; // used for route matching
};
72
73
/* Cached result of a routing decision: the hash key plus a
 * variable-length trailing array of matching routes (allocated
 * oversize in match_route, then owned by the route cache). */
struct route_list {
    uint8_t hash_buf[VNET_HASH_SIZE];  /* cache key: packet header hash input */

    uint32_t num_routes;               /* number of entries in routes[] */
    struct vnet_route_info * routes[0];
} __attribute__((packed));
80
81
/* One slot of the asynchronous transmit ring. */
struct queue_entry{
    uint8_t use;            /* 1 while the slot holds an unconsumed packet */
    struct v3_vnet_pkt pkt; /* packet descriptor (copied from the sender) */
    uint8_t * data;         /* slot-private payload buffer (page allocated) */
    uint32_t size_alloc;    /* current capacity of data, in bytes */
};
88
#define VNET_QUEUE_SIZE 1024
/* Fixed-size ring buffer feeding the packet flush thread. */
struct vnet_queue {
    struct queue_entry buf[VNET_QUEUE_SIZE];
    int head, tail;    /* consumer / producer indices, modulo VNET_QUEUE_SIZE */
    int count;         /* number of occupied slots */
    vnet_lock_t lock;  /* protects head, tail and count */
};
96
/* Global (file-scope) VNET core state. */
static struct {
    struct list_head routes;  /* all vnet_route_info entries */
    struct list_head devs;    /* all registered vnet_dev entries */
    
    int num_routes;           /* NOTE(review): never updated by the add/del paths below */
    int num_devs;             /* also serves as the device id counter */

    struct vnet_brg_dev * bridge;  /* single outbound bridge; NULL when unset */

    vnet_lock_t lock;         /* protects routes, devs, bridge and route_cache */
    struct vnet_stat stats;   /* rx/tx byte and packet counters */

    struct vnet_thread * pkt_flush_thread;  /* runs vnet_tx_flush, drains pkt_q */

    struct vnet_queue pkt_q;  /* async transmit queue (own lock inside) */

    struct hashtable * route_cache;  /* packet hash -> struct route_list */
} vnet_state;
115         
116
117 #ifdef V3_CONFIG_DEBUG_VNET
/*
 * Format a 6-byte MAC address as "xx:xx:xx:xx:xx:xx" into buf.
 * buf must hold at least 18 bytes (17 characters + NUL).
 */
static inline void mac_to_string(uint8_t * mac, char * buf) {
    /* %02x (not %2x): zero-pad each octet instead of space-padding,
     * and bound the write to the 18 bytes actually produced rather
     * than a hard-coded 100 that exceeds some callers' buffers */
    snprintf(buf, 18, "%02x:%02x:%02x:%02x:%02x:%02x", 
             mac[0], mac[1], mac[2],
             mac[3], mac[4], mac[5]);
}
123
124 static void print_route(struct v3_vnet_route * route){
125     char str[50];
126
127     mac_to_string(route->src_mac, str);
128     Vnet_Debug("Src Mac (%s),  src_qual (%d)\n", 
129                str, route->src_mac_qual);
130     mac_to_string(route->dst_mac, str);
131     Vnet_Debug("Dst Mac (%s),  dst_qual (%d)\n", 
132                str, route->dst_mac_qual);
133     Vnet_Debug("Src dev id (%d), src type (%d)", 
134                route->src_id, 
135                route->src_type);
136     Vnet_Debug("Dst dev id (%d), dst type (%d)\n", 
137                route->dst_id, 
138                route->dst_type);
139 }
140
141 static void dump_routes(){
142     struct vnet_route_info *route;
143
144     int i = 0;
145     Vnet_Debug("\n========Dump routes starts ============\n");
146     list_for_each_entry(route, &(vnet_state.routes), node) {
147         Vnet_Debug("\nroute %d:\n", i++);
148                 
149         print_route(&(route->route_def));
150         if (route->route_def.dst_type == LINK_INTERFACE) {
151             Vnet_Debug("dst_dev (%p), dst_dev_id (%d), dst_dev_ops(%p), dst_dev_data (%p)\n",
152                 route->dst_dev,
153                 route->dst_dev->dev_id,
154                 (void *)&(route->dst_dev->dev_ops),
155                 route->dst_dev->private_data);
156         }
157     }
158
159     Vnet_Debug("\n========Dump routes end ============\n");
160 }
161
162 #endif
163
164
165 /* 
166  * A VNET packet is a packed struct with the hashed fields grouped together.
167  * This means we can generate the hash from an offset into the pkt struct
168  */
169 static inline uint_t hash_fn(addr_t hdr_ptr) {    
170     uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
171
172     return vnet_hash_buffer(hdr_buf, VNET_HASH_SIZE);
173 }
174
175 static inline int hash_eq(addr_t key1, addr_t key2) {   
176     return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
177 }
178
179 static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
180     memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);    
181
182     if (vnet_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
183         PrintError("VNET/P Core: Failed to insert new route entry to the cache\n");
184         return -1;
185     }
186     
187     return 0;
188 }
189
190 static int clear_hash_cache() {
191     vnet_free_htable(vnet_state.route_cache, 1, 1);
192     vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
193
194     return 0;
195 }
196
197 static int look_into_cache(const struct v3_vnet_pkt * pkt, 
198                            struct route_list ** routes) {
199     *routes = (struct route_list *)vnet_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
200    
201     return 0;
202 }
203
204
205 static struct vnet_dev * dev_by_id(int idx) {
206     struct vnet_dev * dev = NULL; 
207
208     list_for_each_entry(dev, &(vnet_state.devs), node) {
209         int dev_id = dev->dev_id;
210
211         if (dev_id == idx)
212             return dev;
213     }
214
215     return NULL;
216 }
217
218 static struct vnet_dev * dev_by_mac(uint8_t * mac) {
219     struct vnet_dev * dev = NULL; 
220     
221     list_for_each_entry(dev, &(vnet_state.devs), node) {
222         if (!compare_ethaddr(dev->mac_addr, mac)){
223             return dev;
224         }
225     }
226
227     return NULL;
228 }
229
230
231 int v3_vnet_find_dev(uint8_t  * mac) {
232     struct vnet_dev * dev = NULL;
233
234     dev = dev_by_mac(mac);
235
236     if(dev != NULL) {
237         return dev->dev_id;
238     }
239
240     return -1;
241 }
242
243
244 int v3_vnet_add_route(struct v3_vnet_route route) {
245     struct vnet_route_info * new_route = NULL;
246     unsigned long flags; 
247
248     new_route = (struct vnet_route_info *)Vnet_Malloc(sizeof(struct vnet_route_info));
249     memset(new_route, 0, sizeof(struct vnet_route_info));
250
251 #ifdef V3_CONFIG_DEBUG_VNET
252     Vnet_Debug("VNET/P Core: add_route_entry:\n");
253     print_route(&route);
254 #endif
255     
256     memcpy(new_route->route_def.src_mac, route.src_mac, ETH_ALEN);
257     memcpy(new_route->route_def.dst_mac, route.dst_mac, ETH_ALEN);
258     new_route->route_def.src_mac_qual = route.src_mac_qual;
259     new_route->route_def.dst_mac_qual = route.dst_mac_qual;
260     new_route->route_def.dst_type = route.dst_type;
261     new_route->route_def.src_type = route.src_type;
262     new_route->route_def.src_id = route.src_id;
263     new_route->route_def.dst_id = route.dst_id;
264
265     if (new_route->route_def.dst_type == LINK_INTERFACE) {
266         new_route->dst_dev = dev_by_id(new_route->route_def.dst_id);
267     }
268
269     if (new_route->route_def.src_type == LINK_INTERFACE) {
270         new_route->src_dev = dev_by_id(new_route->route_def.src_id);
271     }
272
273
274     flags = vnet_lock_irqsave(vnet_state.lock);
275
276     list_add(&(new_route->node), &(vnet_state.routes));
277     clear_hash_cache();
278
279     vnet_unlock_irqrestore(vnet_state.lock, flags);
280    
281
282 #ifdef V3_CONFIG_DEBUG_VNET
283     dump_routes();
284 #endif
285
286     return 0;
287 }
288
289
290 /* delete all route entries with specfied src or dst device id */ 
291 static void inline del_routes_by_dev(int dev_id){
292     struct vnet_route_info * route = NULL;
293     unsigned long flags; 
294
295     flags = vnet_lock_irqsave(vnet_state.lock);
296
297     list_for_each_entry(route, &(vnet_state.routes), node) {
298         if((route->route_def.dst_type == LINK_INTERFACE &&
299              route->route_def.dst_id == dev_id) ||
300              (route->route_def.src_type == LINK_INTERFACE &&
301               route->route_def.src_id == dev_id)){
302               
303             list_del(&(route->node));
304             list_del(&(route->match_node));
305             Vnet_Free(route);    
306         }
307     }
308
309     vnet_unlock_irqrestore(vnet_state.lock, flags);
310 }
311
/* At the end allocate a route_list
 * This list will be inserted into the cache so we don't need to free it
 */
/* Scan the route table and return the set of routes that best match the
 * packet's source/destination MACs.  Candidates are ranked (higher rank =
 * more specific match); only the highest-ranked routes are returned.
 * Returns NULL when nothing matches.  The caller (vnet_tx_one_pkt) holds
 * vnet_state.lock around this call. */
static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
    struct vnet_route_info * route = NULL; 
    struct route_list * matches = NULL;
    int num_matches = 0;
    int max_rank = 0;               /* rank of the best match seen so far */
    struct list_head match_list;    /* temporary list threaded through match_node */
    struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
    //    uint8_t src_type = pkt->src_type;
    //  uint32_t src_link = pkt->src_id;

#ifdef V3_CONFIG_DEBUG_VNET
    {
        char dst_str[100];
        char src_str[100];

        mac_to_string(hdr->src_mac, src_str);  
        mac_to_string(hdr->dst_mac, dst_str);
        Vnet_Debug("VNET/P Core: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
    }
#endif

    INIT_LIST_HEAD(&match_list);
    
/* Record the current route as a candidate with the given rank:
 * a higher rank resets the candidate list; an equal rank appends. */
#define UPDATE_MATCHES(rank) do {                               \
        if (max_rank < (rank)) {                                \
            max_rank = (rank);                                  \
            INIT_LIST_HEAD(&match_list);                        \
                                                                \
            list_add(&(route->match_node), &match_list);        \
            num_matches = 1;                                    \
        } else if (max_rank == (rank)) {                        \
            list_add(&(route->match_node), &match_list);        \
            num_matches++;                                      \
        }                                                       \
    } while (0)
    

    list_for_each_entry(route, &(vnet_state.routes), node) {
        struct v3_vnet_route * route_def = &(route->route_def);

/*
        // CHECK SOURCE TYPE HERE
        if ( (route_def->src_type != LINK_ANY) && 
             ( (route_def->src_type != src_type) || 
               ( (route_def->src_id != src_link) &&
                 (route_def->src_id != -1)))) {
            continue;
        }
*/

        /* rank 3: wildcard on both src and dst */
        if ((route_def->dst_mac_qual == MAC_ANY) &&
            (route_def->src_mac_qual == MAC_ANY)) {      
            UPDATE_MATCHES(3);
        }
        
        /* rank 6/8: src MAC matches exactly */
        if (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) {
            if (route_def->src_mac_qual != MAC_NOT) {
                if (route_def->dst_mac_qual == MAC_ANY) {
                    UPDATE_MATCHES(6);
                } else if (route_def->dst_mac_qual != MAC_NOT &&
                           memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
                    UPDATE_MATCHES(8);   /* both MACs match exactly: best */
                }
            }
        }
            
        /* rank 6/8: dst MAC matches exactly (mirror of the case above) */
        if (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
            if (route_def->dst_mac_qual != MAC_NOT) {
                if (route_def->src_mac_qual == MAC_ANY) {
                    UPDATE_MATCHES(6);
                } else if ((route_def->src_mac_qual != MAC_NOT) && 
                           (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
                    UPDATE_MATCHES(8);
                }
            }
        }
            
        /* rank 5/7: negated dst MAC rule applies (dst differs) */
        if ((route_def->dst_mac_qual == MAC_NOT) &&
            (memcmp(route_def->dst_mac, hdr->dst_mac, 6) != 0)) {
            if (route_def->src_mac_qual == MAC_ANY) {
                UPDATE_MATCHES(5);
            } else if ((route_def->src_mac_qual != MAC_NOT) && 
                       (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {     
                UPDATE_MATCHES(7);
            }
        }
        
        /* rank 5/7: negated src MAC rule applies (src differs) */
        if ((route_def->src_mac_qual == MAC_NOT) &&
            (memcmp(route_def->src_mac, hdr->src_mac, 6) != 0)) {
            if (route_def->dst_mac_qual == MAC_ANY) {
                UPDATE_MATCHES(5);
            } else if ((route_def->dst_mac_qual != MAC_NOT) &&
                       (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0)) {
                UPDATE_MATCHES(7);
            }
        }
        
        // Default route
        if ( (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) &&
             (route_def->dst_mac_qual == MAC_NONE)) {
            UPDATE_MATCHES(4);
        }
    }

    Vnet_Debug("VNET/P Core: match_route: Matches=%d\n", num_matches);

    if (num_matches == 0) {
        return NULL;
    }

    /* flexible-array allocation sized for exactly num_matches entries;
     * ownership transfers to the route cache via add_route_to_cache */
    matches = (struct route_list *)Vnet_Malloc(sizeof(struct route_list) + 
                                               (sizeof(struct vnet_route_info *) * num_matches));

    matches->num_routes = num_matches;

    {
        int i = 0;
        list_for_each_entry(route, &match_list, match_node) {
            matches->routes[i++] = route;
        }
    }

    return matches;
}
439
440
/* Route one packet and deliver it to every matching destination
 * (bridge for LINK_EDGE routes, device input() for LINK_INTERFACE).
 * Routing decisions are served from the route cache when possible.
 * Returns 0 in all cases, including "no route" (packet dropped).
 * private_data is currently unused. */
int vnet_tx_one_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
    struct route_list * matched_routes = NULL;
    unsigned long flags;
    int i;

    int cpu = V3_Get_CPU();
    Vnet_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
                  cpu, pkt->size, pkt->src_id, 
                  pkt->src_type, pkt->dst_id, pkt->dst_type);
    if(vnet_debug >= 4){
            v3_hexdump(pkt->data, pkt->size, NULL, 0);
    }

    /* lock covers stats, the route cache and the route table scan */
    flags = vnet_lock_irqsave(vnet_state.lock);

    vnet_state.stats.rx_bytes += pkt->size;
    vnet_state.stats.rx_pkts++;

    look_into_cache(pkt, &matched_routes);
    if (matched_routes == NULL) {  
        /* cache miss: do the full table scan and remember the result */
        Vnet_Debug("VNET/P Core: send pkt Looking into routing table\n");
        
        matched_routes = match_route(pkt);
        
        if (matched_routes) {
            add_route_to_cache(pkt, matched_routes);
        } else {
            Vnet_Debug("VNET/P Core: Could not find route for packet... discards packet\n");
            vnet_unlock_irqrestore(vnet_state.lock, flags);
            return 0; /* do we return -1 here?*/
        }
    }

    /* NOTE(review): matched_routes is used below after the lock is
     * dropped; a concurrent route change frees cached lists via
     * clear_hash_cache -- verify lifetime guarantees */
    vnet_unlock_irqrestore(vnet_state.lock, flags);

    Vnet_Debug("VNET/P Core: send pkt route matches %d\n", matched_routes->num_routes);

    /* fan the packet out to every matched destination */
    for (i = 0; i < matched_routes->num_routes; i++) {
        struct vnet_route_info * route = matched_routes->routes[i];
        
        if (route->route_def.dst_type == LINK_EDGE) {
            struct vnet_brg_dev * bridge = vnet_state.bridge;
            /* rewrite the packet's destination for the bridge */
            pkt->dst_type = LINK_EDGE;
            pkt->dst_id = route->route_def.dst_id;

            if (bridge == NULL) {
                Vnet_Print(2, "VNET/P Core: No active bridge to sent data to\n");
                continue;
            }

            if(bridge->brg_ops.input(bridge->vm, pkt, bridge->private_data) < 0){
                Vnet_Print(2, "VNET/P Core: Packet not sent properly to bridge\n");
                continue;
            }         
            vnet_state.stats.tx_bytes += pkt->size;
            vnet_state.stats.tx_pkts ++;
        } else if (route->route_def.dst_type == LINK_INTERFACE) {
            if (route->dst_dev == NULL){
                  Vnet_Print(2, "VNET/P Core: No active device to sent data to\n");
                continue;
            }

            if(route->dst_dev->dev_ops.input(route->dst_dev->vm, pkt, route->dst_dev->private_data) < 0) {
                Vnet_Print(2, "VNET/P Core: Packet not sent properly\n");
                continue;
            }
            vnet_state.stats.tx_bytes += pkt->size;
            vnet_state.stats.tx_pkts ++;
        } else {
            Vnet_Print(0, "VNET/P Core: Wrong dst type\n");
        }
    }
    
    return 0;
}
516
517
518 static int vnet_pkt_enqueue(struct v3_vnet_pkt * pkt){
519     unsigned long flags;
520     struct queue_entry * entry;
521     struct vnet_queue * q = &(vnet_state.pkt_q);
522     uint16_t num_pages;
523
524     flags = vnet_lock_irqsave(q->lock);
525
526     if (q->count >= VNET_QUEUE_SIZE){
527         Vnet_Print(1, "VNET Queue overflow!\n");
528         vnet_unlock_irqrestore(q->lock, flags);
529         return -1;
530     }
531         
532     q->count ++;
533     entry = &(q->buf[q->tail++]);
534     q->tail %= VNET_QUEUE_SIZE;
535         
536     vnet_unlock_irqrestore(q->lock, flags);
537
538     /* this is ugly, but should happen very unlikely */
539     while(entry->use);
540
541     if(entry->size_alloc < pkt->size){
542         if(entry->data != NULL){
543             Vnet_FreePages(Vnet_PAddr(entry->data), (entry->size_alloc / PAGE_SIZE));
544             entry->data = NULL;
545         }
546
547         num_pages = 1 + (pkt->size / PAGE_SIZE);
548         entry->data = Vnet_VAddr(Vnet_AllocPages(num_pages));
549         if(entry->data == NULL){
550             return -1;
551         }
552         entry->size_alloc = PAGE_SIZE * num_pages;
553     }
554
555     entry->pkt.data = entry->data;
556     memcpy(&(entry->pkt), pkt, sizeof(struct v3_vnet_pkt));
557     memcpy(entry->data, pkt->data, pkt->size);
558
559     entry->use = 1;
560
561     return 0;
562 }
563
564
565 int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data, int synchronize) {
566     if(synchronize){
567         vnet_tx_one_pkt(pkt, NULL);
568     }else {
569        vnet_pkt_enqueue(pkt);
570        Vnet_Print(2, "VNET/P Core: Put pkt into Queue: pkt size %d\n", pkt->size);
571     }
572         
573     return 0;
574 }
575
576 int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac, 
577                     struct v3_vnet_dev_ops *ops,
578                     void * priv_data){
579     struct vnet_dev * new_dev = NULL;
580     unsigned long flags;
581
582     new_dev = (struct vnet_dev *)Vnet_Malloc(sizeof(struct vnet_dev)); 
583
584     if (new_dev == NULL) {
585         Vnet_Print(0, "Malloc fails\n");
586         return -1;
587     }
588    
589     memcpy(new_dev->mac_addr, mac, 6);
590     new_dev->dev_ops.input = ops->input;
591     new_dev->private_data = priv_data;
592     new_dev->vm = vm;
593     new_dev->dev_id = 0;
594
595     flags = vnet_lock_irqsave(vnet_state.lock);
596
597     if (dev_by_mac(mac) == NULL) {
598         list_add(&(new_dev->node), &(vnet_state.devs));
599         new_dev->dev_id = ++vnet_state.num_devs;
600     }
601
602     vnet_unlock_irqrestore(vnet_state.lock, flags);
603
604     /* if the device was found previosly the id should still be 0 */
605     if (new_dev->dev_id == 0) {
606         Vnet_Print(0, "VNET/P Core: Device Already exists\n");
607         return -1;
608     }
609
610     Vnet_Debug("VNET/P Core: Add Device: dev_id %d\n", new_dev->dev_id);
611
612     return new_dev->dev_id;
613 }
614
615
616 int v3_vnet_del_dev(int dev_id){
617     struct vnet_dev * dev = NULL;
618     unsigned long flags;
619
620     flags = vnet_lock_irqsave(vnet_state.lock);
621         
622     dev = dev_by_id(dev_id);
623     if (dev != NULL){
624         list_del(&(dev->node));
625         del_routes_by_dev(dev_id);
626     }
627         
628     vnet_unlock_irqrestore(vnet_state.lock, flags);
629
630     Vnet_Free(dev);
631
632     Vnet_Debug("VNET/P Core: Remove Device: dev_id %d\n", dev_id);
633
634     return 0;
635 }
636
637
638 int v3_vnet_stat(struct vnet_stat * stats){
639         
640     stats->rx_bytes = vnet_state.stats.rx_bytes;
641     stats->rx_pkts = vnet_state.stats.rx_pkts;
642     stats->tx_bytes = vnet_state.stats.tx_bytes;
643     stats->tx_pkts = vnet_state.stats.tx_pkts;
644
645     return 0;
646 }
647
648 static void free_devices(){
649     struct vnet_dev * dev = NULL; 
650
651     list_for_each_entry(dev, &(vnet_state.devs), node) {
652         list_del(&(dev->node));
653         Vnet_Free(dev);
654     }
655 }
656
657 static void free_routes(){
658     struct vnet_route_info * route = NULL; 
659
660     list_for_each_entry(route, &(vnet_state.routes), node) {
661         list_del(&(route->node));
662         list_del(&(route->match_node));
663         Vnet_Free(route);
664     }
665 }
666
/* Install the single VNET bridge used for LINK_EDGE destinations.
 * Returns 0 on success, -1 if a bridge is already set or allocation
 * fails.  The (void *)1 placeholder reserves the bridge slot under the
 * lock so a concurrent caller fails fast while this one allocates. */
int v3_vnet_add_bridge(struct v3_vm_info * vm,
                       struct v3_vnet_bridge_ops * ops,
                       uint8_t type,
                       void * priv_data) {
    unsigned long flags;
    int bridge_free = 0;
    struct vnet_brg_dev * tmp_bridge = NULL;    
    
    /* atomically claim the bridge slot with a sentinel value */
    flags = vnet_lock_irqsave(vnet_state.lock);
    if (vnet_state.bridge == NULL) {
        bridge_free = 1;
        vnet_state.bridge = (void *)1;  /* placeholder, replaced below */
    }
    vnet_unlock_irqrestore(vnet_state.lock, flags);

    if (bridge_free == 0) {
        PrintError("VNET/P Core: Bridge already set\n");
        return -1;
    }

    tmp_bridge = (struct vnet_brg_dev *)Vnet_Malloc(sizeof(struct vnet_brg_dev));

    if (tmp_bridge == NULL) {
        PrintError("Malloc Fails\n");
        vnet_state.bridge = NULL;  /* release the claimed slot */
        return -1;
    }
    
    tmp_bridge->vm = vm;
    tmp_bridge->brg_ops.input = ops->input;
    tmp_bridge->brg_ops.poll = ops->poll;
    tmp_bridge->private_data = priv_data;
    tmp_bridge->type = type;
        
    /* make this atomic to avoid possible race conditions */
    flags = vnet_lock_irqsave(vnet_state.lock);
    vnet_state.bridge = tmp_bridge;
    vnet_unlock_irqrestore(vnet_state.lock, flags);

    return 0;
}
708
/* Consumer loop run by vnet_state.pkt_flush_thread: drains the async
 * transmit ring and hands each packet to vnet_tx_one_pkt.  Pairs with
 * vnet_pkt_enqueue via the entry->use flag.  args is unused. */
static int vnet_tx_flush(void *args){
    unsigned long flags;
    struct queue_entry * entry;
    struct vnet_queue * q = &(vnet_state.pkt_q);

    Vnet_Print(0, "VNET/P Handing Pkt Thread Starting ....\n");

    /* we need thread sleep/wakeup in Palacios */
    while(!vnet_thread_should_stop()){
        flags = vnet_lock_irqsave(q->lock);

        if (q->count <= 0){
            /* empty: yield instead of spinning with the lock held */
            vnet_unlock_irqrestore(q->lock, flags);
            Vnet_Yield();
        }else {
            /* claim the head slot; the producer may still be filling it */
            q->count --;
            entry = &(q->buf[q->head++]);
            q->head %= VNET_QUEUE_SIZE;

            vnet_unlock_irqrestore(q->lock, flags);

            /* this is ugly, but should happen very unlikely */
            /* NOTE(review): spins forever if the producer bailed out
             * after claiming the slot (alloc failure leaves use == 0) */
            while(!entry->use);
            vnet_tx_one_pkt(&(entry->pkt), NULL);

            /* asynchronizely release allocated memory for buffer entry here */     
            entry->use = 0;

            Vnet_Print(2, "vnet_tx_flush: pkt (size %d)\n", entry->pkt.size);   
        }
    }

    return 0;
}
743
744 int v3_init_vnet() {
745     memset(&vnet_state, 0, sizeof(vnet_state));
746         
747     INIT_LIST_HEAD(&(vnet_state.routes));
748     INIT_LIST_HEAD(&(vnet_state.devs));
749
750     vnet_state.num_devs = 0;
751     vnet_state.num_routes = 0;
752
753     if (vnet_lock_init(&(vnet_state.lock)) == -1){
754         PrintError("VNET/P Core: Fails to initiate lock\n");
755     }
756
757     vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
758     if (vnet_state.route_cache == NULL) {
759         PrintError("VNET/P Core: Fails to initiate route cache\n");
760         return -1;
761     }
762
763     vnet_lock_init(&(vnet_state.pkt_q.lock));
764
765     vnet_state.pkt_flush_thread = vnet_start_thread(vnet_tx_flush, NULL, "VNET_Pkts");
766
767     Vnet_Debug("VNET/P Core is initiated\n");
768
769     return 0;
770 }
771
772
773 void v3_deinit_vnet(){
774
775     vnet_lock_deinit(&(vnet_state.lock));
776
777     free_devices();
778     free_routes();
779
780     vnet_free_htable(vnet_state.route_cache, 1, 1);
781     Vnet_Free(vnet_state.bridge);
782 }
783
784