Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute:
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Fix bugs when deinitializing a list: use list_for_each_entry_safe instead of list_for_each_entry
[palacios.git] / palacios / src / vnet / vnet_core.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2010, Lei Xia <lxia@northwestern.edu> 
11  * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>  
12  * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved
14  *
15  * Author: Lei Xia <lxia@northwestern.edu>
16  *         Yuan Tang <ytang@northwestern.edu>
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21  
22 #include <vnet/vnet.h>
23 #include <vnet/vnet_hashtable.h>
24 #include <vnet/vnet_host.h>
25 #include <vnet/vnet_vmm.h>
26
27 #include <palacios/vmm_queue.h>
28
/* When VNET debugging is not configured, compile Vnet_Debug() away entirely. */
#ifndef V3_CONFIG_DEBUG_VNET
#undef Vnet_Debug
#define Vnet_Debug(fmt, args...)
#endif

/* Runtime verbosity level for Vnet_Print(); >= 4 also hexdumps packets
 * in v3_vnet_send_pkt(). */
int net_debug = 0;
35
/* Ethernet frame header, used to pull src/dst MACs out of packet data
 * for route matching. */
struct eth_hdr {
    uint8_t dst_mac[ETH_ALEN];  /* destination MAC */
    uint8_t src_mac[ETH_ALEN];  /* source MAC */
    uint16_t type; /* indicates layer 3 protocol type */
} __attribute__((packed));
41
42
/* A virtual NIC endpoint registered with VNET (one per guest device). */
struct vnet_dev {
    int dev_id;                     /* unique id assigned from vnet_state.dev_idx */
    uint8_t mac_addr[ETH_ALEN];     /* MAC used to look the device up */
    struct v3_vm_info * vm;         /* owning VM, passed back to dev_ops */
    struct v3_vnet_dev_ops dev_ops; /* input/poll callbacks into the device */

    int poll;                       /* nonzero: device participates in poll_devs queue */

#define VNET_MAX_QUOTE 64
    int quote;                      /* max packets per poll, clamped to VNET_MAX_QUOTE */
        
    void * private_data;            /* opaque cookie for dev_ops callbacks */

    struct list_head node;          /* link in vnet_state.devs */
} __attribute__((packed));
58
59
/* The (single) bridge endpoint that forwards LINK_EDGE traffic out of VNET. */
struct vnet_brg_dev {
    struct v3_vm_info * vm;           /* VM context handed to bridge callbacks */
    struct v3_vnet_bridge_ops brg_ops;/* input/poll callbacks */

    uint8_t type;                     /* bridge type tag, matched in v3_vnet_del_bridge */

    void * private_data;              /* opaque cookie for brg_ops callbacks */
} __attribute__((packed));
68
69
70
/* One entry in the global routing table. */
struct vnet_route_info {
    struct v3_vnet_route route_def;  /* user-supplied matching rule */

    struct vnet_dev * dst_dev;       /* resolved at add time; may be NULL */
    struct vnet_dev * src_dev;       /* resolved at add time; may be NULL */

    uint32_t idx;                    /* unique id from vnet_state.route_idx */

    struct list_head node;           /* link in vnet_state.routes */
    struct list_head match_node; // used for route matching
};
82
83
/* Cached result of match_route(): the set of routes matching one packet
 * signature.  Stored in (and owned by) vnet_state.route_cache, keyed by
 * hash_buf.  routes[0] is a pre-C99 flexible array member. */
struct route_list {
    uint8_t hash_buf[VNET_HASH_SIZE];  /* copy of the packet's hash key */

    uint32_t num_routes;               /* number of entries in routes[] */
    struct vnet_route_info * routes[0];
} __attribute__((packed));
90
91
/* Reusable packet slot for a queue (not referenced elsewhere in this file). */
struct queue_entry{
    uint8_t use;          /* nonzero when the slot holds a packet */
    struct v3_vnet_pkt pkt;
    uint8_t * data;       /* packet payload buffer */
    uint32_t size_alloc;  /* allocated size of data */
};
98
99
/* Global VNET/P core state: routing table, registered devices, the bridge,
 * traffic statistics, and a cache of previously-matched routes.  Mutations
 * are serialized by 'lock'. */
static struct {
    struct list_head routes;   /* list of vnet_route_info, via ->node */
    struct list_head devs;     /* list of vnet_dev, via ->node */

    uint8_t status; 
   
    uint32_t num_routes;       /* current number of routes */
    uint32_t route_idx;        /* monotonically increasing route id source */
    uint32_t num_devs;         /* current number of devices */
    uint32_t dev_idx;          /* monotonically increasing device id source */

    struct vnet_brg_dev * bridge;  /* at most one bridge; NULL if none */

    vnet_lock_t lock;
    struct vnet_stat stats;    /* rx/tx byte and packet counters */

   /* device queue that are waiting to be polled */
    struct v3_queue * poll_devs;

    struct vnet_thread * pkt_flush_thread;  /* runs vnet_tx_flush() */

    struct hashtable * route_cache;  /* packet hash -> struct route_list */
} vnet_state;
123         
124
125 #ifdef V3_CONFIG_DEBUG_VNET
/* Format a 6-byte MAC address into buf as "aa:bb:cc:dd:ee:ff".
 * buf must hold at least 18 bytes (17 characters + NUL); callers in this
 * file pass buffers of 50 or 100 bytes.
 * Fixes: "%02x" zero-pads each octet (the previous "%2x" space-padded),
 * and the snprintf bound now matches the actual output size instead of a
 * hard-coded 100 that exceeded some callers' buffers. */
static inline void mac2str(uint8_t * mac, char * buf) {
    snprintf(buf, 18, "%02x:%02x:%02x:%02x:%02x:%02x", 
             mac[0], mac[1], mac[2],
             mac[3], mac[4], mac[5]);
}
131
/* Debug-dump one routing rule (MACs, qualifiers, endpoint ids/types).
 * Only compiled when V3_CONFIG_DEBUG_VNET is set. */
static void print_route(struct v3_vnet_route * route){
    char str[50];  /* enough for a formatted MAC (17 chars + NUL) */

    mac2str(route->src_mac, str);
    Vnet_Debug("Src Mac (%s),  src_qual (%d)\n", 
               str, route->src_mac_qual);
    mac2str(route->dst_mac, str);
    Vnet_Debug("Dst Mac (%s),  dst_qual (%d)\n", 
               str, route->dst_mac_qual);
    Vnet_Debug("Src dev id (%d), src type (%d)", 
               route->src_id, 
               route->src_type);
    Vnet_Debug("Dst dev id (%d), dst type (%d)\n", 
               route->dst_id, 
               route->dst_type);
}
148
/* Debug-dump the entire routing table.
 * NOTE(review): walks vnet_state.routes without taking vnet_state.lock;
 * callers invoke it after releasing the lock — confirm this is acceptable
 * for debug builds. */
static void dump_routes(){
    struct vnet_route_info *route;

    Vnet_Debug("\n========Dump routes starts ============\n");
    list_for_each_entry(route, &(vnet_state.routes), node) {
        Vnet_Debug("\nroute %d:\n", route->idx);
                
        print_route(&(route->route_def));
        if (route->route_def.dst_type == LINK_INTERFACE) {
            /* dst_dev was resolved at route-add time */
            Vnet_Debug("dst_dev (%p), dst_dev_id (%d), dst_dev_ops(%p), dst_dev_data (%p)\n",
                route->dst_dev,
                route->dst_dev->dev_id,
                (void *)&(route->dst_dev->dev_ops),
                route->dst_dev->private_data);
        }
    }

    Vnet_Debug("\n========Dump routes end ============\n");
}
168
169 #endif
170
171
172 /* 
173  * A VNET packet is a packed struct with the hashed fields grouped together.
174  * This means we can generate the hash from an offset into the pkt struct
175  */
176 static inline uint_t hash_fn(addr_t hdr_ptr) {    
177     uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
178
179     return vnet_hash_buffer(hdr_buf, VNET_HASH_SIZE);
180 }
181
182 static inline int hash_eq(addr_t key1, addr_t key2) {   
183     return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
184 }
185
186 static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
187     memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);    
188
189     if (vnet_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
190         PrintError("VNET/P Core: Failed to insert new route entry to the cache\n");
191         return -1;
192     }
193     
194     return 0;
195 }
196
197 static int clear_hash_cache() {
198     vnet_free_htable(vnet_state.route_cache, 1, 1);
199     vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
200
201     return 0;
202 }
203
204 static int look_into_cache(const struct v3_vnet_pkt * pkt, 
205                            struct route_list ** routes) {
206     *routes = (struct route_list *)vnet_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
207    
208     return 0;
209 }
210
211
212 static struct vnet_dev * dev_by_id(int idx) {
213     struct vnet_dev * dev = NULL; 
214
215     list_for_each_entry(dev, &(vnet_state.devs), node) {
216         if (dev->dev_id == idx) {
217             return dev;
218         }
219     }
220
221     return NULL;
222 }
223
224 static struct vnet_dev * dev_by_mac(uint8_t * mac) {
225     struct vnet_dev * dev = NULL; 
226     
227     list_for_each_entry(dev, &(vnet_state.devs), node) {
228         if (!compare_ethaddr(dev->mac_addr, mac)){
229             return dev;
230         }
231     }
232
233     return NULL;
234 }
235
236
237 int v3_vnet_find_dev(uint8_t  * mac) {
238     struct vnet_dev * dev = NULL;
239
240     dev = dev_by_mac(mac);
241
242     if(dev != NULL) {
243         return dev->dev_id;
244     }
245
246     return -1;
247 }
248
249
250 int v3_vnet_add_route(struct v3_vnet_route route) {
251     struct vnet_route_info * new_route = NULL;
252     unsigned long flags; 
253
254     new_route = (struct vnet_route_info *)Vnet_Malloc(sizeof(struct vnet_route_info));
255     memset(new_route, 0, sizeof(struct vnet_route_info));
256
257 #ifdef V3_CONFIG_DEBUG_VNET
258     Vnet_Debug("VNET/P Core: add_route_entry:\n");
259     print_route(&route);
260 #endif
261     
262     memcpy(new_route->route_def.src_mac, route.src_mac, ETH_ALEN);
263     memcpy(new_route->route_def.dst_mac, route.dst_mac, ETH_ALEN);
264     new_route->route_def.src_mac_qual = route.src_mac_qual;
265     new_route->route_def.dst_mac_qual = route.dst_mac_qual;
266     new_route->route_def.dst_type = route.dst_type;
267     new_route->route_def.src_type = route.src_type;
268     new_route->route_def.src_id = route.src_id;
269     new_route->route_def.dst_id = route.dst_id;
270
271     if (new_route->route_def.dst_type == LINK_INTERFACE) {
272         new_route->dst_dev = dev_by_id(new_route->route_def.dst_id);
273     }
274
275     if (new_route->route_def.src_type == LINK_INTERFACE) {
276         new_route->src_dev = dev_by_id(new_route->route_def.src_id);
277     }
278
279
280     flags = vnet_lock_irqsave(vnet_state.lock);
281
282     list_add(&(new_route->node), &(vnet_state.routes));
283     new_route->idx = ++ vnet_state.route_idx;
284     vnet_state.num_routes ++;
285         
286     vnet_unlock_irqrestore(vnet_state.lock, flags);
287
288     clear_hash_cache();
289
290 #ifdef V3_CONFIG_DEBUG_VNET
291     dump_routes();
292 #endif
293
294     return new_route->idx;
295 }
296
297
298 void v3_vnet_del_route(uint32_t route_idx){
299     struct vnet_route_info * route = NULL;
300     unsigned long flags; 
301
302     flags = vnet_lock_irqsave(vnet_state.lock);
303
304     list_for_each_entry(route, &(vnet_state.routes), node) {
305         Vnet_Print(0, "v3_vnet_del_route, route idx: %d\n", route->idx);
306         if(route->idx == route_idx){
307             list_del(&(route->node));
308             Vnet_Free(route);
309             break;    
310         }
311     }
312
313     vnet_unlock_irqrestore(vnet_state.lock, flags);
314     clear_hash_cache();
315
316 #ifdef V3_CONFIG_DEBUG_VNET
317     dump_routes();
318 #endif  
319 }
320
321
322 /* delete all route entries with specfied src or dst device id */ 
/* Delete all route entries with the specified src or dst device id.
 * Uses the _safe iterator because entries are unlinked and freed during
 * the walk. */
static void inline del_routes_by_dev(int dev_id){
    struct vnet_route_info * route, *tmp_route;
    unsigned long flags; 

    flags = vnet_lock_irqsave(vnet_state.lock);

    list_for_each_entry_safe(route, tmp_route, &(vnet_state.routes), node) {
        if((route->route_def.dst_type == LINK_INTERFACE &&
             route->route_def.dst_id == dev_id) ||
             (route->route_def.src_type == LINK_INTERFACE &&
              route->route_def.src_id == dev_id)){
              
            list_del(&(route->node));
            /* NOTE(review): match_node is only ever linked into the
             * stack-local match_list inside match_route(); for a route that
             * was never matched it is still zeroed from the add-time memset.
             * Verify list_del() on such a node is safe here. */
            list_del(&(route->match_node));
            Vnet_Free(route);    
        }
    }

    vnet_unlock_irqrestore(vnet_state.lock, flags);
}
343
344 /* At the end allocate a route_list
345  * This list will be inserted into the cache so we don't need to free it
346  */
/* Find every route matching a packet's src/dst MACs and return them as a
 * newly allocated route_list (caller inserts it into the route cache, which
 * then owns it).  Returns NULL when nothing matches.
 *
 * Matching is ranked: only the routes with the highest rank seen so far are
 * kept.  Higher rank = more specific match (exact src+dst = 8, exact one
 * side + NOT-mismatch on the other = 7, exact + ANY = 6, NOT + ANY = 5,
 * default route = 4, ANY/ANY = 3).
 *
 * NOTE(review): walks vnet_state.routes without taking vnet_state.lock;
 * the caller (v3_vnet_send_pkt) holds the lock around this call — keep it
 * that way. */
static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
    struct vnet_route_info * route = NULL; 
    struct route_list * matches = NULL;
    int num_matches = 0;
    int max_rank = 0;
    struct list_head match_list;   /* temporary, stack-local list of winners */
    struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
    //  uint8_t src_type = pkt->src_type;
    //  uint32_t src_link = pkt->src_id;

#ifdef V3_CONFIG_DEBUG_VNET
    {
        char dst_str[100];
        char src_str[100];

        mac2str(hdr->src_mac, src_str);  
        mac2str(hdr->dst_mac, dst_str);
        Vnet_Debug("VNET/P Core: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
    }
#endif

    INIT_LIST_HEAD(&match_list);
    
    /* Keep only the highest-ranked matches: a strictly better rank resets
     * the candidate list; an equal rank appends to it. */
#define UPDATE_MATCHES(rank) do {                               \
        if (max_rank < (rank)) {                                \
            max_rank = (rank);                                  \
            INIT_LIST_HEAD(&match_list);                        \
                                                                \
            list_add(&(route->match_node), &match_list);        \
            num_matches = 1;                                    \
        } else if (max_rank == (rank)) {                        \
            list_add(&(route->match_node), &match_list);        \
            num_matches++;                                      \
        }                                                       \
    } while (0)
    

    list_for_each_entry(route, &(vnet_state.routes), node) {
        struct v3_vnet_route * route_def = &(route->route_def);

/*
        // CHECK SOURCE TYPE HERE
        if ( (route_def->src_type != LINK_ANY) && 
             ( (route_def->src_type != src_type) || 
               ( (route_def->src_id != src_link) &&
                 (route_def->src_id != -1)))) {
            continue;
        }
*/

        /* wildcard on both sides: weakest possible match */
        if ((route_def->dst_mac_qual == MAC_ANY) &&
            (route_def->src_mac_qual == MAC_ANY)) {      
            UPDATE_MATCHES(3);
        }
        
        /* exact source MAC match */
        if (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) {
            if (route_def->src_mac_qual != MAC_NOT) {
                if (route_def->dst_mac_qual == MAC_ANY) {
                    UPDATE_MATCHES(6);
                } else if (route_def->dst_mac_qual != MAC_NOT &&
                           memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
                    UPDATE_MATCHES(8);
                }
            }
        }
            
        /* exact destination MAC match */
        if (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
            if (route_def->dst_mac_qual != MAC_NOT) {
                if (route_def->src_mac_qual == MAC_ANY) {
                    UPDATE_MATCHES(6);
                } else if ((route_def->src_mac_qual != MAC_NOT) && 
                           (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
                    UPDATE_MATCHES(8);
                }
            }
        }
            
        /* negated destination: matches everything EXCEPT the listed MAC */
        if ((route_def->dst_mac_qual == MAC_NOT) &&
            (memcmp(route_def->dst_mac, hdr->dst_mac, 6) != 0)) {
            if (route_def->src_mac_qual == MAC_ANY) {
                UPDATE_MATCHES(5);
            } else if ((route_def->src_mac_qual != MAC_NOT) && 
                       (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {     
                UPDATE_MATCHES(7);
            }
        }
        
        /* negated source: matches everything EXCEPT the listed MAC */
        if ((route_def->src_mac_qual == MAC_NOT) &&
            (memcmp(route_def->src_mac, hdr->src_mac, 6) != 0)) {
            if (route_def->dst_mac_qual == MAC_ANY) {
                UPDATE_MATCHES(5);
            } else if ((route_def->dst_mac_qual != MAC_NOT) &&
                       (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0)) {
                UPDATE_MATCHES(7);
            }
        }
        
        // Default route
        if ( (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) &&
             (route_def->dst_mac_qual == MAC_NONE)) {
            UPDATE_MATCHES(4);
        }
    }

    Vnet_Debug("VNET/P Core: match_route: Matches=%d\n", num_matches);

    if (num_matches == 0) {
        return NULL;
    }

    /* NOTE(review): Vnet_Malloc result is not checked here — a failed
     * allocation would crash on the next line. */
    matches = (struct route_list *)Vnet_Malloc(sizeof(struct route_list) + 
                                               (sizeof(struct vnet_route_info *) * num_matches));

    matches->num_routes = num_matches;

    {
        int i = 0;
        list_for_each_entry(route, &match_list, match_node) {
            matches->routes[i++] = route;
        }
    }

    return matches;
}
471
472
/* Route one packet: look up (or compute and cache) the set of matching
 * routes, then hand the packet to each destination — either the bridge
 * (LINK_EDGE) or a registered device (LINK_INTERFACE).
 * Updates rx counters on entry and tx counters per successful delivery.
 * Returns 0 even when the packet is dropped for lack of a route. */
int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
    struct route_list * matched_routes = NULL;
    unsigned long flags;
    int i;

    int cpu = V3_Get_CPU();
    Vnet_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
               cpu, pkt->size, pkt->src_id, 
               pkt->src_type, pkt->dst_id, pkt->dst_type);
    if(net_debug >= 4){
        v3_hexdump(pkt->data, pkt->size, NULL, 0);
    }

    flags = vnet_lock_irqsave(vnet_state.lock);

    vnet_state.stats.rx_bytes += pkt->size;
    vnet_state.stats.rx_pkts++;

    /* fast path: cached match; slow path: full table scan + cache insert */
    look_into_cache(pkt, &matched_routes);
    if (matched_routes == NULL) {  
        Vnet_Debug("VNET/P Core: send pkt Looking into routing table\n");
        
        matched_routes = match_route(pkt);
        
        if (matched_routes) {
            add_route_to_cache(pkt, matched_routes);
        } else {
            Vnet_Debug("VNET/P Core: Could not find route for packet... discards packet\n");
            vnet_unlock_irqrestore(vnet_state.lock, flags);
            return 0; /* do we return -1 here?*/
        }
    }

    vnet_unlock_irqrestore(vnet_state.lock, flags);

    /* NOTE(review): matched_routes is owned by the route cache and is used
     * below after the lock is dropped; a concurrent clear_hash_cache()
     * (route add/delete) could free it — verify external serialization. */
    Vnet_Debug("VNET/P Core: send pkt route matches %d\n", matched_routes->num_routes);

    for (i = 0; i < matched_routes->num_routes; i++) {
        struct vnet_route_info * route = matched_routes->routes[i];
        
        if (route->route_def.dst_type == LINK_EDGE) {
            struct vnet_brg_dev * bridge = vnet_state.bridge;
            pkt->dst_type = LINK_EDGE;
            pkt->dst_id = route->route_def.dst_id;

            if (bridge == NULL) {
                Vnet_Print(2, "VNET/P Core: No active bridge to sent data to\n");
                continue;
            }

            if(bridge->brg_ops.input(bridge->vm, pkt, bridge->private_data) < 0){
                Vnet_Print(2, "VNET/P Core: Packet not sent properly to bridge\n");
                continue;
            }         
            vnet_state.stats.tx_bytes += pkt->size;
            vnet_state.stats.tx_pkts ++;
        } else if (route->route_def.dst_type == LINK_INTERFACE) {
            if (route->dst_dev == NULL){
                  Vnet_Print(2, "VNET/P Core: No active device to sent data to\n");
                continue;
            }

            if(route->dst_dev->dev_ops.input(route->dst_dev->vm, pkt, route->dst_dev->private_data) < 0) {
                Vnet_Print(2, "VNET/P Core: Packet not sent properly\n");
                continue;
            }
            vnet_state.stats.tx_bytes += pkt->size;
            vnet_state.stats.tx_pkts ++;
        } else {
            Vnet_Print(0, "VNET/P Core: Wrong dst type\n");
        }
    }
    
    return 0;
}
548
549
550 int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac, 
551                     struct v3_vnet_dev_ops * ops, int quote, int poll_state,
552                     void * priv_data){
553     struct vnet_dev * new_dev = NULL;
554     unsigned long flags;
555
556     new_dev = (struct vnet_dev *)Vnet_Malloc(sizeof(struct vnet_dev)); 
557
558     if (new_dev == NULL) {
559         Vnet_Print(0, "Malloc fails\n");
560         return -1;
561     }
562    
563     memcpy(new_dev->mac_addr, mac, 6);
564     new_dev->dev_ops.input = ops->input;
565     new_dev->dev_ops.poll = ops->poll;
566     new_dev->private_data = priv_data;
567     new_dev->vm = vm;
568     new_dev->dev_id = 0;
569     new_dev->quote = quote<VNET_MAX_QUOTE?quote:VNET_MAX_QUOTE;
570     new_dev->poll = poll_state;
571
572     flags = vnet_lock_irqsave(vnet_state.lock);
573
574     if (dev_by_mac(mac) == NULL) {
575         list_add(&(new_dev->node), &(vnet_state.devs));
576         new_dev->dev_id = ++ vnet_state.dev_idx;
577         vnet_state.num_devs ++;
578
579         if(new_dev->poll) {
580             v3_enqueue(vnet_state.poll_devs, (addr_t)new_dev);
581         }
582     } else {
583         PrintError("VNET/P: Device with the same MAC is already there\n");
584     }
585
586     vnet_unlock_irqrestore(vnet_state.lock, flags);
587
588     /* if the device was found previosly the id should still be 0 */
589     if (new_dev->dev_id == 0) {
590         Vnet_Print(0, "VNET/P Core: Device Already exists\n");
591         return -1;
592     }
593
594     Vnet_Debug("VNET/P Core: Add Device: dev_id %d\n", new_dev->dev_id);
595
596     return new_dev->dev_id;
597 }
598
599
600 int v3_vnet_del_dev(int dev_id){
601     struct vnet_dev * dev = NULL;
602     unsigned long flags;
603
604     flags = vnet_lock_irqsave(vnet_state.lock);
605         
606     dev = dev_by_id(dev_id);
607     if (dev != NULL){
608         list_del(&(dev->node));
609         //del_routes_by_dev(dev_id);
610         vnet_state.num_devs --;
611     }
612         
613     vnet_unlock_irqrestore(vnet_state.lock, flags);
614
615     Vnet_Free(dev);
616
617     Vnet_Debug("VNET/P Core: Remove Device: dev_id %d\n", dev_id);
618
619     return 0;
620 }
621
622
623 int v3_vnet_stat(struct vnet_stat * stats){
624     stats->rx_bytes = vnet_state.stats.rx_bytes;
625     stats->rx_pkts = vnet_state.stats.rx_pkts;
626     stats->tx_bytes = vnet_state.stats.tx_bytes;
627     stats->tx_pkts = vnet_state.stats.tx_pkts;
628
629     return 0;
630 }
631
/* Free every registered device at shutdown.  Uses the _safe iterator since
 * entries are unlinked and freed during the walk.
 * NOTE(review): runs unlocked — only call after all packet/poll activity
 * has stopped. */
static void deinit_devices_list(){
    struct vnet_dev * dev, * tmp; 

    list_for_each_entry_safe(dev, tmp, &(vnet_state.devs), node) {
        list_del(&(dev->node));
        Vnet_Free(dev);
    }
}
640
/* Free every route at shutdown.  Uses the _safe iterator since entries are
 * unlinked and freed during the walk.
 * NOTE(review): match_node is only linked into match_route()'s stack-local
 * list; for never-matched routes it is zeroed from the add-time memset —
 * verify list_del() on it is safe here (same concern as del_routes_by_dev). */
static void deinit_routes_list(){
    struct vnet_route_info * route, * tmp; 

    list_for_each_entry_safe(route, tmp, &(vnet_state.routes), node) {
        list_del(&(route->node));
        list_del(&(route->match_node));
        Vnet_Free(route);
    }
}
650
/* Register the (single) bridge endpoint.
 * Returns 0 on success, -1 if a bridge is already set or allocation fails.
 *
 * The slot is claimed under the lock with the sentinel value (void *)1 so
 * the allocation can happen outside the critical section without a second
 * caller racing in; the real pointer is stored (again under the lock) once
 * the struct is populated. */
int v3_vnet_add_bridge(struct v3_vm_info * vm,
                       struct v3_vnet_bridge_ops * ops,
                       uint8_t type,
                       void * priv_data) {
    unsigned long flags;
    int bridge_free = 0;
    struct vnet_brg_dev * tmp_bridge = NULL;    
    
    flags = vnet_lock_irqsave(vnet_state.lock);
    if (vnet_state.bridge == NULL) {
        bridge_free = 1;
        vnet_state.bridge = (void *)1;  /* placeholder: slot claimed */
    }
    vnet_unlock_irqrestore(vnet_state.lock, flags);

    if (bridge_free == 0) {
        PrintError("VNET/P Core: Bridge already set\n");
        return -1;
    }

    tmp_bridge = (struct vnet_brg_dev *)Vnet_Malloc(sizeof(struct vnet_brg_dev));

    if (tmp_bridge == NULL) {
        PrintError("Malloc Fails\n");
        vnet_state.bridge = NULL;  /* release the claimed slot */
        return -1;
    }
    
    tmp_bridge->vm = vm;
    tmp_bridge->brg_ops.input = ops->input;
    tmp_bridge->brg_ops.poll = ops->poll;
    tmp_bridge->private_data = priv_data;
    tmp_bridge->type = type;
        
    /* make this atomic to avoid possible race conditions */
    flags = vnet_lock_irqsave(vnet_state.lock);
    vnet_state.bridge = tmp_bridge;
    vnet_unlock_irqrestore(vnet_state.lock, flags);

    return 0;
}
692
693
694 void v3_vnet_del_bridge(uint8_t type) {
695     unsigned long flags;
696     struct vnet_brg_dev * tmp_bridge = NULL;    
697     
698     flags = vnet_lock_irqsave(vnet_state.lock);
699         
700     if (vnet_state.bridge != NULL && vnet_state.bridge->type == type) {
701         tmp_bridge = vnet_state.bridge;
702        vnet_state.bridge = NULL;
703     }
704         
705     vnet_unlock_irqrestore(vnet_state.lock, flags);
706
707     if (tmp_bridge) {
708         Vnet_Free(tmp_bridge);
709     }
710 }
711
712
713 /* can be instanieoued to multiple threads
714   * that runs on multiple cores 
715   * or it could be running on a dedicated side core
716   */
/* can be instantiated to multiple threads
  * that runs on multiple cores 
  * or it could be running on a dedicated side core
  *
  * Worker loop for the packet-flush thread: round-robins pollable devices
  * through poll_devs (dequeue, poll up to dev->quote packets, re-enqueue),
  * yielding when the queue is empty.  Exits when the thread is told to
  * stop; returns 0.
  */
static int vnet_tx_flush(void * args){
    struct vnet_dev * dev = NULL;
    int ret;

    Vnet_Print(0, "VNET/P Polling Thread Starting ....\n");

    while(!vnet_thread_should_stop()){
        dev = (struct vnet_dev *)v3_dequeue(vnet_state.poll_devs);
        if(dev != NULL){
            if(dev->poll && dev->dev_ops.poll != NULL){
                ret = dev->dev_ops.poll(dev->vm, dev->quote, dev->private_data);
                
                if (ret < 0){
                    Vnet_Print(0, "VNET/P: poll from device %p error!\n", dev);
                }
            }
            /* put the device back so it is polled again next round */
            v3_enqueue(vnet_state.poll_devs, (addr_t)dev); 
        }else { /* no device needs to be polled */
           /* sleep here? */
            Vnet_Yield();
        }
    }

    return 0;
}
742
743 int v3_init_vnet() {
744     memset(&vnet_state, 0, sizeof(vnet_state));
745         
746     INIT_LIST_HEAD(&(vnet_state.routes));
747     INIT_LIST_HEAD(&(vnet_state.devs));
748
749     vnet_state.num_devs = 0;
750     vnet_state.num_routes = 0;
751
752     if (vnet_lock_init(&(vnet_state.lock)) == -1){
753         PrintError("VNET/P: Fails to initiate lock\n");
754     }
755
756     vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
757     if (vnet_state.route_cache == NULL) {
758         PrintError("VNET/P: Fails to initiate route cache\n");
759         return -1;
760     }
761
762     vnet_state.poll_devs = v3_create_queue();
763
764     vnet_state.pkt_flush_thread = vnet_start_thread(vnet_tx_flush, NULL, "vnetd-1");
765
766     Vnet_Debug("VNET/P is initiated\n");
767
768     return 0;
769 }
770
771
/* Tear down the VNET/P core: release the lock, free all devices and routes,
 * the route cache, and the bridge.
 * NOTE(review): the pkt_flush_thread is never stopped/joined here and the
 * poll_devs queue is never freed — the flush thread could still touch freed
 * devices; verify shutdown ordering with the host layer.
 * NOTE(review): Vnet_Free(vnet_state.bridge) runs even when no bridge was
 * registered (bridge == NULL) — confirm Vnet_Free tolerates NULL. */
void v3_deinit_vnet(){

    vnet_lock_deinit(&(vnet_state.lock));

    deinit_devices_list();
    deinit_routes_list();

    /* (1, 1): free cached route_list values and the table itself */
    vnet_free_htable(vnet_state.route_cache, 1, 1);
    Vnet_Free(vnet_state.bridge);
}
782
783