Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


46690294165b5f50eac65f501ba7ef2f8c24bba1
[palacios.git] / palacios / src / vnet / vnet_core.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2010, Lei Xia <lxia@northwestern.edu> 
11  * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>  
12  * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved
14  *
15  * Author: Lei Xia <lxia@northwestern.edu>
16  *         Yuan Tang <ytang@northwestern.edu>
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21  
22 #include <vnet/vnet.h>
23 #include <vnet/vnet_hashtable.h>
24 #include <vnet/vnet_host.h>
25 #include <vnet/vnet_vmm.h>
26
27 #include <palacios/vmm_queue.h>
28
29 #ifndef V3_CONFIG_DEBUG_VNET
30 #undef Vnet_Debug
31 #define Vnet_Debug(fmt, args...)
32 #endif
33
int net_debug = 0; /* runtime debug verbosity; e.g. v3_vnet_send_pkt hexdumps packets when >= 4 */
35
/* Ethernet frame header (wire layout, hence packed). */
struct eth_hdr {
    uint8_t dst_mac[ETH_ALEN];  /* destination MAC */
    uint8_t src_mac[ETH_ALEN];  /* source MAC */
    uint16_t type; /* indicates layer 3 protocol type (EtherType; presumably network byte order -- TODO confirm) */
} __attribute__((packed));
41
42
/* A VNET endpoint: one virtual NIC backend belonging to a VM. */
struct vnet_dev {
    int dev_id;                     /* unique id assigned from vnet_state.dev_idx */
    uint8_t mac_addr[ETH_ALEN];     /* MAC identifying this endpoint (see dev_by_mac) */
    struct v3_vm_info * vm;         /* owning VM */
    struct v3_vnet_dev_ops dev_ops; /* input/poll callbacks supplied by the device */

    int poll;                       /* nonzero => flush thread polls this device */

#define VNET_MAX_QUOTE 64
    int quote;                      /* max work handed to dev_ops.poll per round (capped at VNET_MAX_QUOTE) */

    void * private_data;            /* opaque cookie passed back to dev_ops callbacks */

    struct list_head node;          /* linkage in vnet_state.devs */
} __attribute__((packed));          /* NOTE(review): packed on a pointer-bearing, list-linked struct looks unnecessary -- confirm */
58
59
/* The single external bridge linking VNET to the outside world. */
struct vnet_brg_dev {
    struct v3_vm_info * vm;           /* VM hosting the bridge */
    struct v3_vnet_bridge_ops brg_ops; /* input/poll callbacks */

    uint8_t type;                     /* bridge type tag; matched in v3_vnet_del_bridge */

    void * private_data;              /* opaque cookie passed back to brg_ops callbacks */
} __attribute__((packed));
68
69
70
/* A routing rule plus cached endpoint resolutions and list linkage. */
struct vnet_route_info {
    struct v3_vnet_route route_def;  /* the user-supplied rule */

    struct vnet_dev * dst_dev;       /* resolved at add time when dst_type == LINK_INTERFACE; may be NULL */
    struct vnet_dev * src_dev;       /* resolved at add time when src_type == LINK_INTERFACE; may be NULL */

    uint32_t idx;                    /* id assigned from vnet_state.route_idx */

    struct list_head node;           /* linkage in vnet_state.routes */
    struct list_head match_node; // used for route matching (transient list built in match_route)
};
82
83
/* Cache value: the set of routes matching one packet signature. */
struct route_list {
    uint8_t hash_buf[VNET_HASH_SIZE];    /* cache key: copy of the packet's hash buffer */

    uint32_t num_routes;                 /* number of entries in routes[] */
    struct vnet_route_info * routes[0];  /* trailing array, allocated past the struct */
} __attribute__((packed));
90
91
/* One slot of a packet queue.
 * NOTE(review): not referenced anywhere in this file -- confirm users elsewhere. */
struct queue_entry{
    uint8_t use;             /* slot-in-use flag */
    struct v3_vnet_pkt pkt;  /* queued packet descriptor */
    uint8_t * data;          /* packet payload buffer */
    uint32_t size_alloc;     /* bytes allocated at data */
};
98
99
/* Global VNET core state (single instance, zeroed in v3_init_vnet). */
static struct {
    struct list_head routes;  /* all vnet_route_info entries */
    struct list_head devs;    /* all vnet_dev endpoints */

    uint8_t status;           /* not used in this file -- TODO confirm meaning */

    uint32_t num_routes;      /* current route count */
    uint32_t route_idx;       /* monotonically increasing route-id source */
    uint32_t num_devs;        /* current device count */
    uint32_t dev_idx;         /* monotonically increasing device-id source */

    struct vnet_brg_dev * bridge;  /* at most one bridge; slot guarded by lock */

    vnet_lock_t lock;         /* protects lists, counters, and the bridge pointer */
    struct vnet_stat stats;   /* rx/tx byte and packet counters */

   /* device queue that are waiting to be polled */
    struct v3_queue * poll_devs;

    struct vnet_thread * pkt_flush_thread;  /* runs vnet_tx_flush */

    struct hashtable * route_cache;  /* pkt hash -> route_list (see match_route) */
} vnet_state;
123         
124
125 #ifdef V3_CONFIG_DEBUG_VNET
/* Render a 6-byte MAC address as "xx:xx:xx:xx:xx:xx" into buf.
 * The output is exactly 17 chars + NUL, so the write is bounded at 18
 * bytes (the previous bound of 100 exceeded some callers' 50-byte
 * buffers).  %02x zero-pads so bytes below 0x10 print as "0a", not " a". */
static inline void mac2str(uint8_t * mac, char * buf) {
    snprintf(buf, 18, "%02x:%02x:%02x:%02x:%02x:%02x",
             mac[0], mac[1], mac[2],
             mac[3], mac[4], mac[5]);
}
131
132 static void print_route(struct v3_vnet_route * route){
133     char str[50];
134
135     mac2str(route->src_mac, str);
136     Vnet_Debug("Src Mac (%s),  src_qual (%d)\n", 
137                str, route->src_mac_qual);
138     mac2str(route->dst_mac, str);
139     Vnet_Debug("Dst Mac (%s),  dst_qual (%d)\n", 
140                str, route->dst_mac_qual);
141     Vnet_Debug("Src dev id (%d), src type (%d)", 
142                route->src_id, 
143                route->src_type);
144     Vnet_Debug("Dst dev id (%d), dst type (%d)\n", 
145                route->dst_id, 
146                route->dst_type);
147 }
148
/* Debug helper: print every route in the global table.
 * NOTE(review): walks vnet_state.routes without taking vnet_state.lock;
 * callers appear to invoke it outside the lock -- confirm this is tolerated. */
static void dump_routes(){
    struct vnet_route_info *route;

    Vnet_Debug("\n========Dump routes starts ============\n");
    list_for_each_entry(route, &(vnet_state.routes), node) {
        Vnet_Debug("\nroute %d:\n", route->idx);

        print_route(&(route->route_def));
        if (route->route_def.dst_type == LINK_INTERFACE) {
            /* dst_dev is resolved at add time and may be NULL if the device
             * did not exist then; this dereferences it unconditionally */
            Vnet_Debug("dst_dev (%p), dst_dev_id (%d), dst_dev_ops(%p), dst_dev_data (%p)\n",
                route->dst_dev,
                route->dst_dev->dev_id,
                (void *)&(route->dst_dev->dev_ops),
                route->dst_dev->private_data);
        }
    }

    Vnet_Debug("\n========Dump routes end ============\n");
}
168
169 #endif
170
171
172 /* 
173  * A VNET packet is a packed struct with the hashed fields grouped together.
174  * This means we can generate the hash from an offset into the pkt struct
175  */
176 static inline uint_t hash_fn(addr_t hdr_ptr) {    
177     uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
178
179     return vnet_hash_buffer(hdr_buf, VNET_HASH_SIZE);
180 }
181
182 static inline int hash_eq(addr_t key1, addr_t key2) {   
183     return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
184 }
185
186 static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
187     memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);    
188
189     if (vnet_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
190         PrintError("VNET/P Core: Failed to insert new route entry to the cache\n");
191         return -1;
192     }
193     
194     return 0;
195 }
196
197 static int clear_hash_cache() {
198     vnet_free_htable(vnet_state.route_cache, 1, 1);
199     vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
200
201     return 0;
202 }
203
204 static int look_into_cache(const struct v3_vnet_pkt * pkt, 
205                            struct route_list ** routes) {
206     *routes = (struct route_list *)vnet_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
207    
208     return 0;
209 }
210
211
212 static struct vnet_dev * dev_by_id(int idx) {
213     struct vnet_dev * dev = NULL; 
214
215     list_for_each_entry(dev, &(vnet_state.devs), node) {
216         int dev_id = dev->dev_id;
217
218         if (dev_id == idx)
219             return dev;
220     }
221
222     return NULL;
223 }
224
225 static struct vnet_dev * dev_by_mac(uint8_t * mac) {
226     struct vnet_dev * dev = NULL; 
227     
228     list_for_each_entry(dev, &(vnet_state.devs), node) {
229         if (!compare_ethaddr(dev->mac_addr, mac)){
230             return dev;
231         }
232     }
233
234     return NULL;
235 }
236
237
238 int v3_vnet_find_dev(uint8_t  * mac) {
239     struct vnet_dev * dev = NULL;
240
241     dev = dev_by_mac(mac);
242
243     if(dev != NULL) {
244         return dev->dev_id;
245     }
246
247     return -1;
248 }
249
250
251 int v3_vnet_add_route(struct v3_vnet_route route) {
252     struct vnet_route_info * new_route = NULL;
253     unsigned long flags; 
254
255     new_route = (struct vnet_route_info *)Vnet_Malloc(sizeof(struct vnet_route_info));
256     memset(new_route, 0, sizeof(struct vnet_route_info));
257
258 #ifdef V3_CONFIG_DEBUG_VNET
259     Vnet_Debug("VNET/P Core: add_route_entry:\n");
260     print_route(&route);
261 #endif
262     
263     memcpy(new_route->route_def.src_mac, route.src_mac, ETH_ALEN);
264     memcpy(new_route->route_def.dst_mac, route.dst_mac, ETH_ALEN);
265     new_route->route_def.src_mac_qual = route.src_mac_qual;
266     new_route->route_def.dst_mac_qual = route.dst_mac_qual;
267     new_route->route_def.dst_type = route.dst_type;
268     new_route->route_def.src_type = route.src_type;
269     new_route->route_def.src_id = route.src_id;
270     new_route->route_def.dst_id = route.dst_id;
271
272     if (new_route->route_def.dst_type == LINK_INTERFACE) {
273         new_route->dst_dev = dev_by_id(new_route->route_def.dst_id);
274     }
275
276     if (new_route->route_def.src_type == LINK_INTERFACE) {
277         new_route->src_dev = dev_by_id(new_route->route_def.src_id);
278     }
279
280
281     flags = vnet_lock_irqsave(vnet_state.lock);
282
283     list_add(&(new_route->node), &(vnet_state.routes));
284     new_route->idx = ++ vnet_state.route_idx;
285     vnet_state.num_routes ++;
286         
287     vnet_unlock_irqrestore(vnet_state.lock, flags);
288
289     clear_hash_cache();
290
291 #ifdef V3_CONFIG_DEBUG_VNET
292     dump_routes();
293 #endif
294
295     return new_route->idx;
296 }
297
298
/* Remove the route with the given index and flush the route cache.
 * Silently does nothing if no route has that index.
 * NOTE(review): vnet_state.num_routes is not decremented here -- confirm
 * whether the counter is meant to track live routes. */
void v3_vnet_del_route(uint32_t route_idx){
    struct vnet_route_info * route = NULL;
    unsigned long flags; 

    flags = vnet_lock_irqsave(vnet_state.lock);

    list_for_each_entry(route, &(vnet_state.routes), node) {
        V3_Print("v3_vnet_del_route, route idx: %d\n", route->idx);
        if(route->idx == route_idx){
            list_del(&(route->node));
            Vnet_Free(route);
            /* must break here: advancing the iterator would read the
             * freed node's next pointer */
            break;    
        }
    }

    vnet_unlock_irqrestore(vnet_state.lock, flags);
    clear_hash_cache();

#ifdef V3_CONFIG_DEBUG_VNET
    dump_routes();
#endif  
}
321
322
323 /* delete all route entries with specfied src or dst device id */ 
324 static void inline del_routes_by_dev(int dev_id){
325     struct vnet_route_info * route = NULL;
326     unsigned long flags; 
327
328     flags = vnet_lock_irqsave(vnet_state.lock);
329
330     list_for_each_entry(route, &(vnet_state.routes), node) {
331         if((route->route_def.dst_type == LINK_INTERFACE &&
332              route->route_def.dst_id == dev_id) ||
333              (route->route_def.src_type == LINK_INTERFACE &&
334               route->route_def.src_id == dev_id)){
335               
336             list_del(&(route->node));
337             list_del(&(route->match_node));
338             Vnet_Free(route);    
339         }
340     }
341
342     vnet_unlock_irqrestore(vnet_state.lock, flags);
343 }
344
345 /* At the end allocate a route_list
346  * This list will be inserted into the cache so we don't need to free it
347  */
348 static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
349     struct vnet_route_info * route = NULL; 
350     struct route_list * matches = NULL;
351     int num_matches = 0;
352     int max_rank = 0;
353     struct list_head match_list;
354     struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
355     //  uint8_t src_type = pkt->src_type;
356     //  uint32_t src_link = pkt->src_id;
357
358 #ifdef V3_CONFIG_DEBUG_VNET
359     {
360         char dst_str[100];
361         char src_str[100];
362
363         mac2str(hdr->src_mac, src_str);  
364         mac2str(hdr->dst_mac, dst_str);
365         Vnet_Debug("VNET/P Core: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
366     }
367 #endif
368
369     INIT_LIST_HEAD(&match_list);
370     
371 #define UPDATE_MATCHES(rank) do {                               \
372         if (max_rank < (rank)) {                                \
373             max_rank = (rank);                                  \
374             INIT_LIST_HEAD(&match_list);                        \
375                                                                 \
376             list_add(&(route->match_node), &match_list);        \
377             num_matches = 1;                                    \
378         } else if (max_rank == (rank)) {                        \
379             list_add(&(route->match_node), &match_list);        \
380             num_matches++;                                      \
381         }                                                       \
382     } while (0)
383     
384
385     list_for_each_entry(route, &(vnet_state.routes), node) {
386         struct v3_vnet_route * route_def = &(route->route_def);
387
388 /*
389         // CHECK SOURCE TYPE HERE
390         if ( (route_def->src_type != LINK_ANY) && 
391              ( (route_def->src_type != src_type) || 
392                ( (route_def->src_id != src_link) &&
393                  (route_def->src_id != -1)))) {
394             continue;
395         }
396 */
397
398         if ((route_def->dst_mac_qual == MAC_ANY) &&
399             (route_def->src_mac_qual == MAC_ANY)) {      
400             UPDATE_MATCHES(3);
401         }
402         
403         if (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) {
404             if (route_def->src_mac_qual != MAC_NOT) {
405                 if (route_def->dst_mac_qual == MAC_ANY) {
406                     UPDATE_MATCHES(6);
407                 } else if (route_def->dst_mac_qual != MAC_NOT &&
408                            memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
409                     UPDATE_MATCHES(8);
410                 }
411             }
412         }
413             
414         if (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
415             if (route_def->dst_mac_qual != MAC_NOT) {
416                 if (route_def->src_mac_qual == MAC_ANY) {
417                     UPDATE_MATCHES(6);
418                 } else if ((route_def->src_mac_qual != MAC_NOT) && 
419                            (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
420                     UPDATE_MATCHES(8);
421                 }
422             }
423         }
424             
425         if ((route_def->dst_mac_qual == MAC_NOT) &&
426             (memcmp(route_def->dst_mac, hdr->dst_mac, 6) != 0)) {
427             if (route_def->src_mac_qual == MAC_ANY) {
428                 UPDATE_MATCHES(5);
429             } else if ((route_def->src_mac_qual != MAC_NOT) && 
430                        (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {     
431                 UPDATE_MATCHES(7);
432             }
433         }
434         
435         if ((route_def->src_mac_qual == MAC_NOT) &&
436             (memcmp(route_def->src_mac, hdr->src_mac, 6) != 0)) {
437             if (route_def->dst_mac_qual == MAC_ANY) {
438                 UPDATE_MATCHES(5);
439             } else if ((route_def->dst_mac_qual != MAC_NOT) &&
440                        (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0)) {
441                 UPDATE_MATCHES(7);
442             }
443         }
444         
445         // Default route
446         if ( (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) &&
447              (route_def->dst_mac_qual == MAC_NONE)) {
448             UPDATE_MATCHES(4);
449         }
450     }
451
452     Vnet_Debug("VNET/P Core: match_route: Matches=%d\n", num_matches);
453
454     if (num_matches == 0) {
455         return NULL;
456     }
457
458     matches = (struct route_list *)Vnet_Malloc(sizeof(struct route_list) + 
459                                                (sizeof(struct vnet_route_info *) * num_matches));
460
461     matches->num_routes = num_matches;
462
463     {
464         int i = 0;
465         list_for_each_entry(route, &match_list, match_node) {
466             matches->routes[i++] = route;
467         }
468     }
469
470     return matches;
471 }
472
473
/* Route one packet to all matching destinations (bridge and/or devices).
 * Consults the route cache first, falling back to a full table scan via
 * match_route on a miss (caching the result).
 * Always returns 0, even when the packet is dropped. */
int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
    struct route_list * matched_routes = NULL;
    unsigned long flags;
    int i;

    int cpu = V3_Get_CPU();
    Vnet_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
               cpu, pkt->size, pkt->src_id, 
               pkt->src_type, pkt->dst_id, pkt->dst_type);
    if(net_debug >= 4){
        v3_hexdump(pkt->data, pkt->size, NULL, 0);
    }

    flags = vnet_lock_irqsave(vnet_state.lock);

    vnet_state.stats.rx_bytes += pkt->size;
    vnet_state.stats.rx_pkts++;

    /* cache lookup keyed on the packet's precomputed hash buffer */
    look_into_cache(pkt, &matched_routes);
    if (matched_routes == NULL) {  
        Vnet_Debug("VNET/P Core: send pkt Looking into routing table\n");
        
        matched_routes = match_route(pkt);
        
        if (matched_routes) {
            add_route_to_cache(pkt, matched_routes);
        } else {
            Vnet_Debug("VNET/P Core: Could not find route for packet... discards packet\n");
            vnet_unlock_irqrestore(vnet_state.lock, flags);
            return 0; /* do we return -1 here?*/
        }
    }

    /* NOTE(review): matched_routes is dereferenced below after the lock is
     * dropped, but clear_hash_cache() (run on route/device changes) frees
     * cached lists -- confirm concurrent add/del of routes is serialized
     * against packet delivery */
    vnet_unlock_irqrestore(vnet_state.lock, flags);

    Vnet_Debug("VNET/P Core: send pkt route matches %d\n", matched_routes->num_routes);

    /* fan the packet out to every matched destination */
    for (i = 0; i < matched_routes->num_routes; i++) {
        struct vnet_route_info * route = matched_routes->routes[i];
        
        if (route->route_def.dst_type == LINK_EDGE) {
            /* external destination: hand off to the bridge */
            struct vnet_brg_dev * bridge = vnet_state.bridge;
            pkt->dst_type = LINK_EDGE;
            pkt->dst_id = route->route_def.dst_id;

            if (bridge == NULL) {
                Vnet_Print(2, "VNET/P Core: No active bridge to sent data to\n");
                continue;
            }

            if(bridge->brg_ops.input(bridge->vm, pkt, bridge->private_data) < 0){
                Vnet_Print(2, "VNET/P Core: Packet not sent properly to bridge\n");
                continue;
            }         
            vnet_state.stats.tx_bytes += pkt->size;
            vnet_state.stats.tx_pkts ++;
        } else if (route->route_def.dst_type == LINK_INTERFACE) {
            /* local destination: deliver to the device's input callback */
            if (route->dst_dev == NULL){
                  Vnet_Print(2, "VNET/P Core: No active device to sent data to\n");
                continue;
            }

            if(route->dst_dev->dev_ops.input(route->dst_dev->vm, pkt, route->dst_dev->private_data) < 0) {
                Vnet_Print(2, "VNET/P Core: Packet not sent properly\n");
                continue;
            }
            vnet_state.stats.tx_bytes += pkt->size;
            vnet_state.stats.tx_pkts ++;
        } else {
            Vnet_Print(0, "VNET/P Core: Wrong dst type\n");
        }
    }
    
    return 0;
}
549
550
551 int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac, 
552                     struct v3_vnet_dev_ops *ops, int quote, int poll_state,
553                     void * priv_data){
554     struct vnet_dev * new_dev = NULL;
555     unsigned long flags;
556
557     new_dev = (struct vnet_dev *)Vnet_Malloc(sizeof(struct vnet_dev)); 
558
559     if (new_dev == NULL) {
560         Vnet_Print(0, "Malloc fails\n");
561         return -1;
562     }
563    
564     memcpy(new_dev->mac_addr, mac, 6);
565     new_dev->dev_ops.input = ops->input;
566     new_dev->private_data = priv_data;
567     new_dev->vm = vm;
568     new_dev->dev_id = 0;
569     new_dev->quote = quote<VNET_MAX_QUOTE?quote:VNET_MAX_QUOTE;
570     new_dev->poll = poll_state;
571
572     flags = vnet_lock_irqsave(vnet_state.lock);
573
574     if (dev_by_mac(mac) == NULL) {
575         list_add(&(new_dev->node), &(vnet_state.devs));
576         new_dev->dev_id = ++ vnet_state.dev_idx;
577         vnet_state.num_devs ++;
578     }
579
580     vnet_unlock_irqrestore(vnet_state.lock, flags);
581
582     /* if the device was found previosly the id should still be 0 */
583     if (new_dev->dev_id == 0) {
584         Vnet_Print(0, "VNET/P Core: Device Already exists\n");
585         return -1;
586     }
587
588     Vnet_Debug("VNET/P Core: Add Device: dev_id %d\n", new_dev->dev_id);
589
590     return new_dev->dev_id;
591 }
592
593
594 int v3_vnet_del_dev(int dev_id){
595     struct vnet_dev * dev = NULL;
596     unsigned long flags;
597
598     flags = vnet_lock_irqsave(vnet_state.lock);
599         
600     dev = dev_by_id(dev_id);
601     if (dev != NULL){
602         list_del(&(dev->node));
603         //del_routes_by_dev(dev_id);
604         vnet_state.num_devs --;
605     }
606         
607     vnet_unlock_irqrestore(vnet_state.lock, flags);
608
609     Vnet_Free(dev);
610
611     Vnet_Debug("VNET/P Core: Remove Device: dev_id %d\n", dev_id);
612
613     return 0;
614 }
615
616
617 int v3_vnet_stat(struct vnet_stat * stats){
618     stats->rx_bytes = vnet_state.stats.rx_bytes;
619     stats->rx_pkts = vnet_state.stats.rx_pkts;
620     stats->tx_bytes = vnet_state.stats.tx_bytes;
621     stats->tx_pkts = vnet_state.stats.tx_pkts;
622
623     return 0;
624 }
625
626 static void deinit_devices_list(){
627     struct vnet_dev * dev = NULL; 
628
629     list_for_each_entry(dev, &(vnet_state.devs), node) {
630         list_del(&(dev->node));
631         Vnet_Free(dev);
632     }
633 }
634
635 static void deinit_routes_list(){
636     struct vnet_route_info * route = NULL; 
637
638     list_for_each_entry(route, &(vnet_state.routes), node) {
639         list_del(&(route->node));
640         list_del(&(route->match_node));
641         Vnet_Free(route);
642     }
643 }
644
/* Register the single external bridge.
 * Returns 0 on success, -1 if a bridge is already set or allocation fails. */
int v3_vnet_add_bridge(struct v3_vm_info * vm,
                       struct v3_vnet_bridge_ops * ops,
                       uint8_t type,
                       void * priv_data) {
    unsigned long flags;
    int bridge_free = 0;
    struct vnet_brg_dev * tmp_bridge = NULL;    
    
    /* Reserve the bridge slot under the lock with a (void *)1 sentinel so
     * a concurrent caller sees it as taken while we allocate outside the
     * lock. */
    flags = vnet_lock_irqsave(vnet_state.lock);
    if (vnet_state.bridge == NULL) {
        bridge_free = 1;
        vnet_state.bridge = (void *)1;
    }
    vnet_unlock_irqrestore(vnet_state.lock, flags);

    if (bridge_free == 0) {
        PrintError("VNET/P Core: Bridge already set\n");
        return -1;
    }

    tmp_bridge = (struct vnet_brg_dev *)Vnet_Malloc(sizeof(struct vnet_brg_dev));

    if (tmp_bridge == NULL) {
        PrintError("Malloc Fails\n");
        /* release the sentinel so a later registration can succeed */
        vnet_state.bridge = NULL;
        return -1;
    }
    
    tmp_bridge->vm = vm;
    tmp_bridge->brg_ops.input = ops->input;
    tmp_bridge->brg_ops.poll = ops->poll;
    tmp_bridge->private_data = priv_data;
    tmp_bridge->type = type;
        
    /* make this atomic to avoid possible race conditions */
    flags = vnet_lock_irqsave(vnet_state.lock);
    vnet_state.bridge = tmp_bridge;
    vnet_unlock_irqrestore(vnet_state.lock, flags);

    return 0;
}
686
687
688 void v3_vnet_del_bridge(uint8_t type) {
689     unsigned long flags;
690     struct vnet_brg_dev * tmp_bridge = NULL;    
691     
692     flags = vnet_lock_irqsave(vnet_state.lock);
693         
694     if (vnet_state.bridge != NULL && vnet_state.bridge->type == type) {
695         tmp_bridge = vnet_state.bridge;
696        vnet_state.bridge = NULL;
697     }
698         
699     vnet_unlock_irqrestore(vnet_state.lock, flags);
700
701     if (tmp_bridge) {
702         Vnet_Free(tmp_bridge);
703     }
704 }
705
706
/* Polling-thread body: can be instantiated as multiple threads on
 * multiple cores, or run on a dedicated side core.
 * Round-robins over poll_devs: pop a device, poll it, push it back. */
static int vnet_tx_flush(void *args){
    struct vnet_dev * dev = NULL;
    int ret;

    Vnet_Print(0, "VNET/P Polling Thread Starting ....\n");

    /* we need thread sleep/wakeup in Palacios */
    while(!vnet_thread_should_stop()){
        dev = (struct vnet_dev *)v3_dequeue(vnet_state.poll_devs);
        if(dev != NULL){
            if(dev->poll && dev->dev_ops.poll != NULL){
                ret = dev->dev_ops.poll(dev->vm, dev->quote, dev->private_data);

                if (ret < 0){
                    PrintDebug("VNET/P: poll from device %p error!\n", dev);
                }

                /* NOTE(review): re-enqueue happens only on this branch, so a
                 * dequeued device with poll == 0 or no poll op leaves the
                 * queue permanently -- confirm intended */
                v3_enqueue(vnet_state.poll_devs, (addr_t)dev); 
            }
        }else { /* no device needs to be polled */
           /* sleep here? */
            Vnet_Yield();
        }
    }

    return 0;
}
738
739
740 int v3_init_vnet() {
741     memset(&vnet_state, 0, sizeof(vnet_state));
742         
743     INIT_LIST_HEAD(&(vnet_state.routes));
744     INIT_LIST_HEAD(&(vnet_state.devs));
745
746     vnet_state.num_devs = 0;
747     vnet_state.num_routes = 0;
748
749     if (vnet_lock_init(&(vnet_state.lock)) == -1){
750         PrintError("VNET/P: Fails to initiate lock\n");
751     }
752
753     vnet_state.route_cache = vnet_create_htable(0, &hash_fn, &hash_eq);
754     if (vnet_state.route_cache == NULL) {
755         PrintError("VNET/P: Fails to initiate route cache\n");
756         return -1;
757     }
758
759     vnet_state.poll_devs = v3_create_queue();
760
761     vnet_state.pkt_flush_thread = vnet_start_thread(vnet_tx_flush, NULL, "vnetd");
762
763     Vnet_Debug("VNET/P is initiated\n");
764
765     return 0;
766 }
767
768
/* Tear down global VNET state: lock, device and route lists, route
 * cache, and the bridge.
 * NOTE(review): pkt_flush_thread is not stopped/joined and poll_devs is
 * not freed here -- confirm that is handled elsewhere before teardown. */
void v3_deinit_vnet(){

    vnet_lock_deinit(&(vnet_state.lock));

    deinit_devices_list();
    deinit_routes_list();

    vnet_free_htable(vnet_state.route_cache, 1, 1);
    /* bridge may be NULL here; assumes Vnet_Free(NULL) is a no-op -- TODO confirm */
    Vnet_Free(vnet_state.bridge);
}
779
780