Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, execute

  cd palacios
  git checkout --track -b devel origin/devel

The other branches work the same way; an example is sketched below.
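For instance, to see which branches the repository offers and then track one of the release branches (the branch name "Release-1.2" below is only a placeholder; substitute a name from the listing), you could run

  git branch -r
  git checkout --track -b Release-1.2 origin/Release-1.2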


compile fix
palacios/src/palacios/vmm_vnet_core.c
/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico.  You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2010, Lei Xia <lxia@northwestern.edu>
 * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>
 * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Lei Xia <lxia@northwestern.edu>
 *         Yuan Tang <ytang@northwestern.edu>
 *
 * This is free software.  You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */

#include <palacios/vmm_vnet.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_lock.h>
#include <palacios/vmm_queue.h>
#include <palacios/vmm_sprintf.h>
#include <palacios/vmm_ethernet.h>

#ifndef CONFIG_DEBUG_VNET
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

int v3_net_debug = 0;

struct eth_hdr {
    uint8_t dst_mac[ETH_ALEN];
    uint8_t src_mac[ETH_ALEN];
    uint16_t type; /* indicates layer 3 protocol type */
} __attribute__((packed));


/* a guest/virtual NIC registered with the VNET core */
struct vnet_dev {
    int dev_id;
    uint8_t mac_addr[ETH_ALEN];
    struct v3_vm_info * vm;
    struct v3_vnet_dev_ops dev_ops;
    void * private_data;

    struct list_head node;
} __attribute__((packed));


/* the (single) bridge device connecting VNET to the outside network */
struct vnet_brg_dev {
    struct v3_vm_info * vm;
    struct v3_vnet_bridge_ops brg_ops;

    uint8_t type;

    void * private_data;
} __attribute__((packed));


/* a routing rule plus its resolved source/destination devices */
struct vnet_route_info {
    struct v3_vnet_route route_def;

    struct vnet_dev * dst_dev;
    struct vnet_dev * src_dev;

    struct list_head node;
    struct list_head match_node; // used for route matching
};


/* a cached set of routes that all match one packet header hash */
struct route_list {
    uint8_t hash_buf[VNET_HASH_SIZE];

    uint32_t num_routes;
    struct vnet_route_info * routes[0];
} __attribute__((packed));


struct queue_entry{
    uint8_t use;
    struct v3_vnet_pkt pkt;
    uint8_t data[ETHERNET_PACKET_LEN];
};

#define VNET_QUEUE_SIZE 10240

/* fixed-size ring buffer used for asynchronous packet transmission */
struct vnet_queue {
    struct queue_entry buf[VNET_QUEUE_SIZE];
    int head, tail;
    int count;
    v3_lock_t lock;
};

/* global state of the VNET core */
static struct {
    struct list_head routes;
    struct list_head devs;

    int num_routes;
    int num_devs;

    struct vnet_brg_dev * bridge;

    v3_lock_t lock;
    struct vnet_stat stats;

    void * pkt_flush_thread;

    struct vnet_queue pkt_q;

    struct hashtable * route_cache;
} vnet_state;

#ifdef CONFIG_DEBUG_VNET
static inline void mac_to_string(uint8_t * mac, char * buf) {
    /* buf must hold at least 18 bytes ("xx:xx:xx:xx:xx:xx\0") */
    snprintf(buf, 100, "%02x:%02x:%02x:%02x:%02x:%02x",
             mac[0], mac[1], mac[2],
             mac[3], mac[4], mac[5]);
}

static void print_route(struct v3_vnet_route * route){
    char str[100];

    mac_to_string(route->src_mac, str);
    PrintDebug("Src Mac (%s),  src_qual (%d)\n",
               str, route->src_mac_qual);
    mac_to_string(route->dst_mac, str);
    PrintDebug("Dst Mac (%s),  dst_qual (%d)\n",
               str, route->dst_mac_qual);
    PrintDebug("Src dev id (%d), src type (%d)",
               route->src_id,
               route->src_type);
    PrintDebug("Dst dev id (%d), dst type (%d)\n",
               route->dst_id,
               route->dst_type);
}

static void dump_routes(){
    struct vnet_route_info *route;

    int i = 0;
    PrintDebug("\n========Dump routes starts ============\n");
    list_for_each_entry(route, &(vnet_state.routes), node) {
        PrintDebug("\nroute %d:\n", i++);

        print_route(&(route->route_def));
        if (route->route_def.dst_type == LINK_INTERFACE) {
            PrintDebug("dst_dev (%p), dst_dev_id (%d), dst_dev_ops(%p), dst_dev_data (%p)\n",
                route->dst_dev,
                route->dst_dev->dev_id,
                (void *)&(route->dst_dev->dev_ops),
                route->dst_dev->private_data);
        }
    }

    PrintDebug("\n========Dump routes end ============\n");
}

#endif


/*
 * A VNET packet is a packed struct with the hashed fields grouped together.
 * This means we can generate the hash from an offset into the pkt struct
 */
static inline uint_t hash_fn(addr_t hdr_ptr) {
    uint8_t * hdr_buf = (uint8_t *)hdr_ptr;

    return v3_hash_buffer(hdr_buf, VNET_HASH_SIZE);
}

static inline int hash_eq(addr_t key1, addr_t key2) {
    return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
}

static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
    memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);

    if (v3_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
        PrintError("VNET/P Core: Failed to insert new route entry to the cache\n");
        return -1;
    }

    return 0;
}

static int clear_hash_cache() {
    v3_free_htable(vnet_state.route_cache, 1, 1);
    vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);

    return 0;
}

static int look_into_cache(const struct v3_vnet_pkt * pkt,
                           struct route_list ** routes) {
    *routes = (struct route_list *)v3_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));

    return 0;
}


static struct vnet_dev * dev_by_id(int idx) {
    struct vnet_dev * dev = NULL;

    list_for_each_entry(dev, &(vnet_state.devs), node) {
        int dev_id = dev->dev_id;

        if (dev_id == idx)
            return dev;
    }

    return NULL;
}

static struct vnet_dev * dev_by_mac(uint8_t * mac) {
    struct vnet_dev * dev = NULL;

    list_for_each_entry(dev, &(vnet_state.devs), node) {
        if (!compare_ethaddr(dev->mac_addr, mac)){
            return dev;
        }
    }

    return NULL;
}


int v3_vnet_find_dev(uint8_t * mac) {
    struct vnet_dev * dev = NULL;

    dev = dev_by_mac(mac);

    if(dev != NULL) {
        return dev->dev_id;
    }

    return -1;
}


int v3_vnet_add_route(struct v3_vnet_route route) {
    struct vnet_route_info * new_route = NULL;
    unsigned long flags;

    new_route = (struct vnet_route_info *)V3_Malloc(sizeof(struct vnet_route_info));

    if (new_route == NULL) {
        PrintError("VNET/P Core: Malloc fails\n");
        return -1;
    }

    memset(new_route, 0, sizeof(struct vnet_route_info));

#ifdef CONFIG_DEBUG_VNET
    PrintDebug("VNET/P Core: add_route_entry:\n");
    print_route(&route);
#endif

    memcpy(new_route->route_def.src_mac, route.src_mac, ETH_ALEN);
    memcpy(new_route->route_def.dst_mac, route.dst_mac, ETH_ALEN);
    new_route->route_def.src_mac_qual = route.src_mac_qual;
    new_route->route_def.dst_mac_qual = route.dst_mac_qual;
    new_route->route_def.dst_type = route.dst_type;
    new_route->route_def.src_type = route.src_type;
    new_route->route_def.src_id = route.src_id;
    new_route->route_def.dst_id = route.dst_id;

    if (new_route->route_def.dst_type == LINK_INTERFACE) {
        new_route->dst_dev = dev_by_id(new_route->route_def.dst_id);
    }

    if (new_route->route_def.src_type == LINK_INTERFACE) {
        new_route->src_dev = dev_by_id(new_route->route_def.src_id);
    }

    flags = v3_lock_irqsave(vnet_state.lock);

    list_add(&(new_route->node), &(vnet_state.routes));
    clear_hash_cache();

    v3_unlock_irqrestore(vnet_state.lock, flags);

#ifdef CONFIG_DEBUG_VNET
    dump_routes();
#endif

    return 0;
}

/* delete all route entries with the specified src or dst device id */
static inline void del_routes_by_dev(int dev_id){
    struct vnet_route_info * route = NULL;
    struct vnet_route_info * tmp_route = NULL;
    unsigned long flags;

    flags = v3_lock_irqsave(vnet_state.lock);

    /* use the _safe iterator because entries are removed while traversing */
    list_for_each_entry_safe(route, tmp_route, &(vnet_state.routes), node) {
        if((route->route_def.dst_type == LINK_INTERFACE &&
             route->route_def.dst_id == dev_id) ||
             (route->route_def.src_type == LINK_INTERFACE &&
              route->route_def.src_id == dev_id)){

            list_del(&(route->node));
            list_del(&(route->match_node));
            V3_Free(route);
        }
    }

    v3_unlock_irqrestore(vnet_state.lock, flags);
}

/* At the end allocate a route_list
 * This list will be inserted into the cache so we don't need to free it
 */
static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
    struct vnet_route_info * route = NULL;
    struct route_list * matches = NULL;
    int num_matches = 0;
    int max_rank = 0;
    struct list_head match_list;
    struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
    //    uint8_t src_type = pkt->src_type;
    //  uint32_t src_link = pkt->src_id;

#ifdef CONFIG_DEBUG_VNET
    {
        char dst_str[100];
        char src_str[100];

        mac_to_string(hdr->src_mac, src_str);
        mac_to_string(hdr->dst_mac, dst_str);
        PrintDebug("VNET/P Core: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
    }
#endif

    INIT_LIST_HEAD(&match_list);

#define UPDATE_MATCHES(rank) do {                               \
        if (max_rank < (rank)) {                                \
            max_rank = (rank);                                  \
            INIT_LIST_HEAD(&match_list);                        \
                                                                \
            list_add(&(route->match_node), &match_list);        \
            num_matches = 1;                                    \
        } else if (max_rank == (rank)) {                        \
            list_add(&(route->match_node), &match_list);        \
            num_matches++;                                      \
        }                                                       \
    } while (0)


    list_for_each_entry(route, &(vnet_state.routes), node) {
        struct v3_vnet_route * route_def = &(route->route_def);

/*
        // CHECK SOURCE TYPE HERE
        if ( (route_def->src_type != LINK_ANY) &&
             ( (route_def->src_type != src_type) ||
               ( (route_def->src_id != src_link) &&
                 (route_def->src_id != -1)))) {
            continue;
        }
*/

        if ((route_def->dst_mac_qual == MAC_ANY) &&
            (route_def->src_mac_qual == MAC_ANY)) {
            UPDATE_MATCHES(3);
        }

        if (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) {
            if (route_def->src_mac_qual != MAC_NOT) {
                if (route_def->dst_mac_qual == MAC_ANY) {
                    UPDATE_MATCHES(6);
                } else if (route_def->dst_mac_qual != MAC_NOT &&
                           memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
                    UPDATE_MATCHES(8);
                }
            }
        }

        if (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
            if (route_def->dst_mac_qual != MAC_NOT) {
                if (route_def->src_mac_qual == MAC_ANY) {
                    UPDATE_MATCHES(6);
                } else if ((route_def->src_mac_qual != MAC_NOT) &&
                           (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
                    UPDATE_MATCHES(8);
                }
            }
        }

        if ((route_def->dst_mac_qual == MAC_NOT) &&
            (memcmp(route_def->dst_mac, hdr->dst_mac, 6) != 0)) {
            if (route_def->src_mac_qual == MAC_ANY) {
                UPDATE_MATCHES(5);
            } else if ((route_def->src_mac_qual != MAC_NOT) &&
                       (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
                UPDATE_MATCHES(7);
            }
        }

        if ((route_def->src_mac_qual == MAC_NOT) &&
            (memcmp(route_def->src_mac, hdr->src_mac, 6) != 0)) {
            if (route_def->dst_mac_qual == MAC_ANY) {
                UPDATE_MATCHES(5);
            } else if ((route_def->dst_mac_qual != MAC_NOT) &&
                       (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0)) {
                UPDATE_MATCHES(7);
            }
        }

        // Default route
        if ( (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) &&
             (route_def->dst_mac_qual == MAC_NONE)) {
            UPDATE_MATCHES(4);
        }
    }

    PrintDebug("VNET/P Core: match_route: Matches=%d\n", num_matches);

    if (num_matches == 0) {
        return NULL;
    }

    matches = (struct route_list *)V3_Malloc(sizeof(struct route_list) +
                                (sizeof(struct vnet_route_info *) * num_matches));

    if (matches == NULL) {
        PrintError("VNET/P Core: Malloc fails\n");
        return NULL;
    }

    matches->num_routes = num_matches;

    {
        int i = 0;
        list_for_each_entry(route, &match_list, match_node) {
            matches->routes[i++] = route;
        }
    }

    return matches;
}


int vnet_tx_one_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
    struct route_list * matched_routes = NULL;
    unsigned long flags;
    int i;

    int cpu = V3_Get_CPU();
    V3_Net_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
                  cpu, pkt->size, pkt->src_id,
                  pkt->src_type, pkt->dst_id, pkt->dst_type);
    if(v3_net_debug >= 4){
        v3_hexdump(pkt->data, pkt->size, NULL, 0);
    }

    flags = v3_lock_irqsave(vnet_state.lock);

    vnet_state.stats.rx_bytes += pkt->size;
    vnet_state.stats.rx_pkts++;

    look_into_cache(pkt, &matched_routes);
    if (matched_routes == NULL) {
        PrintDebug("VNET/P Core: send pkt Looking into routing table\n");

        matched_routes = match_route(pkt);

        if (matched_routes) {
            add_route_to_cache(pkt, matched_routes);
        } else {
            PrintDebug("VNET/P Core: Could not find route for packet... discarding packet\n");
            v3_unlock_irqrestore(vnet_state.lock, flags);
            return 0; /* do we return -1 here?*/
        }
    }

    v3_unlock_irqrestore(vnet_state.lock, flags);

    PrintDebug("VNET/P Core: send pkt route matches %d\n", matched_routes->num_routes);

    for (i = 0; i < matched_routes->num_routes; i++) {
        struct vnet_route_info * route = matched_routes->routes[i];

        if (route->route_def.dst_type == LINK_EDGE) {
            struct vnet_brg_dev * bridge = vnet_state.bridge;
            pkt->dst_type = LINK_EDGE;
            pkt->dst_id = route->route_def.dst_id;

            if (bridge == NULL) {
                V3_Net_Print(2, "VNET/P Core: No active bridge to send data to\n");
                continue;
            }

            if(bridge->brg_ops.input(bridge->vm, pkt, bridge->private_data) < 0){
                V3_Net_Print(2, "VNET/P Core: Packet not sent properly to bridge\n");
                continue;
            }
            vnet_state.stats.tx_bytes += pkt->size;
            vnet_state.stats.tx_pkts ++;
        } else if (route->route_def.dst_type == LINK_INTERFACE) {
            if (route->dst_dev == NULL){
                V3_Net_Print(2, "VNET/P Core: No active device to send data to\n");
                continue;
            }

            if(route->dst_dev->dev_ops.input(route->dst_dev->vm, pkt, route->dst_dev->private_data) < 0) {
                V3_Net_Print(2, "VNET/P Core: Packet not sent properly\n");
                continue;
            }
            vnet_state.stats.tx_bytes += pkt->size;
            vnet_state.stats.tx_pkts ++;
        } else {
            PrintError("VNET/P Core: Wrong dst type\n");
        }
    }

    return 0;
}


static int vnet_pkt_enqueue(struct v3_vnet_pkt * pkt){
    unsigned long flags;
    struct queue_entry * entry;
    struct vnet_queue * q = &(vnet_state.pkt_q);

    flags = v3_lock_irqsave(q->lock);

    if (q->count >= VNET_QUEUE_SIZE){
        V3_Net_Print(1, "VNET Queue overflow!\n");
        v3_unlock_irqrestore(q->lock, flags);
        return -1;
    }

    q->count ++;
    entry = &(q->buf[q->tail++]);
    q->tail %= VNET_QUEUE_SIZE;

    v3_unlock_irqrestore(q->lock, flags);

    /* this is ugly, but should rarely ever spin */
    while(entry->use);

    /* copy the packet descriptor and payload into the queue entry,
     * then point the queued descriptor at its own copy of the data */
    memcpy(&(entry->pkt), pkt, sizeof(struct v3_vnet_pkt));
    memcpy(entry->data, pkt->data, pkt->size);
    entry->pkt.data = entry->data;

    entry->use = 1;

    return 0;
}


int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data, int synchronize) {
    if(synchronize){
        vnet_tx_one_pkt(pkt, NULL);
    } else {
        vnet_pkt_enqueue(pkt);
        V3_Net_Print(2, "VNET/P Core: Put pkt into Queue: pkt size %d\n", pkt->size);
    }

    return 0;
}

int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac,
                    struct v3_vnet_dev_ops *ops,
                    void * priv_data){
    struct vnet_dev * new_dev = NULL;
    unsigned long flags;

    new_dev = (struct vnet_dev *)V3_Malloc(sizeof(struct vnet_dev));

    if (new_dev == NULL) {
        PrintError("Malloc fails\n");
        return -1;
    }

    memcpy(new_dev->mac_addr, mac, ETH_ALEN);
    new_dev->dev_ops.input = ops->input;
    new_dev->private_data = priv_data;
    new_dev->vm = vm;
    new_dev->dev_id = 0;

    flags = v3_lock_irqsave(vnet_state.lock);

    if (dev_by_mac(mac) == NULL) {
        list_add(&(new_dev->node), &(vnet_state.devs));
        new_dev->dev_id = ++vnet_state.num_devs;
    }

    v3_unlock_irqrestore(vnet_state.lock, flags);

    /* if the device was found previously the id should still be 0 */
    if (new_dev->dev_id == 0) {
        PrintError("VNET/P Core: Device Already exists\n");
        V3_Free(new_dev);
        return -1;
    }

    PrintDebug("VNET/P Core: Add Device: dev_id %d\n", new_dev->dev_id);

    return new_dev->dev_id;
}


int v3_vnet_del_dev(int dev_id){
    struct vnet_dev * dev = NULL;
    unsigned long flags;

    flags = v3_lock_irqsave(vnet_state.lock);

    dev = dev_by_id(dev_id);
    if (dev != NULL){
        list_del(&(dev->node));
    }

    v3_unlock_irqrestore(vnet_state.lock, flags);

    if (dev != NULL) {
        /* del_routes_by_dev acquires vnet_state.lock itself,
         * so call it after releasing the lock */
        del_routes_by_dev(dev_id);
        V3_Free(dev);
    }

    PrintDebug("VNET/P Core: Remove Device: dev_id %d\n", dev_id);

    return 0;
}


int v3_vnet_stat(struct vnet_stat * stats){

    stats->rx_bytes = vnet_state.stats.rx_bytes;
    stats->rx_pkts = vnet_state.stats.rx_pkts;
    stats->tx_bytes = vnet_state.stats.tx_bytes;
    stats->tx_pkts = vnet_state.stats.tx_pkts;

    return 0;
}

static void free_devices(){
    struct vnet_dev * dev = NULL;
    struct vnet_dev * tmp_dev = NULL;

    /* use the _safe iterators because entries are freed while traversing */
    list_for_each_entry_safe(dev, tmp_dev, &(vnet_state.devs), node) {
        list_del(&(dev->node));
        V3_Free(dev);
    }
}

static void free_routes(){
    struct vnet_route_info * route = NULL;
    struct vnet_route_info * tmp_route = NULL;

    list_for_each_entry_safe(route, tmp_route, &(vnet_state.routes), node) {
        list_del(&(route->node));
        list_del(&(route->match_node));
        V3_Free(route);
    }
}

int v3_vnet_add_bridge(struct v3_vm_info * vm,
                       struct v3_vnet_bridge_ops * ops,
                       uint8_t type,
                       void * priv_data) {
    unsigned long flags;
    int bridge_free = 0;
    struct vnet_brg_dev * tmp_bridge = NULL;

    flags = v3_lock_irqsave(vnet_state.lock);
    if (vnet_state.bridge == NULL) {
        bridge_free = 1;
        vnet_state.bridge = (void *)1; /* placeholder to reserve the bridge slot */
    }
    v3_unlock_irqrestore(vnet_state.lock, flags);

    if (bridge_free == 0) {
        PrintError("VNET/P Core: Bridge already set\n");
        return -1;
    }

    tmp_bridge = (struct vnet_brg_dev *)V3_Malloc(sizeof(struct vnet_brg_dev));

    if (tmp_bridge == NULL) {
        PrintError("Malloc Fails\n");
        vnet_state.bridge = NULL;
        return -1;
    }

    tmp_bridge->vm = vm;
    tmp_bridge->brg_ops.input = ops->input;
    tmp_bridge->brg_ops.poll = ops->poll;
    tmp_bridge->private_data = priv_data;
    tmp_bridge->type = type;

    /* make this atomic to avoid possible race conditions */
    flags = v3_lock_irqsave(vnet_state.lock);
    vnet_state.bridge = tmp_bridge;
    v3_unlock_irqrestore(vnet_state.lock, flags);

    return 0;
}


static int vnet_tx_flush(void *args){
    unsigned long flags;
    struct queue_entry * entry;
    struct vnet_queue * q = &(vnet_state.pkt_q);

    V3_Print("VNET/P Handling Pkt Thread Starting ....\n");

    //V3_THREAD_SLEEP();
    /* we need thread sleep/wakeup in Palacios */
    while(1){
        flags = v3_lock_irqsave(q->lock);

        if (q->count <= 0){
            v3_unlock_irqrestore(q->lock, flags);
            v3_yield(NULL);
            //V3_THREAD_SLEEP();
        } else {
            q->count --;
            entry = &(q->buf[q->head++]);
            q->head %= VNET_QUEUE_SIZE;

            v3_unlock_irqrestore(q->lock, flags);

            /* this is ugly, but should rarely ever spin */
            while(!entry->use);
            vnet_tx_one_pkt(&(entry->pkt), NULL);
            entry->use = 0;

            V3_Net_Print(2, "vnet_tx_flush: pkt (size %d)\n", entry->pkt.size);
        }
    }
}

int v3_init_vnet() {
    memset(&vnet_state, 0, sizeof(vnet_state));

    INIT_LIST_HEAD(&(vnet_state.routes));
    INIT_LIST_HEAD(&(vnet_state.devs));

    vnet_state.num_devs = 0;
    vnet_state.num_routes = 0;

    if (v3_lock_init(&(vnet_state.lock)) == -1){
        PrintError("VNET/P Core: Failed to initialize lock\n");
    }

    vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);
    if (vnet_state.route_cache == NULL) {
        PrintError("VNET/P Core: Failed to initialize route cache\n");
        return -1;
    }

    v3_lock_init(&(vnet_state.pkt_q.lock));

    vnet_state.pkt_flush_thread = V3_CREATE_THREAD(vnet_tx_flush, NULL, "VNET_Pkts");

    PrintDebug("VNET/P Core initialized\n");

    return 0;
}


void v3_deinit_vnet(){

    v3_lock_deinit(&(vnet_state.lock));

    free_devices();
    free_routes();

    v3_free_htable(vnet_state.route_cache, 1, 1);
    V3_Free(vnet_state.bridge);
}