Palacios Public Git Repository

To checkout Palacios execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Move the VNET code outside of the Palacios core directory
[palacios.git] / palacios / src / vnet / vnet_core.c
1 /* 
2  * This file is part of the Palacios Virtual Machine Monitor developed
3  * by the V3VEE Project with funding from the United States National 
4  * Science Foundation and the Department of Energy.  
5  *
6  * The V3VEE Project is a joint project between Northwestern University
7  * and the University of New Mexico.  You can find out more at 
8  * http://www.v3vee.org
9  *
10  * Copyright (c) 2010, Lei Xia <lxia@northwestern.edu> 
11  * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>  
12  * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org> 
13  * All rights reserved.
14  *
15  * Author: Lei Xia <lxia@northwestern.edu>
16  *         Yuan Tang <ytang@northwestern.edu>
17  *
18  * This is free software.  You are permitted to use,
19  * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
20  */
21  
22 #include <vnet/vnet.h>
23 #include <palacios/vm_guest_mem.h>
24 #include <palacios/vmm_lock.h>
25 #include <palacios/vmm_queue.h>
26 #include <palacios/vmm_sprintf.h>
27 #include <palacios/vmm_ethernet.h>
28
/* Compile out verbose debug output unless VNET debugging was enabled
 * at configure time. */
#ifndef V3_CONFIG_DEBUG_VNET
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif

/* Runtime verbosity knob consulted by V3_Net_Print callers below. */
int v3_net_debug = 0;
35
/* Layout of an Ethernet frame header; used to pull the source and
 * destination MAC addresses out of a raw packet buffer when matching
 * routes. */
struct eth_hdr {
    uint8_t dst_mac[ETH_ALEN];
    uint8_t src_mac[ETH_ALEN];
    uint16_t type; /* indicates layer 3 protocol type */
} __attribute__((packed));
41
42
/* One registered VNET endpoint device (e.g. a guest NIC backend).
 * NOTE(review): packing a struct that holds pointers and a list_head
 * forces potentially misaligned accesses and this struct is never
 * serialized here -- confirm whether the attribute is really needed. */
struct vnet_dev {
    int dev_id;                      /* unique id assigned at registration; ids start at 1 */
    uint8_t mac_addr[ETH_ALEN];      /* MAC address used for lookup (dev_by_mac) */
    struct v3_vm_info * vm;          /* owning VM, passed back to dev_ops.input */
    struct v3_vnet_dev_ops dev_ops;  /* input() callback used to deliver packets */
    void * private_data;             /* opaque cookie handed back to dev_ops.input */

    struct list_head node;           /* linkage in vnet_state.devs */
} __attribute__((packed));
52
53
/* The single external bridge device connecting this VNET instance to
 * the outside; installed via v3_vnet_add_bridge. */
struct vnet_brg_dev {
    struct v3_vm_info * vm;            /* VM context passed to brg_ops callbacks */
    struct v3_vnet_bridge_ops brg_ops; /* input()/poll() callbacks */

    uint8_t type;                      /* bridge type tag supplied by the caller */

    void * private_data;               /* opaque cookie for brg_ops */
} __attribute__((packed));
62
63
64
/* One routing-table entry: the user-supplied rule plus device pointers
 * resolved from the rule's src/dst ids at insertion time. */
struct vnet_route_info {
    struct v3_vnet_route route_def;

    struct vnet_dev * dst_dev;  /* resolved when dst_type == LINK_INTERFACE; may be NULL */
    struct vnet_dev * src_dev;  /* resolved when src_type == LINK_INTERFACE; may be NULL */

    struct list_head node;       /* linkage in vnet_state.routes */
    struct list_head match_node; // used for route matching
};
74
75
76 struct route_list {
77     uint8_t hash_buf[VNET_HASH_SIZE];
78
79     uint32_t num_routes;
80     struct vnet_route_info * routes[0];
81 } __attribute__((packed));
82
83
/* One slot of the deferred-transmit ring: a copy of a packet plus the
 * page-granular, reused buffer holding its payload. */
struct queue_entry{
    uint8_t use;            /* 1 while the slot holds an unconsumed packet */
    struct v3_vnet_pkt pkt; /* copied packet metadata */
    uint8_t * data;         /* payload buffer, page allocated and grown on demand */
    uint32_t size_alloc;    /* current capacity of 'data' in bytes */
};
90
/* Fixed-capacity ring buffer of packets queued for asynchronous
 * transmission (producer: vnet_pkt_enqueue; consumer: the currently
 * disabled vnet_tx_flush thread). */
#define VNET_QUEUE_SIZE 1024
struct vnet_queue {
        struct queue_entry buf[VNET_QUEUE_SIZE];
        int head, tail;  /* consumer / producer indices, modulo VNET_QUEUE_SIZE */
        int count;       /* number of reserved slots */
        v3_lock_t lock;  /* protects head, tail and count (not entry contents) */
};
98
/* Global singleton state for the VNET core. */
static struct {
    struct list_head routes;  /* all vnet_route_info entries */
    struct list_head devs;    /* all registered vnet_dev endpoints */
    
    int num_routes;
    int num_devs;             /* doubles as the id counter for new devices */

    struct vnet_brg_dev * bridge; /* the single external bridge, or NULL */

    v3_lock_t lock;           /* protects routes, devs, bridge and stats */
    struct vnet_stat stats;   /* rx/tx packet and byte counters */

    void * pkt_flush_thread;  /* queue-draining thread (creation currently disabled) */

    struct vnet_queue pkt_q;  /* ring of packets queued for async tx */

    struct hashtable * route_cache; /* packet header hash -> struct route_list */
} vnet_state;
117         
118
#ifdef V3_CONFIG_DEBUG_VNET
/* Format a 6-byte MAC address into buf as "aa:bb:cc:dd:ee:ff".
 * buf must hold at least 18 bytes (17 characters plus NUL).
 * Fixed: "%2x" space-padded single-digit bytes (e.g. " a: 1:...");
 * the old bound of 100 also overstated callers' 50-byte buffers. */
static inline void mac_to_string(uint8_t * mac, char * buf) {
    snprintf(buf, 18, "%02x:%02x:%02x:%02x:%02x:%02x", 
             mac[0], mac[1], mac[2],
             mac[3], mac[4], mac[5]);
}

/* Dump one route definition via PrintDebug (debug builds only). */
static void print_route(struct v3_vnet_route * route){
    char str[50];

    mac_to_string(route->src_mac, str);
    PrintDebug("Src Mac (%s),  src_qual (%d)\n", 
               str, route->src_mac_qual);
    mac_to_string(route->dst_mac, str);
    PrintDebug("Dst Mac (%s),  dst_qual (%d)\n", 
               str, route->dst_mac_qual);
    /* Fixed: this line was missing its terminating newline */
    PrintDebug("Src dev id (%d), src type (%d)\n", 
               route->src_id, 
               route->src_type);
    PrintDebug("Dst dev id (%d), dst type (%d)\n", 
               route->dst_id, 
               route->dst_type);
}

/* Dump every entry of the global route list (debug builds only).
 * NOTE(review): reads vnet_state.routes without taking vnet_state.lock;
 * callers so far invoke it outside the lock. */
static void dump_routes(void){
    struct vnet_route_info *route;
    int i = 0;

    PrintDebug("\n========Dump routes starts ============\n");
    list_for_each_entry(route, &(vnet_state.routes), node) {
        PrintDebug("\nroute %d:\n", i++);

        print_route(&(route->route_def));
        if (route->route_def.dst_type == LINK_INTERFACE) {
            PrintDebug("dst_dev (%p), dst_dev_id (%d), dst_dev_ops(%p), dst_dev_data (%p)\n",
                route->dst_dev,
                route->dst_dev->dev_id,
                (void *)&(route->dst_dev->dev_ops),
                route->dst_dev->private_data);
        }
    }

    PrintDebug("\n========Dump routes end ============\n");
}

#endif
165
166
167 /* 
168  * A VNET packet is a packed struct with the hashed fields grouped together.
169  * This means we can generate the hash from an offset into the pkt struct
170  */
171 static inline uint_t hash_fn(addr_t hdr_ptr) {    
172     uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
173
174     return v3_hash_buffer(hdr_buf, VNET_HASH_SIZE);
175 }
176
177 static inline int hash_eq(addr_t key1, addr_t key2) {   
178     return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
179 }
180
181 static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
182     memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);    
183
184     if (v3_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
185         PrintError("VNET/P Core: Failed to insert new route entry to the cache\n");
186         return -1;
187     }
188     
189     return 0;
190 }
191
192 static int clear_hash_cache() {
193     v3_free_htable(vnet_state.route_cache, 1, 1);
194     vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);
195
196     return 0;
197 }
198
199 static int look_into_cache(const struct v3_vnet_pkt * pkt, 
200                            struct route_list ** routes) {
201     *routes = (struct route_list *)v3_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
202    
203     return 0;
204 }
205
206
207 static struct vnet_dev * dev_by_id(int idx) {
208     struct vnet_dev * dev = NULL; 
209
210     list_for_each_entry(dev, &(vnet_state.devs), node) {
211         int dev_id = dev->dev_id;
212
213         if (dev_id == idx)
214             return dev;
215     }
216
217     return NULL;
218 }
219
220 static struct vnet_dev * dev_by_mac(uint8_t * mac) {
221     struct vnet_dev * dev = NULL; 
222     
223     list_for_each_entry(dev, &(vnet_state.devs), node) {
224         if (!compare_ethaddr(dev->mac_addr, mac)){
225             return dev;
226         }
227     }
228
229     return NULL;
230 }
231
232
233 int v3_vnet_find_dev(uint8_t  * mac) {
234     struct vnet_dev * dev = NULL;
235
236     dev = dev_by_mac(mac);
237
238     if(dev != NULL) {
239         return dev->dev_id;
240     }
241
242     return -1;
243 }
244
245
246 int v3_vnet_add_route(struct v3_vnet_route route) {
247     struct vnet_route_info * new_route = NULL;
248     unsigned long flags; 
249
250     new_route = (struct vnet_route_info *)V3_Malloc(sizeof(struct vnet_route_info));
251     memset(new_route, 0, sizeof(struct vnet_route_info));
252
253 #ifdef V3_CONFIG_DEBUG_VNET
254     PrintDebug("VNET/P Core: add_route_entry:\n");
255     print_route(&route);
256 #endif
257     
258     memcpy(new_route->route_def.src_mac, route.src_mac, ETH_ALEN);
259     memcpy(new_route->route_def.dst_mac, route.dst_mac, ETH_ALEN);
260     new_route->route_def.src_mac_qual = route.src_mac_qual;
261     new_route->route_def.dst_mac_qual = route.dst_mac_qual;
262     new_route->route_def.dst_type = route.dst_type;
263     new_route->route_def.src_type = route.src_type;
264     new_route->route_def.src_id = route.src_id;
265     new_route->route_def.dst_id = route.dst_id;
266
267     if (new_route->route_def.dst_type == LINK_INTERFACE) {
268         new_route->dst_dev = dev_by_id(new_route->route_def.dst_id);
269     }
270
271     if (new_route->route_def.src_type == LINK_INTERFACE) {
272         new_route->src_dev = dev_by_id(new_route->route_def.src_id);
273     }
274
275
276     flags = v3_lock_irqsave(vnet_state.lock);
277
278     list_add(&(new_route->node), &(vnet_state.routes));
279     clear_hash_cache();
280
281     v3_unlock_irqrestore(vnet_state.lock, flags);
282    
283
284 #ifdef V3_CONFIG_DEBUG_VNET
285     dump_routes();
286 #endif
287
288     return 0;
289 }
290
291
292 /* delete all route entries with specfied src or dst device id */ 
293 static void inline del_routes_by_dev(int dev_id){
294     struct vnet_route_info * route = NULL;
295     unsigned long flags; 
296
297     flags = v3_lock_irqsave(vnet_state.lock);
298
299     list_for_each_entry(route, &(vnet_state.routes), node) {
300         if((route->route_def.dst_type == LINK_INTERFACE &&
301              route->route_def.dst_id == dev_id) ||
302              (route->route_def.src_type == LINK_INTERFACE &&
303               route->route_def.src_id == dev_id)){
304               
305             list_del(&(route->node));
306             list_del(&(route->match_node));
307             V3_Free(route);    
308         }
309     }
310
311     v3_unlock_irqrestore(vnet_state.lock, flags);
312 }
313
314 /* At the end allocate a route_list
315  * This list will be inserted into the cache so we don't need to free it
316  */
317 static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
318     struct vnet_route_info * route = NULL; 
319     struct route_list * matches = NULL;
320     int num_matches = 0;
321     int max_rank = 0;
322     struct list_head match_list;
323     struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
324     //    uint8_t src_type = pkt->src_type;
325     //  uint32_t src_link = pkt->src_id;
326
327 #ifdef V3_CONFIG_DEBUG_VNET
328     {
329         char dst_str[100];
330         char src_str[100];
331
332         mac_to_string(hdr->src_mac, src_str);  
333         mac_to_string(hdr->dst_mac, dst_str);
334         PrintDebug("VNET/P Core: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
335     }
336 #endif
337
338     INIT_LIST_HEAD(&match_list);
339     
340 #define UPDATE_MATCHES(rank) do {                               \
341         if (max_rank < (rank)) {                                \
342             max_rank = (rank);                                  \
343             INIT_LIST_HEAD(&match_list);                        \
344                                                                 \
345             list_add(&(route->match_node), &match_list);        \
346             num_matches = 1;                                    \
347         } else if (max_rank == (rank)) {                        \
348             list_add(&(route->match_node), &match_list);        \
349             num_matches++;                                      \
350         }                                                       \
351     } while (0)
352     
353
354     list_for_each_entry(route, &(vnet_state.routes), node) {
355         struct v3_vnet_route * route_def = &(route->route_def);
356
357 /*
358         // CHECK SOURCE TYPE HERE
359         if ( (route_def->src_type != LINK_ANY) && 
360              ( (route_def->src_type != src_type) || 
361                ( (route_def->src_id != src_link) &&
362                  (route_def->src_id != -1)))) {
363             continue;
364         }
365 */
366
367         if ((route_def->dst_mac_qual == MAC_ANY) &&
368             (route_def->src_mac_qual == MAC_ANY)) {      
369             UPDATE_MATCHES(3);
370         }
371         
372         if (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) {
373             if (route_def->src_mac_qual != MAC_NOT) {
374                 if (route_def->dst_mac_qual == MAC_ANY) {
375                     UPDATE_MATCHES(6);
376                 } else if (route_def->dst_mac_qual != MAC_NOT &&
377                            memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
378                     UPDATE_MATCHES(8);
379                 }
380             }
381         }
382             
383         if (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
384             if (route_def->dst_mac_qual != MAC_NOT) {
385                 if (route_def->src_mac_qual == MAC_ANY) {
386                     UPDATE_MATCHES(6);
387                 } else if ((route_def->src_mac_qual != MAC_NOT) && 
388                            (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
389                     UPDATE_MATCHES(8);
390                 }
391             }
392         }
393             
394         if ((route_def->dst_mac_qual == MAC_NOT) &&
395             (memcmp(route_def->dst_mac, hdr->dst_mac, 6) != 0)) {
396             if (route_def->src_mac_qual == MAC_ANY) {
397                 UPDATE_MATCHES(5);
398             } else if ((route_def->src_mac_qual != MAC_NOT) && 
399                        (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {     
400                 UPDATE_MATCHES(7);
401             }
402         }
403         
404         if ((route_def->src_mac_qual == MAC_NOT) &&
405             (memcmp(route_def->src_mac, hdr->src_mac, 6) != 0)) {
406             if (route_def->dst_mac_qual == MAC_ANY) {
407                 UPDATE_MATCHES(5);
408             } else if ((route_def->dst_mac_qual != MAC_NOT) &&
409                        (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0)) {
410                 UPDATE_MATCHES(7);
411             }
412         }
413         
414         // Default route
415         if ( (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) &&
416              (route_def->dst_mac_qual == MAC_NONE)) {
417             UPDATE_MATCHES(4);
418         }
419     }
420
421     PrintDebug("VNET/P Core: match_route: Matches=%d\n", num_matches);
422
423     if (num_matches == 0) {
424         return NULL;
425     }
426
427     matches = (struct route_list *)V3_Malloc(sizeof(struct route_list) + 
428                                 (sizeof(struct vnet_route_info *) * num_matches));
429
430     matches->num_routes = num_matches;
431
432     {
433         int i = 0;
434         list_for_each_entry(route, &match_list, match_node) {
435             matches->routes[i++] = route;
436         }
437     }
438
439     return matches;
440 }
441
442
/* Route one packet synchronously: find (or cache) its matching route
 * list, then hand the packet to each matched destination -- either the
 * external bridge (LINK_EDGE) or an endpoint device (LINK_INTERFACE).
 * Returns 0 in all cases, including "no route found".
 * Note: pkt->dst_type/dst_id are overwritten for LINK_EDGE routes. */
int vnet_tx_one_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
    struct route_list * matched_routes = NULL;
    unsigned long flags;
    int i;

    int cpu = V3_Get_CPU();
    V3_Net_Print(2, "VNET/P Core: cpu %d: pkt (size %d, src_id:%d, src_type: %d, dst_id: %d, dst_type: %d)\n",
                  cpu, pkt->size, pkt->src_id, 
                  pkt->src_type, pkt->dst_id, pkt->dst_type);
    if(v3_net_debug >= 4){
            v3_hexdump(pkt->data, pkt->size, NULL, 0);
    }

    flags = v3_lock_irqsave(vnet_state.lock);

    vnet_state.stats.rx_bytes += pkt->size;
    vnet_state.stats.rx_pkts++;

    /* fast path: the route cache, keyed by the packet's header hash */
    look_into_cache(pkt, &matched_routes);
    if (matched_routes == NULL) {  
        PrintDebug("VNET/P Core: send pkt Looking into routing table\n");
        
        /* slow path: scan the full routing table (lock held) */
        matched_routes = match_route(pkt);
        
        if (matched_routes) {
            /* the cache takes ownership of the list; it is freed when
             * the cache is cleared */
            add_route_to_cache(pkt, matched_routes);
        } else {
            PrintDebug("VNET/P Core: Could not find route for packet... discards packet\n");
            v3_unlock_irqrestore(vnet_state.lock, flags);
            return 0; /* do we return -1 here?*/
        }
    }

    /* NOTE(review): matched_routes is used below after the lock is
     * dropped; a concurrent clear_hash_cache() frees cached lists --
     * verify this window is safe */
    v3_unlock_irqrestore(vnet_state.lock, flags);

    PrintDebug("VNET/P Core: send pkt route matches %d\n", matched_routes->num_routes);

    for (i = 0; i < matched_routes->num_routes; i++) {
        struct vnet_route_info * route = matched_routes->routes[i];
        
        if (route->route_def.dst_type == LINK_EDGE) {
            /* deliver to the external bridge */
            struct vnet_brg_dev * bridge = vnet_state.bridge;
            pkt->dst_type = LINK_EDGE;
            pkt->dst_id = route->route_def.dst_id;

            if (bridge == NULL) {
                V3_Net_Print(2, "VNET/P Core: No active bridge to sent data to\n");
                 continue;
            }

            if(bridge->brg_ops.input(bridge->vm, pkt, bridge->private_data) < 0){
                V3_Net_Print(2, "VNET/P Core: Packet not sent properly to bridge\n");
                continue;
            }         
            vnet_state.stats.tx_bytes += pkt->size;
            vnet_state.stats.tx_pkts ++;
        } else if (route->route_def.dst_type == LINK_INTERFACE) {
            /* deliver to an endpoint device's input() callback */
            if (route->dst_dev == NULL){
                  V3_Net_Print(2, "VNET/P Core: No active device to sent data to\n");
                continue;
            }

            if(route->dst_dev->dev_ops.input(route->dst_dev->vm, pkt, route->dst_dev->private_data) < 0) {
                V3_Net_Print(2, "VNET/P Core: Packet not sent properly\n");
                continue;
            }
            vnet_state.stats.tx_bytes += pkt->size;
            vnet_state.stats.tx_pkts ++;
        } else {
            PrintError("VNET/P Core: Wrong dst type\n");
        }
    }
    
    return 0;
}
518
519
520 static int vnet_pkt_enqueue(struct v3_vnet_pkt * pkt){
521     unsigned long flags;
522     struct queue_entry * entry;
523     struct vnet_queue * q = &(vnet_state.pkt_q);
524     uint16_t num_pages;
525
526     flags = v3_lock_irqsave(q->lock);
527
528     if (q->count >= VNET_QUEUE_SIZE){
529         V3_Net_Print(1, "VNET Queue overflow!\n");
530         v3_unlock_irqrestore(q->lock, flags);
531         return -1;
532     }
533         
534     q->count ++;
535     entry = &(q->buf[q->tail++]);
536     q->tail %= VNET_QUEUE_SIZE;
537         
538     v3_unlock_irqrestore(q->lock, flags);
539
540     /* this is ugly, but should happen very unlikely */
541     while(entry->use);
542
543     if(entry->size_alloc < pkt->size){
544         if(entry->data != NULL){
545             V3_FreePages(V3_PAddr(entry->data), (entry->size_alloc / PAGE_SIZE));
546             entry->data = NULL;
547         }
548
549         num_pages = 1 + (pkt->size / PAGE_SIZE);
550         entry->data = V3_VAddr(V3_AllocPages(num_pages));
551         if(entry->data == NULL){
552             return -1;
553         }
554         entry->size_alloc = PAGE_SIZE * num_pages;
555     }
556
557     entry->pkt.data = entry->data;
558     memcpy(&(entry->pkt), pkt, sizeof(struct v3_vnet_pkt));
559     memcpy(entry->data, pkt->data, pkt->size);
560
561     entry->use = 1;
562
563     return 0;
564 }
565
566
567 int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data, int synchronize) {
568     if(synchronize){
569         vnet_tx_one_pkt(pkt, NULL);
570     }else {
571        vnet_pkt_enqueue(pkt);
572         V3_Net_Print(2, "VNET/P Core: Put pkt into Queue: pkt size %d\n", pkt->size);
573     }
574         
575     return 0;
576 }
577
578 int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t * mac, 
579                     struct v3_vnet_dev_ops *ops,
580                     void * priv_data){
581     struct vnet_dev * new_dev = NULL;
582     unsigned long flags;
583
584     new_dev = (struct vnet_dev *)V3_Malloc(sizeof(struct vnet_dev)); 
585
586     if (new_dev == NULL) {
587         PrintError("Malloc fails\n");
588         return -1;
589     }
590    
591     memcpy(new_dev->mac_addr, mac, 6);
592     new_dev->dev_ops.input = ops->input;
593     new_dev->private_data = priv_data;
594     new_dev->vm = vm;
595     new_dev->dev_id = 0;
596
597     flags = v3_lock_irqsave(vnet_state.lock);
598
599     if (dev_by_mac(mac) == NULL) {
600         list_add(&(new_dev->node), &(vnet_state.devs));
601         new_dev->dev_id = ++vnet_state.num_devs;
602     }
603
604     v3_unlock_irqrestore(vnet_state.lock, flags);
605
606     /* if the device was found previosly the id should still be 0 */
607     if (new_dev->dev_id == 0) {
608         PrintError("VNET/P Core: Device Already exists\n");
609         return -1;
610     }
611
612     PrintDebug("VNET/P Core: Add Device: dev_id %d\n", new_dev->dev_id);
613
614     return new_dev->dev_id;
615 }
616
617
618 int v3_vnet_del_dev(int dev_id){
619     struct vnet_dev * dev = NULL;
620     unsigned long flags;
621
622     flags = v3_lock_irqsave(vnet_state.lock);
623         
624     dev = dev_by_id(dev_id);
625     if (dev != NULL){
626         list_del(&(dev->node));
627         del_routes_by_dev(dev_id);
628     }
629         
630     v3_unlock_irqrestore(vnet_state.lock, flags);
631
632     V3_Free(dev);
633
634     PrintDebug("VNET/P Core: Remove Device: dev_id %d\n", dev_id);
635
636     return 0;
637 }
638
639
640 int v3_vnet_stat(struct vnet_stat * stats){
641         
642     stats->rx_bytes = vnet_state.stats.rx_bytes;
643     stats->rx_pkts = vnet_state.stats.rx_pkts;
644     stats->tx_bytes = vnet_state.stats.tx_bytes;
645     stats->tx_pkts = vnet_state.stats.tx_pkts;
646
647     return 0;
648 }
649
650 static void free_devices(){
651     struct vnet_dev * dev = NULL; 
652
653     list_for_each_entry(dev, &(vnet_state.devs), node) {
654         list_del(&(dev->node));
655         V3_Free(dev);
656     }
657 }
658
659 static void free_routes(){
660     struct vnet_route_info * route = NULL; 
661
662     list_for_each_entry(route, &(vnet_state.routes), node) {
663         list_del(&(route->node));
664         list_del(&(route->match_node));
665         V3_Free(route);
666     }
667 }
668
/* Install the (single) external bridge for this VNET instance.
 * Returns 0 on success, -1 if a bridge is already set or allocation
 * fails. */
int v3_vnet_add_bridge(struct v3_vm_info * vm,
                       struct v3_vnet_bridge_ops * ops,
                       uint8_t type,
                       void * priv_data) {
    unsigned long flags;
    int bridge_free = 0;
    struct vnet_brg_dev * tmp_bridge = NULL;    
    
    /* atomically claim the bridge slot with a placeholder pointer so a
     * concurrent caller cannot also pass the NULL check while we are
     * still allocating below */
    flags = v3_lock_irqsave(vnet_state.lock);
    if (vnet_state.bridge == NULL) {
        bridge_free = 1;
        vnet_state.bridge = (void *)1; /* sentinel; replaced further down */
    }
    v3_unlock_irqrestore(vnet_state.lock, flags);

    if (bridge_free == 0) {
        PrintError("VNET/P Core: Bridge already set\n");
        return -1;
    }

    tmp_bridge = (struct vnet_brg_dev *)V3_Malloc(sizeof(struct vnet_brg_dev));

    if (tmp_bridge == NULL) {
        PrintError("Malloc Fails\n");
        vnet_state.bridge = NULL; /* release the claimed slot */
        return -1;
    }
    
    tmp_bridge->vm = vm;
    tmp_bridge->brg_ops.input = ops->input;
    tmp_bridge->brg_ops.poll = ops->poll;
    tmp_bridge->private_data = priv_data;
    tmp_bridge->type = type;
        
    /* make this atomic to avoid possible race conditions */
    flags = v3_lock_irqsave(vnet_state.lock);
    vnet_state.bridge = tmp_bridge;
    v3_unlock_irqrestore(vnet_state.lock, flags);

    return 0;
}
710
#if 0
/* Disabled worker loop that would drain vnet_state.pkt_q: pops one
 * reserved slot at a time and transmits it via vnet_tx_one_pkt. Kept
 * under #if 0 until Palacios has thread sleep/wakeup support (see the
 * commented-out V3_CREATE_THREAD call in v3_init_vnet). */
static int vnet_tx_flush(void *args){
    unsigned long flags;
    struct queue_entry * entry;
    struct vnet_queue * q = &(vnet_state.pkt_q);

    V3_Print("VNET/P Handing Pkt Thread Starting ....\n");

    //V3_THREAD_SLEEP();
    /* we need thread sleep/wakeup in Palacios */
    while(1){
        flags = v3_lock_irqsave(q->lock);

        if (q->count <= 0){
            /* nothing queued: yield instead of spinning with the lock held */
            v3_unlock_irqrestore(q->lock, flags);
            v3_yield(NULL);
            //V3_THREAD_SLEEP();
        }else {
            /* claim the head slot; the producer may still be filling it */
            q->count --;
            entry = &(q->buf[q->head++]);
            q->head %= VNET_QUEUE_SIZE;

            v3_unlock_irqrestore(q->lock, flags);

            /* this is ugly, but should happen very unlikely */
            while(!entry->use);
            vnet_tx_one_pkt(&(entry->pkt), NULL);

            /* asynchronizely release allocated memory for buffer entry here */     
            entry->use = 0;

            V3_Net_Print(2, "vnet_tx_flush: pkt (size %d)\n", entry->pkt.size);   
        }
    }
}
#endif
747
748 int v3_init_vnet() {
749     memset(&vnet_state, 0, sizeof(vnet_state));
750         
751     INIT_LIST_HEAD(&(vnet_state.routes));
752     INIT_LIST_HEAD(&(vnet_state.devs));
753
754     vnet_state.num_devs = 0;
755     vnet_state.num_routes = 0;
756
757     if (v3_lock_init(&(vnet_state.lock)) == -1){
758         PrintError("VNET/P Core: Fails to initiate lock\n");
759     }
760
761     vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);
762     if (vnet_state.route_cache == NULL) {
763         PrintError("VNET/P Core: Fails to initiate route cache\n");
764         return -1;
765     }
766
767     v3_lock_init(&(vnet_state.pkt_q.lock));
768
769     //vnet_state.pkt_flush_thread = V3_CREATE_THREAD(vnet_tx_flush, NULL, "VNET_Pkts");
770
771     PrintDebug("VNET/P Core is initiated\n");
772
773     return 0;
774 }
775
776
777 void v3_deinit_vnet(){
778
779     v3_lock_deinit(&(vnet_state.lock));
780
781     free_devices();
782     free_routes();
783
784     v3_free_htable(vnet_state.route_cache, 1, 1);
785     V3_Free(vnet_state.bridge);
786 }
787
788