/*
 * This file is part of the Palacios Virtual Machine Monitor developed
 * by the V3VEE Project with funding from the United States National
 * Science Foundation and the Department of Energy.
 *
 * The V3VEE Project is a joint project between Northwestern University
 * and the University of New Mexico. You can find out more at
 * http://www.v3vee.org
 *
 * Copyright (c) 2009, Lei Xia <lxia@northwestern.edu>
 * Copyright (c) 2009, Yuan Tang <ytang@northwestern.edu>
 * Copyright (c) 2009, The V3VEE Project <http://www.v3vee.org>
 * All rights reserved.
 *
 * Author: Lei Xia <lxia@northwestern.edu>
 *         Yuan Tang <ytang@northwestern.edu>
 *
 * This is free software. You are permitted to use,
 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
 */
#include <palacios/vmm_vnet.h>
#include <palacios/vm_guest_mem.h>
#include <palacios/vmm_lock.h>
#include <palacios/vmm_queue.h>
#include <palacios/vmm_sprintf.h>
#ifndef CONFIG_DEBUG_VNET
#undef PrintDebug
#define PrintDebug(fmt, args...)
#endif
struct eth_hdr {
    uint8_t dst_mac[6];
    uint8_t src_mac[6];
    uint16_t type; // indicates layer 3 protocol type
} __attribute__((packed));
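/* For reference: eth_hdr.type holds the EtherType in network byte order,
 * e.g. 0x0800 for IPv4 and 0x0806 for ARP. */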
struct vnet_dev {
    int dev_id;
    uint8_t mac_addr[6];
    struct v3_vm_info * vm;

    int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data);
    void * private_data;

    struct list_head node;
} __attribute__((packed));
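/* Note: dev_id 0 means "not yet registered". v3_vnet_add_dev() below
 * assigns ids starting from 1 and relies on a leftover 0 to detect a
 * duplicate MAC registration. */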
#define BRIDGE_BUF_SIZE 512
struct bridge_pkts_buf {
    int start;
    int end;
    int num;
    v3_lock_t lock;
    struct v3_vnet_pkt pkts[BRIDGE_BUF_SIZE];
    uint8_t datas[ETHERNET_PACKET_LEN * BRIDGE_BUF_SIZE];
};
struct vnet_brg_dev {
    struct v3_vm_info * vm;

    int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data);
    void (*xcall_input)(void * data);
    int (*polling_pkt)(struct v3_vm_info * vm, void * private_data);

    void * private_data;
    int disabled;

    uint16_t max_delayed_pkts;
    long max_latency; // in cycles

    struct bridge_pkts_buf recv_buf; // packets buffered for the bridge
} __attribute__((packed));
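/* max_delayed_pkts and max_latency control batching toward the bridge:
 * with max_delayed_pkts <= 1, send_to_bridge() delivers each packet
 * immediately; otherwise packets accumulate in recv_buf until
 * v3_vnet_heartbeat() sees max_latency cycles elapse and calls
 * flush_bridge_pkts(). */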
struct vnet_route_info {
    struct v3_vnet_route route_def;

    struct vnet_dev * dst_dev;
    struct vnet_dev * src_dev;

    struct list_head node;
    struct list_head match_node; // used for route matching
};


struct route_list {
    uint8_t hash_buf[VNET_HASH_SIZE];

    uint32_t num_routes;
    struct vnet_route_info * routes[0];
} __attribute__((packed));
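/* route_list uses a C flexible array member: match_route() below allocates
 * the header and the pointer array in a single V3_Malloc() call, roughly:
 *
 *   struct route_list * l =
 *       V3_Malloc(sizeof(struct route_list) +
 *                 (sizeof(struct vnet_route_info *) * n));
 *   l->num_routes = n;
 */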
static struct {
    struct list_head routes;
    struct list_head devs;

    int num_routes;
    int num_devs;

    struct vnet_brg_dev * bridge;

    v3_lock_t lock;

    struct hashtable * route_cache;

    struct bridge_pkts_buf in_buf; // incoming packets buffer
} vnet_state;
#ifdef CONFIG_DEBUG_VNET
static inline void mac_to_string(uint8_t mac[6], char * buf) {
    // callers pass buffers of at least 100 bytes; MAC bytes print in hex
    snprintf(buf, 100, "%02x:%02x:%02x:%02x:%02x:%02x",
             mac[0], mac[1], mac[2],
             mac[3], mac[4], mac[5]);
}
static void print_route(struct vnet_route_info * route){
    char str[100];

    mac_to_string(route->route_def.src_mac, str);
    PrintDebug("Src Mac (%s), src_qual (%d)\n",
               str, route->route_def.src_mac_qual);

    mac_to_string(route->route_def.dst_mac, str);
    PrintDebug("Dst Mac (%s), dst_qual (%d)\n",
               str, route->route_def.dst_mac_qual);

    PrintDebug("Src dev id (%d), src type (%d)\n",
               route->route_def.src_id,
               route->route_def.src_type);

    PrintDebug("Dst dev id (%d), dst type (%d)\n",
               route->route_def.dst_id,
               route->route_def.dst_type);

    if (route->route_def.dst_type == LINK_INTERFACE) {
        PrintDebug("dst_dev (%p), dst_dev_id (%d), dst_dev_input (%p), dst_dev_data (%p)\n",
                   route->dst_dev,
                   route->dst_dev->dev_id,
                   route->dst_dev->input,
                   route->dst_dev->private_data);
    }
}
static void dump_routes() {
    struct vnet_route_info * route = NULL;
    int i = 0;

    PrintDebug("\n========Dump routes start ============\n");

    list_for_each_entry(route, &(vnet_state.routes), node) {
        PrintDebug("\nroute %d:\n", i++);
        print_route(route);
    }

    PrintDebug("\n========Dump routes end ============\n");
}
#endif
/*
 * A VNET packet is a packed struct with the hashed fields grouped together.
 * This means we can generate the hash from an offset into the pkt struct.
 */
static inline uint_t hash_fn(addr_t hdr_ptr) {
    uint8_t * hdr_buf = (uint8_t *)hdr_ptr;

    return v3_hash_buffer(hdr_buf, VNET_HASH_SIZE);
}
static inline int hash_eq(addr_t key1, addr_t key2) {
    return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
}
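/* hash_fn/hash_eq are the callback pair handed to v3_create_htable() in
 * the init code below. Keys are the first VNET_HASH_SIZE bytes of a
 * packet's hash_buf, so packets with identical hashed header fields share
 * one cached route_list. */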
static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
    memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);

    if (v3_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
        PrintError("Vnet: Failed to insert a new route entry into the cache\n");
        return -1;
    }

    return 0;
}
static int clear_hash_cache() {
    v3_free_htable(vnet_state.route_cache, 1, 1);
    vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);

    return 0;
}
static int look_into_cache(const struct v3_vnet_pkt * pkt, struct route_list ** routes) {
    *routes = (struct route_list *)v3_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));

    return 0;
}
static struct vnet_dev * find_dev_by_id(int idx) {
    struct vnet_dev * dev = NULL;

    list_for_each_entry(dev, &(vnet_state.devs), node) {
        int dev_id = dev->dev_id;

        if (dev_id == idx) {
            return dev;
        }
    }

    return NULL;
}
static struct vnet_dev * find_dev_by_mac(char mac[6]) {
    struct vnet_dev * dev = NULL;

    list_for_each_entry(dev, &(vnet_state.devs), node) {
        if (memcmp(dev->mac_addr, mac, 6) == 0) {
            return dev;
        }
    }

    return NULL;
}
int get_device_id_by_mac(char mac[6]) {
    struct vnet_dev * dev = find_dev_by_mac(mac);

    if (dev == NULL) {
        return -1;
    }

    return dev->dev_id;
}
int v3_vnet_add_route(struct v3_vnet_route route) {
    struct vnet_route_info * new_route = NULL;
    addr_t flags;

    new_route = (struct vnet_route_info *)V3_Malloc(sizeof(struct vnet_route_info));

    if (new_route == NULL) {
        PrintError("Vnet: Malloc fails\n");
        return -1;
    }

    memset(new_route, 0, sizeof(struct vnet_route_info));

    PrintDebug("Vnet: vnet_add_route_entry: dst_id: %d, dst_type: %d\n",
               route.dst_id, route.dst_type);

    memcpy(new_route->route_def.src_mac, route.src_mac, 6);
    memcpy(new_route->route_def.dst_mac, route.dst_mac, 6);
    new_route->route_def.src_mac_qual = route.src_mac_qual;
    new_route->route_def.dst_mac_qual = route.dst_mac_qual;
    new_route->route_def.dst_id = route.dst_id;
    new_route->route_def.dst_type = route.dst_type;
    new_route->route_def.src_id = route.src_id;
    new_route->route_def.src_type = route.src_type;

    if (new_route->route_def.dst_type == LINK_INTERFACE) {
        new_route->dst_dev = find_dev_by_id(new_route->route_def.dst_id);

        if (new_route->dst_dev == NULL) {
            // refuse the route rather than crash later when dst_dev->input is called
            PrintError("Vnet: Add route: cannot find device %d\n", new_route->route_def.dst_id);
            V3_Free(new_route);
            return -1;
        }

        PrintDebug("Vnet: Add route, got device: dev_id %d, input : %p, private_data %p\n",
                   new_route->dst_dev->dev_id, new_route->dst_dev->input, new_route->dst_dev->private_data);
    }

    if (new_route->route_def.src_type == LINK_INTERFACE) {
        new_route->src_dev = find_dev_by_id(new_route->route_def.src_id);
    }

    flags = v3_lock_irqsave(vnet_state.lock);

    list_add(&(new_route->node), &(vnet_state.routes));
    vnet_state.num_routes++;

    v3_unlock_irqrestore(vnet_state.lock, flags);

    // a new route invalidates previously cached lookups
    clear_hash_cache();

#ifdef CONFIG_DEBUG_VNET
    dump_routes();
#endif

    return 0;
}
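/*
 * Usage sketch (illustrative only; dev_id stands for a value previously
 * returned by v3_vnet_add_dev()):
 *
 *   struct v3_vnet_route route;
 *   memset(&route, 0, sizeof(route));
 *   route.src_mac_qual = MAC_ANY;        // wildcard on both MAC addresses
 *   route.dst_mac_qual = MAC_ANY;
 *   route.src_type = LINK_ANY;           // accept packets from any link
 *   route.dst_type = LINK_INTERFACE;     // deliver to a local device
 *   route.dst_id = dev_id;
 *   v3_vnet_add_route(route);
 */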
// At the end we allocate a route_list.
// This list will be inserted into the cache, so we don't need to free it.
static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
    struct vnet_route_info * route = NULL;
    struct route_list * matches = NULL;
    int num_matches = 0;
    int max_rank = 0;
    int i = 0;
    struct list_head match_list;
    struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
    uint8_t src_type = pkt->src_type;
    uint32_t src_link = pkt->src_id;

#ifdef CONFIG_DEBUG_VNET
    {
        char src_str[100];
        char dst_str[100];

        mac_to_string(hdr->src_mac, src_str);
        mac_to_string(hdr->dst_mac, dst_str);
        PrintDebug("Vnet: match_route. pkt: SRC(%s), DEST(%s)\n", src_str, dst_str);
    }
#endif

    INIT_LIST_HEAD(&match_list);
#define UPDATE_MATCHES(rank) do {                               \
        if (max_rank < (rank)) {                                \
            max_rank = (rank);                                  \
            INIT_LIST_HEAD(&match_list);                        \
                                                                \
            list_add(&(route->match_node), &match_list);        \
            num_matches = 1;                                    \
        } else if (max_rank == (rank)) {                        \
            list_add(&(route->match_node), &match_list);        \
            num_matches++;                                      \
        }                                                       \
    } while (0)
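/* The rank passed to UPDATE_MATCHES encodes route specificity: wildcard
 * routes rank lowest, single-MAC matches rank in the middle, and routes
 * pinning both source and destination MACs rank highest. Only routes at
 * the highest observed rank survive in match_list, so a packet is
 * forwarded along its most specific matching routes only. */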
    list_for_each_entry(route, &(vnet_state.routes), node) {
        struct v3_vnet_route * route_def = &(route->route_def);

        // CHECK SOURCE TYPE HERE
        if ( (route_def->src_type != LINK_ANY) &&
             ( (route_def->src_type != src_type) ||
               ( (route_def->src_id != src_link) &&
                 (route_def->src_id != (uint32_t)-1)))) {
            continue;
        }

        if ((route_def->dst_mac_qual == MAC_ANY) &&
            (route_def->src_mac_qual == MAC_ANY)) {
            UPDATE_MATCHES(3);
        }

        if (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) {
            if (route_def->src_mac_qual != MAC_NOT) {
                if (route_def->dst_mac_qual == MAC_ANY) {
                    UPDATE_MATCHES(6);
                } else if ((route_def->dst_mac_qual != MAC_NOT) &&
                           (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0)) {
                    UPDATE_MATCHES(8);
                }
            }
        }

        if (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0) {
            if (route_def->dst_mac_qual != MAC_NOT) {
                if (route_def->src_mac_qual == MAC_ANY) {
                    UPDATE_MATCHES(6);
                } else if ((route_def->src_mac_qual != MAC_NOT) &&
                           (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
                    UPDATE_MATCHES(8);
                }
            }
        }

        if ((route_def->dst_mac_qual == MAC_NOT) &&
            (memcmp(route_def->dst_mac, hdr->dst_mac, 6) != 0)) {
            if (route_def->src_mac_qual == MAC_ANY) {
                UPDATE_MATCHES(5);
            } else if ((route_def->src_mac_qual != MAC_NOT) &&
                       (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0)) {
                UPDATE_MATCHES(7);
            }
        }

        if ((route_def->src_mac_qual == MAC_NOT) &&
            (memcmp(route_def->src_mac, hdr->src_mac, 6) != 0)) {
            if (route_def->dst_mac_qual == MAC_ANY) {
                UPDATE_MATCHES(5);
            } else if ((route_def->dst_mac_qual != MAC_NOT) &&
                       (memcmp(route_def->dst_mac, hdr->dst_mac, 6) == 0)) {
                UPDATE_MATCHES(7);
            }
        }

        // default route entries use MAC_NONE as the dst qualifier
        if ( (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) &&
             (route_def->dst_mac_qual == MAC_NONE)) {
            UPDATE_MATCHES(4);
        }
    }
    PrintDebug("Vnet: match_route: Matches=%d\n", num_matches);

    if (num_matches == 0) {
        return NULL;
    }

    matches = (struct route_list *)V3_Malloc(sizeof(struct route_list) +
                                             (sizeof(struct vnet_route_info *) * num_matches));

    matches->num_routes = num_matches;

    list_for_each_entry(route, &match_list, match_node) {
        matches->routes[i++] = route;
    }

    return matches;
}
static int flush_bridge_pkts(struct vnet_brg_dev * bridge){
    addr_t flags;
    int num = 0;
    int start = 0;
    int send = 0;
    struct v3_vnet_bridge_input_args args;
    int cpu_id;
    int current_core = V3_Get_CPU();

    if (bridge == NULL) {
        PrintDebug("VNET: No bridge to send packets to\n");
        return -1;
    }

    // safe to dereference now that bridge is known to be non-NULL
    cpu_id = bridge->vm->cores[0].cpu_id;

    flags = v3_lock_irqsave(bridge->recv_buf.lock);

    num = bridge->recv_buf.num;
    start = bridge->recv_buf.start;

    bridge->recv_buf.num -= num;
    bridge->recv_buf.start += num;
    bridge->recv_buf.start %= BRIDGE_BUF_SIZE;

    v3_unlock_irqrestore(bridge->recv_buf.lock, flags);

    if (bridge->disabled) {
        PrintDebug("VNET: In flush bridge pkts: Bridge is disabled\n");
        return -1;
    }

    if (num == 0) {
        // nothing buffered, nothing to deliver
        return 0;
    }

    if (num <= 2) {
        PrintDebug("VNET: In flush bridge pkts: %d\n", num);
    }

    PrintDebug("VNET: flush bridge pkts to bridge, cur_cpu %d, bridge_core: %d\n", current_core, cpu_id);

    if (current_core == cpu_id) {
        if ((start + num) < BRIDGE_BUF_SIZE) {
            bridge->input(bridge->vm, &(bridge->recv_buf.pkts[start]), num, bridge->private_data);
        } else {
            // packets wrap around the ring buffer: deliver them in two chunks
            bridge->input(bridge->vm, &(bridge->recv_buf.pkts[start]), (BRIDGE_BUF_SIZE - start), bridge->private_data);

            send = num - (BRIDGE_BUF_SIZE - start);
            bridge->input(bridge->vm, &(bridge->recv_buf.pkts[0]), send, bridge->private_data);
        }
    } else {
        // deliver the packets on the bridge's core via a cross-core call
        args.vm = bridge->vm;
        args.private_data = bridge->private_data;

        if ((start + num) < BRIDGE_BUF_SIZE) {
            args.pkt_num = num;
            args.vnet_pkts = &(bridge->recv_buf.pkts[start]);
            V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
        } else {
            args.pkt_num = BRIDGE_BUF_SIZE - start;
            args.vnet_pkts = &(bridge->recv_buf.pkts[start]);
            V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);

            send = num - (BRIDGE_BUF_SIZE - start);
            args.pkt_num = send;
            args.vnet_pkts = &(bridge->recv_buf.pkts[0]);
            V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
        }
    }

    PrintDebug("VNET: flushed bridge pkts %d\n", num);

    return 0;
}
static int send_to_bridge(struct v3_vnet_pkt * pkt){
    struct vnet_brg_dev * bridge = vnet_state.bridge;

    if (bridge == NULL) {
        PrintDebug("VNET: No bridge to send packets to\n");
        return -1;
    }

    if (bridge->max_delayed_pkts <= 1) {
        // unbatched mode: hand the packet to the bridge immediately
        if (bridge->disabled) {
            PrintDebug("VNET: Bridge disabled\n");
            return -1;
        }

        bridge->input(bridge->vm, pkt, 1, bridge->private_data);

        PrintDebug("VNET: sent one packet to the bridge\n");
    }

    return 0;
}
int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
    struct route_list * matched_routes = NULL;
    addr_t flags;
    int i;

#ifdef CONFIG_DEBUG_VNET
    {
        char src_str[100];
        char dest_str[100];
        struct eth_hdr * hdr = (struct eth_hdr *)(pkt->header);
        int cpu = V3_Get_CPU();

        mac_to_string(hdr->src_mac, src_str);
        mac_to_string(hdr->dst_mac, dest_str);
        PrintDebug("Vnet: on cpu %d, HandleDataOverLink. SRC(%s), DEST(%s), pkt size: %d\n", cpu, src_str, dest_str, pkt->size);
    }
#endif

    flags = v3_lock_irqsave(vnet_state.lock);

    look_into_cache(pkt, &matched_routes);

    if (matched_routes == NULL) {
        PrintDebug("Vnet: send pkt: cache miss, looking up the routing table\n");
        matched_routes = match_route(pkt);

        if (matched_routes) {
            add_route_to_cache(pkt, matched_routes);
        } else {
            PrintDebug("Vnet: Could not find a route for the packet... discarding it\n");
            v3_unlock_irqrestore(vnet_state.lock, flags);
            return 0; // a routeless packet is dropped, not an error
        }
    }

    v3_unlock_irqrestore(vnet_state.lock, flags);

    PrintDebug("Vnet: send pkt: route matches %d\n", matched_routes->num_routes);

    for (i = 0; i < matched_routes->num_routes; i++) {
        struct vnet_route_info * route = matched_routes->routes[i];

        if (route->route_def.dst_type == LINK_EDGE) {
            // edge routes leave this VNET instance through the bridge
            pkt->dst_type = LINK_EDGE;
            pkt->dst_id = route->route_def.dst_id;

            if (send_to_bridge(pkt) == -1) {
                PrintDebug("VNET: Packet not sent properly to bridge\n");
                continue;
            }
        } else if (route->route_def.dst_type == LINK_INTERFACE) {
            // interface routes deliver directly to a local device's input handler
            if (route->dst_dev->input(route->dst_dev->vm, pkt, route->dst_dev->private_data) == -1) {
                PrintDebug("VNET: Packet not sent properly\n");
                continue;
            }
        } else {
            PrintDebug("Vnet: Unknown route destination type\n");
            continue;
        }

        PrintDebug("Vnet: v3_vnet_send_pkt: forwarded packet according to route %d\n", i);
    }

    return 0;
}
void v3_vnet_send_pkt_xcall(void * data) {
    struct v3_vnet_pkt * pkt = (struct v3_vnet_pkt *)data;
    v3_vnet_send_pkt(pkt, NULL);
}
void v3_vnet_polling() {
    addr_t flags;
    int num = 0;
    struct v3_vnet_pkt * buf = NULL;

    PrintDebug("In vnet polling: cpu %d\n", V3_Get_CPU());

    flags = v3_lock_irqsave(vnet_state.in_buf.lock);

    num = vnet_state.in_buf.num;

    PrintDebug("VNET: polling pkts %d\n", num);

    while (num > 0) {
        buf = &(vnet_state.in_buf.pkts[vnet_state.in_buf.start]);

        v3_vnet_send_pkt(buf, NULL);

        vnet_state.in_buf.num--;
        vnet_state.in_buf.start++;
        vnet_state.in_buf.start %= BRIDGE_BUF_SIZE;

        num--;
    }

    v3_unlock_irqrestore(vnet_state.in_buf.lock, flags);
}
int v3_vnet_rx(uint8_t * buf, uint16_t size, uint16_t src_id, uint8_t src_type) {
    addr_t flags;
    int end;
    struct v3_vnet_pkt * pkt = NULL;

    flags = v3_lock_irqsave(vnet_state.in_buf.lock);

    end = vnet_state.in_buf.end;
    pkt = &(vnet_state.in_buf.pkts[end]);

    // '>=' here: once num reaches BRIDGE_BUF_SIZE every slot is in use
    if (vnet_state.in_buf.num >= BRIDGE_BUF_SIZE){
        PrintDebug("VNET: bridge rx: buffer full\n");
        v3_unlock_irqrestore(vnet_state.in_buf.lock, flags);
        return 0;
    }

    vnet_state.in_buf.num++;
    vnet_state.in_buf.end++;
    vnet_state.in_buf.end %= BRIDGE_BUF_SIZE;

    pkt->size = size;
    pkt->src_id = src_id;
    pkt->src_type = src_type;
    memcpy(pkt->header, buf, ETHERNET_HEADER_LEN);
    memcpy(pkt->data, buf, size);

    v3_unlock_irqrestore(vnet_state.in_buf.lock, flags);

    return 0;
}
int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t mac[6],
                    int (*netif_input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data),
                    void * priv_data){
    struct vnet_dev * new_dev = NULL;
    addr_t flags;

    new_dev = (struct vnet_dev *)V3_Malloc(sizeof(struct vnet_dev));

    if (new_dev == NULL) {
        PrintError("VNET: Malloc fails\n");
        return -1;
    }

    memcpy(new_dev->mac_addr, mac, 6);
    new_dev->input = netif_input;
    new_dev->private_data = priv_data;
    new_dev->vm = vm;
    new_dev->dev_id = 0;

    flags = v3_lock_irqsave(vnet_state.lock);

    if (!find_dev_by_mac(mac)) {
        list_add(&(new_dev->node), &(vnet_state.devs));
        new_dev->dev_id = ++vnet_state.num_devs;
    }

    v3_unlock_irqrestore(vnet_state.lock, flags);

    // if the device was found previously, the id should still be 0
    if (new_dev->dev_id == 0) {
        PrintError("Device already exists\n");
        V3_Free(new_dev); // don't leak the duplicate registration
        return -1;
    }

    PrintDebug("Vnet: Add Device: dev_id %d, input : %p, private_data %p\n",
               new_dev->dev_id, new_dev->input, new_dev->private_data);

    return new_dev->dev_id;
}
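/*
 * Usage sketch (illustrative; virtio_input and virtio_state are
 * hypothetical caller-side names):
 *
 *   static int virtio_input(struct v3_vm_info * vm,
 *                           struct v3_vnet_pkt * pkt,
 *                           void * private_data) {
 *       // copy pkt->size bytes from pkt->data into the device's rx ring
 *       return 0;
 *   }
 *
 *   int dev_id = v3_vnet_add_dev(vm, mac, virtio_input, virtio_state);
 *   if (dev_id < 0) {
 *       // a device with this MAC is already registered
 *   }
 */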
void v3_vnet_heartbeat(struct guest_info * core){
    static uint64_t last_time = 0;
    static uint64_t cur_time = 0;

    if (vnet_state.bridge == NULL) {
        return;
    }

    if (vnet_state.bridge->max_delayed_pkts > 1) {
        if (V3_Get_CPU() != vnet_state.bridge->vm->cores[0].cpu_id) {
            // rdtscll() reads the cycle counter; max_latency is in cycles
            rdtscll(cur_time);

            if ((cur_time - last_time) >= vnet_state.bridge->max_latency) {
                last_time = cur_time;
                flush_bridge_pkts(vnet_state.bridge);
            }
        }
    }

    vnet_state.bridge->polling_pkt(vnet_state.bridge->vm, vnet_state.bridge->private_data);
}
int v3_vnet_add_bridge(struct v3_vm_info * vm,
                       int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data),
                       void (*xcall_input)(void * data),
                       int (*poll_pkt)(struct v3_vm_info * vm, void * private_data),
                       uint16_t max_delayed_pkts,
                       long max_latency,
                       void * priv_data) {
    addr_t flags;
    int bridge_free = 0;
    int i;
    struct vnet_brg_dev * tmp_bridge = NULL;

    flags = v3_lock_irqsave(vnet_state.lock);

    if (vnet_state.bridge == NULL) {
        // reserve the bridge slot so no one else can register while we set up
        vnet_state.bridge = (void *)1;
        bridge_free = 1;
    }

    v3_unlock_irqrestore(vnet_state.lock, flags);

    if (bridge_free == 0) {
        PrintError("Bridge already set\n");
        return -1;
    }

    tmp_bridge = (struct vnet_brg_dev *)V3_Malloc(sizeof(struct vnet_brg_dev));

    if (tmp_bridge == NULL) {
        PrintError("Malloc Fails\n");
        vnet_state.bridge = NULL;
        return -1;
    }

    tmp_bridge->vm = vm;
    tmp_bridge->input = input;
    tmp_bridge->xcall_input = xcall_input;
    tmp_bridge->polling_pkt = poll_pkt;
    tmp_bridge->private_data = priv_data;
    tmp_bridge->disabled = 0;

    // initialize the receiving buffer
    tmp_bridge->recv_buf.start = 0;
    tmp_bridge->recv_buf.end = 0;
    tmp_bridge->recv_buf.num = 0;

    if (v3_lock_init(&(tmp_bridge->recv_buf.lock)) == -1){
        PrintError("VNET: add bridge, failed to initialize recv buf lock\n");
        vnet_state.bridge = NULL;
        V3_Free(tmp_bridge);
        return -1;
    }

    for (i = 0; i < BRIDGE_BUF_SIZE; i++){
        tmp_bridge->recv_buf.pkts[i].data = &(tmp_bridge->recv_buf.datas[i * ETHERNET_PACKET_LEN]);
    }

    tmp_bridge->max_delayed_pkts = (max_delayed_pkts < BRIDGE_BUF_SIZE) ? max_delayed_pkts : BRIDGE_BUF_SIZE;
    tmp_bridge->max_latency = max_latency;

    // make this atomic to avoid possible race conditions
    flags = v3_lock_irqsave(vnet_state.lock);
    vnet_state.bridge = tmp_bridge;
    v3_unlock_irqrestore(vnet_state.lock, flags);

    return 0;
}
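/* Only one bridge may be registered at a time. Callers supply three
 * callbacks: input() to receive packet batches on the bridge's own core,
 * xcall_input() as the wrapper V3_Call_On_CPU() invokes from other cores,
 * and poll_pkt() which v3_vnet_heartbeat() uses to pull packets in.
 * Passing max_delayed_pkts == 1 disables batching entirely. */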
int v3_vnet_disable_bridge() {
    addr_t flags;

    flags = v3_lock_irqsave(vnet_state.lock);

    if (vnet_state.bridge != NULL) {
        vnet_state.bridge->disabled = 1;
    }

    v3_unlock_irqrestore(vnet_state.lock, flags);

    return 0;
}


int v3_vnet_enable_bridge() {
    addr_t flags;

    flags = v3_lock_irqsave(vnet_state.lock);

    if (vnet_state.bridge != NULL) {
        vnet_state.bridge->disabled = 0;
    }

    v3_unlock_irqrestore(vnet_state.lock, flags);

    return 0;
}
int v3_init_vnet() {
    int i;

    memset(&vnet_state, 0, sizeof(vnet_state));

    INIT_LIST_HEAD(&(vnet_state.routes));
    INIT_LIST_HEAD(&(vnet_state.devs));

    vnet_state.num_devs = 0;
    vnet_state.num_routes = 0;

    PrintDebug("VNET: Links and Routes tables initiated\n");

    if (v3_lock_init(&(vnet_state.lock)) == -1){
        PrintError("VNET: Failure to init lock for routes table\n");
        return -1;
    }

    PrintDebug("VNET: Locks initiated\n");

    // initialize the incoming packet buffer
    vnet_state.in_buf.start = 0;
    vnet_state.in_buf.end = 0;
    vnet_state.in_buf.num = 0;

    if (v3_lock_init(&(vnet_state.in_buf.lock)) == -1){
        PrintError("VNET: failed to initialize the incoming packet buffer lock\n");
        return -1;
    }

    for (i = 0; i < BRIDGE_BUF_SIZE; i++){
        vnet_state.in_buf.pkts[i].data = &(vnet_state.in_buf.datas[i * ETHERNET_PACKET_LEN]);
    }

    PrintDebug("VNET: Receiving buffer initiated\n");

    vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);

    if (vnet_state.route_cache == NULL) {
        PrintError("Vnet: Route Cache Init Fails\n");
        return -1;
    }

    PrintDebug("VNET: initiated\n");

    return 0;
}
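/*
 * Typical bring-up order (a sketch; the callers live elsewhere in
 * Palacios, and the argument names here are placeholders):
 *
 *   v3_init_vnet();                             // once, at VMM startup
 *   dev_id = v3_vnet_add_dev(vm, mac, input_fn, dev_state);
 *   v3_vnet_add_route(route);                   // wire devices together
 *   v3_vnet_add_bridge(vm, input_fn, xcall_fn,  // optional external bridge
 *                      poll_fn, 1, 0, brg_state);
 *
 * After that, frames enter via v3_vnet_rx() or v3_vnet_send_pkt(), and
 * v3_vnet_polling() drains the incoming buffer.
 */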