uint8_t mac_addr[6];
struct v3_vm_info * vm;
- int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data);
+ int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data);
void * private_data;
int dev_id;
} __attribute__((packed));
+/* Capacity, in packets, of the fixed-size VNET ring buffers. */
+#define BRIDGE_BUF_SIZE 512
+/* Fixed-capacity packet ring: pkts[] entries from start to end (num of
+ * them) are queued; init code points each pkt's data field at its own
+ * ETHERNET_PACKET_LEN slot inside datas[]. Guarded by lock. */
+struct bridge_pkts_buf {
+ int start, end;
+ int num;
+ v3_lock_t lock;
+ struct v3_vnet_pkt pkts[BRIDGE_BUF_SIZE];
+ uint8_t datas[ETHERNET_PACKET_LEN*BRIDGE_BUF_SIZE];
+};
+
+/* The single bridge endpoint that LINK_EDGE routes forward to;
+ * registered via v3_vnet_add_bridge(). */
+struct vnet_brg_dev {
+ struct v3_vm_info * vm;
+
+ int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data);
+ void (*xcall_input)(void *data);
+ int (*polling_pkt)(struct v3_vm_info * vm, void *private_data);
+
+ int disabled;
+
+ uint16_t max_delayed_pkts;
+ long max_latency; //in cycles
+ void * private_data;
+/* NOTE(review): __attribute__((packed)) on a struct holding only
+ * pointers and integers has no serialization purpose visible in this
+ * file -- confirm it is intentional. */
+} __attribute__((packed));
+
+
};
-
-
struct route_list {
uint8_t hash_buf[VNET_HASH_SIZE];
/* Global VNET core state: registered routes and devices, the optional
 * bridge, the route-lookup cache, and the incoming packet ring buffer
 * (which replaces the removed gen_queue inpkt_q). */
static struct {
    struct list_head routes;
    struct list_head devs;
-
+
    int num_routes;
    int num_devs;
+ struct vnet_brg_dev *bridge;
+
    v3_lock_t lock;
- struct gen_queue * inpkt_q;
    struct hashtable * route_cache;
+ struct bridge_pkts_buf in_buf; //incoming packets buffer
} vnet_state;
#ifdef CONFIG_DEBUG_VNET
/* Format a 6-byte MAC address into buf as "aa:bb:cc:dd:ee:ff".
 * buf must hold at least 18 bytes (12 hex digits + 5 colons + NUL),
 * and the snprintf bound matches that minimum. Callers in this file
 * pass buffers as small as 50 bytes, so claiming a 100-byte bound (as
 * the '+' line did) risked overflow; "%d" on a possibly-signed char
 * also printed sign-extended decimals rather than hex octets. Masking
 * with 0xff guarantees each octet prints as exactly two hex digits. */
static inline void mac_to_string(char mac[6], char * buf) {
    snprintf(buf, 18, "%02x:%02x:%02x:%02x:%02x:%02x",
	     mac[0] & 0xff, mac[1] & 0xff, mac[2] & 0xff,
	     mac[3] & 0xff, mac[4] & 0xff, mac[5] & 0xff);
}
+/* Debug helper: print one routing entry (MACs, qualifiers, endpoints). */
+static void print_route(struct vnet_route_info *route){
+ char str[50];
+/* NOTE(review): mac_to_string() above passes snprintf a 100-byte bound
+ * while str is only 50 bytes -- overflow risk; confirm the bound. */
+
+ mac_to_string(route->route_def.src_mac, str);
+ PrintDebug("Src Mac (%s), src_qual (%d)\n",
+ str, route->route_def.src_mac_qual);
+ mac_to_string(route->route_def.dst_mac, str);
+ PrintDebug("Dst Mac (%s), dst_qual (%d)\n",
+ str, route->route_def.dst_mac_qual);
+ PrintDebug("Src dev id (%d), src type (%d)",
+ route->route_def.src_id,
+ route->route_def.src_type);
+ PrintDebug("Dst dev id (%d), dst type (%d)\n",
+ route->route_def.dst_id,
+ route->route_def.dst_type);
+ if (route->route_def.dst_type == LINK_INTERFACE) {
+/* NOTE(review): dst_dev is filled from find_dev_by_id(), which can
+ * return NULL; this print dereferences it unconditionally -- confirm
+ * the device is always registered before its route is printed. */
+ PrintDebug("dst_dev (%p), dst_dev_id (%d), dst_dev_input (%p), dst_dev_data (%p)\n",
+ route->dst_dev,
+ route->dst_dev->dev_id,
+ route->dst_dev->input,
+ route->dst_dev->private_data);
+ }
+}
+
+/* Debug helper: walk vnet_state.routes and print every entry.
+ * No lock is taken here -- callers must keep the route list stable. */
+static void dump_routes(){
+ struct vnet_route_info *route;
+
+ int i = 0;
+ PrintDebug("\n========Dump routes starts ============\n");
+ list_for_each_entry(route, &(vnet_state.routes), node) {
+ PrintDebug("\nroute %d:\n", ++i);
+
+ print_route(route);
+ }
+ PrintDebug("\n========Dump routes end ============\n");
+}
+
#endif
* This means we can generate the hash from an offset into the pkt struct
*/
/* Route-cache hash: hash the VNET_HASH_SIZE bytes that hdr_ptr points
 * at (the packet's header-derived key). */
static inline uint_t hash_fn(addr_t hdr_ptr) {
- uint8_t * hdr_buf = (uint8_t *)&(hdr_ptr);
-
+ /* Fix: hash the buffer hdr_ptr refers to; the removed line hashed
+  * the address of the local parameter itself. */
+ uint8_t * hdr_buf = (uint8_t *)hdr_ptr;
+
    return v3_hash_buffer(hdr_buf, VNET_HASH_SIZE);
}
/* Route-cache key equality: byte-compare VNET_HASH_SIZE bytes. */
-static inline int hash_eq(addr_t key1, addr_t key2) {
+static inline int hash_eq(addr_t key1, addr_t key2) {
    return (memcmp((uint8_t *)key1, (uint8_t *)key2, VNET_HASH_SIZE) == 0);
}
-
/* Cache a matched route list keyed by the packet's hash_buf. The list
 * is owned by the cache after a successful insert. */
-static int add_route_to_cache(struct v3_vnet_pkt * pkt, struct route_list * routes) {
+static int add_route_to_cache(const struct v3_vnet_pkt * pkt, struct route_list * routes) {
    memcpy(routes->hash_buf, pkt->hash_buf, VNET_HASH_SIZE);
    if (v3_htable_insert(vnet_state.route_cache, (addr_t)routes->hash_buf, (addr_t)routes) == 0) {
	return 0;
    }
/* NOTE(review): this hunk elides add_route_to_cache's failure path --
 * the lines between the insert and the next function are not shown. */
/* Cache lookup. Fix: search by pkt->hash_buf so the lookup key matches
 * the key used at insert time; the removed line searched by the packet
 * pointer itself, which does not match how entries were inserted. */
-static int look_into_cache(struct v3_vnet_pkt * pkt, struct route_list ** routes) {
+static int look_into_cache(const struct v3_vnet_pkt * pkt, struct route_list ** routes) {
-    *routes = (struct route_list *)v3_htable_search(vnet_state.route_cache, (addr_t)pkt);
+    *routes = (struct route_list *)v3_htable_search(vnet_state.route_cache, (addr_t)(pkt->hash_buf));
    return 0;
}
/* Find a registered device by id; returns NULL when absent. Now static
 * (internal to the VNET core). NOTE(review): the loop body with the id
 * comparison is elided by this hunk. */
-struct vnet_dev * find_dev_by_id(int idx) {
+static struct vnet_dev * find_dev_by_id(int idx) {
    struct vnet_dev * dev = NULL;
    list_for_each_entry(dev, &(vnet_state.devs), node) {
    return NULL;
}
/* Find a registered device by 6-byte MAC; returns NULL when absent.
 * Callers in this file take vnet_state.lock around list mutation. */
-static struct vnet_dev * find_dev_by_mac(char * name) {
+static struct vnet_dev * find_dev_by_mac(char mac[6]) {
    struct vnet_dev * dev = NULL;
    list_for_each_entry(dev, &(vnet_state.devs), node) {
- if (!memcmp(dev->mac_addr, name, 6))
+ if (!memcmp(dev->mac_addr, mac, 6))
	    return dev;
    }
    return NULL;
}
+int get_device_id_by_mac(char mac[6]){
+
+ struct vnet_dev *dev = find_dev_by_mac(mac);
+
+ if (dev == NULL)
+ return -1;
+
+ return dev->dev_id;
+}
+
/* Register a new routing entry (taken by value). Returns 0 on success.
 * NOTE(review): the declaration of 'flags' is elided by this hunk. */
int v3_vnet_add_route(struct v3_vnet_route route) {
    struct vnet_route_info * new_route = NULL;
    /* NOTE(review): V3_Malloc result is memset and used without a NULL
     * check. */
    new_route = (struct vnet_route_info *)V3_Malloc(sizeof(struct vnet_route_info));
    memset(new_route, 0, sizeof(struct vnet_route_info));
-    PrintDebug("Vnet: vnet_add_route_entry\n");
+    PrintDebug("Vnet: vnet_add_route_entry: dst_id: %d, dst_type: %d\n",
+	route.dst_id, route.dst_type);
    /* Copy the route definition field by field (replaces the removed
     * whole-struct assignment). */
-    new_route->route_def = route;
+    memcpy(new_route->route_def.src_mac, route.src_mac, 6);
+    memcpy(new_route->route_def.dst_mac, route.dst_mac, 6);
+    new_route->route_def.src_mac_qual = route.src_mac_qual;
+    new_route->route_def.dst_mac_qual = route.dst_mac_qual;
+    new_route->route_def.dst_id = route.dst_id;
+    new_route->route_def.dst_type = route.dst_type;
+    new_route->route_def.src_id = route.src_id;
+    new_route->route_def.src_type = route.src_type;
-    /* TODO: Find devices */
    if (new_route->route_def.dst_type == LINK_INTERFACE) {
	new_route->dst_dev = find_dev_by_id(new_route->route_def.dst_id);
	/* NOTE(review): find_dev_by_id() may return NULL; the debug print
	 * below dereferences dst_dev unconditionally -- confirm the device
	 * is always registered before its route is added. */
+ PrintDebug("Vnet: Add route, get device: dev_id %d, input : %p, private_data %p\n",
+ new_route->dst_dev->dev_id, new_route->dst_dev->input, new_route->dst_dev->private_data);
    }
    if (new_route->route_def.src_type == LINK_INTERFACE) {
    }
    flags = v3_lock_irqsave(vnet_state.lock);
+
    list_add(&(new_route->node), &(vnet_state.routes));
    /* Cache invalidation moved inside the lock (the removed line below
     * cleared the cache after unlocking). */
+ clear_hash_cache();
+
    v3_unlock_irqrestore(vnet_state.lock, flags);
- clear_hash_cache();
+
+#ifdef CONFIG_DEBUG_VNET
+ dump_routes();
+#endif
    return 0;
}
// At the end allocate a route_list
// This list will be inserted into the cache so we don't need to free it
/* Match a packet against the route table; returns a freshly allocated
 * route_list of all matches, or NULL when nothing matches.
 * NOTE(review): this hunk elides the hdr/match_list declarations and
 * most qualifier checks (the UPDATE_MATCHES uses in between). */
-static struct route_list * match_route(struct v3_vnet_pkt * pkt) {
+static struct route_list * match_route(const struct v3_vnet_pkt * pkt) {
    struct vnet_route_info * route = NULL;
    struct route_list * matches = NULL;
    int num_matches = 0;
#ifdef CONFIG_DEBUG_VNET
    {
- char dst_str[18];
- char src_str[18];
+ char dst_str[100];
+ char src_str[100];
	mac_to_string(hdr->src_mac, src_str);
	mac_to_string(hdr->dst_mac, dst_str);
    }
    // Default route
    /* '&&' replaces '&': equivalent for these 0/1 operands, but the
     * logical operator is the intent and short-circuits. */
- if ( (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) &
+ if ( (memcmp(route_def->src_mac, hdr->src_mac, 6) == 0) &&
	(route_def->dst_mac_qual == MAC_NONE)) {
	UPDATE_MATCHES(4);
    }
	return NULL;
    }
    /* NOTE(review): V3_Malloc result is dereferenced without a NULL
     * check. */
- matches = V3_Malloc(sizeof(struct route_list) +
-		(sizeof(struct vnet_route_info *) * num_matches));
+ matches = (struct route_list *)V3_Malloc(sizeof(struct route_list) +
+		(sizeof(struct vnet_route_info *) * num_matches));
    matches->num_routes = num_matches;
    {
	int i = 0;
	/* Iterate via match_node (the temporary match-list link) rather
	 * than node (the global route-list link). */
- list_for_each_entry(route, &match_list, node) {
+ list_for_each_entry(route, &match_list, match_node) {
	    matches->routes[i++] = route;
	}
    }
    return matches;
}
-static int handle_one_pkt(struct v3_vnet_pkt * pkt) {
+/* Disabled batched-flush path: drain the bridge's receive ring either
+ * directly (when already on the bridge core) or via cross-core call.
+ * NOTE(review): this code references bridge->recv_buf, a field NOT
+ * declared in struct vnet_brg_dev above -- likely why it is #if 0'd. */
+#if 0
+static int flush_bridge_pkts(struct vnet_brg_dev *bridge){
+ unsigned long flags;
+ int num, start, send;
+ struct v3_vnet_bridge_input_args args;
+/* NOTE(review): bridge->vm is dereferenced here, before the NULL check
+ * below -- the check can never trigger usefully. */
+ int cpu_id = bridge->vm->cores[0].cpu_id;
+ int current_core = V3_Get_CPU();
+
+ if (bridge == NULL) {
+ PrintDebug("VNET: No bridge to sent data to links\n");
+ return -1;
+ }
+
+ flags = v3_lock_irqsave(bridge->recv_buf.lock);
+
+ num = bridge->recv_buf.num;
+ start = bridge->recv_buf.start;
+
+ bridge->recv_buf.num -= num;
+ bridge->recv_buf.start += num;
+ bridge->recv_buf.start %= BRIDGE_BUF_SIZE;
+
+ v3_unlock_irqrestore(bridge->recv_buf.lock, flags);
+
+
+ if(bridge->disabled){
+ PrintDebug("VNET: In flush bridge pkts: Bridge is disabled\n");
+ return -1;
+ }
+
+ if(num <= 2 && num > 0){
+ PrintDebug("VNET: In flush bridge pkts: %d\n", num);
+ }
+
+ if(num > 0) {
+ PrintDebug("VNET: In flush bridge pkts to bridge, cur_cpu %d, brige_core: %d\n", current_core, cpu_id);
+/* Deliver in at most two contiguous chunks when the ring wraps. */
+ if (current_core == cpu_id){
+ if ((start + num) < BRIDGE_BUF_SIZE){
+ bridge->input(bridge->vm, &(bridge->recv_buf.pkts[start]), num, bridge->private_data);
+ }else {
+ bridge->input(bridge->vm, &(bridge->recv_buf.pkts[start]), (BRIDGE_BUF_SIZE - start), bridge->private_data);
+ send = num - (BRIDGE_BUF_SIZE - start);
+ bridge->input(bridge->vm, &(bridge->recv_buf.pkts[0]), send, bridge->private_data);
+ }
+ }else {
+ args.vm = bridge->vm;
+ args.private_data = bridge->private_data;
+
+ if ((start + num) < BRIDGE_BUF_SIZE){
+ args.pkt_num = num;
+ args.vnet_pkts = &(bridge->recv_buf.pkts[start]);
+ V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
+ }else {
+ args.pkt_num = BRIDGE_BUF_SIZE - start;
+ args.vnet_pkts = &(bridge->recv_buf.pkts[start]);
+ V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
+
+ send = num - (BRIDGE_BUF_SIZE - start);
+ args.pkt_num = send;
+ args.vnet_pkts = &(bridge->recv_buf.pkts[0]);
+ V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
+ }
+ }
+
+ PrintDebug("VNET: flush bridge pkts %d\n", num);
+ }
+
+ return 0;
+}
+#endif
+
+/* Forward one packet to the registered bridge. When batching is off
+ * (max_delayed_pkts <= 1) the bridge input callback is invoked
+ * synchronously on the current core; the buffered/batched path is
+ * commented out below. Returns 0 on delivery, -1 on failure.
+ * NOTE(review): v3_vnet_add_bridge() briefly stores the sentinel
+ * (void *)1 in vnet_state.bridge; this function only NULL-checks, so a
+ * racing call could dereference the sentinel -- confirm. */
+static int send_to_bridge(struct v3_vnet_pkt * pkt){
+ struct vnet_brg_dev *bridge = vnet_state.bridge;
+
+ if (bridge == NULL) {
+ PrintDebug("VNET: No bridge to sent data to links\n");
+ return -1;
+ }
+
+ if(bridge->max_delayed_pkts <= 1){
+ if(bridge->disabled){
+/* NOTE(review): "diabled" typo in this debug message (left as-is). */
+ PrintDebug("VNET: Bridge diabled\n");
+ return -1;
+ }
+
+/*
+ //avoid the cross-core call here
+ int cpu_id = bridge->vm->cores[0].cpu_id;
+ struct v3_vnet_bridge_input_args args;
+
+ args.pkt_num = 1;
+ args.vm = bridge->vm;
+ args.vnet_pkts = pkt;
+ args.private_data = bridge->private_data;
+
+ V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
+*/
+ bridge->input(bridge->vm, pkt, 1, bridge->private_data);
+
+ PrintDebug("VNET: sent one packet to the bridge\n");
+ return 0;
+ }
+
+/*
+ unsigned long flags;
+ int end, num=0;
+ struct v3_vnet_pkt *buf;
+
+ PrintDebug("VNET: send_to_bridge\n");
+
+ flags = v3_lock_irqsave(bridge->recv_buf.lock);
+
+ if(bridge->disabled && bridge->recv_buf.num >= BRIDGE_BUF_SIZE){
+ PrintDebug("Bridge diabled and bridge receive buffer full\n");
+ v3_unlock_irqrestore(bridge->recv_buf.lock, flags);//maybe should move this after copy
+ num = bridge->recv_buf.num;
+ goto exit;
+ }
+
+ end = bridge->recv_buf.end;
+ buf = &(bridge->recv_buf.pkts[end]);
+
+ bridge->recv_buf.num ++;
+ bridge->recv_buf.end ++;
+ bridge->recv_buf.end %= BRIDGE_BUF_SIZE;
+
+ num = bridge->recv_buf.num;
+
+ v3_unlock_irqrestore(bridge->recv_buf.lock, flags);//maybe should move this after copy
+
+
+ buf->size = pkt->size;
+ buf->dst_id = pkt->dst_id;
+ buf->src_id = pkt->src_id;
+ buf->src_type = pkt->src_type;
+ buf->dst_type = pkt->dst_type;
+ memcpy(buf->header, pkt->header, ETHERNET_HEADER_LEN);
+ memcpy(buf->data, pkt->data, pkt->size);
+
+exit:
+
+ if (num >= bridge->max_delayed_pkts){
+ flush_bridge_pkts(bridge);
+ }
+*/
+ return 0;
+}
+
+/* Route one packet: consult the route cache (falling back to a full
+ * table match), then deliver to every matched destination -- the
+ * bridge for LINK_EDGE routes, a device input callback for
+ * LINK_INTERFACE routes. Returns 0 on success, -1 when no route.
+ * NOTE(review): the acquisition of vnet_state.lock is elided by this
+ * hunk (only the v3_unlock_irqrestore calls are visible). */
+int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
    struct route_list * matched_routes = NULL;
    unsigned long flags;
    int i;
-
#ifdef CONFIG_DEBUG_VNET
    {
- struct eth_hdr * hdr = (struct eth_hdr *)(pkt->data);
- char dest_str[18];
- char src_str[18];
+ struct eth_hdr * hdr = (struct eth_hdr *)(pkt->header);
+ char dest_str[100];
+ char src_str[100];
	mac_to_string(hdr->src_mac, src_str);
	mac_to_string(hdr->dst_mac, dest_str);
- PrintDebug("Vnet: HandleDataOverLink. SRC(%s), DEST(%s)\n", src_str, dest_str);
+ int cpu = V3_Get_CPU();
+ PrintDebug("Vnet: on cpu %d, HandleDataOverLink. SRC(%s), DEST(%s), pkt size: %d\n", cpu, src_str, dest_str, pkt->size);
    }
#endif
    look_into_cache(pkt, &matched_routes);
    if (matched_routes == NULL) {
- matched_routes = match_route(pkt);
-
- if (matched_routes) {
+ PrintDebug("Vnet: send pkt Looking into routing table\n");
+
+ matched_routes = match_route(pkt);
+
+ if (matched_routes) {
	    add_route_to_cache(pkt, matched_routes);
	} else {
- PrintError("Could not find route for packet...\n");
+ PrintDebug("Could not find route for packet... discards packet\n");
	    v3_unlock_irqrestore(vnet_state.lock, flags);
	    return -1;
	}
    }
    v3_unlock_irqrestore(vnet_state.lock, flags);
-
-
- for (i = 0; i < matched_routes->num_routes; i++) {
- struct vnet_route_info * route = matched_routes->routes[i];
- if (route->route_def.dst_type == LINK_EDGE) {
+ PrintDebug("Vnet: send pkt route matches %d\n", matched_routes->num_routes);
+
+ for (i = 0; i < matched_routes->num_routes; i++) {
+ struct vnet_route_info * route = matched_routes->routes[i];
+
+ if (route->route_def.dst_type == LINK_EDGE) {
+ pkt->dst_type = LINK_EDGE;
+ pkt->dst_id = route->route_def.dst_id;
+ if (send_to_bridge(pkt) == -1) {
+ PrintDebug("VNET: Packet not sent properly to bridge\n");
+ continue;
+ }
+
	} else if (route->route_def.dst_type == LINK_INTERFACE) {
	    /* NOTE(review): route->dst_dev may be NULL when the target
	     * device was never registered (see v3_vnet_add_route). */
	    if (route->dst_dev->input(route->dst_dev->vm, pkt, route->dst_dev->private_data) == -1) {
		PrintDebug("VNET: Packet not sent properly\n");
		continue;
	    }
- PrintDebug("Vnet: HandleDataOverLink: Forward packet according to Route\n");
+ PrintDebug("Vnet: v3_vnet_send_pkt: Forward packet according to Route %d\n", i);
	}
    return 0;
}
-int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt) {
-
- PrintDebug("In Vnet Send: pkt size: %d\n", pkt->size);
+void v3_vnet_send_pkt_xcall(void * data){
+ struct v3_vnet_pkt * pkt = (struct v3_vnet_pkt *)data;
+ v3_vnet_send_pkt(pkt, NULL);
+}
+
+
+/* Drain the incoming packet ring: route every queued packet through
+ * v3_vnet_send_pkt() and advance the ring indices.
+ * NOTE(review): in_buf.lock is held for the entire drain while
+ * v3_vnet_send_pkt() internally takes vnet_state.lock -- verify the
+ * lock ordering against other paths. 'num' shadows in_buf.num, which
+ * is also decremented in the loop. */
+void v3_vnet_polling()
+{
+ unsigned long flags;
+ int num, start;
+ struct v3_vnet_pkt *buf;
+
+ PrintDebug("In vnet pollling: cpu %d\n", V3_Get_CPU());
+
+ flags = v3_lock_irqsave(vnet_state.in_buf.lock);
- if (handle_one_pkt(pkt) != -1) {
- PrintDebug("VNET: send one packet! pt length %d\n", pkt->size);
- } else {
- PrintDebug("VNET: Fail to forward one packet, discard it!\n");
+ num = vnet_state.in_buf.num;
+ start = vnet_state.in_buf.start;
+
+ PrintDebug("VNET: polling pkts %d\n", num);
+
+ while(num > 0) {
+ buf = &(vnet_state.in_buf.pkts[vnet_state.in_buf.start]);
+
+ v3_vnet_send_pkt(buf, NULL);
+
+ vnet_state.in_buf.num --;
+ vnet_state.in_buf.start ++;
+ vnet_state.in_buf.start %= BRIDGE_BUF_SIZE;
+ num --;
+ }
+
+ v3_unlock_irqrestore(vnet_state.in_buf.lock, flags);
+
+ return;
+}
+
+
+int v3_vnet_rx(uchar_t *buf, uint16_t size, uint16_t src_id, uint8_t src_type){
+ unsigned long flags;
+ int end;
+ struct v3_vnet_pkt *pkt;
+
+ flags = v3_lock_irqsave(vnet_state.in_buf.lock);
+
+ end = vnet_state.in_buf.end;
+ pkt = &(vnet_state.in_buf.pkts[end]);
+
+ if(vnet_state.in_buf.num > BRIDGE_BUF_SIZE){
+ PrintDebug("VNET: bridge rx: buffer full\n");
+ goto exit;
}
+ vnet_state.in_buf.num ++;
+ vnet_state.in_buf.end ++;
+ vnet_state.in_buf.end %= BRIDGE_BUF_SIZE;
+
+ pkt->size = size;
+ pkt->src_id = src_id;
+ pkt->src_type = src_type;
+ memcpy(pkt->header, buf, ETHERNET_HEADER_LEN);
+ memcpy(pkt->data, buf, size);
+
+exit:
+
+ v3_unlock_irqrestore(vnet_state.in_buf.lock, flags);
+
return 0;
}
+
-int v3_vnet_add_dev(struct v3_vm_info *vm,uint8_t mac[6],
+int v3_vnet_add_dev(struct v3_vm_info *vm, uint8_t mac[6],
int (*netif_input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data),
void * priv_data){
struct vnet_dev * new_dev = NULL;
+ unsigned long flags;
- new_dev = find_dev_by_mac(mac);
-
- PrintDebug("VNET: register device\n");
-
- if (new_dev) {
- PrintDebug("VNET: register device: Already has device with the same mac\n");
- return -1;
- }
-
new_dev = (struct vnet_dev *)V3_Malloc(sizeof(struct vnet_dev));
if (new_dev == NULL) {
new_dev->input = netif_input;
new_dev->private_data = priv_data;
new_dev->vm = vm;
+ new_dev->dev_id = 0;
+
+ flags = v3_lock_irqsave(vnet_state.lock);
+
+ if (!find_dev_by_mac(mac)) {
+ list_add(&(new_dev->node), &(vnet_state.devs));
+ new_dev->dev_id = ++vnet_state.num_devs;
+ }
+
+ v3_unlock_irqrestore(vnet_state.lock, flags);
+
+ // if the device was found previosly the id should still be 0
+ if (new_dev->dev_id == 0) {
+ PrintError("Device Alrady exists\n");
+ return -1;
+ }
+
+ PrintDebug("Vnet: Add Device: dev_id %d, input : %p, private_data %p\n",
+ new_dev->dev_id, new_dev->input, new_dev->private_data);
+
+ return new_dev->dev_id;
+}
+
+
+/* Periodic hook (called from a guest core) that asks the bridge to
+ * poll for pending packets; no-op when no bridge is registered.
+ * NOTE(review): vnet_state.bridge is read without vnet_state.lock and
+ * v3_vnet_add_bridge() temporarily stores the sentinel (void *)1
+ * there -- a racing heartbeat would dereference that sentinel via
+ * polling_pkt. Confirm the registration window is benign. */
+void v3_vnet_heartbeat(struct guest_info *core){
+ //static long last_time, cur_time;
+
+ if(vnet_state.bridge == NULL)
+ return;
+/*
+ if(vnet_state.bridge->max_delayed_pkts > 1){
+ if(V3_Get_CPU() != vnet_state.bridge->vm->cores[0].cpu_id){
+ rdtscll(cur_time);
+ }
+
+ if ((cur_time - last_time) >= vnet_state.bridge->max_latency) {
+ last_time = cur_time;
+ flush_bridge_pkts(vnet_state.bridge);
+ }
+ }
+*/
+ vnet_state.bridge->polling_pkt(vnet_state.bridge->vm, vnet_state.bridge->private_data);
+}
+
+/* Register the (single) bridge endpoint. Returns 0 on success, -1 when
+ * a bridge is already set or allocation fails. The slot is reserved
+ * under the lock with the sentinel (void *)1 and published atomically
+ * at the end.
+ * NOTE(review): readers that only NULL-check vnet_state.bridge (e.g.
+ * send_to_bridge, v3_vnet_heartbeat) can observe and dereference the
+ * sentinel during the registration window -- confirm this is safe.
+ * The stray '-' lines near the bottom belong to a different hunk of
+ * the original patch (device registration) and are not part of this
+ * function. */
+int v3_vnet_add_bridge(struct v3_vm_info * vm,
+ int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data),
+ void (*xcall_input)(void *data),
+ int (*poll_pkt)(struct v3_vm_info * vm, void * private_data),
+ uint16_t max_delayed_pkts,
+ long max_latency,
+ void * priv_data) {
+ unsigned long flags;
+ int bridge_free = 0;
+ struct vnet_brg_dev * tmp_bridge = NULL;
+
+ flags = v3_lock_irqsave(vnet_state.lock);
+
+ if (vnet_state.bridge == NULL) {
+ bridge_free = 1;
+ vnet_state.bridge = (void *)1;
+ }
+
+ v3_unlock_irqrestore(vnet_state.lock, flags);
+
+ if (bridge_free == 0) {
+ PrintError("Bridge already set\n");
+ return -1;
+ }
+
+ tmp_bridge = (struct vnet_brg_dev *)V3_Malloc(sizeof(struct vnet_brg_dev));
+
+ if (tmp_bridge == NULL) {
+ PrintError("Malloc Fails\n");
+ vnet_state.bridge = NULL;
+ return -1;
+ }
+
+ tmp_bridge->vm = vm;
+ tmp_bridge->input = input;
+ tmp_bridge->xcall_input = xcall_input;
+ tmp_bridge->polling_pkt = poll_pkt;
+ tmp_bridge->private_data = priv_data;
+ tmp_bridge->disabled = 0;
+
+/*
+ //initial receving buffer
+ tmp_bridge->recv_buf.start = 0;
+ tmp_bridge->recv_buf.end = 0;
+ tmp_bridge->recv_buf.num = 0;
+ if(v3_lock_init(&(tmp_bridge->recv_buf.lock)) == -1){
+ PrintError("VNET: add bridge, error to initiate recv buf lock\n");
+ }
+ int i;
+ for(i = 0; i<BRIDGE_BUF_SIZE; i++){
+ tmp_bridge->recv_buf.pkts[i].data = &(tmp_bridge->recv_buf.datas[i*ETHERNET_PACKET_LEN]);
+ }
- PrintDebug("VNET: register device new_dev22 %p\n", (void *)new_dev);
+*/
+
+ tmp_bridge->max_delayed_pkts = (max_delayed_pkts<BRIDGE_BUF_SIZE)?max_delayed_pkts : BRIDGE_BUF_SIZE;
+ tmp_bridge->max_latency = max_latency;
- list_add(&(new_dev->node), &(vnet_state.devs));
- vnet_state.num_devs ++;
- new_dev->dev_id = vnet_state.num_devs;
+ // make this atomic to avoid possible race conditions
+ flags = v3_lock_irqsave(vnet_state.lock);
+ vnet_state.bridge = tmp_bridge;
+ v3_unlock_irqrestore(vnet_state.lock, flags);
    return 0;
}
-#if 0
-static int v3_vnet_pkt_process() {
- struct v3_vnet_pkt * pkt = NULL;
- while ((pkt = (struct v3_vnet_pkt *)v3_dequeue(vnet_state.inpkt_q)) != NULL) {
- if (handle_one_pkt(pkt) != -1) {
- PrintDebug("VNET: vnet_check: handle one packet! pt length %d\n", (int)pkt->size);
- } else {
- PrintDebug("VNET: vnet_check: Fail to forward one packet, discard it!\n");
- }
-
- V3_Free(pkt); // be careful here
/* Stop delivering packets to the bridge; no-op when no bridge is
 * registered. Always returns 0. (The '-' lines above are the removed
 * queue-based v3_vnet_pkt_process, interleaved by the patch.) */
+int v3_vnet_disable_bridge() {
+ unsigned long flags;
+
+ flags = v3_lock_irqsave(vnet_state.lock);
+
+ if (vnet_state.bridge != NULL) {
+ vnet_state.bridge->disabled = 1;
    }
+
+ v3_unlock_irqrestore(vnet_state.lock, flags);
+
+ return 0;
+}
+
+
+int v3_vnet_enable_bridge() {
+ unsigned long flags;
+ flags = v3_lock_irqsave(vnet_state.lock);
+
+ if (vnet_state.bridge != NULL) {
+ vnet_state.bridge->disabled = 0;
+ }
+
+ v3_unlock_irqrestore(vnet_state.lock, flags);
+
return 0;
}
-#endif
+
+
int V3_init_vnet() {
+ int i;
+
+ memset(&vnet_state, 0, sizeof(vnet_state));
INIT_LIST_HEAD(&(vnet_state.routes));
INIT_LIST_HEAD(&(vnet_state.devs));
}
PrintDebug("VNET: Locks initiated\n");
-
- /*initial pkt receiving queue */
- vnet_state.inpkt_q = v3_create_queue();
- v3_init_queue(vnet_state.inpkt_q);
- PrintDebug("VNET: Receiving queue initiated\n");
+
+ //initial incoming pkt buffer
+ vnet_state.in_buf.start = 0;
+ vnet_state.in_buf.end = 0;
+ vnet_state.in_buf.num = 0;
+ if(v3_lock_init(&(vnet_state.in_buf.lock)) == -1){
+ PrintError("VNET: add bridge, error to initiate send buf lock\n");
+ }
+ for(i = 0; i<BRIDGE_BUF_SIZE; i++){
+ vnet_state.in_buf.pkts[i].data = &(vnet_state.in_buf.datas[i*ETHERNET_PACKET_LEN]);
+ }
+ PrintDebug("VNET: Receiving buffer initiated\n");
vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);