#define VNET_HASH_SIZE 17
#define ETHERNET_HEADER_LEN 14
-#define ETHERNET_DATA_MAX 1500
-#define ETHERNET_PACKET_LEN (ETHERNET_HEADER_LEN + ETHERNET_DATA_MAX)
+#define ETHERNET_MTU 6000
+#define ETHERNET_PACKET_LEN (ETHERNET_HEADER_LEN + ETHERNET_MTU)
//routing table entry
struct v3_vnet_route {
int V3_init_vnet();
int v3_vnet_add_bridge(struct v3_vm_info * vm,
- int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data),
- void (*xcall_input)(void *data),
- uint16_t max_delayed_pkts,
- long max_latency,
- void * priv_data);
+ int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data),
+ void (*xcall_input)(void *data),
+ int (*poll_pkt)(struct v3_vm_info * vm, void * private_data),
+ uint16_t max_delayed_pkts,
+ long max_latency,
+ void * priv_data);
int v3_vnet_add_dev(struct v3_vm_info *info, uint8_t mac[6],
int (*dev_input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data),
int v3_vnet_disable_bridge();
int v3_vnet_enable_bridge();
-void v3_vnet_bridge_polling();
+void v3_vnet_polling();
-int v3_vnet_bridge_rx(uchar_t *buf, uint16_t size, uint16_t src_link);
+int v3_vnet_rx(uchar_t *buf, uint16_t size, uint16_t src_id, uint8_t src_type);
#endif
ulong_t pkt_sent, pkt_recv, pkt_drop;
+#if 1 //for temporary performance testing purpose
+ long last_sent_time, last_recv_time;
+#endif
+
struct v3_dev_net_ops * net_ops;
v3_lock_t lock;
}
#ifdef CONFIG_VNET_PROFILE
- if (virtio_state->pkt_sent % 10000 == 0){
- PrintError("Virtio NIC: sent: %ld, rxed: %ld, dropped: %ld\n",
- virtio_state->pkt_sent,
- virtio_state->pkt_recv,
- virtio_state->pkt_drop);
+ if (virtio_state->pkt_sent % 50000 == 0){
+ long cur_time, time;
+ rdtscll(cur_time);
+ time = cur_time - virtio_state->last_sent_time;
+ PrintError("Virtio NIC: last sent 50000 cycles: %ld\n",time);
+ //PrintError("Virtio NIC: sent: %ld, rxed: %ld, dropped: %ld\n",
+ // virtio_state->pkt_sent,
+ // virtio_state->pkt_recv,
+ // virtio_state->pkt_drop);
+ rdtscll(virtio_state->last_sent_time);
}
#endif
exit:
#ifdef CONFIG_VNET_PROFILE
- if (virtio->pkt_recv % 100000 == 0){
- PrintError("Virtio NIC: sent: %ld, rxed: %ld, dropped: %ld\n",
- virtio->pkt_sent,
- virtio->pkt_recv,
- virtio->pkt_drop);
+ if (virtio->pkt_recv % 50000 == 0){
+ long cur_time, time;
+ rdtscll(cur_time);
+ time = cur_time - virtio->last_recv_time;
+ PrintError("Virtio NIC: last recv 50000 cycles: %ld\n",time);
+ //PrintError("Virtio NIC: sent: %ld, rxed: %ld, dropped: %ld\n",
+ //virtio->pkt_sent,
+ //virtio->pkt_recv,
+ //virtio->pkt_drop);
+ rdtscll(virtio->last_recv_time);
}
#endif
#endif
-#define QUEUE_SIZE 8192
+#define QUEUE_SIZE 4096
#define CMD_QUEUE_SIZE 128
#define NUM_QUEUES 3
int io_range_size;
v3_lock_t lock;
- ulong_t pkt_sent, pkt_recv, pkt_drop;
+ ulong_t pkt_sent, pkt_recv, pkt_drop, tx_exit, rx_exit, total_exit;
int ready;
};
struct vring_desc * pkt_desc = NULL;
struct vnet_bridge_pkt * virtio_pkt = NULL;
- //if(q->cur_avail_idx % 100 == 0)
- // PrintError("cur_avai_idx %d, idx: %d\n", q->cur_avail_idx, q->avail->index);
-
pkt_desc = &(q->desc[pkt_idx]);
PrintDebug("VNET Bridge RX: buffer desc len: %d\n", pkt_desc->length);
memcpy(virtio_pkt->pkt, pkt->data, pkt->size);
q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
- q->used->ring[q->used->index % q->queue_size].length = sizeof(struct vnet_bridge_pkt); // This should be the total length of data sent to guest (header+pkt_data)
+ q->used->ring[q->used->index % q->queue_size].length = sizeof(struct vnet_bridge_pkt);
q->used->index++;
q->cur_avail_idx++;
} else {
- //PrintError("VNET Bridge: guest RX buffer full: cur_avai_idx %d, idx: %d\nDisable Bridge\n", q->cur_avail_idx, q->avail->index);
vnet_state->pkt_drop ++;
v3_vnet_disable_bridge();
}
ret_val = 0;
-exit:
-
+
#ifdef CONFIG_VNET_PROFILE
- if (vnet_state->pkt_recv % 10000 == 0)
- PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld\n",
+ if (vnet_state->pkt_recv % 200000 == 0)
+ PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld, total exit: %ld, tx exit: %ld, rx exit: %ld\n",
vnet_state->pkt_sent,
vnet_state->pkt_recv,
- vnet_state->pkt_drop);
+ vnet_state->pkt_drop,
+ vnet_state->total_exit,
+ vnet_state->tx_exit,
+ vnet_state->rx_exit);
#endif
+exit:
+
v3_unlock_irqrestore(vnet_state->lock, flags);
return ret_val;
static int handle_pkt_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state)
{
struct virtio_queue * q = &(vnet_state->queue[XMIT_QUEUE]);
+ unsigned long flags = 0;
+ int recvd = 0;
+
+ flags = v3_lock_irqsave(vnet_state->lock);
+
+ if (q->ring_avail_addr == 0) {
+ goto exit;
+ }
while (q->cur_avail_idx != q->avail->index) {
uint16_t desc_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
PrintDebug("VNET Bridge: TX: on cpu %d pkt size: %d, dst link: %d\n", cpu, virtio_pkt->pkt_size, virtio_pkt->link_id);
- v3_vnet_bridge_rx(virtio_pkt->pkt, virtio_pkt->pkt_size, virtio_pkt->link_id);
+ v3_vnet_rx(virtio_pkt->pkt, virtio_pkt->pkt_size, virtio_pkt->link_id, LINK_EDGE);
q->used->ring[q->used->index % q->queue_size].id = q->avail->ring[q->cur_avail_idx % q->queue_size];
q->used->ring[q->used->index % q->queue_size].length = pkt_desc->length; // What do we set this to????
q->used->index++;
vnet_state->pkt_sent ++;
+ recvd ++;
q->cur_avail_idx++;
}
- //interrupt the vnet to poll pkts
+ if(recvd == 0){
+ goto exit;
+ }
+
+ //PrintError("In polling get %d\n", recvd);
+
+ //if on the dom0 core, interrupt the domU core to poll pkts
+ //otherwise, call the polling directly
int cpu = V3_Get_CPU();
- cpu = (cpu == 0)?1:0;
- v3_interrupt_cpu(vnet_state->vm, cpu, V3_VNET_POLLING_VECTOR);
+ if(vnet_state->vm->cores[0].cpu_id == cpu){
+ cpu = (cpu == 0)?1:0;
+ v3_interrupt_cpu(vnet_state->vm, cpu, V3_VNET_POLLING_VECTOR);
+ }else{
+ v3_vnet_polling();
+ }
if((vnet_state->pkt_sent % (QUEUE_SIZE/20)) == 0) { //optimized for guest's, batch the interrupts
if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
}
#ifdef CONFIG_VNET_PROFILE
- if (vnet_state->pkt_sent % 10000 == 0)
- PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld\n",
+ if (vnet_state->pkt_sent % 200000 == 0)
+ PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld, total exit: %ld, tx exit: %ld, rx exit: %ld\n",
vnet_state->pkt_sent,
vnet_state->pkt_recv,
- vnet_state->pkt_drop);
+ vnet_state->pkt_drop,
+ vnet_state->total_exit,
+ vnet_state->tx_exit,
+ vnet_state->rx_exit);
#endif
+exit:
+ v3_unlock_irqrestore(vnet_state->lock,flags);
+
return 0;
}
+//VNET poll_pkt callback (registered via v3_vnet_add_bridge below):
+//drains pending TX packets from the guest's XMIT queue by reusing the
+//kick handler on core 0's context.
+//Returns whatever handle_pkt_kick returns (0 on success, -1 on error).
+static int polling_pkt_from_guest(struct v3_vm_info * vm, void *private_data){
+    struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
+
+    return handle_pkt_kick(&(vm->cores[0]), vnet_state);
+}
static int handle_rx_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state)
{
port, length, *(uint32_t *)src);
PrintDebug("VNET Bridge: port idx=%d\n", port_idx);
+ vnet_state->total_exit ++;
switch (port_idx) {
case GUEST_FEATURES_PORT:
if(vnet_state->queue[RECV_QUEUE].avail != NULL){
vnet_state->ready = 1;
}
+
+ //No notify when there is pkt tx from guest
+ if(vnet_state->queue[XMIT_QUEUE].used != NULL){
+ vnet_state->queue[XMIT_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
+ }
+
} else {
PrintError("Illegal write length for page frame number\n");
return -1;
if (handle_pkt_kick(core, vnet_state) == -1){
PrintError("Could not handle Virtio VNET TX\n");
return -1;
- }
+ }
+ vnet_state->tx_exit ++;
+ //PrintError("Notify on TX\n");
} else if (queue_idx == 2) {
if (handle_rx_kick(core, vnet_state) == -1){
PrintError("Could not handle Virtio RX buffer refills Kick\n");
return -1;
}
+ vnet_state->rx_exit ++;
} else {
PrintError("VNET Bridge: Kick on invalid queue (%d)\n", queue_idx);
return -1;
struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
int port_idx = port % vnet_state->io_range_size;
-/*
- PrintDebug("VirtioVNET: VIRTIO SYMBIOTIC Read for port %d (index =%d), length=%d\n",
- port, port_idx, length);
-*/
switch (port_idx) {
case HOST_FEATURES_PORT:
if (length != 4) {
virtio_reset(vnet_state);
V3_Print("Registering Virtio device as vnet bridge\n");
- v3_vnet_add_bridge(vm, vnet_pkt_input_cb, vnet_pkt_input_xcall, 0, 500000, (void *)vnet_state);
+ v3_vnet_add_bridge(vm, vnet_pkt_input_cb, vnet_pkt_input_xcall, polling_pkt_from_guest, 0, 500000, (void *)vnet_state);
return 0;
static int vnet_send(uint8_t * buf, uint32_t len, void * private_data, struct vm_device *dest_dev){
- struct v3_vnet_pkt pkt;
struct vnet_nic_state *vnetnic = (struct vnet_nic_state *)private_data;
-
+ struct v3_vnet_pkt pkt;
pkt.size = len;
pkt.src_type = LINK_INTERFACE;
pkt.src_id = vnetnic->vnet_dev_id;
#ifdef CONFIG_DEBUG_VNET_NIC
{
- PrintDebug("Virtio VNET-NIC: send pkt size: %d, pkt src_id: %d, src_type: %d\n",
- len, pkt.src_id, pkt.src_type);
+ PrintDebug("Virtio VNET-NIC: send pkt size: %d, pkt src_id: %d\n",
+ len, vnetnic->vnet_dev_id);
v3_hexdump(buf, len, NULL, 0);
}
#endif
-
-
+/*
+ v3_vnet_rx(buf, len, vnetnic->vnet_dev_id, LINK_INTERFACE);
+
+ //if on the dom0 core, interrupt the domU core to poll pkts
+ //otherwise, call the polling directly
+ int cpu = V3_Get_CPU();
+ cpu = (cpu == 0)?1:0;
+ v3_interrupt_cpu(vnetnic->vm, cpu, V3_VNET_POLLING_VECTOR);
+ */
+
v3_vnet_send_pkt(&pkt, NULL);
return 0;
}
#endif
+//for temporary hack for Linux bridge (w/o encapuslation) test
+#if 0
+ {
+ static int vnet_nic_guestid = -1;
+ static int vnet_nic_dom0 = -1;
+ uchar_t zeromac[6] = {0,0,0,0,0,0};
+
+ if(!strcmp(name, "vnet_nic")){ //domu
+ vnet_nic_guestid = vnet_dev_id;
+ }
+ if (!strcmp(name, "vnet_nic_dom0")){
+ vnet_nic_dom0 = vnet_dev_id;
+    }
+
+ if(vnet_nic_guestid != -1 && vnet_nic_dom0 !=-1){
+ struct v3_vnet_route route;
+
+ route.src_id = vnet_nic_guestid;
+ route.src_type = LINK_INTERFACE;
+ route.dst_id = vnet_nic_dom0;
+ route.dst_type = LINK_INTERFACE;
+ memcpy(route.dst_mac, zeromac, 6);
+ route.dst_mac_qual = MAC_ANY;
+ memcpy(route.src_mac, zeromac, 6);
+ route.src_mac_qual = MAC_ANY;
+ v3_vnet_add_route(route);
+
+
+ route.src_id = vnet_nic_dom0;
+ route.src_type = LINK_INTERFACE;
+ route.dst_id = vnet_nic_guestid;
+ route.dst_type = LINK_INTERFACE;
+ memcpy(route.dst_mac, zeromac, 6);
+ route.dst_mac_qual = MAC_ANY;
+ memcpy(route.src_mac, zeromac, 6);
+ route.src_mac_qual = MAC_ANY;
+
+ v3_vnet_add_route(route);
+ }
+ }
+#endif
+
return 0;
}
} __attribute__((packed));
-#define BRIDGE_BUF_SIZE 1024
+#define BRIDGE_BUF_SIZE 512
struct bridge_pkts_buf {
int start, end;
int num;
int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data);
void (*xcall_input)(void *data);
-
- struct bridge_pkts_buf recv_buf; //packets from Vnet to vnet_bridge device
-
- struct bridge_pkts_buf send_buf; //packets from vnet_bridge device to Vnet
+ int (*polling_pkt)(struct v3_vm_info * vm, void *private_data);
int disabled;
v3_lock_t lock;
- struct gen_queue * inpkt_q;
struct hashtable * route_cache;
+ struct bridge_pkts_buf in_buf; //incoming packets buffer
} vnet_state;
return matches;
}
-
+#if 0
static int flush_bridge_pkts(struct vnet_brg_dev *bridge){
unsigned long flags;
int num, start, send;
return 0;
}
-
+#endif
//Hand one packet to the registered bridge device on the current core.
//Returns 0 on success, -1 if no bridge is registered or the bridge's
//input handler reports failure.
static int send_to_bridge(struct v3_vnet_pkt * pkt){
    struct vnet_brg_dev *bridge = vnet_state.bridge;
-    int cpu_id = bridge->vm->cores[0].cpu_id;
-    struct v3_vnet_bridge_input_args args;
    if (bridge == NULL) {
        PrintDebug("VNET: No bridge to sent data to links\n");
        return -1;
    }
-    args.pkt_num = 1;
-    args.vm = bridge->vm;
-    args.vnet_pkts = pkt;
-    args.private_data = bridge->private_data;
-
-    V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
+    //deliver directly on this core (the old cross-core V3_Call_On_CPU path
+    //is removed); propagate the bridge's error code instead of ignoring it
+    if (bridge->input(bridge->vm, pkt, 1, bridge->private_data) < 0) {
+        PrintDebug("VNET: bridge input handler failed\n");
+        return -1;
+    }
+
    PrintDebug("VNET: sent one packet to the bridge\n");
    return 0;
}
+/*
unsigned long flags;
int end, num=0;
struct v3_vnet_pkt *buf;
if (num >= bridge->max_delayed_pkts){
flush_bridge_pkts(bridge);
}
-
+*/
return 0;
}
for (i = 0; i < matched_routes->num_routes; i++) {
struct vnet_route_info * route = matched_routes->routes[i];
- if (route->route_def.dst_type == LINK_EDGE) {
+ if (route->route_def.dst_type == LINK_EDGE) {
pkt->dst_type = LINK_EDGE;
pkt->dst_id = route->route_def.dst_id;
}
+//Drain the global incoming-packet ring buffer (vnet_state.in_buf),
+//routing each buffered packet through v3_vnet_send_pkt.  Invoked either
+//directly or via the V3_VNET_POLLING_VECTOR cross-core interrupt.
-void v3_vnet_bridge_polling()
+void v3_vnet_polling()
{
    unsigned long flags;
    int num, start;
    struct v3_vnet_pkt *buf;
-    struct vnet_brg_dev *bridge = vnet_state.bridge;
-
-    PrintDebug("In vnet bridge pollling: cpu %d\n", V3_Get_CPU());
-    if(bridge == NULL){
-        PrintDebug("VNET: Bridge is not set\n");
-        return;
-    }
+    PrintDebug("In vnet pollling: cpu %d\n", V3_Get_CPU());
-    flags = v3_lock_irqsave(bridge->send_buf.lock);
+    flags = v3_lock_irqsave(vnet_state.in_buf.lock);
-    num = bridge->send_buf.num;
-    start = bridge->send_buf.start;
+    num = vnet_state.in_buf.num;
+    start = vnet_state.in_buf.start;
+    //NOTE(review): 'start' is assigned but never read below - dead store?
+    //NOTE(review): v3_vnet_send_pkt is called with in_buf.lock held; verify
+    //the send path can never re-enter v3_vnet_rx (which takes the same lock)
-    PrintDebug("VNET: bridge polling pkts %d\n", num);
+    PrintDebug("VNET: polling pkts %d\n", num);
    while(num > 0) {
-        buf = &(bridge->send_buf.pkts[bridge->send_buf.start]);
+        buf = &(vnet_state.in_buf.pkts[vnet_state.in_buf.start]);
        v3_vnet_send_pkt(buf, NULL);
-        bridge->send_buf.num --;
-        bridge->send_buf.start ++;
-        bridge->send_buf.start %= BRIDGE_BUF_SIZE;
+        vnet_state.in_buf.num --;
+        vnet_state.in_buf.start ++;
+        vnet_state.in_buf.start %= BRIDGE_BUF_SIZE;
        num --;
    }
-    v3_unlock_irqrestore(bridge->send_buf.lock, flags);
+    v3_unlock_irqrestore(vnet_state.in_buf.lock, flags);
    return;
}
-int v3_vnet_bridge_rx(uchar_t *buf, uint16_t size, uint16_t src_link){
-    struct vnet_brg_dev *bridge = vnet_state.bridge;
+//Queue one raw ethernet frame into the global incoming-packet ring buffer.
+//The frame is copied, so the caller may reuse 'buf' immediately; src_id and
+//src_type identify the originating link and are stored in the packet.
+//Returns 0 on success or silent drop (ring full), -1 on invalid frame size.
+int v3_vnet_rx(uchar_t *buf, uint16_t size, uint16_t src_id, uint8_t src_type){
    unsigned long flags;
    int end;
    struct v3_vnet_pkt *pkt;
-    if (bridge == NULL) {
-        PrintDebug("VNET: No bridge is set\n");
-        return -1;
-    }
-
-    flags = v3_lock_irqsave(bridge->send_buf.lock);
+    //each ring slot holds at most ETHERNET_PACKET_LEN bytes, and the header
+    //copy below always reads ETHERNET_HEADER_LEN bytes from 'buf' - reject
+    //frames that would overflow the slot or under-fill the header
+    if((size > ETHERNET_PACKET_LEN) || (size < ETHERNET_HEADER_LEN)){
+        PrintDebug("VNET: rx: invalid pkt size %d\n", size);
+        return -1;
+    }
+
+    flags = v3_lock_irqsave(vnet_state.in_buf.lock);
-    end = bridge->send_buf.end;
-    pkt = &(bridge->send_buf.pkts[end]);
+    end = vnet_state.in_buf.end;
+    pkt = &(vnet_state.in_buf.pkts[end]);
-    if(bridge->send_buf.num > BRIDGE_BUF_SIZE){
+    //'>=': with BRIDGE_BUF_SIZE packets queued the ring is full; the old '>'
+    //test admitted one extra packet, overwriting the unread slot at 'start'
+    if(vnet_state.in_buf.num >= BRIDGE_BUF_SIZE){
        PrintDebug("VNET: bridge rx: buffer full\n");
        goto exit;
    }
-    bridge->send_buf.num ++;
-    bridge->send_buf.end ++;
-    bridge->send_buf.end %= BRIDGE_BUF_SIZE;
+    vnet_state.in_buf.num ++;
+    vnet_state.in_buf.end ++;
+    vnet_state.in_buf.end %= BRIDGE_BUF_SIZE;
    pkt->size = size;
-    pkt->src_id = src_link;
-    pkt->src_type = LINK_EDGE;
+    pkt->src_id = src_id;
+    pkt->src_type = src_type;
    memcpy(pkt->header, buf, ETHERNET_HEADER_LEN);
    memcpy(pkt->data, buf, size);
exit:
-    v3_unlock_irqrestore(bridge->send_buf.lock, flags);
+    v3_unlock_irqrestore(vnet_state.in_buf.lock, flags);
    return 0;
}
void v3_vnet_heartbeat(struct guest_info *core){
- static long last_time, cur_time;
+ //static long last_time, cur_time;
if(vnet_state.bridge == NULL)
return;
-
- if(vnet_state.bridge->max_delayed_pkts <= 1)
- return;
-
- if(V3_Get_CPU() != vnet_state.bridge->vm->cores[0].cpu_id){
- rdtscll(cur_time);
- }
-
- if ((cur_time - last_time) >= vnet_state.bridge->max_latency) {
- last_time = cur_time;
- flush_bridge_pkts(vnet_state.bridge);
- }
+/*
+ if(vnet_state.bridge->max_delayed_pkts > 1){
+ if(V3_Get_CPU() != vnet_state.bridge->vm->cores[0].cpu_id){
+ rdtscll(cur_time);
+ }
+
+ if ((cur_time - last_time) >= vnet_state.bridge->max_latency) {
+ last_time = cur_time;
+ flush_bridge_pkts(vnet_state.bridge);
+ }
+ }
+*/
+ vnet_state.bridge->polling_pkt(vnet_state.bridge->vm, vnet_state.bridge->private_data);
}
int v3_vnet_add_bridge(struct v3_vm_info * vm,
int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data),
void (*xcall_input)(void *data),
+ int (*poll_pkt)(struct v3_vm_info * vm, void * private_data),
uint16_t max_delayed_pkts,
long max_latency,
void * priv_data) {
unsigned long flags;
int bridge_free = 0;
- struct vnet_brg_dev * tmp_bridge = NULL;
- int i;
-
+ struct vnet_brg_dev * tmp_bridge = NULL;
flags = v3_lock_irqsave(vnet_state.lock);
if (tmp_bridge == NULL) {
PrintError("Malloc Fails\n");
+ vnet_state.bridge = NULL;
return -1;
}
tmp_bridge->vm = vm;
tmp_bridge->input = input;
tmp_bridge->xcall_input = xcall_input;
+ tmp_bridge->polling_pkt = poll_pkt;
tmp_bridge->private_data = priv_data;
tmp_bridge->disabled = 0;
+/*
//initial receving buffer
tmp_bridge->recv_buf.start = 0;
tmp_bridge->recv_buf.end = 0;
if(v3_lock_init(&(tmp_bridge->recv_buf.lock)) == -1){
PrintError("VNET: add bridge, error to initiate recv buf lock\n");
}
- tmp_bridge->max_delayed_pkts = (max_delayed_pkts<BRIDGE_BUF_SIZE)?max_delayed_pkts : BRIDGE_BUF_SIZE;
- tmp_bridge->max_latency = max_latency;
+ int i;
for(i = 0; i<BRIDGE_BUF_SIZE; i++){
tmp_bridge->recv_buf.pkts[i].data = &(tmp_bridge->recv_buf.datas[i*ETHERNET_PACKET_LEN]);
}
- //initial sending buffer
- tmp_bridge->send_buf.start = 0;
- tmp_bridge->send_buf.end = 0;
- tmp_bridge->send_buf.num = 0;
- if(v3_lock_init(&(tmp_bridge->send_buf.lock)) == -1){
- PrintError("VNET: add bridge, error to initiate send buf lock\n");
- }
- for(i = 0; i<BRIDGE_BUF_SIZE; i++){
- tmp_bridge->send_buf.pkts[i].data = &(tmp_bridge->send_buf.datas[i*ETHERNET_PACKET_LEN]);
- }
+*/
+
+ tmp_bridge->max_delayed_pkts = (max_delayed_pkts<BRIDGE_BUF_SIZE)?max_delayed_pkts : BRIDGE_BUF_SIZE;
+ tmp_bridge->max_latency = max_latency;
// make this atomic to avoid possible race conditions
flags = v3_lock_irqsave(vnet_state.lock);
int V3_init_vnet() {
+ int i;
+
+ memset(&vnet_state, 0, sizeof(vnet_state));
INIT_LIST_HEAD(&(vnet_state.routes));
INIT_LIST_HEAD(&(vnet_state.devs));
}
PrintDebug("VNET: Locks initiated\n");
-
- vnet_state.inpkt_q = v3_create_queue();
- v3_init_queue(vnet_state.inpkt_q);
- PrintDebug("VNET: Receiving queue initiated\n");
+
+ //initial incoming pkt buffer
+ vnet_state.in_buf.start = 0;
+ vnet_state.in_buf.end = 0;
+ vnet_state.in_buf.num = 0;
+ if(v3_lock_init(&(vnet_state.in_buf.lock)) == -1){
+ PrintError("VNET: add bridge, error to initiate send buf lock\n");
+ }
+ for(i = 0; i<BRIDGE_BUF_SIZE; i++){
+ vnet_state.in_buf.pkts[i].data = &(vnet_state.in_buf.datas[i*ETHERNET_PACKET_LEN]);
+ }
+ PrintDebug("VNET: Receiving buffer initiated\n");
vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);