int io_range_size;
v3_lock_t lock;
- ulong_t pkt_sent, pkt_recv, pkt_drop, tx_exit, rx_exit, total_exit;
+ uint32_t pkt_sent;
+ uint32_t pkt_recv;
+ uint32_t pkt_drop;
+ uint32_t tx_exit;
+ uint32_t rx_exit;
+ uint32_t total_exit;
+
int ready;
};
uint8_t status = 0;
- PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n", desc_cnt, q->cur_avail_idx % QUEUE_SIZE, desc_idx);
+ PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n",
+ desc_cnt, q->cur_avail_idx % QUEUE_SIZE, desc_idx);
if (desc_cnt < 3) {
PrintError("VNET Bridge cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
int ret_val = -1;
unsigned long flags;
uint16_t sent;
- struct v3_vnet_pkt *pkt;
+ struct v3_vnet_pkt * pkt = NULL;
- if(pkt_num <= 0)
+ if (pkt_num <= 0) {
return 0;
+ }
flags = v3_lock_irqsave(vnet_state->lock);
if (q->ring_avail_addr == 0) {
PrintError("Queue is not set\n");
- goto exit;
+ v3_unlock_irqrestore(vnet_state->lock, flags);
+ return ret_val;
}
PrintDebug("VNET Bridge: RX: running on cpu: %d, num of pkts: %d\n", V3_Get_CPU(), pkt_num);
- for(sent = 0; sent < pkt_num; sent ++) {
+ for (sent = 0; sent < pkt_num; sent++) {
pkt = &vnet_pkts[sent];
- vnet_state->pkt_recv ++;
+ vnet_state->pkt_recv++;
if (q->cur_avail_idx != q->avail->index) {
uint16_t pkt_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
if (v3_gpa_to_hva(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
PrintError("Could not translate buffer address\n");
- goto exit;
+ v3_unlock_irqrestore(vnet_state->lock, flags);
+ return ret_val;
}
PrintDebug("VNET Bridge: RX: pkt sent to guest pkt size: %d, dst link: %d\n", pkt->size, pkt->dst_id);
q->used->index++;
q->cur_avail_idx++;
} else {
- vnet_state->pkt_drop ++;
+ vnet_state->pkt_drop++;
v3_vnet_disable_bridge();
}
}
- if(sent == 0){
- goto exit;
+ if (sent == 0) {
+ v3_unlock_irqrestore(vnet_state->lock, flags);
+ return ret_val;
}
if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
#ifdef CONFIG_VNET_PROFILE
if (vnet_state->pkt_recv % 200000 == 0)
PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld, total exit: %ld, tx exit: %ld, rx exit: %ld\n",
- vnet_state->pkt_sent,
- vnet_state->pkt_recv,
- vnet_state->pkt_drop,
- vnet_state->total_exit,
- vnet_state->tx_exit,
- vnet_state->rx_exit);
+ vnet_state->pkt_sent,
+ vnet_state->pkt_recv,
+ vnet_state->pkt_drop,
+ vnet_state->total_exit,
+ vnet_state->tx_exit,
+ vnet_state->rx_exit);
#endif
-exit:
-
v3_unlock_irqrestore(vnet_state->lock, flags);
-
+
return ret_val;
+
}
-static void vnet_pkt_input_xcall(void *data){
-    struct v3_vnet_bridge_input_args *args = (struct v3_vnet_bridge_input_args *)data;
+/* xcall (cross-call) trampoline: unpacks the marshalled bridge-input args and
+ * forwards the packet batch to vnet_pkt_input_cb.  Presumably invoked on a
+ * remote CPU via the host's cross-call mechanism -- confirm against caller. */
+static void vnet_pkt_input_xcall(void * data) {
+    struct v3_vnet_bridge_input_args * args = (struct v3_vnet_bridge_input_args *)data;
vnet_pkt_input_cb(args->vm, args->vnet_pkts, args->pkt_num, args->private_data);
}
-static int handle_pkt_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state)
-{
+/* Drain the guest's transmit (XMIT_QUEUE) virtqueue.
+ *
+ * Under vnet_state->lock, every available descriptor is marked consumed in
+ * the used ring and counted.  If any packets were taken, packet polling is
+ * triggered: by IPI to the other core when running on core 0's physical CPU,
+ * otherwise by a direct v3_vnet_polling() call.  The guest's virtio IRQ is
+ * raised only every QUEUE_SIZE/20 packets to batch interrupts.  Returns 0.
+ */
+static int handle_pkt_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) {
struct virtio_queue * q = &(vnet_state->queue[XMIT_QUEUE]);
unsigned long flags = 0;
int recvd = 0;
-
+    int cpu = V3_Get_CPU();  /* sampled once, up front; used for the IPI decision below */
+
flags = v3_lock_irqsave(vnet_state->lock);
if (q->ring_avail_addr == 0) {
-	goto exit;
+    /* guest has not set up the ring yet -- nothing to drain */
+    v3_unlock_irqrestore(vnet_state->lock,flags);
+    return 0;
}
while (q->cur_avail_idx != q->avail->index) {
q->used->ring[q->used->index % q->queue_size].length = pkt_desc->length; // What do we set this to????
q->used->index++;
-	vnet_state->pkt_sent ++;
-	recvd ++;
+    vnet_state->pkt_sent++;
+    recvd++;
q->cur_avail_idx++;
}
-    if(recvd == 0){
-	goto exit;
+    if (recvd == 0) {
+    v3_unlock_irqrestore(vnet_state->lock,flags);
+    return 0;
}
//PrintError("In polling get %d\n", recvd);
//if on the dom0 core, interrupt the domU core to poll pkts
//otherwise, call the polling directly
-    int cpu = V3_Get_CPU();
-    if(vnet_state->vm->cores[0].cpu_id == cpu){
-	cpu = (cpu == 0)?1:0;
+
+
+    if (vnet_state->vm->cores[0].cpu_id == cpu) {
+    cpu = (cpu == 0) ? 1 : 0;
v3_interrupt_cpu(vnet_state->vm, cpu, V3_VNET_POLLING_VECTOR);
-    }else{
+    } else {
v3_vnet_polling();
}
-    if((vnet_state->pkt_sent % (QUEUE_SIZE/20)) == 0) { //optimized for guest's, batch the interrupts
-	if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
-	    v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
-	    vnet_state->virtio_cfg.pci_isr = 0x1;
-	}
+    if ((vnet_state->pkt_sent % (QUEUE_SIZE/20)) == 0) {
+    //optimized for guest's, batch the interrupts
+
+    if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
+        v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
+        vnet_state->virtio_cfg.pci_isr = 0x1;
+    }
}
-
+
#ifdef CONFIG_VNET_PROFILE
if (vnet_state->pkt_sent % 200000 == 0)
-	PrintError("Vnet Bridge: sent: %ld, rxed: %ld, dropped: %ld, total exit: %ld, tx exit: %ld, rx exit: %ld\n",
-		vnet_state->pkt_sent,
-		vnet_state->pkt_recv,
-		vnet_state->pkt_drop,
-		vnet_state->total_exit,
-		vnet_state->tx_exit,
-		vnet_state->rx_exit);
+    /* counters are now uint32_t (see struct change above), so %ld would be a
+     * printf format/argument mismatch (undefined behavior per C99 7.19.6.1);
+     * print them with %u instead */
+    PrintError("Vnet Bridge: sent: %u, rxed: %u, dropped: %u, total exit: %u, tx exit: %u, rx exit: %u\n",
+               vnet_state->pkt_sent,
+               vnet_state->pkt_recv,
+               vnet_state->pkt_drop,
+               vnet_state->total_exit,
+               vnet_state->tx_exit,
+               vnet_state->rx_exit);
#endif
-exit:
v3_unlock_irqrestore(vnet_state->lock,flags);
return 0;
}
-static int polling_pkt_from_guest(struct v3_vm_info * vm, void *private_data){
+/* VNET polling hook: recovers the device state from the opaque private_data
+ * pointer and drains the guest TX queue via handle_pkt_kick on core 0. */
+static int polling_pkt_from_guest(struct v3_vm_info * vm, void *private_data) {
struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
return handle_pkt_kick(&(vm->cores[0]), vnet_state);
}
-static int handle_rx_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state)
-{
+static int handle_rx_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state) {
v3_vnet_enable_bridge();
return 0;
port, length, *(uint32_t *)src);
PrintDebug("VNET Bridge: port idx=%d\n", port_idx);
- vnet_state->total_exit ++;
+ vnet_state->total_exit++;
switch (port_idx) {
case GUEST_FEATURES_PORT:
+
if (length != 4) {
PrintError("Illegal write length for guest features\n");
return -1;
}
+
vnet_state->virtio_cfg.guest_features = *(uint32_t *)src;
break;
- case VRING_PG_NUM_PORT:
- if (length == 4) {
- addr_t pfn = *(uint32_t *)src;
- addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);
+ case VRING_PG_NUM_PORT: {
- vnet_state->cur_queue->pfn = pfn;
-
- vnet_state->cur_queue->ring_desc_addr = page_addr ;
- vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
- vnet_state->cur_queue->ring_used_addr = ( vnet_state->cur_queue->ring_avail_addr + \
- sizeof(struct vring_avail) + \
- (QUEUE_SIZE * sizeof(uint16_t)));
-
- // round up to next page boundary.
- vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
+ addr_t pfn = *(uint32_t *)src;
+ addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);
- if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
- PrintError("Could not translate ring descriptor address\n");
- return -1;
- }
-
- if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
- PrintError("Could not translate ring available address\n");
- return -1;
- }
-
- if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
- PrintError("Could not translate ring used address\n");
- return -1;
- }
-
- PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
- (void *)(vnet_state->cur_queue->ring_desc_addr),
- (void *)(vnet_state->cur_queue->ring_avail_addr),
- (void *)(vnet_state->cur_queue->ring_used_addr));
-
- PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n",
- vnet_state->cur_queue->desc, vnet_state->cur_queue->avail, vnet_state->cur_queue->used);
-
- if(vnet_state->queue[RECV_QUEUE].avail != NULL){
- vnet_state->ready = 1;
- }
+ if (length != 4) {
+ PrintError("Illegal write length for page frame number\n");
+ return -1;
+ }
+
- //No notify when there is pkt tx from guest
- if(vnet_state->queue[XMIT_QUEUE].used != NULL){
- vnet_state->queue[XMIT_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
- }
+ vnet_state->cur_queue->pfn = pfn;
- } else {
- PrintError("Illegal write length for page frame number\n");
+ vnet_state->cur_queue->ring_desc_addr = page_addr ;
+ vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
+ vnet_state->cur_queue->ring_used_addr = ( vnet_state->cur_queue->ring_avail_addr + \
+ sizeof(struct vring_avail) + \
+ (QUEUE_SIZE * sizeof(uint16_t)));
+
+ // round up to next page boundary.
+ vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
+
+ if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
+ PrintError("Could not translate ring descriptor address\n");
return -1;
}
+
+ if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
+ PrintError("Could not translate ring available address\n");
+ return -1;
+ }
+
+ if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
+ PrintError("Could not translate ring used address\n");
+ return -1;
+ }
+
+ PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
+ (void *)(vnet_state->cur_queue->ring_desc_addr),
+ (void *)(vnet_state->cur_queue->ring_avail_addr),
+ (void *)(vnet_state->cur_queue->ring_used_addr));
+
+ PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n",
+ vnet_state->cur_queue->desc,
+ vnet_state->cur_queue->avail,
+ vnet_state->cur_queue->used);
+
+ if (vnet_state->queue[RECV_QUEUE].avail != NULL){
+ vnet_state->ready = 1;
+ }
+
+ //No notify when there is pkt tx from guest
+ if (vnet_state->queue[XMIT_QUEUE].used != NULL) {
+ vnet_state->queue[XMIT_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
+ }
+
break;
+ }
case VRING_Q_SEL_PORT:
vnet_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;