// So the selector needs to be VV00
// and the base needs to be VV000
//
- core->rip=0;
- core->segments.cs.selector = icr->vec<<8;
- core->segments.cs.limit= 0xffff;
- core->segments.cs.base = icr->vec<<12;
+ core->rip = 0;
+ core->segments.cs.selector = icr->vec << 8;
+ core->segments.cs.limit = 0xffff;
+ core->segments.cs.base = icr->vec << 12;
PrintDebug("icc_bus: SIPI delivery (0x%x -> 0x%x:0x0) to core %u\n",
icr->vec, core->segments.cs.selector, core->cpu_id);
// Maybe need to adjust the APIC?
// We transition the target core to SIPI state
- core->cpu_mode=REAL; // note: locking should not be needed here
+ core->cpu_mode = REAL; // note: locking should not be needed here
// As with INIT, we should not need to do anything else
case IOAPIC_ARB_REG:
*op_val = ioapic->ioapic_arb_id.val;
break;
- default:
- {
- uint_t redir_index = (ioapic->index_reg - IOAPIC_REDIR_BASE_REG) >> 1;
- uint_t hi_val = (ioapic->index_reg - IOAPIC_REDIR_BASE_REG) % 1;
-
- if (redir_index > 0x3f) {
- PrintError("ioapic %u: Invalid redirection table entry %x\n", ioapic->ioapic_id.id, (uint32_t)redir_index);
- return -1;
- }
- if (hi_val) {
- *op_val = ioapic->redir_tbl[redir_index].hi;
- } else {
- *op_val = ioapic->redir_tbl[redir_index].lo;
- }
+ default: {
+ uint_t redir_index = (ioapic->index_reg - IOAPIC_REDIR_BASE_REG) >> 1;
+ uint_t hi_val = (ioapic->index_reg - IOAPIC_REDIR_BASE_REG) % 1;
+
+ if (redir_index > 0x3f) {
+ PrintError("ioapic %u: Invalid redirection table entry %x\n", ioapic->ioapic_id.id, (uint32_t)redir_index);
+ return -1;
+ }
+
+ if (hi_val) {
+ *op_val = ioapic->redir_tbl[redir_index].hi;
+ } else {
+ *op_val = ioapic->redir_tbl[redir_index].lo;
}
+ }
}
}
int io_range_size;
v3_lock_t lock;
- ulong_t pkt_sent, pkt_recv, pkt_drop, tx_exit, rx_exit, total_exit;
+ uint32_t pkt_sent;
+ uint32_t pkt_recv;
+ uint32_t pkt_drop;
+ uint32_t tx_exit;
+ uint32_t rx_exit;
+ uint32_t total_exit;
+
int ready;
};
uint8_t status = 0;
- PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n", desc_cnt, q->cur_avail_idx % QUEUE_SIZE, desc_idx);
+ PrintDebug("VNET Bridge: CMD: Descriptor Count=%d, index=%d, desc_idx=%d\n",
+ desc_cnt, q->cur_avail_idx % QUEUE_SIZE, desc_idx);
if (desc_cnt < 3) {
PrintError("VNET Bridge cmd must include at least 3 descriptors (cnt=%d)\n", desc_cnt);
int ret_val = -1;
unsigned long flags;
uint16_t sent;
- struct v3_vnet_pkt *pkt;
+ struct v3_vnet_pkt * pkt = NULL;
- if(pkt_num <= 0)
+ if (pkt_num <= 0) {
return 0;
+ }
flags = v3_lock_irqsave(vnet_state->lock);
if (q->ring_avail_addr == 0) {
PrintError("Queue is not set\n");
- goto exit;
+ v3_unlock_irqrestore(vnet_state->lock, flags);
+ return ret_val;
}
PrintDebug("VNET Bridge: RX: running on cpu: %d, num of pkts: %d\n", V3_Get_CPU(), pkt_num);
- for(sent = 0; sent < pkt_num; sent ++) {
+ for (sent = 0; sent < pkt_num; sent++) {
pkt = &vnet_pkts[sent];
- vnet_state->pkt_recv ++;
+ vnet_state->pkt_recv++;
if (q->cur_avail_idx != q->avail->index) {
uint16_t pkt_idx = q->avail->ring[q->cur_avail_idx % q->queue_size];
if (v3_gpa_to_hva(&(vm->cores[0]), pkt_desc->addr_gpa, (addr_t *)&(virtio_pkt)) == -1) {
PrintError("Could not translate buffer address\n");
- goto exit;
+ v3_unlock_irqrestore(vnet_state->lock, flags);
+ return ret_val;
}
PrintDebug("VNET Bridge: RX: pkt sent to guest pkt size: %d, dst link: %d\n", pkt->size, pkt->dst_id);
q->used->index++;
q->cur_avail_idx++;
} else {
- vnet_state->pkt_drop ++;
+ vnet_state->pkt_drop++;
v3_vnet_disable_bridge();
}
}
- if(sent == 0){
- goto exit;
+ if (sent == 0) {
+ v3_unlock_irqrestore(vnet_state->lock, flags);
+ return ret_val;
}
if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
#ifdef CONFIG_VNET_PROFILE
    // BUG FIX: the counters are uint32_t (see the struct change above in
    // this patch); "%ld" makes PrintError pull 64-bit varargs for
    // int-promoted 32-bit arguments on LP64 -- use "%u".
    if (vnet_state->pkt_recv % 200000 == 0) {
	PrintError("Vnet Bridge: sent: %u, rxed: %u, dropped: %u, total exit: %u, tx exit: %u, rx exit: %u\n",
		   vnet_state->pkt_sent,
		   vnet_state->pkt_recv,
		   vnet_state->pkt_drop,
		   vnet_state->total_exit,
		   vnet_state->tx_exit,
		   vnet_state->rx_exit);
    }
#endif
-exit:
-
v3_unlock_irqrestore(vnet_state->lock, flags);
-
+
return ret_val;
+
}
-static void vnet_pkt_input_xcall(void *data){
- struct v3_vnet_bridge_input_args *args = (struct v3_vnet_bridge_input_args *)data;
+static void vnet_pkt_input_xcall(void * data) {
+ struct v3_vnet_bridge_input_args * args = (struct v3_vnet_bridge_input_args *)data;
vnet_pkt_input_cb(args->vm, args->vnet_pkts, args->pkt_num, args->private_data);
}
-static int handle_pkt_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state)
-{
+static int handle_pkt_kick(struct guest_info * core, struct virtio_vnet_state * vnet_state) {
struct virtio_queue * q = &(vnet_state->queue[XMIT_QUEUE]);
unsigned long flags = 0;
int recvd = 0;
-
+ int cpu = V3_Get_CPU();
+
flags = v3_lock_irqsave(vnet_state->lock);
if (q->ring_avail_addr == 0) {
- goto exit;
+ v3_unlock_irqrestore(vnet_state->lock,flags);
+ return 0;
}
while (q->cur_avail_idx != q->avail->index) {
q->used->ring[q->used->index % q->queue_size].length = pkt_desc->length; // What do we set this to????
q->used->index++;
- vnet_state->pkt_sent ++;
- recvd ++;
+ vnet_state->pkt_sent++;
+ recvd++;
q->cur_avail_idx++;
}
- if(recvd == 0){
- goto exit;
+ if (recvd == 0) {
+ v3_unlock_irqrestore(vnet_state->lock,flags);
+ return 0;
}
//PrintError("In polling get %d\n", recvd);
//if on the dom0 core, interrupt the domU core to poll pkts
//otherwise, call the polling directly
- int cpu = V3_Get_CPU();
- if(vnet_state->vm->cores[0].cpu_id == cpu){
- cpu = (cpu == 0)?1:0;
+
+
+ if (vnet_state->vm->cores[0].cpu_id == cpu) {
+ cpu = (cpu == 0) ? 1 : 0;
v3_interrupt_cpu(vnet_state->vm, cpu, V3_VNET_POLLING_VECTOR);
- }else{
+ } else {
v3_vnet_polling();
}
- if((vnet_state->pkt_sent % (QUEUE_SIZE/20)) == 0) { //optimized for guest's, batch the interrupts
- if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
- v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
- vnet_state->virtio_cfg.pci_isr = 0x1;
- }
+ if ((vnet_state->pkt_sent % (QUEUE_SIZE/20)) == 0) {
+ //optimized for guest's, batch the interrupts
+
+ if (!(q->avail->flags & VIRTIO_NO_IRQ_FLAG)) {
+ v3_pci_raise_irq(vnet_state->pci_bus, 0, vnet_state->pci_dev);
+ vnet_state->virtio_cfg.pci_isr = 0x1;
+ }
}
-
+
#ifdef CONFIG_VNET_PROFILE
    // BUG FIX: counters are uint32_t -- "%u", not "%ld" (varargs size
    // mismatch on LP64 would print garbage / walk the arg list wrong).
    if (vnet_state->pkt_sent % 200000 == 0) {
	PrintError("Vnet Bridge: sent: %u, rxed: %u, dropped: %u, total exit: %u, tx exit: %u, rx exit: %u\n",
		   vnet_state->pkt_sent,
		   vnet_state->pkt_recv,
		   vnet_state->pkt_drop,
		   vnet_state->total_exit,
		   vnet_state->tx_exit,
		   vnet_state->rx_exit);
    }
#endif
-exit:
v3_unlock_irqrestore(vnet_state->lock,flags);
return 0;
}
-static int polling_pkt_from_guest(struct v3_vm_info * vm, void *private_data){
+static int polling_pkt_from_guest(struct v3_vm_info * vm, void *private_data) {
struct virtio_vnet_state * vnet_state = (struct virtio_vnet_state *)private_data;
return handle_pkt_kick(&(vm->cores[0]), vnet_state);
}
-static int handle_rx_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state)
-{
+static int handle_rx_kick(struct guest_info *core, struct virtio_vnet_state * vnet_state) {
v3_vnet_enable_bridge();
return 0;
port, length, *(uint32_t *)src);
PrintDebug("VNET Bridge: port idx=%d\n", port_idx);
- vnet_state->total_exit ++;
+ vnet_state->total_exit++;
switch (port_idx) {
case GUEST_FEATURES_PORT:
+
if (length != 4) {
PrintError("Illegal write length for guest features\n");
return -1;
}
+
vnet_state->virtio_cfg.guest_features = *(uint32_t *)src;
break;
- case VRING_PG_NUM_PORT:
- if (length == 4) {
- addr_t pfn = *(uint32_t *)src;
- addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);
+ case VRING_PG_NUM_PORT: {
- vnet_state->cur_queue->pfn = pfn;
-
- vnet_state->cur_queue->ring_desc_addr = page_addr ;
- vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
- vnet_state->cur_queue->ring_used_addr = ( vnet_state->cur_queue->ring_avail_addr + \
- sizeof(struct vring_avail) + \
- (QUEUE_SIZE * sizeof(uint16_t)));
-
- // round up to next page boundary.
- vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
+ addr_t pfn = *(uint32_t *)src;
+ addr_t page_addr = (pfn << VIRTIO_PAGE_SHIFT);
- if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
- PrintError("Could not translate ring descriptor address\n");
- return -1;
- }
-
- if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
- PrintError("Could not translate ring available address\n");
- return -1;
- }
-
- if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
- PrintError("Could not translate ring used address\n");
- return -1;
- }
-
- PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
- (void *)(vnet_state->cur_queue->ring_desc_addr),
- (void *)(vnet_state->cur_queue->ring_avail_addr),
- (void *)(vnet_state->cur_queue->ring_used_addr));
-
- PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n",
- vnet_state->cur_queue->desc, vnet_state->cur_queue->avail, vnet_state->cur_queue->used);
-
- if(vnet_state->queue[RECV_QUEUE].avail != NULL){
- vnet_state->ready = 1;
- }
+ if (length != 4) {
+ PrintError("Illegal write length for page frame number\n");
+ return -1;
+ }
+
- //No notify when there is pkt tx from guest
- if(vnet_state->queue[XMIT_QUEUE].used != NULL){
- vnet_state->queue[XMIT_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
- }
+ vnet_state->cur_queue->pfn = pfn;
- } else {
- PrintError("Illegal write length for page frame number\n");
+ vnet_state->cur_queue->ring_desc_addr = page_addr ;
+ vnet_state->cur_queue->ring_avail_addr = page_addr + (QUEUE_SIZE * sizeof(struct vring_desc));
+ vnet_state->cur_queue->ring_used_addr = ( vnet_state->cur_queue->ring_avail_addr + \
+ sizeof(struct vring_avail) + \
+ (QUEUE_SIZE * sizeof(uint16_t)));
+
+ // round up to next page boundary.
+ vnet_state->cur_queue->ring_used_addr = (vnet_state->cur_queue->ring_used_addr + 0xfff) & ~0xfff;
+
+ if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_desc_addr, (addr_t *)&(vnet_state->cur_queue->desc)) == -1) {
+ PrintError("Could not translate ring descriptor address\n");
return -1;
}
+
+ if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_avail_addr, (addr_t *)&(vnet_state->cur_queue->avail)) == -1) {
+ PrintError("Could not translate ring available address\n");
+ return -1;
+ }
+
+ if (v3_gpa_to_hva(core, vnet_state->cur_queue->ring_used_addr, (addr_t *)&(vnet_state->cur_queue->used)) == -1) {
+ PrintError("Could not translate ring used address\n");
+ return -1;
+ }
+
+ PrintDebug("VNET Bridge: RingDesc_addr=%p, Avail_addr=%p, Used_addr=%p\n",
+ (void *)(vnet_state->cur_queue->ring_desc_addr),
+ (void *)(vnet_state->cur_queue->ring_avail_addr),
+ (void *)(vnet_state->cur_queue->ring_used_addr));
+
+ PrintDebug("VNET Bridge: RingDesc=%p, Avail=%p, Used=%p\n",
+ vnet_state->cur_queue->desc,
+ vnet_state->cur_queue->avail,
+ vnet_state->cur_queue->used);
+
+ if (vnet_state->queue[RECV_QUEUE].avail != NULL){
+ vnet_state->ready = 1;
+ }
+
+ //No notify when there is pkt tx from guest
+ if (vnet_state->queue[XMIT_QUEUE].used != NULL) {
+ vnet_state->queue[XMIT_QUEUE].used->flags |= VRING_NO_NOTIFY_FLAG;
+ }
+
break;
+ }
case VRING_Q_SEL_PORT:
vnet_state->virtio_cfg.vring_queue_selector = *(uint16_t *)src;
};
-static int vnet_send(uint8_t * buf, uint32_t len, void * private_data, struct vm_device *dest_dev){
- struct vnet_nic_state *vnetnic = (struct vnet_nic_state *)private_data;
-
+static int vnet_send(uint8_t * buf, uint32_t len, void * private_data, struct vm_device * dest_dev){
+ struct vnet_nic_state * vnetnic = (struct vnet_nic_state *)private_data;
struct v3_vnet_pkt pkt;
+
pkt.size = len;
pkt.src_type = LINK_INTERFACE;
pkt.src_id = vnetnic->vnet_dev_id;
}
static int register_to_vnet(struct v3_vm_info * vm,
- struct vnet_nic_state *vnet_nic,
- char *dev_name,
- uchar_t mac[6]) {
-
+ struct vnet_nic_state * vnet_nic,
+ char * dev_name,
+ uint8_t mac[6]) {
+
PrintDebug("Vnet-nic: register Vnet-nic device %s, state %p to VNET\n", dev_name, vnet_nic);
return v3_vnet_add_dev(vm, mac, virtio_input, (void *)vnet_nic);
};
-static int str2mac(char *macstr, char mac[6]){
- char hex[2], *s = macstr;
+static int str2mac(char * macstr, uint8_t mac[6]){
+ uint8_t hex[2];
int i = 0;
+ char * s = macstr;
- while(s){
+ while (s) {
memcpy(hex, s, 2);
mac[i++] = (char)atox(hex);
- if (i == 6) return 0;
- s=strchr(s, ':');
- if(s) s++;
+
+ if (i == 6) {
+ return 0;
+ }
+
+ s = strchr(s, ':');
+
+ if (s) {
+ s++;
+ }
}
return -1;
struct vnet_nic_state * vnetnic = NULL;
char * dev_id = v3_cfg_val(cfg, "ID");
char * macstr = NULL;
- char mac[6];
int vnet_dev_id = 0;
-
v3_cfg_tree_t * frontend_cfg = v3_cfg_subtree(cfg, "frontend");
+
macstr = v3_cfg_val(frontend_cfg, "mac");
if (macstr == NULL) {
- PrintDebug("Vnet-nic: No Mac specified\n");
- } else {
- str2mac(macstr, mac);
+ PrintDebug("Vnet-nic configuration error: No Mac specified\n");
+ return -1;
}
vnetnic = (struct vnet_nic_state *)V3_Malloc(sizeof(struct vnet_nic_state));
return -1;
}
+
+
vnetnic->net_ops.send = vnet_send;
- memcpy(vnetnic->mac, mac, 6);
+ str2mac(macstr, vnetnic->mac);
vnetnic->vm = vm;
if (v3_dev_connect_net(vm, v3_cfg_val(frontend_cfg, "tag"),
}
PrintDebug("Vnet-nic: Connect %s to frontend %s\n",
- dev_id, v3_cfg_val(frontend_cfg, "tag"));
+ dev_id, v3_cfg_val(frontend_cfg, "tag"));
if ((vnet_dev_id = register_to_vnet(vm, vnetnic, dev_id, vnetnic->mac)) == -1) {
PrintError("Vnet-nic device %s (mac: %s) fails to registered to VNET\n", dev_id, macstr);
+ return -1;
}
- vnetnic->vnet_dev_id = vnet_dev_id;
-
- PrintDebug("Vnet-nic device %s (mac: %s, %ld) registered to VNET\n", dev_id, macstr, *((ulong_t *)vnetnic->mac));
+ vnetnic->vnet_dev_id = vnet_dev_id;
-//for temporary hack for vnet bridge test
-#if 1
- {
- uchar_t zeromac[6] = {0,0,0,0,0,0};
-
- if(!strcmp(dev_id, "vnet_nic")){
- struct v3_vnet_route route;
-
- route.dst_id = vnet_dev_id;
- route.dst_type = LINK_INTERFACE;
- route.src_id = 0;
- route.src_type = LINK_EDGE;
- memcpy(route.dst_mac, zeromac, 6);
- route.dst_mac_qual = MAC_ANY;
- memcpy(route.src_mac, zeromac, 6);
- route.src_mac_qual = MAC_ANY;
- v3_vnet_add_route(route);
-
-
- route.dst_id = 0;
- route.dst_type = LINK_EDGE;
- route.src_id = vnet_dev_id;
- route.src_type = LINK_INTERFACE;
- memcpy(route.dst_mac, zeromac, 6);
- route.dst_mac_qual = MAC_ANY;
- memcpy(route.src_mac, zeromac, 6);
- route.src_mac_qual = MAC_ANY;
-
- v3_vnet_add_route(route);
- }
- }
-#endif
+ PrintDebug("Vnet-nic device %s (mac: %s, %ld) registered to VNET\n", dev_id, macstr, *((uint32_t *)vnetnic->mac));
-//for temporary hack for Linux bridge (w/o encapuslation) test
-#if 0
- {
- static int vnet_nic_guestid = -1;
- static int vnet_nic_dom0 = -1;
- uchar_t zeromac[6] = {0,0,0,0,0,0};
-
- if(!strcmp(dev_id, "vnet_nic")){ //domu
- vnet_nic_guestid = vnet_dev_id;
- }
- if (!strcmp(dev_id, "vnet_nic_dom0")){
- vnet_nic_dom0 = vnet_dev_id;
- }\r
-
- if(vnet_nic_guestid != -1 && vnet_nic_dom0 !=-1){
- struct v3_vnet_route route;
-
- route.src_id = vnet_nic_guestid;
- route.src_type = LINK_INTERFACE;
- route.dst_id = vnet_nic_dom0;
- route.dst_type = LINK_INTERFACE;
- memcpy(route.dst_mac, zeromac, 6);
- route.dst_mac_qual = MAC_ANY;
- memcpy(route.src_mac, zeromac, 6);
- route.src_mac_qual = MAC_ANY;
- v3_vnet_add_route(route);
-
-
- route.src_id = vnet_nic_dom0;
- route.src_type = LINK_INTERFACE;
- route.dst_id = vnet_nic_guestid;
- route.dst_type = LINK_INTERFACE;
- memcpy(route.dst_mac, zeromac, 6);
- route.dst_mac_qual = MAC_ANY;
- memcpy(route.src_mac, zeromac, 6);
- route.src_mac_qual = MAC_ANY;
-
- v3_vnet_add_route(route);
- }
- }
-#endif
return 0;
}
int num;
v3_lock_t lock;
struct v3_vnet_pkt pkts[BRIDGE_BUF_SIZE];
- uint8_t datas[ETHERNET_PACKET_LEN*BRIDGE_BUF_SIZE];
+ uint8_t datas[ETHERNET_PACKET_LEN * BRIDGE_BUF_SIZE];
};
struct vnet_brg_dev {
struct v3_vm_info * vm;
int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data);
- void (*xcall_input)(void *data);
- int (*polling_pkt)(struct v3_vm_info * vm, void *private_data);
+ void (*xcall_input)(void * data);
+ int (*polling_pkt)(struct v3_vm_info * vm, void * private_data);
int disabled;
int num_routes;
int num_devs;
- struct vnet_brg_dev *bridge;
+ struct vnet_brg_dev * bridge;
v3_lock_t lock;
#ifdef CONFIG_DEBUG_VNET
// Format a 6-byte MAC into buf as decimal octets, "d:d:d:d:d:d".
// The formatted string is at most 24 bytes (incl. NUL). Callers pass
// buffers as small as 50 bytes (print_route's char str[50]), so the
// snprintf bound must not exceed 50.
// BUG FIX: the old bound of 100 overstated the capacity of those
// 50-byte caller buffers.
static inline void mac_to_string(uint8_t mac[6], char * buf) {
    snprintf(buf, 50, "%d:%d:%d:%d:%d:%d",
	     mac[0], mac[1], mac[2],
	     mac[3], mac[4], mac[5]);
}
-static void print_route(struct vnet_route_info *route){
+static void print_route(struct vnet_route_info * route){
char str[50];
+ memset(str, 0, 50);
+
mac_to_string(route->route_def.src_mac, str);
PrintDebug("Src Mac (%s), src_qual (%d)\n",
str, route->route_def.src_mac_qual);
+
mac_to_string(route->route_def.dst_mac, str);
PrintDebug("Dst Mac (%s), dst_qual (%d)\n",
str, route->route_def.dst_mac_qual);
+
PrintDebug("Src dev id (%d), src type (%d)",
route->route_def.src_id,
route->route_def.src_type);
+
PrintDebug("Dst dev id (%d), dst type (%d)\n",
route->route_def.dst_id,
route->route_def.dst_type);
+
if (route->route_def.dst_type == LINK_INTERFACE) {
PrintDebug("dst_dev (%p), dst_dev_id (%d), dst_dev_input (%p), dst_dev_data (%p)\n",
- route->dst_dev,
- route->dst_dev->dev_id,
- route->dst_dev->input,
- route->dst_dev->private_data);
+ route->dst_dev,
+ route->dst_dev->dev_id,
+ route->dst_dev->input,
+ route->dst_dev->private_data);
}
}
-static void dump_routes(){
- struct vnet_route_info *route;
-
+static void dump_routes() {
+ struct vnet_route_info * route = NULL;
int i = 0;
+
PrintDebug("\n========Dump routes starts ============\n");
+
list_for_each_entry(route, &(vnet_state.routes), node) {
- PrintDebug("\nroute %d:\n", ++i);
-
+ PrintDebug("\nroute %d:\n", i++);
print_route(route);
}
+
PrintDebug("\n========Dump routes end ============\n");
}
list_for_each_entry(dev, &(vnet_state.devs), node) {
int dev_id = dev->dev_id;
- if (dev_id == idx)
+ if (dev_id == idx) {
return dev;
+ }
}
return NULL;
struct vnet_dev * dev = NULL;
list_for_each_entry(dev, &(vnet_state.devs), node) {
- if (!memcmp(dev->mac_addr, mac, 6))
+ if (memcmp(dev->mac_addr, mac, 6) == 0) {
return dev;
+ }
}
return NULL;
}
-int get_device_id_by_mac(char mac[6]){
-
- struct vnet_dev *dev = find_dev_by_mac(mac);
-
- if (dev == NULL)
+int get_device_id_by_mac(char mac[6]) {
+ struct vnet_dev * dev = find_dev_by_mac(mac);
+
+ if (dev == NULL) {
return -1;
-
+ }
+
return dev->dev_id;
}
int v3_vnet_add_route(struct v3_vnet_route route) {
struct vnet_route_info * new_route = NULL;
- unsigned long flags;
+ uint32_t flags = 0;
new_route = (struct vnet_route_info *)V3_Malloc(sizeof(struct vnet_route_info));
memset(new_route, 0, sizeof(struct vnet_route_info));
#if 0
static int flush_bridge_pkts(struct vnet_brg_dev *bridge){
- unsigned long flags;
- int num, start, send;
+ uint32_t flags;
+ int num;
+ int start;
+ int send;
struct v3_vnet_bridge_input_args args;
int cpu_id = bridge->vm->cores[0].cpu_id;
int current_core = V3_Get_CPU();
v3_unlock_irqrestore(bridge->recv_buf.lock, flags);
- if(bridge->disabled){
+ if (bridge->disabled) {
PrintDebug("VNET: In flush bridge pkts: Bridge is disabled\n");
return -1;
}
- if(num <= 2 && num > 0){
+ if (num <= 2 && num > 0) {
PrintDebug("VNET: In flush bridge pkts: %d\n", num);
}
- if(num > 0) {
+ if (num > 0) {
PrintDebug("VNET: In flush bridge pkts to bridge, cur_cpu %d, brige_core: %d\n", current_core, cpu_id);
- if (current_core == cpu_id){
- if ((start + num) < BRIDGE_BUF_SIZE){
+ if (current_core == cpu_id) {
+ if ((start + num) < BRIDGE_BUF_SIZE) {
bridge->input(bridge->vm, &(bridge->recv_buf.pkts[start]), num, bridge->private_data);
- }else {
+ } else {
bridge->input(bridge->vm, &(bridge->recv_buf.pkts[start]), (BRIDGE_BUF_SIZE - start), bridge->private_data);
send = num - (BRIDGE_BUF_SIZE - start);
bridge->input(bridge->vm, &(bridge->recv_buf.pkts[0]), send, bridge->private_data);
}
- }else {
+ } else {
args.vm = bridge->vm;
args.private_data = bridge->private_data;
- if ((start + num) < BRIDGE_BUF_SIZE){
+ if ((start + num) < BRIDGE_BUF_SIZE) {
args.pkt_num = num;
args.vnet_pkts = &(bridge->recv_buf.pkts[start]);
V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
- }else {
+ } else {
args.pkt_num = BRIDGE_BUF_SIZE - start;
args.vnet_pkts = &(bridge->recv_buf.pkts[start]);
V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
#endif
static int send_to_bridge(struct v3_vnet_pkt * pkt){
- struct vnet_brg_dev *bridge = vnet_state.bridge;
+ struct vnet_brg_dev * bridge = vnet_state.bridge;
if (bridge == NULL) {
PrintDebug("VNET: No bridge to sent data to links\n");
return -1;
}
- if(bridge->max_delayed_pkts <= 1){
- if(bridge->disabled){
+ if (bridge->max_delayed_pkts <= 1) {
+
+ if (bridge->disabled) {
PrintDebug("VNET: Bridge diabled\n");
return -1;
- }
+ }
-/*
- //avoid the cross-core call here
- int cpu_id = bridge->vm->cores[0].cpu_id;
- struct v3_vnet_bridge_input_args args;
-
- args.pkt_num = 1;
- args.vm = bridge->vm;
- args.vnet_pkts = pkt;
- args.private_data = bridge->private_data;
-
- V3_Call_On_CPU(cpu_id, bridge->xcall_input, (void *)&args);
-*/
bridge->input(bridge->vm, pkt, 1, bridge->private_data);
PrintDebug("VNET: sent one packet to the bridge\n");
- return 0;
- }
-
-/*
- unsigned long flags;
- int end, num=0;
- struct v3_vnet_pkt *buf;
-
- PrintDebug("VNET: send_to_bridge\n");
-
- flags = v3_lock_irqsave(bridge->recv_buf.lock);
-
- if(bridge->disabled && bridge->recv_buf.num >= BRIDGE_BUF_SIZE){
- PrintDebug("Bridge diabled and bridge receive buffer full\n");
- v3_unlock_irqrestore(bridge->recv_buf.lock, flags);//maybe should move this after copy
- num = bridge->recv_buf.num;
- goto exit;
}
-
- end = bridge->recv_buf.end;
- buf = &(bridge->recv_buf.pkts[end]);
-
- bridge->recv_buf.num ++;
- bridge->recv_buf.end ++;
- bridge->recv_buf.end %= BRIDGE_BUF_SIZE;
-
- num = bridge->recv_buf.num;
-
- v3_unlock_irqrestore(bridge->recv_buf.lock, flags);//maybe should move this after copy
- buf->size = pkt->size;
- buf->dst_id = pkt->dst_id;
- buf->src_id = pkt->src_id;
- buf->src_type = pkt->src_type;
- buf->dst_type = pkt->dst_type;
- memcpy(buf->header, pkt->header, ETHERNET_HEADER_LEN);
- memcpy(buf->data, pkt->data, pkt->size);
-
-exit:
-
- if (num >= bridge->max_delayed_pkts){
- flush_bridge_pkts(bridge);
- }
-*/
return 0;
}
int v3_vnet_send_pkt(struct v3_vnet_pkt * pkt, void * private_data) {
struct route_list * matched_routes = NULL;
- unsigned long flags;
- int i;
-
+ uint32_t flags = 0;
+ int i = 0;
+
#ifdef CONFIG_DEBUG_VNET
- {
+ {
struct eth_hdr * hdr = (struct eth_hdr *)(pkt->header);
char dest_str[100];
char src_str[100];
-
+ int cpu = V3_Get_CPU();
+
mac_to_string(hdr->src_mac, src_str);
mac_to_string(hdr->dst_mac, dest_str);
- int cpu = V3_Get_CPU();
PrintDebug("Vnet: on cpu %d, HandleDataOverLink. SRC(%s), DEST(%s), pkt size: %d\n", cpu, src_str, dest_str, pkt->size);
}
#endif
return 0;
}
-void v3_vnet_send_pkt_xcall(void * data){
+void v3_vnet_send_pkt_xcall(void * data) {
struct v3_vnet_pkt * pkt = (struct v3_vnet_pkt *)data;
v3_vnet_send_pkt(pkt, NULL);
}
-void v3_vnet_polling()
-{
- unsigned long flags;
- int num, start;
- struct v3_vnet_pkt *buf;
+void v3_vnet_polling() {
+ uint32_t flags = 0;
+ int num = 0;
+ int start = 0;
+ struct v3_vnet_pkt * buf = NULL;
PrintDebug("In vnet pollling: cpu %d\n", V3_Get_CPU());
PrintDebug("VNET: polling pkts %d\n", num);
- while(num > 0) {
+ while (num > 0) {
buf = &(vnet_state.in_buf.pkts[vnet_state.in_buf.start]);
v3_vnet_send_pkt(buf, NULL);
- vnet_state.in_buf.num --;
- vnet_state.in_buf.start ++;
+ vnet_state.in_buf.num--;
+ vnet_state.in_buf.start++;
vnet_state.in_buf.start %= BRIDGE_BUF_SIZE;
- num --;
+ num--;
}
v3_unlock_irqrestore(vnet_state.in_buf.lock, flags);
}
-int v3_vnet_rx(uchar_t *buf, uint16_t size, uint16_t src_id, uint8_t src_type){
- unsigned long flags;
- int end;
- struct v3_vnet_pkt *pkt;
+int v3_vnet_rx(uint8_t * buf, uint16_t size, uint16_t src_id, uint8_t src_type) {
+ uint32_t flags = 0;
+ int end = 0;
+ struct v3_vnet_pkt * pkt = NULL;
flags = v3_lock_irqsave(vnet_state.in_buf.lock);
end = vnet_state.in_buf.end;
pkt = &(vnet_state.in_buf.pkts[end]);
- if(vnet_state.in_buf.num > BRIDGE_BUF_SIZE){
+ if (vnet_state.in_buf.num > BRIDGE_BUF_SIZE){
PrintDebug("VNET: bridge rx: buffer full\n");
- goto exit;
+ v3_unlock_irqrestore(vnet_state.in_buf.lock, flags);
+ return 0;
}
- vnet_state.in_buf.num ++;
- vnet_state.in_buf.end ++;
+ vnet_state.in_buf.num++;
+ vnet_state.in_buf.end++;
vnet_state.in_buf.end %= BRIDGE_BUF_SIZE;
pkt->size = size;
memcpy(pkt->header, buf, ETHERNET_HEADER_LEN);
memcpy(pkt->data, buf, size);
-exit:
v3_unlock_irqrestore(vnet_state.in_buf.lock, flags);
}
-int v3_vnet_add_dev(struct v3_vm_info *vm, uint8_t mac[6],
+int v3_vnet_add_dev(struct v3_vm_info * vm, uint8_t mac[6],
int (*netif_input)(struct v3_vm_info * vm, struct v3_vnet_pkt * pkt, void * private_data),
void * priv_data){
struct vnet_dev * new_dev = NULL;
- unsigned long flags;
+ uint32_t flags = 0;
new_dev = (struct vnet_dev *)V3_Malloc(sizeof(struct vnet_dev));
}
PrintDebug("Vnet: Add Device: dev_id %d, input : %p, private_data %p\n",
- new_dev->dev_id, new_dev->input, new_dev->private_data);
+ new_dev->dev_id, new_dev->input, new_dev->private_data);
return new_dev->dev_id;
}
-void v3_vnet_heartbeat(struct guest_info *core){
+void v3_vnet_heartbeat(struct guest_info *core){
//static long last_time, cur_time;
- if(vnet_state.bridge == NULL)
+ if (vnet_state.bridge == NULL) {
return;
+ }
/*
if(vnet_state.bridge->max_delayed_pkts > 1){
if(V3_Get_CPU() != vnet_state.bridge->vm->cores[0].cpu_id){
int v3_vnet_add_bridge(struct v3_vm_info * vm,
int (*input)(struct v3_vm_info * vm, struct v3_vnet_pkt pkt[], uint16_t pkt_num, void * private_data),
- void (*xcall_input)(void *data),
+ void (*xcall_input)(void * data),
int (*poll_pkt)(struct v3_vm_info * vm, void * private_data),
uint16_t max_delayed_pkts,
long max_latency,
void * priv_data) {
- unsigned long flags;
+
+ uint32_t flags = 0;
int bridge_free = 0;
struct vnet_brg_dev * tmp_bridge = NULL;
*/
- tmp_bridge->max_delayed_pkts = (max_delayed_pkts<BRIDGE_BUF_SIZE)?max_delayed_pkts : BRIDGE_BUF_SIZE;
+ tmp_bridge->max_delayed_pkts = (max_delayed_pkts < BRIDGE_BUF_SIZE) ? max_delayed_pkts : BRIDGE_BUF_SIZE;
tmp_bridge->max_latency = max_latency;
// make this atomic to avoid possible race conditions
int v3_vnet_disable_bridge() {
- unsigned long flags;
+ uint32_t flags = 0;
flags = v3_lock_irqsave(vnet_state.lock);
int v3_vnet_enable_bridge() {
- unsigned long flags;
+ uint32_t flags = 0;
flags = v3_lock_irqsave(vnet_state.lock);
int V3_init_vnet() {
- int i;
+ int i = 0;
memset(&vnet_state, 0, sizeof(vnet_state));
vnet_state.in_buf.start = 0;
vnet_state.in_buf.end = 0;
vnet_state.in_buf.num = 0;
- if(v3_lock_init(&(vnet_state.in_buf.lock)) == -1){
+
+ if (v3_lock_init(&(vnet_state.in_buf.lock)) == -1){
PrintError("VNET: add bridge, error to initiate send buf lock\n");
}
- for(i = 0; i<BRIDGE_BUF_SIZE; i++){
- vnet_state.in_buf.pkts[i].data = &(vnet_state.in_buf.datas[i*ETHERNET_PACKET_LEN]);
+
+ for (i = 0; i < BRIDGE_BUF_SIZE; i++){
+ vnet_state.in_buf.pkts[i].data = &(vnet_state.in_buf.datas[i * ETHERNET_PACKET_LEN]);
}
+
PrintDebug("VNET: Receiving buffer initiated\n");
vnet_state.route_cache = v3_create_htable(0, &hash_fn, &hash_eq);