switch (op.type) {
case PALACIOS_HOST_DEV_USER_REQUEST_READ_GUEST: {
+ // possible overflow here, but only if the user is asking for too much...
void *temp = palacios_alloc(op.len);
DEEP_DEBUG_PRINT("palacios: hostdev: read guest\n");
return -EFAULT;
}
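/* A minimal sketch of the bound check the comment above alludes to;
 * MAX_HOST_DEV_XFER is a hypothetical cap, not an existing Palacios constant. */
#define MAX_HOST_DEV_XFER (16UL * 1024 * 1024)   /* assumed sane upper limit */
if (op.len == 0 || op.len > MAX_HOST_DEV_XFER) {
    ERROR("palacios: hostdev: rejecting oversized guest read request\n");
    return -EFAULT;
}
void *temp = palacios_alloc(op.len);   /* allocation size is now bounded */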
+ // overflow possible here for a very large request
if (resize_op(&(s->op),size-sizeof(struct palacios_user_keyed_stream_op))) {
ERROR("unable to resize op in user key push response\n");
palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
return -1;
}
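/* A minimal sketch of an underflow guard for the "size - sizeof(...)" above;
 * everything except the early size check is taken from the surrounding code. */
if (size < sizeof(struct palacios_user_keyed_stream_op)) {
    ERROR("user key push response is too small\n");
    palacios_spinlock_unlock_irqrestore(&(s->lock), flags);
    return -1;
}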
+ // overflow possible here, but only if this is a huge guest request (>4GB)
url = palacios_alloc(len);
if (!url) {
guest->img_size = user_image.size;
DEBUG("Palacios: Allocating kernel memory for guest image (%llu bytes)\n", user_image.size);
+ // overflow possible here, but only if the guest image is probably too large for the kernel anyway...
guest->img = palacios_valloc(guest->img_size);
if (!guest->img) {
out:
- if (mem) { palacios_vfree(mem); }
+ if (mem) { palacios_vfree(mem); } // dead code but kept for clarity
if (core) { palacios_vfree(core); }
if (base) { palacios_vfree(base); }
//INFO("num_cores=%u",num_cores);
+ // overflow possible here, but only for an insane number of cores
if (!(user_snap=palacios_alloc(sizeof(v3_mem_track_snapshot) + num_cores * sizeof(struct v3_core_mem_track)))) {
ERROR("palacios: cannot allocate memory for copying user snapshot\n");
goto fail;
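/* A minimal sketch of a multiplication-overflow guard for the allocation
 * above; MAX_SANE_CORES is a hypothetical limit, and (size_t)-1 stands in
 * for SIZE_MAX to stay header-free. */
#define MAX_SANE_CORES 4096
if (num_cores == 0 || num_cores > MAX_SANE_CORES ||
    (size_t)num_cores > ((size_t)-1 - sizeof(v3_mem_track_snapshot)) / sizeof(struct v3_core_mem_track)) {
    ERROR("palacios: implausible core count in memory tracking snapshot\n");
    goto fail;
}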
INFO("Could not allocate initial memory block for node %d below 4GB\n", node_id);
if (!pgs) {
ERROR("Could not allocate initial memory block for node %d without restrictions\n", node_id);
- BUG_ON(!pgs);
palacios_deinit_mm();
return -1;
}
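/* Context for the BUG_ON removal above: the check sits inside the
 * "if (!pgs)" branch, so BUG_ON(!pgs) would always fire there, turning a
 * plain allocation failure into a host panic instead of letting the code
 * fall through to palacios_deinit_mm() and return -1. */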
res->fd = v3_file_open(vm, path, flags);
- if (res->fd < 0) {
+ if (!res->fd) {
ERROR("failed to open underlying file\n");
goto clean_mem;
}
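/* Both this check and the swap-file open later in this section now treat a
 * zero/NULL return from v3_file_open() as failure rather than a negative
 * value, which is consistent with the function returning an opaque handle
 * instead of a unix-style descriptor (inferred from these call sites, not
 * verified against the interface header). */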
static int key_handler( struct cons_state * state, uint8_t ascii) {
PrintDebug(VM_NONE, VCORE_NONE, "Character received: 0x%x\n", ascii);
- // printable
- if (ascii < 0x80) {
- const struct key_code * key = &(ascii_to_key_code[ascii]);
- if (deliver_scan_code(state, (struct key_code *)key) == -1) {
- PrintError(VM_NONE, VCORE_NONE, "Could not deliver scan code to vm\n");
- return -1;
- }
-
- } else if (ascii == ESC_CHAR) { // Escape Key
+ if (ascii == ESC_CHAR) { // Escape Key
// This means that another 2 characters are pending
// receive them and deliver accordingly
char esc_seq[2] = {0, 0};
} else if (esc_seq[1] == 'D') { // LEFT ARROW
struct key_code left = { 0x4B, 0 };
deliver_scan_code(state, &left);
+ }
+
+ } else if (ascii < 0x80) { // printable
+ const struct key_code * key = &(ascii_to_key_code[ascii]);
+
+ if (deliver_scan_code(state, (struct key_code *)key) == -1) {
+ PrintError(VM_NONE, VCORE_NONE, "Could not deliver scan code to vm\n");
+ return -1;
}
+
} else {
PrintError(VM_NONE, VCORE_NONE, "Invalid character received from network (%c) (code=%d)\n",
ascii, ascii);
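/* Why the reordering above matters: ESC_CHAR (the ASCII escape character,
 * 0x1B) is below 0x80, so with the old ordering the "printable" branch
 * caught it first and the escape-sequence path (arrow keys, etc.) could
 * never run. Checking for ESC_CHAR before the ascii < 0x80 case fixes that. */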
#endif
switch(mode) {
+ // Note that the dead code here (for modes other than LONG and PROTECTED)
+ // is kept for clarity and parallelism with other implementations
case REAL:
case PROTECTED:
return invalidate_addr_32(info, inv_addr, actual_start, actual_end);
#endif
switch(mode) {
+ // dead code except for LONG and PROTECTED cases
+ // this is kept for clarity and parallelism with other implementations
case REAL:
case PROTECTED:
return invalidate_addr_32_range(info, inv_addr_start, inv_addr_end, actual_start, actual_end);
vcore_id--;
}
- if (vcore_id >= 0) {
+ if (vcore_id >= 0) { // dead code...
v3_stop_vm(vm);
return -1;
}
return -1;
}
+ // dead code if there are no events, but this is correct
for (i = 0; i < V3_EVENT_INVALID; i++) {
INIT_LIST_HEAD(&(map->events[i]));
}
struct v3_event_map * map = &(vm->event_map);
int i = 0;
+
+ // dead code if there are no events, but this is correct
for (i = 0; i < V3_EVENT_INVALID; i++) {
if (!list_empty(&(map->events[i]))) {
struct v3_notifier * tmp_notifier = NULL;
case PT_ENTRY_NOT_PRESENT:
return -1;
case PT_ENTRY_LARGE_PAGE:
- if ((ret == callback(info, PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
+ if ((ret = callback(info, PAGE_4MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_4MB;
}
return 0;
case PT_ENTRY_NOT_PRESENT:
return -1;
case PT_ENTRY_LARGE_PAGE:
- if ((ret == callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
+ if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_2MB;
}
return 0;
case PT_ENTRY_NOT_PRESENT:
return -1;
case PT_ENTRY_LARGE_PAGE:
- if ((ret == callback(info, PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
+ if ((ret = callback(info, PAGE_1GB, vaddr, (addr_t)V3_VAddr((void *)host_pde_pa), host_pde_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_1GB;
}
PrintError(info->vm_info, info, "1 Gigabyte Pages not supported\n");
case PT_ENTRY_NOT_PRESENT:
return -1;
case PT_ENTRY_LARGE_PAGE:
- if ((ret == callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
+ if ((ret = callback(info, PAGE_2MB, vaddr, (addr_t)V3_VAddr((void *)host_pte_pa), host_pte_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_2MB;
}
return 0;
}
- if ((ret == callback(info, PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
+ if ((ret = callback(info, PAGE_4MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_4MB;
}
return 0;
large_page_va = 0;
}
- if ((ret == callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
+ if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_2MB;
}
return 0;
large_page_va = 0;
}
- if ((ret == callback(info, PAGE_1GB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
+ if ((ret = callback(info, PAGE_1GB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_1GB;
}
PrintError(info->vm_info, info, "1 Gigabyte Pages not supported\n");
large_page_va = 0;
}
- if ((ret == callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
+ if ((ret = callback(info, PAGE_2MB, vaddr, large_page_va, large_page_pa, private_data)) != 0) {
return (ret == -1) ? -1 : PAGE_2MB;
}
return 0;
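/* Why the one-character "==" -> "=" fixes above matter: with "==" the
 * callback's return value is only compared against whatever "ret" already
 * held and then discarded, so errors from the callback were silently
 * ignored. Minimal illustration with a hypothetical callback cb(): */
int ret = 0;
if ((ret == cb()) != 0) { /* buggy: only the comparison result is tested; ret never changes */ }
if ((ret = cb()) != 0)  { /* fixed: ret receives cb()'s return value before the test */ }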
// Can we allocate the file?
- if ((vm->swap_state.swapfd = v3_file_open(vm,file, FILE_OPEN_MODE_READ | FILE_OPEN_MODE_WRITE | FILE_OPEN_MODE_CREATE))<0) {
+ if (!(vm->swap_state.swapfd = v3_file_open(vm,file, FILE_OPEN_MODE_READ | FILE_OPEN_MODE_WRITE | FILE_OPEN_MODE_CREATE))) {
PrintError(vm,VCORE_NONE,"swapper: cannot open or create swap file\n");
return -1;
} else {
!strcasecmp(strategy,"next_fit") ? V3_SWAP_NEXT_FIT :
!strcasecmp(strategy,"random") ? V3_SWAP_RANDOM :
!strcasecmp(strategy,"lru") ? V3_SWAP_LRU :
- !strcasecmp(strategy,"default") ? V3_SWAP_RANDOM :
+ !strcasecmp(strategy,"default") ? V3_SWAP_RANDOM : // identical branches for clarity
V3_SWAP_RANDOM;
vm->swap_state.host_mem_size=alloc;