From: Jack Lange Date: Fri, 28 Jun 2013 20:45:28 +0000 (-0500) Subject: updates to enable functionality necessary for SEABIOS to run X-Git-Url: http://v3vee.org/palacios/gitweb/gitweb.cgi?a=commitdiff_plain;h=8070d5e87ee3b92cc95bdcb65b2906b0a55d2ea9;p=palacios.releases.git updates to enable functionality necessary for SEABIOS to run added fw_cfg framework, special mov instruction decoding also added NUMA framework files to compilation process --- diff --git a/linux_module/Makefile b/linux_module/Makefile index 6824eea..b27cf77 100644 --- a/linux_module/Makefile +++ b/linux_module/Makefile @@ -20,7 +20,9 @@ v3vee-y := palacios-stubs.o \ allow_devmem.o \ util-queue.o \ util-hashtable.o \ - linux-exts.o + linux-exts.o \ + buddy.o \ + numa.o v3vee-$(V3_CONFIG_CONSOLE) += iface-console.o v3vee-$(V3_CONFIG_FILE) += iface-file.o diff --git a/linux_module/buddy.c b/linux_module/buddy.c index e076fd7..01cf392 100644 --- a/linux_module/buddy.c +++ b/linux_module/buddy.c @@ -152,7 +152,7 @@ int buddy_add_pool(struct buddy_memzone * zone, return -1; } - mp = kmalloc_node(sizeof(struct buddy_mempool), GFP_KERNEL, zone->node_id); + mp = palacios_alloc_node_extended(sizeof(struct buddy_mempool), GFP_KERNEL, zone->node_id); if (IS_ERR(mp)) { ERROR("Could not allocate mempool\n"); @@ -167,28 +167,28 @@ int buddy_add_pool(struct buddy_memzone * zone, /* Allocate a bitmap with 1 bit per minimum-sized block */ mp->num_blocks = (1UL << pool_order) / (1UL << zone->min_order); - mp->tag_bits = kmalloc_node( + mp->tag_bits = palacios_alloc_node_extended( BITS_TO_LONGS(mp->num_blocks) * sizeof(long), GFP_KERNEL, zone->node_id ); /* Initially mark all minimum-sized blocks as allocated */ bitmap_zero(mp->tag_bits, mp->num_blocks); - spin_lock_irqsave(&(zone->lock), flags); + palacios_spinlock_lock_irqsave(&(zone->lock), flags); ret = insert_mempool(zone, mp); - spin_unlock_irqrestore(&(zone->lock), flags); + palacios_spinlock_unlock_irqrestore(&(zone->lock), flags); if (ret == -1) { 
ERROR("Error: Could not insert mempool into zone\n"); - kfree(mp->tag_bits); - kfree(mp); + palacios_free(mp->tag_bits); + palacios_free(mp); return -1; } buddy_free(zone, base_addr, pool_order); - printk("Added memory pool (addr=%p), order=%lu\n", (void *)base_addr, pool_order); + INFO("Added memory pool (addr=%p), order=%lu\n", (void *)base_addr, pool_order); return 0; } @@ -222,8 +222,8 @@ static int __buddy_remove_mempool(struct buddy_memzone * zone, list_del(&(block->link)); rb_erase(&(pool->tree_node), &(zone->mempools)); - kfree(pool->tag_bits); - kfree(pool); + palacios_free(pool->tag_bits); + palacios_free(pool); zone->num_pools--; @@ -236,9 +236,9 @@ int buddy_remove_pool(struct buddy_memzone * zone, unsigned long flags = 0; int ret = 0; - spin_lock_irqsave(&(zone->lock), flags); + palacios_spinlock_lock_irqsave(&(zone->lock), flags); ret = __buddy_remove_mempool(zone, base_addr, force); - spin_unlock_irqrestore(&(zone->lock), flags); + palacios_spinlock_unlock_irqrestore(&(zone->lock), flags); return ret; } @@ -275,13 +275,13 @@ buddy_alloc(struct buddy_memzone *zone, unsigned long order) order = zone->min_order; } - printk("zone=%p, order=%lu\n", zone, order); + INFO("zone=%p, order=%lu\n", zone, order); - spin_lock_irqsave(&(zone->lock), flags); + palacios_spinlock_lock_irqsave(&(zone->lock), flags); for (j = order; j <= zone->max_order; j++) { - printk("Order iter=%lu\n", j); + INFO("Order iter=%lu\n", j); /* Try to allocate the first block in the order j list */ list = &zone->avail[j]; @@ -296,10 +296,10 @@ buddy_alloc(struct buddy_memzone *zone, unsigned long order) mark_allocated(mp, block); - printk("pool=%p, block=%p, order=%lu, j=%lu\n", mp, block, order, j); + INFO("pool=%p, block=%p, order=%lu, j=%lu\n", mp, block, order, j); /* - spin_unlock_irqrestore(&(zone->lock), flags); + palacios_spinlock_unlock_irqrestore(&(zone->lock), flags); return 0; */ @@ -315,12 +315,12 @@ buddy_alloc(struct buddy_memzone *zone, unsigned long order) 
mp->num_free_blocks -= (1UL << (order - zone->min_order)); - spin_unlock_irqrestore(&(zone->lock), flags); + palacios_spinlock_unlock_irqrestore(&(zone->lock), flags); return __pa(block); } - spin_unlock_irqrestore(&(zone->lock), flags); + palacios_spinlock_unlock_irqrestore(&(zone->lock), flags); return (uintptr_t)NULL; } @@ -359,13 +359,13 @@ buddy_free( } - spin_lock_irqsave(&(zone->lock), flags); + palacios_spinlock_lock_irqsave(&(zone->lock), flags); pool = find_mempool(zone, addr); if ((pool == NULL) || (order > pool->pool_order)) { WARNING("Attempted to free an invalid page address (%p)\n", (void *)addr); - spin_unlock_irqrestore(&(zone->lock), flags); + palacios_spinlock_unlock_irqrestore(&(zone->lock), flags); return; } @@ -374,8 +374,8 @@ buddy_free( block = (struct block *) __va(addr); if (is_available(pool, block)) { - printk(KERN_ERR "Error: Freeing an available block\n"); - spin_unlock_irqrestore(&(zone->lock), flags); + ERROR("Error: Freeing an available block\n"); + palacios_spinlock_unlock_irqrestore(&(zone->lock), flags); return; } @@ -406,7 +406,7 @@ buddy_free( mark_available(pool, block); list_add(&(block->link), &(zone->avail[order])); - spin_unlock_irqrestore(&(zone->lock), flags); + palacios_spinlock_unlock_irqrestore(&(zone->lock), flags); } @@ -433,7 +433,7 @@ zone_mem_show(struct seq_file * s, void * v) { seq_printf(s, " Zone Max Order=%lu, Min Order=%lu\n", zone->max_order, zone->min_order); - spin_lock_irqsave(&(zone->lock), flags); + palacios_spinlock_lock_irqsave(&(zone->lock), flags); for (i = zone->min_order; i <= zone->max_order; i++) { @@ -465,7 +465,7 @@ zone_mem_show(struct seq_file * s, void * v) { } } - spin_unlock_irqrestore(&(zone->lock), flags); + palacios_spinlock_unlock_irqrestore(&(zone->lock), flags); return 0; } @@ -473,7 +473,7 @@ zone_mem_show(struct seq_file * s, void * v) { static int zone_proc_open(struct inode * inode, struct file * filp) { struct proc_dir_entry * proc_entry = PDE(inode); - printk("proc_entry at 
%p, data at %p\n", proc_entry, proc_entry->data); + INFO("proc_entry at %p, data at %p\n", proc_entry, proc_entry->data); return single_open(filp, zone_mem_show, proc_entry->data); } @@ -487,15 +487,17 @@ static struct file_operations zone_proc_ops = { }; +extern struct proc_dir_entry * palacios_proc_dir; void buddy_deinit(struct buddy_memzone * zone) { unsigned long flags; - spin_lock_irqsave(&(zone->lock), flags); + palacios_spinlock_lock_irqsave(&(zone->lock), flags); // for each pool, free it +#warning We really need to free the memory pools here - spin_unlock_irqrestore(&(zone->lock), flags); + palacios_spinlock_unlock_irqrestore(&(zone->lock), flags); { char proc_file_name[128]; @@ -507,8 +509,8 @@ void buddy_deinit(struct buddy_memzone * zone) { } - kfree(zone->avail); - kfree(zone); + palacios_free(zone->avail); + palacios_free(zone); return; } @@ -554,9 +556,9 @@ buddy_init( if (min_order > max_order) return NULL; - zone = kmalloc_node(sizeof(struct buddy_memzone), GFP_KERNEL, node_id); + zone = palacios_alloc_node_extended(sizeof(struct buddy_memzone), GFP_KERNEL, node_id); - printk("Allocated zone at %p\n", zone); + INFO("Allocated zone at %p\n", zone); if (IS_ERR(zone)) { ERROR("Could not allocate memzone\n"); @@ -570,9 +572,9 @@ buddy_init( zone->node_id = node_id; /* Allocate a list for every order up to the maximum allowed order */ - zone->avail = kmalloc_node((max_order + 1) * sizeof(struct list_head), GFP_KERNEL, zone->node_id); + zone->avail = palacios_alloc_node_extended((max_order + 1) * sizeof(struct list_head), GFP_KERNEL, zone->node_id); - printk("Allocated free lists at %p\n", zone->avail); + INFO("Allocated free lists at %p\n", zone->avail); /* Initially all lists are empty */ for (i = 0; i <= max_order; i++) { @@ -580,11 +582,11 @@ buddy_init( } - spin_lock_init(&(zone->lock)); + palacios_spinlock_init(&(zone->lock)); zone->mempools.rb_node = NULL; - printk("Allocated zone at %p\n", zone); + INFO("Allocated zone at %p\n", zone); { struct 
proc_dir_entry * zone_entry = NULL; @@ -598,7 +600,7 @@ buddy_init( zone_entry->proc_fops = &zone_proc_ops; zone_entry->data = zone; } else { - printk(KERN_ERR "Error creating memory zone proc file\n"); + ERROR("Error creating memory zone proc file\n"); } } diff --git a/linux_module/main.c b/linux_module/main.c index ab6d9f2..f465eaa 100644 --- a/linux_module/main.c +++ b/linux_module/main.c @@ -57,7 +57,7 @@ int mod_frees = 0; static int v3_major_num = 0; static struct v3_guest * guest_map[MAX_VMS] = {[0 ... MAX_VMS - 1] = 0}; -static struct proc_dir_entry *dir = 0; +struct proc_dir_entry * palacios_proc_dir = NULL; struct class * v3_class = NULL; static struct cdev ctrl_dev; @@ -232,7 +232,7 @@ static struct file_operations v3_ctrl_fops = { struct proc_dir_entry *palacios_get_procdir(void) { - return dir; + return palacios_proc_dir; } @@ -327,17 +327,6 @@ static int read_guests(char * buf, char ** start, off_t off, int count, return len; } -static int show_mem(char * buf, char ** start, off_t off, int count, - int * eof, void * data) -{ - int len = 0; - - len = snprintf(buf,count, "%p\n", (void *)get_palacios_base_addr()); - len += snprintf(buf+len,count-len, "%lld\n", get_palacios_num_pages()); - - return len; -} - @@ -397,11 +386,11 @@ static int __init v3_init(void) { goto failure1; } - dir = proc_mkdir("v3vee", NULL); - if(dir) { + palacios_proc_dir = proc_mkdir("v3vee", NULL); + if (palacios_proc_dir) { struct proc_dir_entry *entry; - entry = create_proc_read_entry("v3-guests", 0444, dir, + entry = create_proc_read_entry("v3-guests", 0444, palacios_proc_dir, read_guests, NULL); if (entry) { INFO("/proc/v3vee/v3-guests successfully created\n"); @@ -410,14 +399,6 @@ static int __init v3_init(void) { goto failure1; } - entry = create_proc_read_entry("v3-mem", 0444, dir, - show_mem, NULL); - if (entry) { - INFO("/proc/v3vee/v3-mem successfully added\n"); - } else { - ERROR("Could not create proc entry\n"); - goto failure1; - } } else { ERROR("Could not create 
proc entry\n"); goto failure1; @@ -486,8 +467,7 @@ static void __exit v3_exit(void) { palacios_deinit_mm(); - remove_proc_entry("v3-guests", dir); - remove_proc_entry("v3-mem", dir); + remove_proc_entry("v3-guests", palacios_proc_dir); remove_proc_entry("v3vee", NULL); DEBUG("Palacios Module Mallocs = %d, Frees = %d\n", mod_allocs, mod_frees); diff --git a/linux_module/numa.c b/linux_module/numa.c index c95244d..4c31731 100644 --- a/linux_module/numa.c +++ b/linux_module/numa.c @@ -4,9 +4,11 @@ #include -#include "palacios.h" #include +#include "palacios.h" + + #if 0 @@ -48,9 +50,7 @@ int create_numa_topology_from_user(void __user * argp) { /* Read in the CPU to Node mapping */ { - topology.cpu_to_node_map = kmalloc(GFP_KERNEL, - sizeof(u32) * - topology.num_cpus); + topology.cpu_to_node_map = palacios_alloc(sizeof(u32) * topology.num_cpus); if (IS_ERR(topology.cpu_to_node_map)) { ERROR("Could not allocate cpu to node map\n"); @@ -61,7 +61,7 @@ int create_numa_topology_from_user(void __user * argp) { if (copy_from_user(topology.cpu_to_node_map, argp, sizeof(u32) * topology.num_cpus)) { ERROR("Could not copy cpu to node map from user space\n"); - kfree(topology.cpu_to_node_map); + palacios_free(topology.cpu_to_node_map); return -1; } @@ -72,21 +72,19 @@ int create_numa_topology_from_user(void __user * argp) { { int i = 0; - topology.mem_to_node_map = kmalloc(GFP_KERNEL, - sizeof(struct mem_region) * - topology.num_mem_regions); + topology.mem_to_node_map = palacios_alloc(sizeof(struct mem_region) * topology.num_mem_regions); if (IS_ERR(topology.mem_to_node_map)) { ERROR("Could not allocate mem to node map\n"); - kfree(topology.cpu_to_node_map); + palacios_free(topology.cpu_to_node_map); return -1; } if (copy_from_user(topology.mem_to_node_map, argp, sizeof(struct mem_region) * topology.num_mem_regions)) { ERROR("Coudl not copy mem to node map from user space\n"); - kfree(topology.cpu_to_node_map); - kfree(topology.mem_to_node_map); + 
palacios_free(topology.cpu_to_node_map); + palacios_free(topology.mem_to_node_map); return -1; } @@ -105,14 +103,12 @@ int create_numa_topology_from_user(void __user * argp) { /* Read in the distance table */ { - topology.distance_table = kmalloc(GFP_KERNEL, - sizeof(u32) * - (topology.num_nodes * topology.num_nodes)); + topology.distance_table = palacios_alloc(sizeof(u32) * (topology.num_nodes * topology.num_nodes)); if (IS_ERR(topology.distance_table)) { ERROR("Could not allocate distance table\n"); - kfree(topology.cpu_to_node_map); - kfree(topology.mem_to_node_map); + palacios_free(topology.cpu_to_node_map); + palacios_free(topology.mem_to_node_map); return -1; } @@ -120,9 +116,9 @@ int create_numa_topology_from_user(void __user * argp) { if (copy_from_user(topology.distance_table, argp, sizeof(u32) * (topology.num_nodes * topology.num_nodes))) { ERROR("Could not copy distance table from user space\n"); - kfree(topology.cpu_to_node_map); - kfree(topology.mem_to_node_map); - kfree(topology.distance_table); + palacios_free(topology.cpu_to_node_map); + palacios_free(topology.mem_to_node_map); + palacios_free(topology.distance_table); return -1; } @@ -133,40 +129,40 @@ int create_numa_topology_from_user(void __user * argp) { int i = 0; int j = 0; - printk("Created NUMA topology from user space\n"); - printk("Number of Nodes: %d, CPUs: %d, MEM regions: %d\n", + INFO("Created NUMA topology from user space\n"); + INFO("Number of Nodes: %d, CPUs: %d, MEM regions: %d\n", topology.num_nodes, topology.num_cpus, topology.num_mem_regions); - printk("CPU mapping\n"); + INFO("CPU mapping\n"); for (i = 0; i < topology.num_cpus; i++) { - printk("\tCPU %d -> Node %d\n", i, topology.cpu_to_node_map[i]); + INFO("\tCPU %d -> Node %d\n", i, topology.cpu_to_node_map[i]); } - printk("Memory mapping\n"); + INFO("Memory mapping\n"); for (i = 0; i < topology.num_mem_regions; i++) { struct mem_region * region = &(topology.mem_to_node_map[i]); - printk("\tMEM %p - %p -> Node %d\n", + 
INFO("\tMEM %p - %p -> Node %d\n", region->start_addr, region->end_addr, region->node_id); } - printk("Distance Table\n"); + INFO("Distance Table\n"); for (i = 0; i < topology.num_nodes; i++) { - printk("\t%d", i); + INFO("\t%d", i); } - printk("\n"); + INFO("\n"); for (i = 0; i < topology.num_nodes; i++) { - printk("%d", i); + INFO("%d", i); for (j = 0; j < topology.num_nodes; j++) { - printk("\t%d", topology.distance_table[j + (i * topology.num_nodes)]); + INFO("\t%d", topology.distance_table[j + (i * topology.num_nodes)]); } - printk("\n"); + INFO("\n"); } diff --git a/linux_module/palacios.h b/linux_module/palacios.h index ca35565..3562165 100644 --- a/linux_module/palacios.h +++ b/linux_module/palacios.h @@ -135,6 +135,10 @@ void *palacios_allocate_pages(int num_pages, unsigned int alignment); void palacios_free_pages(void *page_addr, int num_pages); void *palacios_alloc(unsigned int size); void *palacios_alloc_extended(unsigned int size, unsigned int flags); +// FIX +// NEED A palacios_alloc_node wrapper +// +#define palacios_alloc_node_extended(size, flags, node) kmalloc_node(size,flags,node) void palacios_free(void *); void *palacios_valloc(unsigned int size); // use instead of vmalloc void palacios_vfree(void *); // use instead of vfree diff --git a/palacios/include/palacios/vm_guest.h b/palacios/include/palacios/vm_guest.h index 3f05a99..5272b0d 100644 --- a/palacios/include/palacios/vm_guest.h +++ b/palacios/include/palacios/vm_guest.h @@ -42,6 +42,7 @@ #include #include #include +#include #include @@ -167,6 +168,7 @@ struct v3_vm_info { char name[128]; v3_vm_class_t vm_class; + struct v3_fw_cfg_state fw_cfg_state; addr_t mem_size; /* In bytes for now */ uint32_t mem_align; diff --git a/palacios/include/palacios/vmm_instr_decoder.h b/palacios/include/palacios/vmm_instr_decoder.h index 6e9785e..a7c63fb 100644 --- a/palacios/include/palacios/vmm_instr_decoder.h +++ b/palacios/include/palacios/vmm_instr_decoder.h @@ -478,6 +478,24 @@ static inline int 
decode_cr(struct guest_info * core, return 0; } +static struct v3_segment * get_instr_segment(struct guest_info * core, struct x86_instr * instr) { + struct v3_segment * seg = &(core->segments.ds); + + if (instr->prefixes.cs_override) { + seg = &(core->segments.cs); + } else if (instr->prefixes.es_override) { + seg = &(core->segments.es); + } else if (instr->prefixes.ss_override) { + seg = &(core->segments.ss); + } else if (instr->prefixes.fs_override) { + seg = &(core->segments.fs); + } else if (instr->prefixes.gs_override) { + seg = &(core->segments.gs); + } + + return seg; +} + #define ADDR_MASK(val, length) ({ \ diff --git a/palacios/src/interfaces/Makefile b/palacios/src/interfaces/Makefile index 2e8b3e7..ae10d74 100644 --- a/palacios/src/interfaces/Makefile +++ b/palacios/src/interfaces/Makefile @@ -12,4 +12,4 @@ obj-$(V3_CONFIG_HOST_PMU) += vmm_pmu.o obj-$(V3_CONFIG_HOST_PWRSTAT) += vmm_pwrstat.o obj-y += null.o - +obj-y += vmm_numa.o diff --git a/palacios/src/interfaces/vmm_numa.c b/palacios/src/interfaces/vmm_numa.c index 9630cd9..4e2a76d 100644 --- a/palacios/src/interfaces/vmm_numa.c +++ b/palacios/src/interfaces/vmm_numa.c @@ -28,7 +28,7 @@ static struct v3_numa_hooks * numa_hooks = NULL; void V3_Init_NUMA(struct v3_numa_hooks * hooks) { numa_hooks = hooks; - V3_Print("V3 NUMA interface initialized\n"); + V3_Print(VM_NONE, VCORE_NONE, "V3 NUMA interface initialized\n"); return; } @@ -49,7 +49,7 @@ int v3_numa_gpa_to_node(struct v3_vm_info * vm, addr_t gpa) { } if (v3_gpa_to_hpa(&(vm->cores[0]), gpa, &hpa) == -1) { - PrintError("Tried to find NUMA node for invalid GPA (%p)\n", (void *)gpa); + PrintError(vm, VCORE_NONE, "Tried to find NUMA node for invalid GPA (%p)\n", (void *)gpa); return -1; } diff --git a/palacios/src/palacios/Makefile b/palacios/src/palacios/Makefile index cd53d35..18cdea1 100644 --- a/palacios/src/palacios/Makefile +++ b/palacios/src/palacios/Makefile @@ -44,6 +44,7 @@ obj-y := \ vmm_exits.o \ vmm_events.o \ vmm_perftune.o \ + 
vmm_fw_cfg.o obj-$(V3_CONFIG_XED) += vmm_xed.o diff --git a/palacios/src/palacios/vmm_config.c b/palacios/src/palacios/vmm_config.c index d8f1961..de646bf 100644 --- a/palacios/src/palacios/vmm_config.c +++ b/palacios/src/palacios/vmm_config.c @@ -452,6 +452,13 @@ static int post_config_vm(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) { } + + // Initialize fw_cfg state for VMM<->VM SEABIOS communication + if (v3_fw_cfg_init(vm) == -1) { + PrintError(vm, VCORE_NONE, "Error initializing Firmware Config (fw_cfg) state\n"); + return -1; + } + /* * Initialize configured devices */ diff --git a/palacios/src/palacios/vmm_config_class.h b/palacios/src/palacios/vmm_config_class.h index 7ce2277..bdd85f4 100644 --- a/palacios/src/palacios/vmm_config_class.h +++ b/palacios/src/palacios/vmm_config_class.h @@ -46,7 +46,7 @@ static int post_config_pc_core(struct guest_info * info, v3_cfg_tree_t * cfg) { static int post_config_pc(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) { #define VGABIOS_START 0x000c0000 -#define ROMBIOS_START 0x000f0000 +#define ROMBIOS_START 0x000e0000 /* layout vgabios */ { @@ -74,6 +74,15 @@ static int post_config_pc(struct v3_vm_info * vm, v3_cfg_tree_t * cfg) { } memcpy(rombios_dst, v3_rombios_start, v3_rombios_end - v3_rombios_start); + + // SEABIOS gets mapped into end of 4GB region + if (v3_add_shadow_mem(vm, V3_MEM_CORE_ANY, + 0xfffe0000, 0xffffffff, + (addr_t)V3_PAddr(rombios_dst)) == -1) { + PrintError(vm, VCORE_NONE, "Error mapping SEABIOS to end of memory\n"); + return -1; + } + } diff --git a/palacios/src/palacios/vmm_fw_cfg.c b/palacios/src/palacios/vmm_fw_cfg.c index 9550db7..e0c5b5b 100644 --- a/palacios/src/palacios/vmm_fw_cfg.c +++ b/palacios/src/palacios/vmm_fw_cfg.c @@ -142,7 +142,7 @@ static int fw_cfg_ctl_read(struct guest_info * core, uint16_t port, void * src, } static int fw_cfg_ctl_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * priv_data) { - V3_ASSERT(length == 2); + V3_ASSERT(core->vm_info, core, 
length == 2); struct v3_fw_cfg_state * cfg_state = (struct v3_fw_cfg_state *)priv_data; uint16_t key = *(uint16_t *)src; @@ -163,7 +163,7 @@ static int fw_cfg_ctl_write(struct guest_info * core, uint16_t port, void * src, static int fw_cfg_data_read(struct guest_info * core, uint16_t port, void * src, uint_t length, void * priv_data) { - V3_ASSERT(length == 1); + V3_ASSERT(core->vm_info, core, length == 1); struct v3_fw_cfg_state * cfg_state = (struct v3_fw_cfg_state *)priv_data; int arch = !!(cfg_state->cur_entry & FW_CFG_ARCH_LOCAL); @@ -185,7 +185,7 @@ static int fw_cfg_data_read(struct guest_info * core, uint16_t port, void * src, } static int fw_cfg_data_write(struct guest_info * core, uint16_t port, void * src, uint_t length, void * priv_data) { - V3_ASSERT(length == 1); + V3_ASSERT(core->vm_info, core, length == 1); struct v3_fw_cfg_state * cfg_state = (struct v3_fw_cfg_state *)priv_data; int arch = !!(cfg_state->cur_entry & FW_CFG_ARCH_LOCAL); @@ -212,14 +212,14 @@ static struct e820_table * e820_populate(struct v3_vm_info * vm) { int i = 0; if (vm->mem_map.e820_count > E820_MAX_COUNT) { - PrintError("Too much E820 table entries! (max is %d)\n", E820_MAX_COUNT); + PrintError(vm, VCORE_NONE,"Too many E820 table entries! 
(max is %d)\n", E820_MAX_COUNT); return NULL; } e820 = V3_Malloc(sizeof(struct e820_table)); if (e820 == NULL) { - PrintError("Out of memory!\n"); + PrintError(vm, VCORE_NONE, "Out of memory!\n"); return NULL; } @@ -248,7 +248,7 @@ int v3_fw_cfg_init(struct v3_vm_info * vm) { struct e820_table * e820 = e820_populate(vm); if (e820 == NULL) { - PrintError("Failed to populate E820 for FW interface!\n"); + PrintError(vm, VCORE_NONE, "Failed to populate E820 for FW interface!\n"); return -1; } @@ -260,7 +260,7 @@ int v3_fw_cfg_init(struct v3_vm_info * vm) { if (ret != 0) { // V3_Free(e820); - PrintError("Failed to hook FW CFG ports!\n"); + PrintError(vm, VCORE_NONE, "Failed to hook FW CFG ports!\n"); return -1; } @@ -320,7 +320,7 @@ int v3_fw_cfg_init(struct v3_vm_info * vm) { numa_fw_cfg = V3_Malloc((1 + vm->num_cores + num_nodes) * sizeof(uint64_t)); if (numa_fw_cfg == NULL) { - PrintError("Could not allocate fw_cfg NUMA config space\n"); + PrintError(vm, VCORE_NONE, "Could not allocate fw_cfg NUMA config space\n"); return -1; } @@ -369,7 +369,7 @@ int v3_fw_cfg_init(struct v3_vm_info * vm) { int vnode_id = 0; if ((!start_addr_str) || (!end_addr_str) || (!vnode_id_str)) { - PrintError("Invalid memory layout in configuration\n"); + PrintError(vm, VCORE_NONE, "Invalid memory layout in configuration\n"); V3_Free(numa_fw_cfg); return -1; } @@ -389,14 +389,14 @@ int v3_fw_cfg_init(struct v3_vm_info * vm) { { uint64_t region_start = 0; - V3_Print("NUMA CONFIG: (nodes=%llu)\n", numa_fw_cfg[0]); + V3_Print(vm, VCORE_NONE, "NUMA CONFIG: (nodes=%llu)\n", numa_fw_cfg[0]); for (i = 0; i < vm->num_cores; i++) { - V3_Print("\tCore %d -> Node %llu\n", i, numa_fw_cfg[core_offset + i]); + V3_Print(vm, VCORE_NONE, "\tCore %d -> Node %llu\n", i, numa_fw_cfg[core_offset + i]); } for (i = 0; i < num_nodes; i++) { - V3_Print("\tMem (%p - %p) -> Node %d\n", (void *)region_start, + V3_Print(vm, VCORE_NONE, "\tMem (%p - %p) -> Node %d\n", (void *)region_start, (void *)numa_fw_cfg[mem_offset + 
i], i); region_start += numa_fw_cfg[mem_offset + i]; diff --git a/palacios/src/palacios/vmm_v3dec.c b/palacios/src/palacios/vmm_v3dec.c index ea6fc53..c05aef1 100644 --- a/palacios/src/palacios/vmm_v3dec.c +++ b/palacios/src/palacios/vmm_v3dec.c @@ -267,6 +267,80 @@ static int parse_operands(struct guest_info * core, uint8_t * instr_ptr, break; } + case MOV_MEM2AL_8: + case MOV_MEM2AX: { + + /* Use AX for destination operand */ + instr->dst_operand.size = operand_width; + instr->dst_operand.type = REG_OPERAND; + instr->dst_operand.operand = (addr_t)&(core->vm_regs.rax); + instr->dst_operand.write = 1; + + /* Get the correct offset -- (seg + offset) */ + struct v3_segment * src_reg = get_instr_segment(core, instr); + addr_t offset = 0; + + if (addr_width == 2) { + offset = *(uint16_t *)instr_ptr; + } else if (addr_width == 4) { + offset = *(uint32_t *)instr_ptr; + } else if (addr_width == 8) { + offset = *(uint64_t *)instr_ptr; + } else { + PrintError(core->vm_info, core, "illegal address width for %s (width=%d)\n", + op_form_to_str(form), addr_width); + return -1; + } + + instr->src_operand.operand = ADDR_MASK(get_addr_linear(core, offset, src_reg), + get_addr_width(core, instr)); + + instr->src_operand.read = 1; + instr->src_operand.type = MEM_OPERAND; + instr->src_operand.size = addr_width; + + instr_ptr += addr_width; + instr->num_operands = 2; + + break; + } + case MOV_AL2MEM_8: + case MOV_AX2MEM: { + + /* Use AX for src operand */ + instr->src_operand.size = operand_width; + instr->src_operand.type = REG_OPERAND; + instr->src_operand.operand = (addr_t)&(core->vm_regs.rax); + instr->src_operand.write = 1; + + /* Get the correct offset -- (seg + offset) */ + struct v3_segment * dst_reg = get_instr_segment(core, instr); + addr_t offset = 0; + + if (addr_width == 2) { + offset = *(uint16_t *)instr_ptr; + } else if (addr_width == 4) { + offset = *(uint32_t *)instr_ptr; + } else if (addr_width == 8) { + offset = *(uint64_t *)instr_ptr; + } else { + 
PrintError(core->vm_info, core, "illegal address width for %s (width=%d)\n", + op_form_to_str(form), addr_width); + return -1; + } + + instr->dst_operand.operand = ADDR_MASK(get_addr_linear(core, offset, dst_reg), + get_addr_width(core, instr)); + + instr->dst_operand.read = 1; + instr->dst_operand.type = MEM_OPERAND; + instr->dst_operand.size = addr_width; + + instr_ptr += addr_width; + instr->num_operands = 2; + + break; + } case MOVSX_8: case MOVZX_8: { uint8_t reg_code = 0;