2 * This file is part of the Palacios Virtual Machine Monitor developed
3 * by the V3VEE Project with funding from the United States National
4 * Science Foundation and the Department of Energy.
6 * The V3VEE Project is a joint project between Northwestern University
7 * and the University of New Mexico. You can find out more at
10 * Copyright (c) 2008, Jack Lange <jarusl@cs.northwestern.edu>
11 * Copyright (c) 2008, The V3VEE Project <http://www.v3vee.org>
12 * All rights reserved.
14 * Author: Jack Lange <jarusl@cs.northwestern.edu>
16 * This is free software. You are permitted to use,
17 * redistribute, and modify it as specified in the file "V3VEE_LICENSE".
26 #include <palacios/vmm_mem.h>
27 #include <palacios/vmm_types.h>
28 #include <palacios/vmm_string.h>
33 /* utility definitions */
/* Sentinel "no VM" / "no vcore" values, used when a print or error call
 * has no associated virtual machine or virtual core context. */
#define VM_NONE ((struct v3_vm_info *)0)
#define VCORE_NONE ((struct guest_info *)0)

/* Translate a Palacios VM / vcore into the host-visible handle and core
 * index that the os_hooks print callback expects (see V3_Print).
 * Parameter names added to the prototypes for self-documentation. */
void *v3_get_host_vm(struct v3_vm_info *vm);
int v3_get_vcore(struct guest_info *vcore);
41 #define V3_Print(vm, vcore, fmt, args...) \
43 extern struct v3_os_hooks * os_hooks; \
44 if ((os_hooks) && (os_hooks)->print) { \
45 (os_hooks)->print(v3_get_host_vm(vm), v3_get_vcore(vcore), (fmt), ##args); \
50 #define PrintDebug(vm, vcore, fmt, args...) V3_Print(vm, vcore, "DEBUG: " fmt, ##args)
52 #define PrintError(vm, vcore, fmt, args...) V3_Print(vm, vcore, "ERROR at %s(%d): " fmt, __FILE__, __LINE__, ##args)
57 #define V3_AllocPages(num_pages) \
59 extern struct v3_os_hooks * os_hooks; \
61 if ((os_hooks) && (os_hooks)->allocate_pages) { \
62 ptr = (os_hooks)->allocate_pages(num_pages,PAGE_SIZE_4KB,-1,0,0); \
68 #define V3_AllocAlignedPages(num_pages, align) \
70 extern struct v3_os_hooks * os_hooks; \
72 if ((os_hooks) && (os_hooks)->allocate_pages) { \
73 ptr = (os_hooks)->allocate_pages(num_pages,align,-1,0,0); \
79 #define V3_AllocPagesNode(num_pages, node_id) \
81 extern struct v3_os_hooks * os_hooks; \
83 if ((os_hooks) && (os_hooks)->allocate_pages) { \
84 ptr = (os_hooks)->allocate_pages(num_pages, PAGE_SIZE_4KB, node_id,0,0); \
89 #define V3_AllocPagesExtended(num_pages, align, node_id, filter_func, filter_state) \
91 extern struct v3_os_hooks * os_hooks; \
93 if ((os_hooks) && (os_hooks)->allocate_pages) { \
94 ptr = (os_hooks)->allocate_pages(num_pages, align, node_id, filter_func, filter_state); \
101 #define V3_FreePages(page, num_pages) \
103 extern struct v3_os_hooks * os_hooks; \
104 if ((os_hooks) && (os_hooks)->free_pages) { \
105 (os_hooks)->free_pages(page, num_pages); \
110 #define V3_VAddr(addr) ({ \
111 extern struct v3_os_hooks * os_hooks; \
113 if ((os_hooks) && (os_hooks)->paddr_to_vaddr) { \
114 var = (os_hooks)->paddr_to_vaddr(addr); \
120 #define V3_PAddr(addr) ({ \
121 extern struct v3_os_hooks * os_hooks; \
123 if ((os_hooks) && (os_hooks)->vaddr_to_paddr) { \
124 var = (os_hooks)->vaddr_to_paddr(addr); \
131 #define V3_Malloc(size) ({ \
132 extern struct v3_os_hooks * os_hooks; \
134 if ((os_hooks) && (os_hooks)->malloc) { \
135 var = (os_hooks)->malloc(size); \
137 if (!var) PrintError(VM_NONE,VCORE_NONE,"MALLOC FAILURE. Memory LEAK!!\n"); \
// We need to check the hook structure at runtime to ensure it's safe
142 #define V3_Free(addr) \
144 extern struct v3_os_hooks * os_hooks; \
145 if ((os_hooks) && (os_hooks)->free) { \
146 (os_hooks)->free(addr); \
150 #define V3_VMalloc(size) ({ \
151 extern struct v3_os_hooks * os_hooks; \
153 if ((os_hooks) && (os_hooks)->vmalloc) { \
154 var = (os_hooks)->vmalloc(size); \
156 if (!var) PrintError(VM_NONE,VCORE_NONE,"VMALLOC FAILURE. Memory LEAK!!\n"); \
160 #define V3_VFree(addr) \
162 extern struct v3_os_hooks * os_hooks; \
163 if ((os_hooks) && (os_hooks)->vfree) { \
164 (os_hooks)->vfree(addr); \
168 // uint_t V3_CPU_KHZ();
169 #define V3_CPU_KHZ() ({ \
170 unsigned int khz = 0; \
171 extern struct v3_os_hooks * os_hooks; \
172 if ((os_hooks) && (os_hooks)->get_cpu_khz) { \
173 khz = (os_hooks)->get_cpu_khz(); \
181 #define V3_Hook_Interrupt(vm, irq) ({ \
183 extern struct v3_os_hooks * os_hooks; \
184 if ((os_hooks) && (os_hooks)->hook_interrupt) { \
185 ret = (os_hooks)->hook_interrupt(vm, irq); \
191 #define V3_ACK_IRQ(irq) \
193 extern struct v3_os_hooks * os_hooks; \
194 if ((os_hooks) && (os_hooks)->ack_irq) { \
195 (os_hooks)->ack_irq(irq); \
201 #define V3_Get_CPU() ({ \
203 extern struct v3_os_hooks * os_hooks; \
204 if ((os_hooks) && (os_hooks)->get_cpu) { \
205 ret = (os_hooks)->get_cpu(); \
213 #define V3_CREATE_AND_START_THREAD(fn, arg, name, rctl) ({ \
214 void * thread = NULL; \
215 extern struct v3_os_hooks * os_hooks; \
216 if ((os_hooks) && (os_hooks)->start_kernel_thread) { \
217 thread = (os_hooks)->start_kernel_thread(fn, arg, name,rctl); \
225 #define V3_Call_On_CPU(cpu, fn, arg) \
227 extern struct v3_os_hooks * os_hooks; \
228 if ((os_hooks) && (os_hooks)->call_on_cpu) { \
229 (os_hooks)->call_on_cpu(cpu, fn, arg); \
235 #define V3_CREATE_THREAD_ON_CPU(cpu, fn, arg, name, rctl) ({ \
236 void * thread = NULL; \
237 extern struct v3_os_hooks * os_hooks; \
238 if ((os_hooks) && (os_hooks)->create_thread_on_cpu) { \
239 thread = (os_hooks)->create_thread_on_cpu(cpu, fn, arg, name, rctl); \
244 #define V3_START_THREAD(thread) ({ \
245 extern struct v3_os_hooks * os_hooks; \
246 if((os_hooks) && (os_hooks)->start_thread){ \
247 (os_hooks)->start_thread(thread); \
251 #define V3_CREATE_AND_START_THREAD_ON_CPU(cpu, fn, arg, name, rctl) ({ \
252 void *thread = V3_CREATE_THREAD_ON_CPU(cpu,fn,arg,name,rctl); \
254 V3_START_THREAD(thread); \
259 #define V3_MOVE_THREAD_TO_CPU(pcpu, thread) ({ \
261 extern struct v3_os_hooks * os_hooks; \
262 if((os_hooks) && (os_hooks)->move_thread_to_cpu) { \
263 ret = (os_hooks)->move_thread_to_cpu(pcpu, thread); \
272 #define V3_ASSERT(vm, vcore, x) \
274 extern struct v3_os_hooks * os_hooks; \
276 PrintDebug(vm, vcore, "Failed assertion in %s: %s at %s, line %d, RA=%lx\n", \
277 __func__, #x, __FILE__, __LINE__, \
278 (ulong_t) __builtin_return_address(0)); \
280 if ((os_hooks) && (os_hooks)->yield_cpu) { \
281 (os_hooks)->yield_cpu(); \
291 extern struct v3_os_hooks * os_hooks; \
292 if ((os_hooks) && (os_hooks)->yield_cpu) { \
293 (os_hooks)->yield_cpu(); \
298 #define V3_Sleep(usec) \
300 extern struct v3_os_hooks * os_hooks; \
301 if ((os_hooks) && (os_hooks)->sleep_cpu) {\
302 (os_hooks)->sleep_cpu(usec); \
308 #define V3_Wakeup(cpu) \
310 extern struct v3_os_hooks * os_hooks; \
311 if ((os_hooks) && (os_hooks)->wakeup_cpu) { \
312 (os_hooks)->wakeup_cpu(cpu); \
/* Broad class of guest platform being virtualized. */
typedef enum v3_vm_class {V3_INVALID_VM, V3_PC_VM, V3_CRAY_VM} v3_vm_class_t;

// Maybe make this a define....
/* Hardware virtualization support available on a physical CPU:
 * AMD SVM (plus rev3 with nested paging) or Intel VMX (plus EPT,
 * and EPT with unrestricted-guest support). */
typedef enum v3_cpu_arch {V3_INVALID_CPU, V3_SVM_CPU, V3_SVM_REV3_CPU, V3_VMX_CPU, V3_VMX_EPT_CPU, V3_VMX_EPT_UG_CPU} v3_cpu_arch_t;
324 v3_cpu_mode_t v3_get_host_cpu_mode();
326 void v3_yield(struct guest_info * info, int usec);
327 void v3_yield_cond(struct guest_info * info, int usec);
328 void v3_print_cond(const char * fmt, ...);
330 void v3_interrupt_cpu(struct v3_vm_info * vm, int logical_cpu, int vector);
334 v3_cpu_arch_t v3_get_cpu_type(int cpu_id);
337 int v3_vm_enter(struct guest_info * info);
338 int v3_reset_vm_core(struct guest_info * core, addr_t rip);
341 #endif /*!__V3VEE__ */
// Resource-management constraints placed on allocations performed on behalf
// of a Palacios thread; consulted by the host allocation hooks when explicit
// constraints are not supplied.
typedef struct v3_resource_control {
// Page allocations, including for the thread stack
unsigned int pg_alignment; // desired alignment, e.g. for large pages
int pg_node_id; // NUMA node to allocate from (-1 => any node)
int (*pg_filter_func)(void *paddr, void *filter_state); // optional test; nonzero return accepts a candidate page
void *pg_filter_state; // opaque state passed through to pg_filter_func
} v3_resource_control_t;
357 /* This will contain function pointers that provide OS services */
359 // the vm pointer is the host os's "priv_data" from v3_create_vm
360 // if vm is null, this is a general palacios printout
// if vm is not null, and vcore is negative, this is a general print from the vm
362 // if vm is not null, and vcore is non-negative, this is a print from a specific vcore
363 void (*print)(void *vm, int vcore, const char * format, ...)
364 __attribute__ ((format (printf, 3, 4)));
366 // For page allocation:
367 // - node_id -1 => any node, otherwise the numa node we want to alloc from
368 // - constraint = 0 => no constraints, otherwise a bitwise-or of the following flags
369 // Allocates physically contiguous pages
370 // - with desired alignment
371 // - that the filter_func returns nonzero on (if filter_func is given)
// For any constraint that is not given, if a resource control structure
// exists for the thread, its fields are used. This allows Palacios
// to manage resources using its internal knowledge of what the
// purpose of the thread is.
376 void *(*allocate_pages)(int num_pages, unsigned int alignment, int node_id, int (*filter_func)(void *paddr, void *filter_state), void *filter_state);
377 void (*free_pages)(void * page, int num_pages);
379 // Allocates virtually contiguous memory
380 // the resource control structure for the thread is used, if it exists
381 void *(*vmalloc)(unsigned int size);
382 void (*vfree)(void * addr);
384 // Allocates virtually and physically contiguous memory
385 // the resource control structure for the thread is used, if it exists
386 void *(*malloc)(unsigned int size);
387 void (*free)(void * addr);
389 void *(*paddr_to_vaddr)(void * addr);
390 void *(*vaddr_to_paddr)(void * addr);
392 int (*hook_interrupt)(struct v3_vm_info * vm, unsigned int irq);
393 int (*ack_irq)(int irq);
395 unsigned int (*get_cpu_khz)(void);
397 void (*yield_cpu)(void);
398 void (*sleep_cpu)(unsigned int usec);
399 void (*wakeup_cpu)(void *cpu);
401 void *(*mutex_alloc)(void);
402 void (*mutex_free)(void * mutex);
403 void (*mutex_lock)(void * mutex, int must_spin);
404 void (*mutex_unlock)(void * mutex);
405 void *(*mutex_lock_irqsave)(void * mutex, int must_spin);
406 void (*mutex_unlock_irqrestore)(void * mutex, void *flags);
408 unsigned int (*get_cpu)(void);
410 // Resource allocations to instantiate a thread obey the resource
411 // control structure, if it exists, and if it is possible.
412 // The structure is then bound to the thread and used for subsequent
414 void * (*start_kernel_thread)(int (*fn)(void * arg), void * arg, char * thread_name, v3_resource_control_t *rctl);
415 void (*interrupt_cpu)(struct v3_vm_info * vm, int logical_cpu, int vector);
416 void (*call_on_cpu)(int logical_cpu, void (*fn)(void * arg), void * arg);
417 void * (*create_thread_on_cpu)(int cpu_id, int (*fn)(void * arg), void * arg, char * thread_name, v3_resource_control_t *rctl);
418 void (*start_thread)(void * core_thread);
419 int (*move_thread_to_cpu)(int cpu_id, void * thread);
425 * This is the interrupt state that the VMM's interrupt handlers need to see
427 struct v3_interrupt {
431 unsigned int should_ack; /* Should the vmm ack this interrupt, or will
432 * the host OS do it? */
// Lifecycle state of a whole VM as reported to the host
typedef enum {V3_VM_UNKNOWN, V3_VM_INVALID, V3_VM_RUNNING, V3_VM_STOPPED, V3_VM_PAUSED, V3_VM_ERROR, V3_VM_SIMULATING, V3_VM_RESETTING} v3_vm_state_t;
// Lifecycle state of an individual virtual core
typedef enum {V3_VCORE_UNKNOWN, V3_VCORE_INVALID, V3_VCORE_RUNNING, V3_VCORE_STOPPED, V3_VCORE_RESETTING } v3_vcore_state_t;
// Guest x86 execution mode of a vcore (real / protected / PAE / long, including compatibility submodes)
typedef enum {V3_VCORE_CPU_UNKNOWN, V3_VCORE_CPU_REAL, V3_VCORE_CPU_PROTECTED, V3_VCORE_CPU_PROTECTED_PAE, V3_VCORE_CPU_LONG, V3_VCORE_CPU_LONG_32_COMPAT, V3_VCORE_CPU_LONG_16_COMPAT} v3_vcore_cpu_mode_t;
// Memory virtualization technique in effect for a vcore: shadow or nested paging
typedef enum {V3_VCORE_MEM_STATE_UNKNOWN, V3_VCORE_MEM_STATE_SHADOW, V3_VCORE_MEM_STATE_NESTED} v3_vcore_mem_state_t;
// Guest addressing mode of a vcore: physical or virtual
typedef enum {V3_VCORE_MEM_MODE_UNKNOWN, V3_VCORE_MEM_MODE_PHYSICAL, V3_VCORE_MEM_MODE_VIRTUAL} v3_vcore_mem_mode_t;
// VM flavor: general-purpose or hybrid (HVM)
typedef enum {V3_VM_GENERAL, V3_VM_HVM} v3_vm_type_t;
// Role of a vcore within an HVM split (general, ROS, or HRT)
typedef enum {V3_VCORE_GENERAL, V3_VCORE_ROS, V3_VCORE_HRT} v3_vcore_type_t;
447 struct v3_vm_base_state {
449 v3_vm_type_t vm_type;
452 struct v3_vm_vcore_state {
453 v3_vcore_state_t state;
454 v3_vcore_cpu_mode_t cpu_mode;
455 v3_vcore_mem_state_t mem_state;
456 v3_vcore_mem_mode_t mem_mode;
457 v3_vcore_type_t vcore_type;
460 unsigned long long num_exits;
463 struct v3_vm_core_state {
464 unsigned long long num_vcores;
465 struct v3_vm_vcore_state vcore[];
468 struct v3_vm_mem_region {
471 unsigned long long size;
476 struct v3_vm_mem_state {
477 unsigned long long mem_size;
478 unsigned long long ros_mem_size;
479 unsigned long long num_regions;
480 struct v3_vm_mem_region region[];
// Look up a named option (see the options argument of Init_V3)
char *v3_lookup_option(char *name);
// Initialize Palacios: register the host OS hooks, the set of usable
// physical CPUs (cpus/num_cpus), and the option string
void Init_V3(struct v3_os_hooks * hooks, char * cpus, int num_cpus, char *options);
void Shutdown_V3( void );

// VM lifecycle: cfg is an opaque host-supplied configuration, priv_data is
// the host's handle for this VM, cpu_mask restricts usable physical CPUs
struct v3_vm_info * v3_create_vm(void * cfg, void * priv_data, char * name, unsigned int cpu_mask);
int v3_start_vm(struct v3_vm_info * vm, unsigned int cpu_mask);
int v3_stop_vm(struct v3_vm_info * vm);
int v3_pause_vm(struct v3_vm_info * vm);
int v3_continue_vm(struct v3_vm_info * vm);
int v3_simulate_vm(struct v3_vm_info * vm, unsigned int msecs);

int v3_reset_vm(struct v3_vm_info *vm);
// Scope of a reset: whole VM, only the HRT or ROS portion, or a range of cores
typedef enum {V3_VM_RESET_ALL,V3_VM_RESET_HRT,V3_VM_RESET_ROS,V3_VM_RESET_CORE_RANGE} v3_vm_reset_type;
int v3_reset_vm_extended(struct v3_vm_info *vm, v3_vm_reset_type t, void *data);

// Checkpoint / restore; store and url identify the backing store, opts are flags
int v3_save_vm(struct v3_vm_info * vm, char * store, char * url, unsigned long long opts);
int v3_load_vm(struct v3_vm_info * vm, char * store, char * url, unsigned long long opts);
// Migration (send/receive sides)
int v3_send_vm(struct v3_vm_info * vm, char * store, char * url, unsigned long long opts);
int v3_receive_vm(struct v3_vm_info * vm, char * store, char * url, unsigned long long opts);
// Move a single vcore to another physical CPU; move guest memory containing
// gpa with respect to target_cpu (NOTE(review): presumably toward its NUMA
// node — confirm against the implementation)
int v3_move_vm_core(struct v3_vm_info * vm, int vcore_id, int target_cpu);
int v3_move_vm_mem(struct v3_vm_info * vm, void *gpa, int target_cpu);

int v3_free_vm(struct v3_vm_info * vm);

// Query counts first so the caller can size the flexible arrays in
// v3_vm_core_state / v3_vm_mem_state before calling v3_get_state_vm
int v3_get_state_sizes_vm(struct v3_vm_info *vm,
unsigned long long *num_vcores,
unsigned long long *num_regions);

int v3_get_state_vm(struct v3_vm_info *vm,
struct v3_vm_base_state *base,
struct v3_vm_core_state *core,
struct v3_vm_mem_state *mem);

// NOTE(review): duplicate of the v3_lookup_option declaration above (parameter
// named "key" here vs "name" there); harmless in C, but one should be removed
char *v3_lookup_option(char *key);

// Deliver a host-received interrupt to the VM
int v3_deliver_irq(struct v3_vm_info * vm, struct v3_interrupt * intr);