#define V3_CREATE_GUEST 12
#define V3_FREE_GUEST 13
-#define V3_ADD_MEMORY 50
+#define V3_ADD_MEMORY 50
+#define V3_RESET_MEMORY 51
+#define V3_REMOVE_MEMORY 52
+
+#define V3_ADD_PCI_HW_DEV 55
+#define V3_ADD_PCI_USER_DEV 56
+
+#define V3_DVFS_CTRL 60
+
/* VM Specific IOCTLs */
#define V3_VM_CONSOLE_CONNECT 20
#define V3_VM_SEND 34
#define V3_VM_RECEIVE 35
+#define V3_VM_MOVE_MEM 36
+
#define V3_VM_FB_INPUT 257
#define V3_VM_FB_QUERY 258
+#define V3_VM_MEM_TRACK_SIZE 300
+#define V3_VM_MEM_TRACK_CMD 301
+#define V3_VM_MEM_TRACK_SNAP 302
+
#define V3_VM_HOST_DEV_CONNECT 10245
#define V3_VM_KSTREAM_USER_CONNECT 11245
char name[128];
} __attribute__((packed));
+// How the memory described by a v3_mem_region is to be obtained
+typedef enum { PREALLOCATED=0, // user space-allocated (e.g. hot remove)
+ REQUESTED, // kernel will attempt allocation (anywhere)
+ REQUESTED32, // kernel will attempt allocation (<4GB)
+
+} v3_mem_region_type_t;
+
struct v3_mem_region {
- unsigned long long base_addr;
- unsigned long long num_pages;
+ v3_mem_region_type_t type; // how this region is provided (see v3_mem_region_type_t)
+ int node; // numa node for REQUESTED (-1 = any)
+ unsigned long long base_addr; // region start (hpa) for PREALLOCATED
+ unsigned long long num_pages; // size for PREALLOCATED or request size for REQUESTED
+ // should be power of 2 and > V3_CONFIG_MEM_BLOCK
} __attribute__((packed));
struct v3_debug_cmd {
unsigned short pcore_id; // NOTE(review): presumably the physical core to target -- confirm against the ioctl handler
} __attribute__((packed));
+// argument for V3_VM_MOVE_MEM
+struct v3_mem_move_cmd{
+ unsigned long long gpa; // presumably the guest physical address of the block to move -- verify against handler
+ unsigned short pcore_id; // physical core id; assumes memory is moved toward this core's locality -- TODO confirm
+} __attribute__((packed));
+
struct v3_chkpt_info {
char store[128];
char url[256]; /* This might need to be bigger... */
+ unsigned long long opts; // bitwise OR of the V3_CHKPT_OPT_* flags below
+#define V3_CHKPT_OPT_NONE 0
+#define V3_CHKPT_OPT_SKIP_MEM 1 // don't write memory to store
+#define V3_CHKPT_OPT_SKIP_DEVS 2 // don't write devices to store
+#define V3_CHKPT_OPT_SKIP_CORES 4 // don't write core arch ind data to store
+#define V3_CHKPT_OPT_SKIP_ARCHDEP 8 // don't write core arch dep data to store
+} __attribute__((packed));
+
+
+// host ("hardware") PCI device, presumably the argument for V3_ADD_PCI_HW_DEV -- confirm
+struct v3_hw_pci_dev {
+ char name[128];
+ unsigned int bus; // host PCI bus/device/function triple
+ unsigned int dev;
+ unsigned int func;
} __attribute__((packed));
+// user-level PCI device identified by vendor/device id, presumably for V3_ADD_PCI_USER_DEV -- confirm
+struct v3_user_pci_dev {
+ char name[128];
+ unsigned short vendor_id;
+ unsigned short dev_id;
+} __attribute__((packed));
-int palacios_vmm_init( void );
+int palacios_vmm_init( char *options );
int palacios_vmm_exit( void );
// Selected exported stubs, for use in other palacios components, like vnet
// The idea is that everything uses the same stubs
-void palacios_print(const char *fmt, ...);
-void *palacios_allocate_pages(int num_pages, unsigned int alignment);
+void palacios_print_scoped(void *vm, int vcore, const char *fmt, ...);
+#define palacios_print(...) palacios_print_scoped(0,-1, __VA_ARGS__)
+// node_id=-1 => no node constraint
+void *palacios_allocate_pages(int num_pages, unsigned int alignment, int node_id, int constraints);
void palacios_free_pages(void *page_addr, int num_pages);
void *palacios_alloc(unsigned int size);
-void *palacios_alloc_extended(unsigned int size, unsigned int flags);
+// node_id=-1 => no node constraint
+void *palacios_alloc_extended(unsigned int size, unsigned int flags, int node_id);
void palacios_free(void *);
+void *palacios_valloc(unsigned int size); // use instead of vmalloc
+void palacios_vfree(void *); // use instead of vfree
void *palacios_vaddr_to_paddr(void *vaddr);
void *palacios_paddr_to_vaddr(void *paddr);
-void *palacios_start_kernel_thread(int (*fn)(void * arg), void *arg, char *thread_name);
-void *palacios_start_thread_on_cpu(int cpu_id, int (*fn)(void * arg), void *arg, char *thread_name);
+void palacios_xcall(int cpu_id, void (*fn)(void *arg), void *arg);
+void *palacios_create_and_start_kernel_thread(int (*fn)(void * arg), void *arg, char *thread_name);
+void *palacios_create_thread_on_cpu(int cpu_id, int (*fn)(void * arg), void *arg, char *thread_name);
+void palacios_start_thread(void *thread_ptr);
+// NOTE(review): "creeate" is misspelled; rename to palacios_create_and_start_thread_on_cpu
+// only together with its definition and all callers (it is linked by this exact name).
+void *palacios_creeate_and_start_thread_on_cpu(int cpu_id, int (*fn)(void * arg), void *arg, char *thread_name);
int palacios_move_thread_to_cpu(int new_cpu_id, void *thread_ptr);
void palacios_yield_cpu(void);
-void palacios_yield_cpu_timed(unsigned int us);
+void palacios_sleep_cpu(unsigned int us);
unsigned int palacios_get_cpu(void);
unsigned int palacios_get_cpu_khz(void);
-void *palacios_mutex_alloc(void);
-void palacios_mutex_free(void *mutex);
+void palacios_used_fpu(void);
+void palacios_need_fpu(void);
+void *palacios_mutex_alloc(void); // allocates and inits a lock
+void palacios_mutex_init(void *mutex); // only inits a lock
+void palacios_mutex_deinit(void *mutex); // only deinits a lock
+void palacios_mutex_free(void *mutex); // deinits and frees a lock
void palacios_mutex_lock(void *mutex, int must_spin);
void palacios_mutex_unlock(void *mutex);
void *palacios_mutex_lock_irqsave(void *mutex, int must_spin);
void palacios_mutex_unlock_irqrestore(void *mutex, void *flags);
-
+// Macros for spin-locks in the module code
+// By using these macros, the lock checker will be able
+// to see the module code as well as the core VMM
+#define palacios_spinlock_init(l) palacios_mutex_init(l)
+#define palacios_spinlock_deinit(l) palacios_mutex_deinit(l)
+#define palacios_spinlock_lock(l) palacios_mutex_lock(l,0)
+#define palacios_spinlock_unlock(l) palacios_mutex_unlock(l)
+#define palacios_spinlock_lock_irqsave(l,f) do { f=(unsigned long)palacios_mutex_lock_irqsave(l,0); } while (0)
+#define palacios_spinlock_unlock_irqrestore(l,f) palacios_mutex_unlock_irqrestore(l,(void*)f)
// Palacios Printing Support