#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/irq_vectors.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/smp_lock.h>

#include <palacios/vmm.h>
#include <palacios/vmm_host_events.h>

#include "palacios-mm.h"
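
/* Map from host interrupt vector to the guest VM that has hooked it (NULL = unhooked). */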
static struct v3_vm_info * irq_to_guest_map[256];
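
/* Host CPU frequency in kHz, provided by the x86 timing code. */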
extern unsigned int cpu_khz;
/*
 * Prints a message to the console.
 */
static void palacios_print(const char * fmt, ...) {
/*
 * Allocates a contiguous region of pages of the requested size.
 * Returns the physical address of the first page in the region.
 */
static void * palacios_allocate_pages(int num_pages, unsigned int alignment) {
    void * pg_addr = NULL;

    pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment);
    pg_allocs += num_pages;
/*
 * Frees pages previously allocated via palacios_allocate_pages().
 * The caller must pass the same page count that was used for the
 * original allocation.
 */
static void palacios_free_pages(void * page_paddr, int num_pages) {
    pg_frees += num_pages;
    free_palacios_pgs((uintptr_t)page_paddr, num_pages);
/*
 * Allocates 'size' bytes of kernel memory.
 * Returns the kernel virtual address of the memory allocated.
 */
palacios_alloc(unsigned int size) {
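    /* GFP_KERNEL allocations may sleep, so this must not be called from atomic context. */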
    addr = kmalloc(size, GFP_KERNEL);
/*
 * Frees memory that was previously allocated by palacios_alloc().
 */
/*
 * Converts a kernel virtual address to the corresponding physical address.
 */
palacios_vaddr_to_paddr(
    return (void*) __pa(vaddr);
/*
 * Converts a physical address to the corresponding kernel virtual address.
 */
palacios_paddr_to_vaddr(
/*
 * Runs a function on the specified CPU.
 */

// For now, only perform the call on the local CPU
    void (*fn)(void *arg),

    printk("palacios_xcall: Doing 'xcall' to local cpu\n");
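
/* Bundles the function, argument, and name handed off to a new Palacios kernel thread. */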
struct lnx_thread_arg {
    int (*fn)(void * arg);

static int lnx_thread_target(void * arg) {
    struct lnx_thread_arg * thread_info = (struct lnx_thread_arg *)arg;

    printk("Daemonizing new Palacios thread (name=%s)\n", thread_info->name);
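    /* daemonize() (removed from newer kernels) detaches the thread from user resources;
     * allow_signal() then lets it be terminated with SIGKILL. */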
    daemonize(thread_info->name);

    allow_signal(SIGKILL);

    thread_info->fn(thread_info->arg);
/*
 * Creates a kernel thread.
 */
palacios_start_kernel_thread(
    int (*fn)(void * arg),
    char * thread_name) {

    struct lnx_thread_arg * thread_info = kmalloc(sizeof(struct lnx_thread_arg), GFP_KERNEL);

    thread_info->fn = fn;
    thread_info->arg = arg;
    thread_info->name = thread_name;
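    /* kthread_run() creates the thread and immediately wakes it. */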
    kthread_run(lnx_thread_target, thread_info, thread_name);
/*
 * Starts a kernel thread on the specified CPU.
 */
palacios_start_thread_on_cpu(int cpu_id,
        int (*fn)(void * arg),
        char * thread_name) {
    struct task_struct * thread = NULL;
    struct lnx_thread_arg * thread_info = kmalloc(sizeof(struct lnx_thread_arg), GFP_KERNEL);

    thread_info->fn = fn;
    thread_info->arg = arg;
    thread_info->name = thread_name;

    thread = kthread_run(lnx_thread_target, thread_info, thread_name);

    if (IS_ERR(thread)) {
        printk("Palacios error creating thread: %s\n", thread_name);
/*
 * Returns the CPU ID that the caller is running on.
 */
palacios_get_cpu(void)

    // return smp_processor_id();
    // id = get_cpu(); put_cpu(id);
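    /* Derive the CPU from the caller's affinity mask instead of smp_processor_id(),
     * which is only reliable while preemption is disabled. */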
    if (sched_getaffinity(0, &mask) < 0) {
        panic("sched_getaffinity failed");

    set = cpumask_first(&mask);

    printk("***mask.bits: %d\n", set);
/*
 * Interrupts the physical CPU corresponding to the specified logical guest CPU.
 *
 * This depends on the implementation of xcall_reschedule(). Currently
 * xcall_reschedule() does not explicitly call schedule() on the destination CPU,
 * but instead relies on the return to user space to trigger it. Because
 * Palacios runs as a kernel thread, schedule() will not be called, which is
 * the behavior we want. If that ever changes to induce side effects, we'll
 * need to figure something out.
 */
palacios_interrupt_cpu(
    struct v3_vm_info * vm,

    // panic("palacios_interrupt_cpu");
    // printk("Faking interruption of target CPU by not doing anything since there is only one CPU\n");
/*
 * Dispatches an interrupt to Palacios for handling.
 */
palacios_dispatch_interrupt(int vector, void * dev, struct pt_regs * regs) {
    struct v3_interrupt intr = {
        .error = regs->orig_ax,

    if (irq_to_guest_map[vector]) {
        v3_deliver_irq(irq_to_guest_map[vector], &intr);
/*
 * Instructs the kernel to forward the specified IRQ to Palacios.
 */
palacios_hook_interrupt(struct v3_vm_info * vm,
                        unsigned int vector) {
    printk("hooking vector %d\n", vector);

    if (irq_to_guest_map[vector]) {
            "%s: Interrupt vector %u is already hooked.\n",

            "%s: Hooking interrupt vector %u to vm %p.\n",
            __func__, vector, vm);

    irq_to_guest_map[vector] = vm;

    /*
     * NOTE: Normally PCI devices are supposed to be level sensitive,
     *       but we need them to be edge sensitive so that they are
     *       properly latched by Palacios. Leaving them as level
     *       sensitive would lead to an interrupt storm.
     */
    //ioapic_set_trigger_for_vector(vector, ioapic_edge_sensitive);

    //set_idtvec_handler(vector, palacios_dispatch_interrupt);

    panic("unexpected vector for hooking\n");

    printk("hooking vector: %d\n", vector);
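    /* This code assumes Linux IRQ number == vector - 32, i.e. that device vectors
     * sit immediately above the 32 vectors reserved for processor exceptions. */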
    error = request_irq((vector - 32),
                        (void *)palacios_dispatch_interrupt,

                        "interrupt_for_palacios",

    printk("error code for request_irq is %d\n", error);
    panic("request vector %d failed", vector);
/*
 * Acknowledges an interrupt.
 */
palacios_ack_interrupt(

    printk("Pretending to ack interrupt, vector=%d\n", vector);
/*
 * Returns the CPU frequency in kilohertz.
 */
palacios_get_cpu_khz(void)

    printk("cpu_khz is %u\n", cpu_khz);

    printk("faking cpu_khz to 1000000\n");
/*
 * Yields the CPU so other host OS tasks can run.
 */
palacios_yield_cpu(void)
/*
 * Returns NULL on failure.
 */
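/* Note: Palacios "mutexes" are backed by Linux spinlocks, so holders must not sleep. */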
palacios_mutex_alloc(void)

    spinlock_t * lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);

    spin_lock_init(lock);

    spin_lock((spinlock_t *)mutex);

palacios_mutex_unlock(

    spin_unlock((spinlock_t *)mutex);
/*
 * Structure used by the Palacios hypervisor to interface with the host kernel.
 */
static struct v3_os_hooks palacios_os_hooks = {
    .print = palacios_print,
    .allocate_pages = palacios_allocate_pages,
    .free_pages = palacios_free_pages,
    .malloc = palacios_alloc,
    .free = palacios_free,
    .vaddr_to_paddr = palacios_vaddr_to_paddr,
    .paddr_to_vaddr = palacios_paddr_to_vaddr,
    .hook_interrupt = palacios_hook_interrupt,
    .ack_irq = palacios_ack_interrupt,
    .get_cpu_khz = palacios_get_cpu_khz,
    .start_kernel_thread = palacios_start_kernel_thread,
    .yield_cpu = palacios_yield_cpu,
    .mutex_alloc = palacios_mutex_alloc,
    .mutex_free = palacios_mutex_free,
    .mutex_lock = palacios_mutex_lock,
    .mutex_unlock = palacios_mutex_unlock,
    .get_cpu = palacios_get_cpu,
    .interrupt_cpu = palacios_interrupt_cpu,
    .call_on_cpu = palacios_xcall,
    .start_thread_on_cpu = palacios_start_thread_on_cpu,
int palacios_vmm_init(void)
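    /* Start with no IRQs routed to any VM, then register the host hooks with Palacios. */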
    memset(irq_to_guest_map, 0, sizeof(struct v3_vm_info *) * 256);

    printk("palacios_init starting - calling init_v3\n");

    Init_V3(&palacios_os_hooks, 1);

int palacios_vmm_exit(void) {