Palacios Public Git Repository

To check out Palacios, execute:

  git clone http://v3vee.org/palacios/palacios.web/palacios.git

This gives you the master branch. You probably want the devel branch or one of the release branches instead. To switch to the devel branch, execute:

  cd palacios
  git checkout --track -b devel origin/devel

The other branches are handled the same way.
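For example, to list the remote branches and track a release branch locally (the release branch name below is illustrative; use one reported by git branch -r):

  git branch -r
  git checkout --track -b Release-1.3 origin/Release-1.3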


Linux kernel compatibility enhancements (through 3.19)
[palacios.git] / linux_module / palacios-stubs.c
index 5ef9c5c..039c170 100644
@@ -8,6 +8,12 @@
 #include <linux/uaccess.h>
 #include <asm/irq_vectors.h>
 #include <asm/io.h>
+#include <asm/thread_info.h>
+#include <asm/i387.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+#include <asm/fpu-internal.h>
+#endif
 
 #include <linux/init.h>
 #include <linux/module.h>
@@ -27,6 +33,8 @@
 
 #include "palacios.h"
 
+#include "util-hashtable.h"
+
 #include "mm.h"
 
 #include "memcheck.h"
@@ -57,6 +65,9 @@ extern int cpu_list[NR_CPUS];
 extern int cpu_list_len;
 
 
+extern struct hashtable *v3_thread_resource_map;
+
+
 static char *print_buffer[NR_CPUS];
 
 static void deinit_print_buffers(void)
@@ -174,18 +185,36 @@ void palacios_print_scoped(void * vm, int vcore, const char *fmt, ...) {
  * Allocates a contiguous region of pages of the requested size.
  * Returns the physical address of the first page in the region.
  */
-void *palacios_allocate_pages(int num_pages, unsigned int alignment, int node_id, int constraints) {
+void *palacios_allocate_pages(int num_pages, unsigned int alignment, int node_id, int (*filter_func)(void *paddr, void *filter_state), void *filter_state) {
     void * pg_addr = NULL;
+    v3_resource_control_t *r;
 
     if (num_pages<=0) { 
-       ERROR("ALERT ALERT Attempt to allocate zero or fewer pages (%d pages, alignment %d, node %d, constraints 0x%x)\n",num_pages, alignment, node_id, constraints);
+       ERROR("ALERT ALERT Attempt to allocate zero or fewer pages (%d pages, alignment %d, node %d, filter_func %p, filter_state %p)\n",num_pages, alignment, node_id, filter_func, filter_state);
       return NULL;
     }
 
-    pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment, node_id, constraints);
+    if ((r=(v3_resource_control_t *)palacios_htable_search(v3_thread_resource_map,(addr_t)current))) { 
+       // thread has a registered resource control structure
+       // these override any default values
+       //      INFO("Overridden page search: (pre) alignment=%x, node_id=%x, filter_func=%p, filter_state=%p\n",alignment,node_id,filter_func,filter_state);
+       if (alignment==4096) { 
+           alignment = r->pg_alignment;
+       }
+       if (node_id==-1) { 
+           node_id = r->pg_node_id;
+       }
+       if (!filter_func) {
+           filter_func = r->pg_filter_func;
+           filter_state = r->pg_filter_state;
+       }
+       //INFO("Overridden page search: (post) alignment=%x, node_id=%x, filter_func=%p, filter_state=%p\n",alignment,node_id,filter_func,filter_state);
+    }
+    
+    pg_addr = (void *)alloc_palacios_pgs(num_pages, alignment, node_id, filter_func, filter_state);
 
     if (!pg_addr) { 
-       ERROR("ALERT ALERT  Page allocation has FAILED Warning (%d pages, alignment %d, node %d, constraints 0x%x)\n",num_pages, alignment, node_id, constraints);
+       ERROR("ALERT ALERT  Page allocation has FAILED Warning (%d pages, alignment %d, node %d, filter_func %p, filter_state %p)\n",num_pages, alignment, node_id, filter_func, filter_state);
        return NULL;
     }
 
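The hunk above replaces the old constraints bitmask with a caller-supplied filter callback and lets a per-thread v3_resource_control_t override the defaults (4096-byte alignment, node -1, no filter). A minimal sketch of such a filter follows; the function name and the assumption that a nonzero return marks a page as acceptable are illustrative, not taken from this commit:

  /* Hypothetical filter matching the new filter_func signature.
   * Accepts only pages whose physical address lies below 4 GB;
   * the nonzero-means-acceptable return convention is an assumption. */
  static int below_4gb_filter(void *paddr, void *filter_state)
  {
      (void)filter_state;   /* no per-filter state needed in this sketch */
      return (unsigned long long)(uintptr_t)paddr < 0x100000000ULL;
  }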
@@ -305,7 +334,7 @@ palacios_alloc(unsigned int size) {
     // this function is used extensively throughout palacios and the linux
     // module, both in places where interrupts are off and where they are on
     // a GFP_KERNEL call, when done with interrupts off can lead to DEADLOCK
-    if (irqs_disabled()) {
+    if (irqs_disabled() || in_atomic()) {
        return palacios_alloc_extended(size,GFP_ATOMIC,-1);
     } else {
        return palacios_alloc_extended(size,GFP_KERNEL,-1);
@@ -357,7 +386,7 @@ palacios_paddr_to_vaddr(
 /**
  * Runs a function on the specified CPU.
  */
-static void 
+void 
 palacios_xcall(
        int                     cpu_id, 
        void                    (*fn)(void *arg),
@@ -378,6 +407,7 @@ palacios_xcall(
 struct lnx_thread_arg {
     int (*fn)(void * arg);
     void * arg;
+    v3_resource_control_t *resource_control;
     char name[MAX_THREAD_NAME];
 };
 
@@ -395,13 +425,18 @@ static int lnx_thread_target(void * arg) {
     // We are a kernel thread that needs FPU save/restore state
     // vcores definitely need this, all the other threads get it too, 
     // but they just won't use it
+
     fpu_alloc(&(current->thread.fpu));
 #endif
 
+    palacios_htable_insert(v3_thread_resource_map,(addr_t)current,(addr_t)thread_info->resource_control);
+
     ret = thread_info->fn(thread_info->arg);
 
     INFO("Palacios Thread (%s) EXITING\n", thread_info->name);
 
+    palacios_htable_remove(v3_thread_resource_map,(addr_t)current,0);
+
     palacios_free(thread_info);
     // handle cleanup 
 
@@ -420,7 +455,8 @@ void *
 palacios_create_and_start_kernel_thread(
        int (*fn)               (void * arg),
        void *                  arg,
-       char *                  thread_name) {
+       char *                  thread_name,
+       v3_resource_control_t   *resource_control) {
 
     struct lnx_thread_arg * thread_info = palacios_alloc(sizeof(struct lnx_thread_arg));
 
@@ -433,6 +469,7 @@ palacios_create_and_start_kernel_thread(
     thread_info->arg = arg;
     strncpy(thread_info->name,thread_name,MAX_THREAD_NAME);
     thread_info->name[MAX_THREAD_NAME-1] =0;
+    thread_info->resource_control = resource_control;
 
     return kthread_run( lnx_thread_target, thread_info, thread_info->name );
 }
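Taken together, the thread hunks pass a v3_resource_control_t through lnx_thread_target, which registers it in v3_thread_resource_map under the new thread's task pointer and removes it when the thread exits. A sketch of how a caller might supply one is below; only the fields this commit actually reads (pg_alignment, pg_node_id, pg_filter_func, pg_filter_state) are shown, the initializer style is assumed to be valid for the structure, and all names and values are illustrative:

  /* Illustrative caller: the structure must outlive the thread, since
   * palacios_allocate_pages consults it for the thread's lifetime. */
  static v3_resource_control_t example_rctl = {
      .pg_alignment    = 2 * 1024 * 1024,    /* prefer 2 MB-aligned regions */
      .pg_node_id      = 0,                  /* allocate on NUMA node 0 */
      .pg_filter_func  = below_4gb_filter,   /* filter from the earlier sketch */
      .pg_filter_state = NULL,
  };

  static void start_example_thread(int (*fn)(void *arg), void *arg)
  {
      /* lnx_thread_target inserts example_rctl into v3_thread_resource_map
       * keyed by the new thread's task_struct and removes it on exit. */
      palacios_create_and_start_kernel_thread(fn, arg, "example-thread",
                                              &example_rctl);
  }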
@@ -443,9 +480,10 @@ palacios_create_and_start_kernel_thread(
  */
 void * 
 palacios_create_thread_on_cpu(int cpu_id,
-                            int (*fn)(void * arg), 
-                            void * arg, 
-                            char * thread_name ) {
+                             int (*fn)(void * arg), 
+                             void * arg, 
+                             char * thread_name,
+                             v3_resource_control_t *resource_control) {
     struct task_struct * thread = NULL;
     struct lnx_thread_arg * thread_info = palacios_alloc(sizeof(struct lnx_thread_arg));
 
@@ -458,6 +496,7 @@ palacios_create_thread_on_cpu(int cpu_id,
     thread_info->arg = arg;
     strncpy(thread_info->name,thread_name,MAX_THREAD_NAME);
     thread_info->name[MAX_THREAD_NAME-1] =0;
+    thread_info->resource_control=resource_control;
 
     thread = kthread_create( lnx_thread_target, thread_info, thread_info->name );
 
@@ -492,9 +531,10 @@ void *
 palacios_create_and_start_thread_on_cpu(int cpu_id,
                                        int (*fn)(void * arg), 
                                        void * arg, 
-                                       char * thread_name ) {
+                                       char * thread_name, 
+                                       v3_resource_control_t *resource_control) {
 
-    void *t = palacios_create_thread_on_cpu(cpu_id, fn, arg, thread_name);
+    void *t = palacios_create_thread_on_cpu(cpu_id, fn, arg, thread_name, resource_control);
 
     if (t) { 
        palacios_start_thread(t);
@@ -831,10 +871,14 @@ palacios_mutex_unlock_irqrestore(void *mutex, void *flags)
 
 void palacios_used_fpu(void)
 {
-   struct thread_info *cur = current_thread_info();
-
    // We assume we are not preemptible here...
-   cur->status |= TS_USEDFPU;
+#ifndef TS_USEDFPU
+   struct task_struct *tsk = current;
+   tsk->thread.fpu.has_fpu = 1;
+#else
+   struct thread_info *cur = current_thread_info();
+   cur->status |= TS_USEDFPU; 
+#endif
    clts(); 
    // After this, FP Save should be handled by Linux if it
    // switches to a different task and that task uses FPU
@@ -863,6 +907,8 @@ static struct v3_os_hooks palacios_os_hooks = {
        .print                  = palacios_print_scoped,
        .allocate_pages         = palacios_allocate_pages,
        .free_pages             = palacios_free_pages,
+       .vmalloc                = palacios_valloc,
+       .vfree                  = palacios_vfree,
        .malloc                 = palacios_alloc,
        .free                   = palacios_free,
        .vaddr_to_paddr         = palacios_vaddr_to_paddr,