Palacios Public Git Repository

To check out Palacios, execute

  git clone http://v3vee.org/palacios/palacios.web/palacios.git
This will give you the master branch. You probably want the devel branch or one of the release branches. To switch to the devel branch, simply execute
  cd palacios
  git checkout --track -b devel origin/devel
The other branches are similar.


Merge branch 'devel'
[palacios.git] / kitten / include / arch-x86_64 / tlbflush.h
1 #ifndef _X86_64_TLBFLUSH_H
2 #define _X86_64_TLBFLUSH_H
3
4 //#include <linux/mm.h>
5 #include <arch/processor.h>
6
/*
 * __flush_tlb() - flush all non-global TLB entries on this CPU.
 *
 * Reads CR3 into a scratch register and writes it straight back; on
 * x86-64 a CR3 write invalidates every TLB entry except those marked
 * global (PGE pages are handled by __flush_tlb_global() below).
 * The "memory" clobber stops the compiler from keeping memory values
 * cached in registers across the flush.
 */
#define __flush_tlb()                                                   \
        do {                                                            \
                unsigned long tmpreg;                                   \
                                                                        \
                __asm__ __volatile__(                                   \
                        "movq %%cr3, %0;  # flush TLB \n"               \
                        "movq %0, %%cr3;              \n"               \
                        : "=r" (tmpreg)                                 \
                        :: "memory");                                   \
        } while (0)
17
18 /*
19  * Global pages have to be flushed a bit differently. Not a real
20  * performance problem because this does not happen often.
21  */
/*
 * __flush_tlb_global() - flush the entire TLB, including global pages.
 *
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 *
 * Sequence: save CR4 (%2), clear the PGE bit in a copy (%1) and write
 * it back to CR4 (disabling global pages flushes them), reload CR3 to
 * flush the rest, then restore the original CR4 to re-enable PGE.
 * All outputs are early-clobber ("=&r") because they are written
 * before the "i" input (~X86_CR4_PGE immediate) is consumed.
 */
#define __flush_tlb_global()                                            \
        do {                                                            \
                unsigned long tmpreg, cr4, cr4_orig;                    \
                                                                        \
                __asm__ __volatile__(                                   \
                        "movq %%cr4, %2;  # turn off PGE     \n"        \
                        "movq %2, %1;                        \n"        \
                        "andq %3, %1;                        \n"        \
                        "movq %1, %%cr4;                     \n"        \
                        "movq %%cr3, %0;  # flush TLB        \n"        \
                        "movq %0, %%cr3;                     \n"        \
                        "movq %2, %%cr4;  # turn PGE back on \n"        \
                        : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
                        : "i" (~X86_CR4_PGE)                            \
                        : "memory");                                    \
        } while (0)
38
/* NOTE(review): presumably a mask of kernel mappings that use global
 * (PGE) pages; definition is not visible in this header — confirm. */
extern unsigned long pgkern_mask;

/* Full flush must go through the global variant so PGE entries die too. */
#define __flush_tlb_all() __flush_tlb_global()
42
/*
 * __flush_tlb_one(addr) - invalidate the single TLB entry covering the
 * virtual address 'addr'.
 *
 * The "m" constraint hands INVLPG a memory operand for the byte at
 * 'addr', so the instruction receives the address itself.
 * Fix: 'addr' is now parenthesized inside the cast; previously an
 * expression argument such as base + off expanded to
 * ((char *)base + off)-style binding instead of (char *)(base + off).
 */
#define __flush_tlb_one(addr) \
        __asm__ __volatile__("invlpg %0": :"m" (*(char *)(addr)))
45
#if 0

/*
 * NOTE(review): everything from here to the matching #endif is
 * compiled out.  This is TLB-flush API code carried over from Linux
 * and kept for reference only; it relies on symbols and types
 * (struct mm_struct, struct vm_area_struct, current, CONFIG_SMP)
 * that are not provided by this header's environment.
 */

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * x86-64 can only flush individual pages or full VMs. For a range flush
 * we always do the full VM. Might be worth trying if for a small
 * range a few INVLPGs in a row are a win.
 */

#ifndef CONFIG_SMP

/* Uniprocessor: every flush is local, so map straight onto the
 * low-level __flush_tlb*() primitives above. */
#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

/* Flush only if 'mm' is the address space currently active on this CPU;
 * an inactive mm has no entries in this CPU's TLB worth flushing. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->active_mm)
                __flush_tlb();
}

/* Flush the single page at 'addr' if its vma belongs to the active mm. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        if (vma->vm_mm == current->active_mm)
                __flush_tlb_one(addr);
}

/* Range flush degrades to a full non-global flush (see comment above:
 * x86-64 can only flush individual pages or full VMs). */
static inline void flush_tlb_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
{
        if (vma->vm_mm == current->active_mm)
                __flush_tlb();
}

#else

#include <asm/smp.h>

#define local_flush_tlb() \
        __flush_tlb()

/* SMP variants live out of line: they must notify other CPUs, not just
 * flush the local TLB. */
extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()     flush_tlb_current_task()

/* Range flush degrades to flushing the whole mm (see comment above). */
static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
{
        flush_tlb_mm(vma->vm_mm);
}

/* Per-CPU TLB state values used by the SMP flush implementation. */
#define TLBSTATE_OK     1
#define TLBSTATE_LAZY   2

/* Roughly an IPI every 20MB with 4k pages for freeing page table
   ranges. Cost is about 42k of memory for each CPU. */
#define ARCH_FREE_PTE_NR 5350   

#endif

/* Kernel mappings may be global, so a kernel-range flush needs the
 * full (global-including) flush regardless of the range. */
#define flush_tlb_kernel_range(start, end) flush_tlb_all()

static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
        /* x86_64 does not keep any page table caches in a software TLB.
           The CPUs do in their hardware TLBs, but they are handled
           by the normal TLB flushing algorithms. */
}

#endif
129
130 #endif /* _X86_64_TLBFLUSH_H */