1 /* Copyright 2002 Andi Kleen, SuSE Labs.
2 * Subject to the GNU Public License v2.
4 * Functions to copy from and to user space.
7 #include <lwk/linkage.h>
8 #include <arch/dwarf2.h>
10 #include <arch/current.h>
11 #include <arch/asm-offsets.h>
13 /* Standard copy_to_user with segment limit checking */
/*
 * copy_to_user tail (ENTRY/CFI lines not in view).
 * SysV args: %rdi = user destination, %rsi = kernel source, %rdx = length.
 * On entry here %rax = current task (from GET_CURRENT, not in view) and
 * %rcx = dst + len, i.e. one past the last byte that would be written.
 * Returns (via copy_user_generic_string) the number of uncopied bytes in %eax.
 */
20 cmpq tsk_arch_addr_limit(%rax),%rcx /* does the write end beyond addr_limit? (jae bad_to_user expected here — not in view) */
/*
 * FIX: the "zero flag" consumed by copy_user_generic_string lives in %ecx
 * (it saves it with "movl %ecx,%r8d"); the original cleared %eax instead,
 * leaving %ecx garbage — a nonzero leftover would wrongly zero-fill the
 * user destination tail on a fault. Clear %ecx, matching
 * __copy_from_user_inatomic's "xorl %ecx,%ecx".
 */
22 xorl %ecx,%ecx /* clear zero flag */
23 jmp copy_user_generic_string
/*
 * copy_user_generic(to=%rdi, from=%rsi, len=%rdx)
 * Unchecked variant: no addr_limit comparison — callers have already
 * validated the range. Sets the zero flag (%ecx = 1) so that, on a fault,
 * copy_user_generic_string zero-fills the uncopied destination tail.
 * (CFI/ENDPROC lines not in view.)
 */
26 ENTRY(copy_user_generic)
28 movl $1,%ecx /* set zero flag */
29 jmp copy_user_generic_string
/*
 * __copy_from_user_inatomic(to=%rdi, from=%rsi, len=%rdx)
 * Variant without an addr_limit check in view; clears the zero flag
 * (%ecx = 0) so the destination tail is NOT zero-filled on a fault —
 * the "inatomic" caller handles partial copies itself.
 * (CFI/ENDPROC lines not in view.)
 */
32 ENTRY(__copy_from_user_inatomic)
34 xorl %ecx,%ecx /* clear zero flag */
35 jmp copy_user_generic_string
38 /* Standard copy_from_user with segment limit checking */
/*
 * copy_from_user tail (ENTRY/GET_CURRENT/range-setup lines not in view).
 * On entry here %rax = current task and %rcx = src + len (end of the
 * user-space read). Sets the zero flag (%ecx = 1) so that on a fault the
 * remaining KERNEL destination bytes are zero-filled — required so callers
 * never see uninitialized kernel memory after a partial copy.
 * Returns uncopied byte count in %eax via copy_user_generic_string.
 */
45 cmpq tsk_arch_addr_limit(%rax),%rcx
47 movl $1,%ecx /* set zero flag */
48 jmp copy_user_generic_string
50 ENDPROC(copy_from_user)
74 * eax uncopied bytes or 0 if successful.
76 * Only 4GB of copy is supported. This shouldn't be a problem
77 * because the kernel normally only writes from/to page sized chunks
78 * even if user space passed a longer buffer.
79 * And more would be dangerous because both Intel and AMD have
80 * errata with rep movsq > 4GB. If someone feels the need to fix
81 * this please consider this.
/*
 * copy_user_generic_string — rep-movs based user copy.
 * In:  %rdi = destination, %rsi = source, %rdx = length,
 *      %ecx = zero flag (nonzero => zero-fill destination tail on fault).
 * Out: %eax = number of uncopied bytes (0 on success).
 * Most of the body (rep movsq / rep movsb main loops, labels 1/2/9/10,
 * the push/pop around the stosb loop) is not in view here.
 */
83 ENTRY(copy_user_generic_string)
85 movl %ecx,%r8d /* save zero flag */
98 /* multiple of 8 byte */
104 /* exception handling */
105 3: lea (%rdx,%rcx,8),%rax /* exception on quad loop */
107 5: movl %ecx,%eax /* exception on byte loop */
108 /* eax: left over bytes */
109 6: testl %r8d,%r8d /* zero flag set? */
111 movl %eax,%ecx /* initialize x86 loop counter */
115 stosb /* zero the rest */
/*
 * FIX: the END marker must name the same symbol as ENTRY so the ELF
 * .size directive it expands to is attached to the right function.
 * "copy_user_generic_c" was a leftover from this routine's old name.
 */
119 END(copy_user_generic_string)
121 .section __ex_table,"a"