/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_to_user
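	/*
	 * The check above rejects any destination range that would
	 * extend past the task's addr_limit, i.e. into kernel
	 * addresses, before a single byte is copied.
	 */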
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
		      "jmp copy_user_generic_string", \
		      X86_FEATURE_REP_GOOD, \
		      "jmp copy_user_enhanced_fast_string", \
		      X86_FEATURE_ERMS
ENDPROC(_copy_to_user)
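/*
 * The ALTERNATIVE_2 dispatch is patched at boot: CPUs with
 * X86_FEATURE_ERMS jump to copy_user_enhanced_fast_string, CPUs with
 * X86_FEATURE_REP_GOOD jump to copy_user_generic_string, and
 * everything else takes the default jump to
 * copy_user_generic_unrolled.
 */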
/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_from_user
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
		      "jmp copy_user_generic_string", \
		      X86_FEATURE_REP_GOOD, \
		      "jmp copy_user_enhanced_fast_string", \
		      X86_FEATURE_ERMS
ENDPROC(_copy_from_user)
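/*
 * Fixup for a failed range check: copy_to_user can simply report the
 * full count as uncopied (bad_to_user), but copy_from_user must also
 * zero the kernel destination buffer (bad_from_user) so no stale
 * kernel data leaks to the caller.
 */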
ENDPROC(bad_from_user)
/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient micro
 * code for rep movsq.
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic_unrolled)
	jb 20f	/* less than 8 bytes, go to byte copy loop */
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
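	/*
	 * Each pass through the loop above moves 64 bytes: eight
	 * qword loads and eight qword stores, issued in two batches
	 * of four through r8-r11 so loads and stores can overlap
	 * instead of serializing on a single register.
	 */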
40:	leal (%rdx,%rcx,8),%edx
60:	jmp copy_user_handle_tail /* ecx is zerorest also */
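	/*
	 * Exception fixup: at label 40, ecx holds the number of
	 * whole qwords still to copy and edx the leftover bytes, so
	 * the remaining byte count is edx + 8*ecx.
	 * copy_user_handle_tail then finishes the copy byte by byte
	 * and, for copy_from_user, zeroes whatever could not be
	 * copied.
	 */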
	_ASM_EXTABLE(10b,30b)
	_ASM_EXTABLE(11b,30b)
	_ASM_EXTABLE(12b,30b)
	_ASM_EXTABLE(13b,30b)
	_ASM_EXTABLE(14b,30b)
	_ASM_EXTABLE(15b,30b)
	_ASM_EXTABLE(16b,30b)
	_ASM_EXTABLE(18b,40b)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
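	/*
	 * Each _ASM_EXTABLE(from, to) entry tells the page-fault
	 * handler that a fault at label 'from' should resume at
	 * fixup label 'to' instead of oopsing: faults in the
	 * 64-byte loop land in 30, the single-qword loop in 40, and
	 * the byte loop in 50.
	 */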
ENDPROC(copy_user_generic_unrolled)
/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * And more would be dangerous because both Intel and AMD have
 * errata with rep movsq > 4GB. Anyone feeling the need to lift this
 * limit should consider those errata first.
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
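/*
 * The bulk of the copy is done as rep movsq, with the 0-7 trailing
 * bytes handled by rep movsb; both counts are set up through 32-bit
 * registers, which is where the 4GB limit described above comes
 * from.
 */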
ENTRY(copy_user_generic_string)
	jb 2f	/* less than 8 bytes, go to byte copy loop */
11:	leal (%rdx,%rcx,8),%ecx
12:	movl %ecx,%edx /* ecx is zerorest also */
	jmp copy_user_handle_tail
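	/*
	 * On a rep movsq fault (fixup label 11), the hardware leaves
	 * the count of uncopied qwords in rcx, so the remaining byte
	 * count is edx + 8*ecx; on a rep movsb fault (label 12), ecx
	 * already counts bytes and is passed through unchanged.
	 */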
ENDPROC(copy_user_generic_string)
/*
 * Some CPUs support the enhanced REP MOVSB/STOSB instructions.
 * It is recommended to use enhanced REP MOVSB/STOSB when the
 * feature is enabled.
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_enhanced_fast_string)
12:	movl %ecx,%edx /* ecx is zerorest also */
	jmp copy_user_handle_tail
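	/*
	 * With enhanced rep movsb the whole copy is one byte-granular
	 * string operation, so on a fault rcx already holds the
	 * number of uncopied bytes and no scaling is needed before
	 * handing off to copy_user_handle_tail.
	 */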
ENDPROC(copy_user_enhanced_fast_string)
/*
 * __copy_user_nocache - Uncached memory copy with exception handling.
 * This uses non-temporal stores to keep the destination out of the
 * cache for more performance.
 */
ENTRY(__copy_user_nocache)
	jb 20f	/* less than 8 bytes, go to byte copy loop */
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
6:	movnti %r9,1*8(%rdi)
7:	movnti %r10,2*8(%rdi)
8:	movnti %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movnti %r8,4*8(%rdi)
14:	movnti %r9,5*8(%rdi)
15:	movnti %r10,6*8(%rdi)
16:	movnti %r11,7*8(%rdi)
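	/*
	 * movnti performs a non-temporal store: the data goes to
	 * memory through write-combining buffers without filling
	 * cache lines, so a large copy does not evict the rest of
	 * the working set. The loads above are ordinary movq, so
	 * only the destination side bypasses the cache, and the
	 * weakly ordered stores must be fenced (sfence) before the
	 * copy can be considered globally visible.
	 */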
19:	movnti %r8,(%rdi)
40:	lea (%rdx,%rcx,8),%rdx
	jmp copy_user_handle_tail
	_ASM_EXTABLE(10b,30b)
	_ASM_EXTABLE(11b,30b)
	_ASM_EXTABLE(12b,30b)
	_ASM_EXTABLE(13b,30b)
	_ASM_EXTABLE(14b,30b)
	_ASM_EXTABLE(15b,30b)
	_ASM_EXTABLE(16b,30b)
	_ASM_EXTABLE(18b,40b)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
ENDPROC(__copy_user_nocache)