// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
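
/*
 * Worked example (illustrative only, assuming a 64-byte cache line,
 * i.e. boot_cpu_data.x86_clflush_size == 64): a call such as
 *
 *	clean_cache_range((void *)0x1038, 16);
 *
 * rounds the start down to 0x1000 and stops at vend == 0x1048, so it
 * issues clwb on the two lines at 0x1000 and 0x1040, which together
 * cover the whole requested 0x1038..0x1047 range.
 */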

void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}
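
/*
 * Worked example of the flush decisions above (illustrative only,
 * assuming a 64-byte cache line): for dst == (void *)0x1004 and
 * size == 99, dest is not 8-byte aligned, so the head line at 0x1000
 * is written back via clean_cache_range(dst, 1) and dest is rounded
 * up to 0x1040; flushed is then 60, and since size - flushed == 39 is
 * not a multiple of 8, the line holding the last byte (dst + 98) is
 * written back as well.
 */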

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
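
/*
 * Illustrative caller sketch (hypothetical, not part of this file): a
 * persistent-memory write path would normally go through the generic
 * memcpy_flushcache() wrapper, which resolves to __memcpy_flushcache()
 * on this configuration, and then order the stores before signaling
 * completion, e.g.:
 *
 *	memcpy_flushcache(pmem_dst, src_buf, len);
 *	wmb();
 *
 * wmb() (SFENCE on x86) drains the write-combining buffers used by the
 * movnti stores and orders the CLWB write-backs issued for the cached
 * head/tail copies; pmem_dst, src_buf and len are placeholder names.
 */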

void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
#endif