powerpc/book3s64/hash: Add cond_resched to avoid soft lockup warning
[linux-2.6-block.git] / lib / test_user_copy.c
CommitLineData
9c92ab61 1// SPDX-License-Identifier: GPL-2.0-only
3e2a4c18
KC
2/*
3 * Kernel module for testing copy_to/from_user infrastructure.
4 *
5 * Copyright 2013 Google Inc. All Rights Reserved
6 *
7 * Authors:
8 * Kees Cook <keescook@chromium.org>
3e2a4c18
KC
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/mman.h>
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/slab.h>
17#include <linux/uaccess.h>
18#include <linux/vmalloc.h>
19
4c5d7bc6
KC
/*
 * Several 32-bit architectures support 64-bit {get,put}_user() calls.
 * As there doesn't appear to be anything that can safely determine
 * their capability at compile-time, we just have to opt-out certain archs.
 */
/*
 * NOTE(review): the bare "MMU" below (rather than CONFIG_MMU) looks
 * suspicious -- confirm it is intentional for ARM nommu builds.
 */
#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
			    !defined(CONFIG_M68K) &&		\
			    !defined(CONFIG_MICROBLAZE) &&	\
			    !defined(CONFIG_NIOS2) &&		\
			    !defined(CONFIG_PPC32) &&		\
			    !defined(CONFIG_SUPERH))
# define TEST_U64
#endif
33
f5a1a536
AS
/*
 * Evaluate @condition; when it is non-zero (i.e. the check failed), log a
 * warning tagged with the invoking line number.  The statement expression
 * evaluates to the condition's value so callers can accumulate failures
 * with "ret |= test(...)".
 */
#define test(condition, msg, ...)					\
({									\
	int cond = (condition);						\
	if (cond)							\
		pr_warn("[%d] " msg "\n", __LINE__, ##__VA_ARGS__);	\
	cond;								\
})
41
f5a1a536
AS
/* Return true when the first @size bytes at @from are all zero. */
static bool is_zeroed(void *from, size_t size)
{
	return !memchr_inv(from, 0x0, size);
}
46
47static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
48{
49 int ret = 0;
50 size_t start, end, i;
51 size_t zero_start = size / 4;
52 size_t zero_end = size - zero_start;
53
54 /*
55 * We conduct a series of check_nonzero_user() tests on a block of memory
56 * with the following byte-pattern (trying every possible [start,end]
57 * pair):
58 *
59 * [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
60 *
61 * And we verify that check_nonzero_user() acts identically to memchr_inv().
62 */
63
64 memset(kmem, 0x0, size);
65 for (i = 1; i < zero_start; i += 2)
66 kmem[i] = 0xff;
67 for (i = zero_end; i < size; i += 2)
68 kmem[i] = 0xff;
69
70 ret |= test(copy_to_user(umem, kmem, size),
71 "legitimate copy_to_user failed");
72
73 for (start = 0; start <= size; start++) {
74 for (end = start; end <= size; end++) {
75 size_t len = end - start;
76 int retval = check_zeroed_user(umem + start, len);
77 int expected = is_zeroed(kmem + start, len);
78
79 ret |= test(retval != expected,
80 "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
81 retval, expected, start, end);
82 }
83 }
84
85 return ret;
86}
87
/*
 * Exercise copy_struct_from_user() in all relationships between the
 * kernel struct size (ksize) and the size userspace passed in (usize):
 * equal, smaller (old userspace), and larger (new userspace -- both the
 * -E2BIG rejection and the trailing-zeroes success case).
 *
 * @kmem: kernel scratch buffer of @size bytes (clobbered)
 * @umem: userspace buffer of @size bytes (clobbered)
 * @size: size of both buffers
 *
 * Returns 0 on success, non-zero if any sub-test failed.
 */
static int test_copy_struct_from_user(char *kmem, char __user *umem,
				      size_t size)
{
	int ret = 0;
	char *umem_src = NULL, *expected = NULL;
	size_t ksize, usize;

	/* An allocation failure is itself recorded as a test failure. */
	umem_src = kmalloc(size, GFP_KERNEL);
	if ((ret |= test(umem_src == NULL, "kmalloc failed")))
		goto out_free;

	expected = kmalloc(size, GFP_KERNEL);
	if ((ret |= test(expected == NULL, "kmalloc failed")))
		goto out_free;

	/* Fill umem with a fixed byte pattern. */
	memset(umem_src, 0x3e, size);
	ret |= test(copy_to_user(umem, umem_src, size),
		    "legitimate copy_to_user failed");

	/* Check basic case -- (usize == ksize). */
	ksize = size;
	usize = size;

	memcpy(expected, umem_src, ksize);

	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
		    "copy_struct_from_user(usize == ksize) failed");
	ret |= test(memcmp(kmem, expected, ksize),
		    "copy_struct_from_user(usize == ksize) gives unexpected copy");

	/* Old userspace case -- (usize < ksize). */
	ksize = size;
	usize = size / 2;

	/* The tail of the kernel struct beyond usize must come back zeroed. */
	memcpy(expected, umem_src, usize);
	memset(expected + usize, 0x0, ksize - usize);

	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
		    "copy_struct_from_user(usize < ksize) failed");
	ret |= test(memcmp(kmem, expected, ksize),
		    "copy_struct_from_user(usize < ksize) gives unexpected copy");

	/* New userspace (-E2BIG) case -- (usize > ksize). */
	ksize = size / 2;
	usize = size;

	/* umem still holds the 0x3e pattern beyond ksize, so this must fail. */
	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG,
		    "copy_struct_from_user(usize > ksize) didn't give E2BIG");

	/* New userspace (success) case -- (usize > ksize). */
	ksize = size / 2;
	usize = size;

	memcpy(expected, umem_src, ksize);
	/* Zero umem's tail so the oversized copy is now accepted. */
	ret |= test(clear_user(umem + ksize, usize - ksize),
		    "legitimate clear_user failed");

	memset(kmem, 0x0, size);
	ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
		    "copy_struct_from_user(usize > ksize) failed");
	ret |= test(memcmp(kmem, expected, ksize),
		    "copy_struct_from_user(usize > ksize) gives unexpected copy");

out_free:
	kfree(expected);
	kfree(umem_src);
	return ret;
}
160
3e2a4c18
KC
/*
 * Module init: runs the whole usercopy test battery against a two-page
 * kernel buffer and a two-page anonymous userspace mapping.
 *
 * Returns 0 when every sub-test passed, -ENOMEM on setup failure, and
 * -EINVAL when any check failed (so insmod reports the failure).
 */
static int __init test_user_copy_init(void)
{
	int ret = 0;
	char *kmem;
	char __user *usermem;
	char *bad_usermem;
	unsigned long user_addr;
	u8 val_u8;
	u16 val_u16;
	u32 val_u32;
#ifdef TEST_U64
	u64 val_u64;
#endif

	kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
	if (!kmem)
		return -ENOMEM;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	/* An address at or above TASK_SIZE cannot be a valid user mapping. */
	if (user_addr >= (unsigned long)(TASK_SIZE)) {
		pr_warn("Failed to allocate user memory\n");
		kfree(kmem);
		return -ENOMEM;
	}

	usermem = (char __user *)user_addr;
	/* Same address with the __user annotation dropped, to feed the
	 * "illegal" direction tests below. */
	bad_usermem = (char *)user_addr;

	/*
	 * Legitimate usage: none of these copies should fail.
	 */
	memset(kmem, 0x3a, PAGE_SIZE * 2);
	ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
		    "legitimate copy_to_user failed");
	memset(kmem, 0x0, PAGE_SIZE);
	ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
		    "legitimate copy_from_user failed");
	/* Second page still holds 0x3a, so a round-trip must match it. */
	ret |= test(memcmp(kmem, kmem + PAGE_SIZE, PAGE_SIZE),
		    "legitimate usercopy failed to copy data");

/* Round-trip a value through put_user()/get_user() at each width. */
#define test_legit(size, check)						\
	do {								\
		val_##size = check;					\
		ret |= test(put_user(val_##size, (size __user *)usermem), \
		    "legitimate put_user (" #size ") failed");		\
		val_##size = 0;						\
		ret |= test(get_user(val_##size, (size __user *)usermem), \
		    "legitimate get_user (" #size ") failed");		\
		ret |= test(val_##size != check,			\
		    "legitimate get_user (" #size ") failed to do copy"); \
		if (val_##size != check) {				\
			pr_info("0x%llx != 0x%llx\n",			\
				(unsigned long long)val_##size,		\
				(unsigned long long)check);		\
		}							\
	} while (0)

	test_legit(u8, 0x5a);
	test_legit(u16, 0x5a5b);
	test_legit(u32, 0x5a5b5c5d);
#ifdef TEST_U64
	test_legit(u64, 0x5a5b5c5d6a6b6c6d);
#endif
#undef test_legit

	/* Test usage of check_nonzero_user(). */
	ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
	/* Test usage of copy_struct_from_user(). */
	ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE);

	/*
	 * Invalid usage: none of these copies should succeed.
	 */

	/* Prepare kernel memory with check values. */
	memset(kmem, 0x5a, PAGE_SIZE);
	memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);

	/* Reject kernel-to-kernel copies through copy_from_user(). */
	ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
				    PAGE_SIZE),
		    "illegal all-kernel copy_from_user passed");

	/* Destination half of buffer should have been zeroed. */
	ret |= test(memcmp(kmem + PAGE_SIZE, kmem, PAGE_SIZE),
		    "zeroing failure for illegal all-kernel copy_from_user");

#if 0
	/*
	 * When running with SMAP/PAN/etc, this will Oops the kernel
	 * due to the zeroing of userspace memory on failure. This needs
	 * to be tested in LKDTM instead, since this test module does not
	 * expect to explode.
	 */
	ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
				    PAGE_SIZE),
		    "illegal reversed copy_from_user passed");
#endif
	ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
				  PAGE_SIZE),
		    "illegal all-kernel copy_to_user passed");
	ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
				  PAGE_SIZE),
		    "illegal reversed copy_to_user passed");

/* get_user()/put_user() against a kernel address must fail and zero out. */
#define test_illegal(size, check)					    \
	do {								    \
		val_##size = (check);					    \
		ret |= test(!get_user(val_##size, (size __user *)kmem),	    \
		    "illegal get_user (" #size ") passed");		    \
		ret |= test(val_##size != (size)0,			    \
		    "zeroing failure for illegal get_user (" #size ")");    \
		if (val_##size != (size)0) {				    \
			pr_info("0x%llx != 0\n",			    \
				(unsigned long long)val_##size);	    \
		}							    \
		ret |= test(!put_user(val_##size, (size __user *)kmem),	    \
		    "illegal put_user (" #size ") passed");		    \
	} while (0)

	test_illegal(u8, 0x5a);
	test_illegal(u16, 0x5a5b);
	test_illegal(u32, 0x5a5b5c5d);
#ifdef TEST_U64
	test_illegal(u64, 0x5a5b5c5d6a6b6c6d);
#endif
#undef test_illegal

	vm_munmap(user_addr, PAGE_SIZE * 2);
	kfree(kmem);

	if (ret == 0) {
		pr_info("tests passed.\n");
		return 0;
	}

	return -EINVAL;
}
301
module_init(test_user_copy_init);

/* Module unload: nothing to tear down (init frees everything); just log. */
static void __exit test_user_copy_exit(void)
{
	pr_info("unloaded.\n");
}

module_exit(test_user_copy_exit);

MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
MODULE_LICENSE("GPL");