Commit | Line | Data |
---|---|---|
457c8996 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
7999096f | 2 | #include <crypto/hash.h> |
4f18cd31 | 3 | #include <linux/export.h> |
2f8b5444 | 4 | #include <linux/bvec.h> |
4d0e9df5 | 5 | #include <linux/fault-inject-usercopy.h> |
4f18cd31 AV | 6 | #include <linux/uio.h>
7 | #include <linux/pagemap.h>
28961998 | 8 | #include <linux/highmem.h>
91f79c43 AV | 9 | #include <linux/slab.h>
10 | #include <linux/vmalloc.h>
241699cd | 11 | #include <linux/splice.h> |
bfdc5970 | 12 | #include <linux/compat.h> |
a604ec7e | 13 | #include <net/checksum.h> |
d05f4435 | 14 | #include <linux/scatterlist.h> |
d0ef4c36 | 15 | #include <linux/instrumented.h> |
4f18cd31 | 16 | |
241699cd AV | 17 | #define PIPE_PARANOIA /* for now */
18 |
5c67aa90 | 19 | /* covers iovec and kvec alike */ |
a6e4ec7b | 20 | #define iterate_iovec(i, n, base, len, off, __p, STEP) { \ |
7baa5099 | 21 | size_t off = 0; \ |
a6e4ec7b | 22 | size_t skip = i->iov_offset; \ |
7a1bcb5d | 23 | do { \ |
7baa5099 AV |
24 | len = min(n, __p->iov_len - skip); \ |
25 | if (likely(len)) { \ | |
26 | base = __p->iov_base + skip; \ | |
27 | len -= (STEP); \ | |
28 | off += len; \ | |
29 | skip += len; \ | |
30 | n -= len; \ | |
7a1bcb5d AV |
31 | if (skip < __p->iov_len) \ |
32 | break; \ | |
33 | } \ | |
34 | __p++; \ | |
35 | skip = 0; \ | |
36 | } while (n); \ | |
a6e4ec7b | 37 | i->iov_offset = skip; \ |
7baa5099 | 38 | n = off; \ |
04a31165 AV |
39 | } |
40 | ||
a6e4ec7b | 41 | #define iterate_bvec(i, n, base, len, off, p, STEP) { \ |
7baa5099 | 42 | size_t off = 0; \ |
a6e4ec7b | 43 | unsigned skip = i->iov_offset; \ |
7491a2bf AV |
44 | while (n) { \ |
45 | unsigned offset = p->bv_offset + skip; \ | |
1b4fb5ff | 46 | unsigned left; \ |
21b56c84 AV |
47 | void *kaddr = kmap_local_page(p->bv_page + \ |
48 | offset / PAGE_SIZE); \ | |
7baa5099 | 49 | base = kaddr + offset % PAGE_SIZE; \ |
a6e4ec7b | 50 | len = min(min(n, (size_t)(p->bv_len - skip)), \ |
7491a2bf | 51 | (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \ |
1b4fb5ff | 52 | left = (STEP); \ |
21b56c84 | 53 | kunmap_local(kaddr); \ |
7baa5099 AV |
54 | len -= left; \ |
55 | off += len; \ | |
56 | skip += len; \ | |
7491a2bf AV |
57 | if (skip == p->bv_len) { \ |
58 | skip = 0; \ | |
59 | p++; \ | |
60 | } \ | |
7baa5099 | 61 | n -= len; \ |
1b4fb5ff AV |
62 | if (left) \ |
63 | break; \ | |
7491a2bf | 64 | } \ |
a6e4ec7b | 65 | i->iov_offset = skip; \ |
7baa5099 | 66 | n = off; \ |
04a31165 AV |
67 | } |
68 | ||
a6e4ec7b | 69 | #define iterate_xarray(i, n, base, len, __off, STEP) { \ |
1b4fb5ff | 70 | __label__ __out; \ |
622838f3 | 71 | size_t __off = 0; \ |
821979f5 | 72 | struct folio *folio; \ |
a6e4ec7b | 73 | loff_t start = i->xarray_start + i->iov_offset; \ |
4b179e9a | 74 | pgoff_t index = start / PAGE_SIZE; \ |
7ff50620 DH |
75 | XA_STATE(xas, i->xarray, index); \ |
76 | \ | |
821979f5 | 77 | len = PAGE_SIZE - offset_in_page(start); \ |
7baa5099 | 78 | rcu_read_lock(); \ |
821979f5 | 79 | xas_for_each(&xas, folio, ULONG_MAX) { \ |
7baa5099 | 80 | unsigned left; \ |
821979f5 MWO |
81 | size_t offset; \ |
82 | if (xas_retry(&xas, folio)) \ | |
7baa5099 | 83 | continue; \ |
821979f5 | 84 | if (WARN_ON(xa_is_value(folio))) \ |
7baa5099 | 85 | break; \ |
821979f5 | 86 | if (WARN_ON(folio_test_hugetlb(folio))) \ |
7baa5099 | 87 | break; \ |
821979f5 MWO |
88 | offset = offset_in_folio(folio, start + __off); \ |
89 | while (offset < folio_size(folio)) { \ | |
90 | base = kmap_local_folio(folio, offset); \ | |
7baa5099 AV |
91 | len = min(n, len); \ |
92 | left = (STEP); \ | |
821979f5 | 93 | kunmap_local(base); \ |
7baa5099 AV |
94 | len -= left; \ |
95 | __off += len; \ | |
96 | n -= len; \ | |
97 | if (left || n == 0) \ | |
98 | goto __out; \ | |
821979f5 MWO |
99 | offset += len; \ |
100 | len = PAGE_SIZE; \ | |
7baa5099 | 101 | } \ |
7ff50620 | 102 | } \ |
1b4fb5ff | 103 | __out: \ |
7ff50620 | 104 | rcu_read_unlock(); \ |
821979f5 | 105 | i->iov_offset += __off; \ |
622838f3 | 106 | n = __off; \ |
7ff50620 DH |
107 | } |
108 | ||
7baa5099 | 109 | #define __iterate_and_advance(i, n, base, len, off, I, K) { \ |
dd254f5a AV |
110 | if (unlikely(i->count < n)) \ |
111 | n = i->count; \ | |
f5da8354 | 112 | if (likely(n)) { \ |
28f38db7 | 113 | if (likely(iter_is_iovec(i))) { \ |
5c67aa90 | 114 | const struct iovec *iov = i->iov; \ |
7baa5099 AV |
115 | void __user *base; \ |
116 | size_t len; \ | |
117 | iterate_iovec(i, n, base, len, off, \ | |
a6e4ec7b | 118 | iov, (I)) \ |
28f38db7 AV |
119 | i->nr_segs -= iov - i->iov; \ |
120 | i->iov = iov; \ | |
121 | } else if (iov_iter_is_bvec(i)) { \ | |
1bdc76ae | 122 | const struct bio_vec *bvec = i->bvec; \ |
7baa5099 AV |
123 | void *base; \ |
124 | size_t len; \ | |
125 | iterate_bvec(i, n, base, len, off, \ | |
a6e4ec7b | 126 | bvec, (K)) \ |
7491a2bf AV |
127 | i->nr_segs -= bvec - i->bvec; \ |
128 | i->bvec = bvec; \ | |
28f38db7 | 129 | } else if (iov_iter_is_kvec(i)) { \ |
5c67aa90 | 130 | const struct kvec *kvec = i->kvec; \ |
7baa5099 AV |
131 | void *base; \ |
132 | size_t len; \ | |
133 | iterate_iovec(i, n, base, len, off, \ | |
a6e4ec7b | 134 | kvec, (K)) \ |
dd254f5a AV |
135 | i->nr_segs -= kvec - i->kvec; \ |
136 | i->kvec = kvec; \ | |
28f38db7 | 137 | } else if (iov_iter_is_xarray(i)) { \ |
7baa5099 AV |
138 | void *base; \ |
139 | size_t len; \ | |
140 | iterate_xarray(i, n, base, len, off, \ | |
a6e4ec7b | 141 | (K)) \ |
7ce2a91e | 142 | } \ |
dd254f5a | 143 | i->count -= n; \ |
7ce2a91e | 144 | } \ |
7ce2a91e | 145 | } |
7baa5099 AV | 146 | #define iterate_and_advance(i, n, base, len, off, I, K) \
147 | __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
7ce2a91e | 148 | |
09fc68dc AV |
149 | static int copyout(void __user *to, const void *from, size_t n) |
150 | { | |
4d0e9df5 AL |
151 | if (should_fail_usercopy()) |
152 | return n; | |
96d4f267 | 153 | if (access_ok(to, n)) { |
d0ef4c36 | 154 | instrument_copy_to_user(to, from, n); |
09fc68dc AV |
155 | n = raw_copy_to_user(to, from, n); |
156 | } | |
157 | return n; | |
158 | } | |
159 | ||
160 | static int copyin(void *to, const void __user *from, size_t n) | |
161 | { | |
4d0e9df5 AL |
162 | if (should_fail_usercopy()) |
163 | return n; | |
96d4f267 | 164 | if (access_ok(from, n)) { |
d0ef4c36 | 165 | instrument_copy_from_user(to, from, n); |
09fc68dc AV |
166 | n = raw_copy_from_user(to, from, n); |
167 | } | |
168 | return n; | |
169 | } | |
170 | ||
241699cd AV |
171 | #ifdef PIPE_PARANOIA |
172 | static bool sanity(const struct iov_iter *i) | |
173 | { | |
174 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
175 | unsigned int p_head = pipe->head; |
176 | unsigned int p_tail = pipe->tail; | |
177 | unsigned int p_mask = pipe->ring_size - 1; | |
178 | unsigned int p_occupancy = pipe_occupancy(p_head, p_tail); | |
179 | unsigned int i_head = i->head; | |
180 | unsigned int idx; | |
181 | ||
241699cd AV |
182 | if (i->iov_offset) { |
183 | struct pipe_buffer *p; | |
8cefc107 | 184 | if (unlikely(p_occupancy == 0)) |
241699cd | 185 | goto Bad; // pipe must be non-empty |
8cefc107 | 186 | if (unlikely(i_head != p_head - 1)) |
241699cd AV |
187 | goto Bad; // must be at the last buffer... |
188 | ||
8cefc107 | 189 | p = &pipe->bufs[i_head & p_mask]; |
241699cd AV |
190 | if (unlikely(p->offset + p->len != i->iov_offset)) |
191 | goto Bad; // ... at the end of segment | |
192 | } else { | |
8cefc107 | 193 | if (i_head != p_head) |
241699cd AV |
194 | goto Bad; // must be right after the last buffer |
195 | } | |
196 | return true; | |
197 | Bad: | |
8cefc107 DH |
198 | printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset); |
199 | printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n", | |
200 | p_head, p_tail, pipe->ring_size); | |
201 | for (idx = 0; idx < pipe->ring_size; idx++) | |
241699cd AV |
202 | printk(KERN_ERR "[%p %p %d %d]\n", |
203 | pipe->bufs[idx].ops, | |
204 | pipe->bufs[idx].page, | |
205 | pipe->bufs[idx].offset, | |
206 | pipe->bufs[idx].len); | |
207 | WARN_ON(1); | |
208 | return false; | |
209 | } | |
210 | #else | |
211 | #define sanity(i) true | |
212 | #endif | |
213 | ||
241699cd AV |
214 | static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes, |
215 | struct iov_iter *i) | |
216 | { | |
217 | struct pipe_inode_info *pipe = i->pipe; | |
218 | struct pipe_buffer *buf; | |
8cefc107 DH |
219 | unsigned int p_tail = pipe->tail; |
220 | unsigned int p_mask = pipe->ring_size - 1; | |
221 | unsigned int i_head = i->head; | |
241699cd | 222 | size_t off; |
241699cd AV |
223 | |
224 | if (unlikely(bytes > i->count)) | |
225 | bytes = i->count; | |
226 | ||
227 | if (unlikely(!bytes)) | |
228 | return 0; | |
229 | ||
230 | if (!sanity(i)) | |
231 | return 0; | |
232 | ||
233 | off = i->iov_offset; | |
8cefc107 | 234 | buf = &pipe->bufs[i_head & p_mask]; |
241699cd AV |
235 | if (off) { |
236 | if (offset == off && buf->page == page) { | |
237 | /* merge with the last one */ | |
238 | buf->len += bytes; | |
239 | i->iov_offset += bytes; | |
240 | goto out; | |
241 | } | |
8cefc107 DH |
242 | i_head++; |
243 | buf = &pipe->bufs[i_head & p_mask]; | |
241699cd | 244 | } |
6718b6f8 | 245 | if (pipe_full(i_head, p_tail, pipe->max_usage)) |
241699cd | 246 | return 0; |
8cefc107 | 247 | |
241699cd | 248 | buf->ops = &page_cache_pipe_buf_ops; |
9d2231c5 | 249 | buf->flags = 0; |
8cefc107 DH |
250 | get_page(page); |
251 | buf->page = page; | |
241699cd AV |
252 | buf->offset = offset; |
253 | buf->len = bytes; | |
8cefc107 DH |
254 | |
255 | pipe->head = i_head + 1; | |
241699cd | 256 | i->iov_offset = offset + bytes; |
8cefc107 | 257 | i->head = i_head; |
241699cd AV |
258 | out: |
259 | i->count -= bytes; | |
260 | return bytes; | |
261 | } | |
262 | ||
171a0203 | 263 | /* |
a6294593 AG |
264 | * fault_in_iov_iter_readable - fault in iov iterator for reading |
265 | * @i: iterator | |
266 | * @size: maximum length | |
267 | * | |
171a0203 | 268 | * Fault in one or more iovecs of the given iov_iter, to a maximum length of |
a6294593 AG |
269 | * @size. For each iovec, fault in each page that constitutes the iovec. |
270 | * | |
271 | * Returns the number of bytes not faulted in (like copy_to_user() and | |
272 | * copy_from_user()). | |
171a0203 | 273 | * |
a6294593 | 274 | * Always returns 0 for non-userspace iterators. |
171a0203 | 275 | */ |
a6294593 | 276 | size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size) |
171a0203 | 277 | { |
0e8f0d67 | 278 | if (iter_is_iovec(i)) { |
a6294593 | 279 | size_t count = min(size, iov_iter_count(i)); |
8409a0d2 AV |
280 | const struct iovec *p; |
281 | size_t skip; | |
282 | ||
a6294593 AG |
283 | size -= count; |
284 | for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { | |
285 | size_t len = min(count, p->iov_len - skip); | |
286 | size_t ret; | |
8409a0d2 AV |
287 | |
288 | if (unlikely(!len)) | |
289 | continue; | |
a6294593 AG |
290 | ret = fault_in_readable(p->iov_base + skip, len); |
291 | count -= len - ret; | |
292 | if (ret) | |
293 | break; | |
8409a0d2 | 294 | } |
a6294593 | 295 | return count + size; |
171a0203 AA |
296 | } |
297 | return 0; | |
298 | } | |
a6294593 | 299 | EXPORT_SYMBOL(fault_in_iov_iter_readable); |
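/*
 * Editor's sketch (not part of this file): the usual caller pattern for
 * fault_in_iov_iter_readable() is "copy with page faults disabled, and on a
 * short copy fault the remainder in and retry".  The helper name
 * fill_from_user_iter() and its error handling are illustrative assumptions.
 */
static ssize_t fill_from_user_iter(void *dst, size_t len, struct iov_iter *from)
{
	size_t done = 0;

	while (done < len) {
		size_t n;

		pagefault_disable();
		n = copy_from_iter(dst + done, len - done, from);
		pagefault_enable();
		done += n;
		if (done == len)
			break;
		/* Short copy: if nothing more can be faulted in, give up. */
		if (fault_in_iov_iter_readable(from, len - done) == len - done)
			return done ? done : -EFAULT;
	}
	return done;
}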
171a0203 | 300 | |
cdd591fc AG |
301 | /* |
302 | * fault_in_iov_iter_writeable - fault in iov iterator for writing | |
303 | * @i: iterator | |
304 | * @size: maximum length | |
305 | * | |
306 | * Faults in the iterator using get_user_pages(), i.e., without triggering | |
307 | * hardware page faults. This is primarily useful when we already know that | |
308 | * some or all of the pages in @i aren't in memory. | |
309 | * | |
310 | * Returns the number of bytes not faulted in, like copy_to_user() and | |
311 | * copy_from_user(). | |
312 | * | |
313 | * Always returns 0 for non-user-space iterators. | |
314 | */ | |
315 | size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size) | |
316 | { | |
317 | if (iter_is_iovec(i)) { | |
318 | size_t count = min(size, iov_iter_count(i)); | |
319 | const struct iovec *p; | |
320 | size_t skip; | |
321 | ||
322 | size -= count; | |
323 | for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { | |
324 | size_t len = min(count, p->iov_len - skip); | |
325 | size_t ret; | |
326 | ||
327 | if (unlikely(!len)) | |
328 | continue; | |
329 | ret = fault_in_safe_writeable(p->iov_base + skip, len); | |
330 | count -= len - ret; | |
331 | if (ret) | |
332 | break; | |
333 | } | |
334 | return count + size; | |
335 | } | |
336 | return 0; | |
337 | } | |
338 | EXPORT_SYMBOL(fault_in_iov_iter_writeable); | |
339 | ||
aa563d7b | 340 | void iov_iter_init(struct iov_iter *i, unsigned int direction, |
71d8e532 AV |
341 | const struct iovec *iov, unsigned long nr_segs, |
342 | size_t count) | |
343 | { | |
aa563d7b | 344 | WARN_ON(direction & ~(READ | WRITE)); |
8cd54c1c AV |
345 | *i = (struct iov_iter) { |
346 | .iter_type = ITER_IOVEC, | |
3337ab08 | 347 | .nofault = false, |
8cd54c1c AV |
348 | .data_source = direction, |
349 | .iov = iov, | |
350 | .nr_segs = nr_segs, | |
351 | .iov_offset = 0, | |
352 | .count = count | |
353 | }; | |
71d8e532 AV |
354 | } |
355 | EXPORT_SYMBOL(iov_iter_init); | |
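/*
 * Editor's sketch (not part of this file): building a single-segment
 * ITER_IOVEC destination around a user pointer and filling it from a kernel
 * buffer.  The helper name copy_blob_to_user() is an illustrative assumption.
 */
static int copy_blob_to_user(void __user *ubuf, const void *kbuf, size_t len)
{
	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
	struct iov_iter iter;

	/* READ: data will flow *into* the memory the iterator describes. */
	iov_iter_init(&iter, READ, &iov, 1, len);
	if (copy_to_iter(kbuf, len, &iter) != len)
		return -EFAULT;
	return 0;
}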
7b2c99d1 | 356 | |
241699cd AV |
357 | static inline bool allocated(struct pipe_buffer *buf) |
358 | { | |
359 | return buf->ops == &default_pipe_buf_ops; | |
360 | } | |
361 | ||
8cefc107 DH |
362 | static inline void data_start(const struct iov_iter *i, |
363 | unsigned int *iter_headp, size_t *offp) | |
241699cd | 364 | { |
8cefc107 DH |
365 | unsigned int p_mask = i->pipe->ring_size - 1; |
366 | unsigned int iter_head = i->head; | |
241699cd | 367 | size_t off = i->iov_offset; |
8cefc107 DH |
368 | |
369 | if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) || | |
370 | off == PAGE_SIZE)) { | |
371 | iter_head++; | |
241699cd AV |
372 | off = 0; |
373 | } | |
8cefc107 | 374 | *iter_headp = iter_head; |
241699cd AV |
375 | *offp = off; |
376 | } | |
377 | ||
378 | static size_t push_pipe(struct iov_iter *i, size_t size, | |
8cefc107 | 379 | int *iter_headp, size_t *offp) |
241699cd AV |
380 | { |
381 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
382 | unsigned int p_tail = pipe->tail; |
383 | unsigned int p_mask = pipe->ring_size - 1; | |
384 | unsigned int iter_head; | |
241699cd | 385 | size_t off; |
241699cd AV |
386 | ssize_t left; |
387 | ||
388 | if (unlikely(size > i->count)) | |
389 | size = i->count; | |
390 | if (unlikely(!size)) | |
391 | return 0; | |
392 | ||
393 | left = size; | |
8cefc107 DH |
394 | data_start(i, &iter_head, &off); |
395 | *iter_headp = iter_head; | |
241699cd AV |
396 | *offp = off; |
397 | if (off) { | |
398 | left -= PAGE_SIZE - off; | |
399 | if (left <= 0) { | |
8cefc107 | 400 | pipe->bufs[iter_head & p_mask].len += size; |
241699cd AV |
401 | return size; |
402 | } | |
8cefc107 DH |
403 | pipe->bufs[iter_head & p_mask].len = PAGE_SIZE; |
404 | iter_head++; | |
241699cd | 405 | } |
6718b6f8 | 406 | while (!pipe_full(iter_head, p_tail, pipe->max_usage)) { |
8cefc107 | 407 | struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask]; |
241699cd AV |
408 | struct page *page = alloc_page(GFP_USER); |
409 | if (!page) | |
410 | break; | |
8cefc107 DH |
411 | |
412 | buf->ops = &default_pipe_buf_ops; | |
9d2231c5 | 413 | buf->flags = 0; |
8cefc107 DH |
414 | buf->page = page; |
415 | buf->offset = 0; | |
416 | buf->len = min_t(ssize_t, left, PAGE_SIZE); | |
417 | left -= buf->len; | |
418 | iter_head++; | |
419 | pipe->head = iter_head; | |
420 | ||
421 | if (left == 0) | |
241699cd | 422 | return size; |
241699cd AV |
423 | } |
424 | return size - left; | |
425 | } | |
426 | ||
427 | static size_t copy_pipe_to_iter(const void *addr, size_t bytes, | |
428 | struct iov_iter *i) | |
429 | { | |
430 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
431 | unsigned int p_mask = pipe->ring_size - 1; |
432 | unsigned int i_head; | |
241699cd | 433 | size_t n, off; |
241699cd AV |
434 | |
435 | if (!sanity(i)) | |
436 | return 0; | |
437 | ||
8cefc107 | 438 | bytes = n = push_pipe(i, bytes, &i_head, &off); |
241699cd AV |
439 | if (unlikely(!n)) |
440 | return 0; | |
8cefc107 | 441 | do { |
241699cd | 442 | size_t chunk = min_t(size_t, n, PAGE_SIZE - off); |
8cefc107 DH |
443 | memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk); |
444 | i->head = i_head; | |
241699cd AV |
445 | i->iov_offset = off + chunk; |
446 | n -= chunk; | |
447 | addr += chunk; | |
8cefc107 DH |
448 | off = 0; |
449 | i_head++; | |
450 | } while (n); | |
241699cd AV |
451 | i->count -= bytes; |
452 | return bytes; | |
453 | } | |
454 | ||
f9152895 AV |
455 | static __wsum csum_and_memcpy(void *to, const void *from, size_t len, |
456 | __wsum sum, size_t off) | |
457 | { | |
cc44c17b | 458 | __wsum next = csum_partial_copy_nocheck(from, to, len); |
f9152895 AV |
459 | return csum_block_add(sum, next, off); |
460 | } | |
461 | ||
78e1f386 | 462 | static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, |
6852df12 | 463 | struct iov_iter *i, __wsum *sump) |
78e1f386 AV |
464 | { |
465 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 | 466 | unsigned int p_mask = pipe->ring_size - 1; |
6852df12 AV |
467 | __wsum sum = *sump; |
468 | size_t off = 0; | |
8cefc107 | 469 | unsigned int i_head; |
6852df12 | 470 | size_t r; |
78e1f386 AV |
471 | |
472 | if (!sanity(i)) | |
473 | return 0; | |
474 | ||
6852df12 AV |
475 | bytes = push_pipe(i, bytes, &i_head, &r); |
476 | while (bytes) { | |
477 | size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r); | |
2495bdcc | 478 | char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page); |
6852df12 | 479 | sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off); |
2495bdcc | 480 | kunmap_local(p); |
8cefc107 | 481 | i->head = i_head; |
78e1f386 | 482 | i->iov_offset = r + chunk; |
6852df12 | 483 | bytes -= chunk; |
78e1f386 | 484 | off += chunk; |
8cefc107 DH |
485 | r = 0; |
486 | i_head++; | |
6852df12 AV |
487 | } |
488 | *sump = sum; | |
489 | i->count -= off; | |
490 | return off; | |
78e1f386 AV |
491 | } |
492 | ||
aa28de27 | 493 | size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) |
62a8067a | 494 | { |
00e23707 | 495 | if (unlikely(iov_iter_is_pipe(i))) |
241699cd | 496 | return copy_pipe_to_iter(addr, bytes, i); |
09fc68dc AV |
497 | if (iter_is_iovec(i)) |
498 | might_fault(); | |
7baa5099 AV |
499 | iterate_and_advance(i, bytes, base, len, off, |
500 | copyout(base, addr + off, len), | |
501 | memcpy(base, addr + off, len) | |
3d4d3e48 | 502 | ) |
62a8067a | 503 | |
3d4d3e48 | 504 | return bytes; |
c35e0248 | 505 | } |
aa28de27 | 506 | EXPORT_SYMBOL(_copy_to_iter); |
c35e0248 | 507 | |
ec6347bb DW |
508 | #ifdef CONFIG_ARCH_HAS_COPY_MC |
509 | static int copyout_mc(void __user *to, const void *from, size_t n) | |
8780356e | 510 | { |
96d4f267 | 511 | if (access_ok(to, n)) { |
d0ef4c36 | 512 | instrument_copy_to_user(to, from, n); |
ec6347bb | 513 | n = copy_mc_to_user((__force void *) to, from, n); |
8780356e DW |
514 | } |
515 | return n; | |
516 | } | |
517 | ||
ec6347bb | 518 | static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, |
ca146f6f DW |
519 | struct iov_iter *i) |
520 | { | |
521 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
522 | unsigned int p_mask = pipe->ring_size - 1; |
523 | unsigned int i_head; | |
c3497fd0 | 524 | unsigned int valid = pipe->head; |
ca146f6f | 525 | size_t n, off, xfer = 0; |
ca146f6f DW |
526 | |
527 | if (!sanity(i)) | |
528 | return 0; | |
529 | ||
2a510a74 AV |
530 | n = push_pipe(i, bytes, &i_head, &off); |
531 | while (n) { | |
ca146f6f | 532 | size_t chunk = min_t(size_t, n, PAGE_SIZE - off); |
2a510a74 | 533 | char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page); |
ca146f6f | 534 | unsigned long rem; |
2a510a74 AV |
535 | rem = copy_mc_to_kernel(p + off, addr + xfer, chunk); |
536 | chunk -= rem; | |
537 | kunmap_local(p); | |
c3497fd0 AV |
538 | if (chunk) { |
539 | i->head = i_head; | |
540 | i->iov_offset = off + chunk; | |
541 | xfer += chunk; | |
542 | valid = i_head + 1; | |
543 | } | |
544 | if (rem) { | |
545 | pipe->bufs[i_head & p_mask].len -= rem; | |
546 | pipe_discard_from(pipe, valid); | |
ca146f6f | 547 | break; |
c3497fd0 | 548 | } |
ca146f6f | 549 | n -= chunk; |
8cefc107 DH |
550 | off = 0; |
551 | i_head++; | |
2a510a74 | 552 | } |
ca146f6f DW |
553 | i->count -= xfer; |
554 | return xfer; | |
555 | } | |
556 | ||
bf3eeb9b | 557 | /** |
ec6347bb | 558 | * _copy_mc_to_iter - copy to iter with source memory error exception handling |
bf3eeb9b DW |
559 | * @addr: source kernel address |
560 | * @bytes: total transfer length | |
44e55997 | 561 | * @i: destination iterator |
bf3eeb9b | 562 | * |
ec6347bb DW |
563 | * The pmem driver deploys this for the dax operation
564 | * (dax_copy_to_iter()) for dax reads, bypassing the page cache and the
565 | * block layer. Upon #MC, read(2) aborts and returns either EIO or the
566 | * number of bytes successfully copied.
bf3eeb9b | 567 | *
ec6347bb | 568 | * The main differences between this and typical _copy_to_iter() are:
bf3eeb9b DW |
569 | * |
570 | * * Typical tail/residue handling after a fault retries the copy | |
571 | * byte-by-byte until the fault happens again. Re-triggering machine | |
572 | * checks is potentially fatal so the implementation uses source | |
573 | * alignment and poison alignment assumptions to avoid re-triggering | |
574 | * hardware exceptions. | |
575 | * | |
576 | * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies. | |
577 | * Compare to copy_to_iter() where only ITER_IOVEC attempts might return | |
578 | * a short copy. | |
44e55997 RD |
579 | * |
580 | * Return: number of bytes copied (may be %0) | |
bf3eeb9b | 581 | */ |
ec6347bb | 582 | size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) |
8780356e | 583 | { |
00e23707 | 584 | if (unlikely(iov_iter_is_pipe(i))) |
ec6347bb | 585 | return copy_mc_pipe_to_iter(addr, bytes, i); |
8780356e DW |
586 | if (iter_is_iovec(i)) |
587 | might_fault(); | |
7baa5099 AV |
588 | __iterate_and_advance(i, bytes, base, len, off, |
589 | copyout_mc(base, addr + off, len), | |
590 | copy_mc_to_kernel(base, addr + off, len) | |
8780356e DW |
591 | ) |
592 | ||
593 | return bytes; | |
594 | } | |
ec6347bb DW |
595 | EXPORT_SYMBOL_GPL(_copy_mc_to_iter); |
596 | #endif /* CONFIG_ARCH_HAS_COPY_MC */ | |
8780356e | 597 | |
aa28de27 | 598 | size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) |
c35e0248 | 599 | { |
00e23707 | 600 | if (unlikely(iov_iter_is_pipe(i))) { |
241699cd AV |
601 | WARN_ON(1); |
602 | return 0; | |
603 | } | |
09fc68dc AV |
604 | if (iter_is_iovec(i)) |
605 | might_fault(); | |
7baa5099 AV |
606 | iterate_and_advance(i, bytes, base, len, off, |
607 | copyin(addr + off, base, len), | |
608 | memcpy(addr + off, base, len) | |
0dbca9a4 AV |
609 | ) |
610 | ||
611 | return bytes; | |
c35e0248 | 612 | } |
aa28de27 | 613 | EXPORT_SYMBOL(_copy_from_iter); |
c35e0248 | 614 | |
aa28de27 | 615 | size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) |
aa583096 | 616 | { |
00e23707 | 617 | if (unlikely(iov_iter_is_pipe(i))) { |
241699cd AV |
618 | WARN_ON(1); |
619 | return 0; | |
620 | } | |
7baa5099 AV |
621 | iterate_and_advance(i, bytes, base, len, off, |
622 | __copy_from_user_inatomic_nocache(addr + off, base, len), | |
623 | memcpy(addr + off, base, len) | |
aa583096 AV |
624 | ) |
625 | ||
626 | return bytes; | |
627 | } | |
aa28de27 | 628 | EXPORT_SYMBOL(_copy_from_iter_nocache); |
aa583096 | 629 | |
0aed55af | 630 | #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE |
abd08d7d DW |
631 | /** |
632 | * _copy_from_iter_flushcache - write destination through cpu cache | |
633 | * @addr: destination kernel address | |
634 | * @bytes: total transfer length | |
44e55997 | 635 | * @i: source iterator |
abd08d7d DW |
636 | * |
637 | * The pmem driver arranges for filesystem-dax to use this facility via | |
638 | * dax_copy_from_iter() for ensuring that writes to persistent memory | |
639 | * are flushed through the CPU cache. It is differentiated from
640 | * _copy_from_iter_nocache() in that it guarantees all data is flushed for
641 | * all iterator types, whereas _copy_from_iter_nocache() only attempts to
642 | * bypass the cache for the ITER_IOVEC case, and on some archs may use | |
643 | * instructions that strand dirty-data in the cache. | |
44e55997 RD |
644 | * |
645 | * Return: number of bytes copied (may be %0) | |
abd08d7d | 646 | */ |
6a37e940 | 647 | size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) |
0aed55af | 648 | { |
00e23707 | 649 | if (unlikely(iov_iter_is_pipe(i))) { |
0aed55af DW |
650 | WARN_ON(1); |
651 | return 0; | |
652 | } | |
7baa5099 AV |
653 | iterate_and_advance(i, bytes, base, len, off, |
654 | __copy_from_user_flushcache(addr + off, base, len), | |
655 | memcpy_flushcache(addr + off, base, len) | |
0aed55af DW |
656 | ) |
657 | ||
658 | return bytes; | |
659 | } | |
6a37e940 | 660 | EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); |
0aed55af DW |
661 | #endif |
662 | ||
72e809ed AV |
663 | static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) |
664 | { | |
6daef95b ED |
665 | struct page *head; |
666 | size_t v = n + offset; | |
667 | ||
668 | /* | |
669 | * The general case needs to access the page order in order | |
670 | * to compute the page size. | |
671 | * However, we mostly deal with order-0 pages and thus can | |
672 | * avoid a possible cache line miss for requests that fit all | |
673 | * page orders. | |
674 | */ | |
675 | if (n <= v && v <= PAGE_SIZE) | |
676 | return true; | |
677 | ||
678 | head = compound_head(page); | |
679 | v += (page - head) << PAGE_SHIFT; | |
a90bcb86 | 680 | |
a50b854e | 681 | if (likely(n <= v && v <= (page_size(head)))) |
72e809ed AV |
682 | return true; |
683 | WARN_ON(1); | |
684 | return false; | |
685 | } | |
cbbd26b8 | 686 | |
08aa6479 | 687 | static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes, |
62a8067a AV |
688 | struct iov_iter *i) |
689 | { | |
59bb69c6 AV |
690 | if (unlikely(iov_iter_is_pipe(i))) { |
691 | return copy_page_to_iter_pipe(page, offset, bytes, i); | |
692 | } else { | |
c1d4d6a9 AV |
693 | void *kaddr = kmap_local_page(page); |
694 | size_t wanted = _copy_to_iter(kaddr + offset, bytes, i); | |
695 | kunmap_local(kaddr); | |
d271524a | 696 | return wanted; |
28f38db7 | 697 | } |
62a8067a | 698 | } |
08aa6479 AV |
699 | |
700 | size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, | |
701 | struct iov_iter *i) | |
702 | { | |
703 | size_t res = 0; | |
704 | if (unlikely(!page_copy_sane(page, offset, bytes))) | |
705 | return 0; | |
706 | page += offset / PAGE_SIZE; // first subpage | |
707 | offset %= PAGE_SIZE; | |
708 | while (1) { | |
709 | size_t n = __copy_page_to_iter(page, offset, | |
710 | min(bytes, (size_t)PAGE_SIZE - offset), i); | |
711 | res += n; | |
712 | bytes -= n; | |
713 | if (!bytes || !n) | |
714 | break; | |
715 | offset += n; | |
716 | if (offset == PAGE_SIZE) { | |
717 | page++; | |
718 | offset = 0; | |
719 | } | |
720 | } | |
721 | return res; | |
722 | } | |
62a8067a AV |
723 | EXPORT_SYMBOL(copy_page_to_iter); |
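/*
 * Editor's sketch (not part of this file): read-side paths push page contents
 * to the caller's buffers with copy_page_to_iter(), stopping at the first
 * short copy.  The helper name push_pages_to_iter() is illustrative.
 */
static size_t push_pages_to_iter(struct page **pages, unsigned int nr,
				 size_t offset, size_t count, struct iov_iter *to)
{
	size_t done = 0;

	for (unsigned int k = 0; k < nr && count; k++) {
		size_t want = min_t(size_t, count, PAGE_SIZE - offset);
		size_t n = copy_page_to_iter(pages[k], offset, want, to);

		done += n;
		count -= n;
		if (n < want)
			break;	/* fault or iterator exhausted */
		offset = 0;
	}
	return done;
}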
724 | ||
725 | size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, | |
726 | struct iov_iter *i) | |
727 | { | |
59bb69c6 | 728 | if (page_copy_sane(page, offset, bytes)) { |
55ca375c | 729 | void *kaddr = kmap_local_page(page); |
aa28de27 | 730 | size_t wanted = _copy_from_iter(kaddr + offset, bytes, i); |
55ca375c | 731 | kunmap_local(kaddr); |
d271524a | 732 | return wanted; |
28f38db7 | 733 | } |
28f38db7 | 734 | return 0; |
62a8067a AV |
735 | } |
736 | EXPORT_SYMBOL(copy_page_from_iter); | |
737 | ||
241699cd AV |
738 | static size_t pipe_zero(size_t bytes, struct iov_iter *i) |
739 | { | |
740 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
741 | unsigned int p_mask = pipe->ring_size - 1; |
742 | unsigned int i_head; | |
241699cd | 743 | size_t n, off; |
241699cd AV |
744 | |
745 | if (!sanity(i)) | |
746 | return 0; | |
747 | ||
8cefc107 | 748 | bytes = n = push_pipe(i, bytes, &i_head, &off); |
241699cd AV |
749 | if (unlikely(!n)) |
750 | return 0; | |
751 | ||
8cefc107 | 752 | do { |
241699cd | 753 | size_t chunk = min_t(size_t, n, PAGE_SIZE - off); |
893839fd AV |
754 | char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page); |
755 | memset(p + off, 0, chunk); | |
756 | kunmap_local(p); | |
8cefc107 | 757 | i->head = i_head; |
241699cd AV |
758 | i->iov_offset = off + chunk; |
759 | n -= chunk; | |
8cefc107 DH |
760 | off = 0; |
761 | i_head++; | |
762 | } while (n); | |
241699cd AV |
763 | i->count -= bytes; |
764 | return bytes; | |
765 | } | |
766 | ||
c35e0248 MW |
767 | size_t iov_iter_zero(size_t bytes, struct iov_iter *i) |
768 | { | |
00e23707 | 769 | if (unlikely(iov_iter_is_pipe(i))) |
241699cd | 770 | return pipe_zero(bytes, i); |
7baa5099 AV |
771 | iterate_and_advance(i, bytes, base, len, count, |
772 | clear_user(base, len), | |
773 | memset(base, 0, len) | |
8442fa46 AV |
774 | ) |
775 | ||
776 | return bytes; | |
c35e0248 MW |
777 | } |
778 | EXPORT_SYMBOL(iov_iter_zero); | |
779 | ||
f0b65f39 AV |
780 | size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes, |
781 | struct iov_iter *i) | |
62a8067a | 782 | { |
04a31165 | 783 | char *kaddr = kmap_atomic(page), *p = kaddr + offset; |
72e809ed AV |
784 | if (unlikely(!page_copy_sane(page, offset, bytes))) { |
785 | kunmap_atomic(kaddr); | |
786 | return 0; | |
787 | } | |
9ea9ce04 | 788 | if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { |
241699cd AV |
789 | kunmap_atomic(kaddr); |
790 | WARN_ON(1); | |
791 | return 0; | |
792 | } | |
7baa5099 AV |
793 | iterate_and_advance(i, bytes, base, len, off, |
794 | copyin(p + off, base, len), | |
795 | memcpy(p + off, base, len) | |
04a31165 AV |
796 | ) |
797 | kunmap_atomic(kaddr); | |
798 | return bytes; | |
62a8067a | 799 | } |
f0b65f39 | 800 | EXPORT_SYMBOL(copy_page_from_iter_atomic); |
62a8067a | 801 | |
b9dc6f65 AV |
802 | static inline void pipe_truncate(struct iov_iter *i) |
803 | { | |
804 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
805 | unsigned int p_tail = pipe->tail; |
806 | unsigned int p_head = pipe->head; | |
807 | unsigned int p_mask = pipe->ring_size - 1; | |
808 | ||
809 | if (!pipe_empty(p_head, p_tail)) { | |
810 | struct pipe_buffer *buf; | |
811 | unsigned int i_head = i->head; | |
b9dc6f65 | 812 | size_t off = i->iov_offset; |
8cefc107 | 813 | |
b9dc6f65 | 814 | if (off) { |
8cefc107 DH |
815 | buf = &pipe->bufs[i_head & p_mask]; |
816 | buf->len = off - buf->offset; | |
817 | i_head++; | |
b9dc6f65 | 818 | } |
8cefc107 DH |
819 | while (p_head != i_head) { |
820 | p_head--; | |
821 | pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]); | |
b9dc6f65 | 822 | } |
8cefc107 DH |
823 | |
824 | pipe->head = p_head; | |
b9dc6f65 AV |
825 | } |
826 | } | |
827 | ||
241699cd AV |
828 | static void pipe_advance(struct iov_iter *i, size_t size) |
829 | { | |
830 | struct pipe_inode_info *pipe = i->pipe; | |
241699cd | 831 | if (size) { |
b9dc6f65 | 832 | struct pipe_buffer *buf; |
8cefc107 DH |
833 | unsigned int p_mask = pipe->ring_size - 1; |
834 | unsigned int i_head = i->head; | |
b9dc6f65 | 835 | size_t off = i->iov_offset, left = size; |
8cefc107 | 836 | |
241699cd | 837 | if (off) /* make it relative to the beginning of buffer */ |
8cefc107 | 838 | left += off - pipe->bufs[i_head & p_mask].offset; |
241699cd | 839 | while (1) { |
8cefc107 | 840 | buf = &pipe->bufs[i_head & p_mask]; |
b9dc6f65 | 841 | if (left <= buf->len) |
241699cd | 842 | break; |
b9dc6f65 | 843 | left -= buf->len; |
8cefc107 | 844 | i_head++; |
241699cd | 845 | } |
8cefc107 | 846 | i->head = i_head; |
b9dc6f65 | 847 | i->iov_offset = buf->offset + left; |
241699cd | 848 | } |
b9dc6f65 AV |
849 | i->count -= size; |
850 | /* ... and discard everything past that point */ | |
851 | pipe_truncate(i); | |
241699cd AV |
852 | } |
853 | ||
54c8195b PB |
854 | static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) |
855 | { | |
18fa9af7 | 856 | const struct bio_vec *bvec, *end; |
54c8195b | 857 | |
18fa9af7 AV |
858 | if (!i->count) |
859 | return; | |
860 | i->count -= size; | |
861 | ||
862 | size += i->iov_offset; | |
54c8195b | 863 | |
18fa9af7 AV |
864 | for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) { |
865 | if (likely(size < bvec->bv_len)) | |
866 | break; | |
867 | size -= bvec->bv_len; | |
868 | } | |
869 | i->iov_offset = size; | |
870 | i->nr_segs -= bvec - i->bvec; | |
871 | i->bvec = bvec; | |
54c8195b PB |
872 | } |
873 | ||
185ac4d4 AV |
874 | static void iov_iter_iovec_advance(struct iov_iter *i, size_t size) |
875 | { | |
876 | const struct iovec *iov, *end; | |
877 | ||
878 | if (!i->count) | |
879 | return; | |
880 | i->count -= size; | |
881 | ||
882 | size += i->iov_offset; // from beginning of current segment | |
883 | for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) { | |
884 | if (likely(size < iov->iov_len)) | |
885 | break; | |
886 | size -= iov->iov_len; | |
887 | } | |
888 | i->iov_offset = size; | |
889 | i->nr_segs -= iov - i->iov; | |
890 | i->iov = iov; | |
891 | } | |
892 | ||
62a8067a AV |
893 | void iov_iter_advance(struct iov_iter *i, size_t size) |
894 | { | |
3b3fc051 AV |
895 | if (unlikely(i->count < size)) |
896 | size = i->count; | |
185ac4d4 AV |
897 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) { |
898 | /* iovec and kvec have identical layouts */ | |
899 | iov_iter_iovec_advance(i, size); | |
900 | } else if (iov_iter_is_bvec(i)) { | |
901 | iov_iter_bvec_advance(i, size); | |
902 | } else if (iov_iter_is_pipe(i)) { | |
241699cd | 903 | pipe_advance(i, size); |
185ac4d4 | 904 | } else if (unlikely(iov_iter_is_xarray(i))) { |
7ff50620 DH |
905 | i->iov_offset += size; |
906 | i->count -= size; | |
185ac4d4 AV |
907 | } else if (iov_iter_is_discard(i)) { |
908 | i->count -= size; | |
54c8195b | 909 | } |
62a8067a AV |
910 | } |
911 | EXPORT_SYMBOL(iov_iter_advance); | |
912 | ||
27c0e374 AV |
913 | void iov_iter_revert(struct iov_iter *i, size_t unroll) |
914 | { | |
915 | if (!unroll) | |
916 | return; | |
5b47d59a AV |
917 | if (WARN_ON(unroll > MAX_RW_COUNT)) |
918 | return; | |
27c0e374 | 919 | i->count += unroll; |
00e23707 | 920 | if (unlikely(iov_iter_is_pipe(i))) { |
27c0e374 | 921 | struct pipe_inode_info *pipe = i->pipe; |
8cefc107 DH |
922 | unsigned int p_mask = pipe->ring_size - 1; |
923 | unsigned int i_head = i->head; | |
27c0e374 AV |
924 | size_t off = i->iov_offset; |
925 | while (1) { | |
8cefc107 DH |
926 | struct pipe_buffer *b = &pipe->bufs[i_head & p_mask]; |
927 | size_t n = off - b->offset; | |
27c0e374 | 928 | if (unroll < n) { |
4fa55cef | 929 | off -= unroll; |
27c0e374 AV |
930 | break; |
931 | } | |
932 | unroll -= n; | |
8cefc107 | 933 | if (!unroll && i_head == i->start_head) { |
27c0e374 AV |
934 | off = 0; |
935 | break; | |
936 | } | |
8cefc107 DH |
937 | i_head--; |
938 | b = &pipe->bufs[i_head & p_mask]; | |
939 | off = b->offset + b->len; | |
27c0e374 AV |
940 | } |
941 | i->iov_offset = off; | |
8cefc107 | 942 | i->head = i_head; |
27c0e374 AV |
943 | pipe_truncate(i); |
944 | return; | |
945 | } | |
9ea9ce04 DH |
946 | if (unlikely(iov_iter_is_discard(i))) |
947 | return; | |
27c0e374 AV |
948 | if (unroll <= i->iov_offset) { |
949 | i->iov_offset -= unroll; | |
950 | return; | |
951 | } | |
952 | unroll -= i->iov_offset; | |
7ff50620 DH |
953 | if (iov_iter_is_xarray(i)) { |
954 | BUG(); /* We should never go beyond the start of the specified | |
955 | * range since we might then be straying into pages that | |
956 | * aren't pinned. | |
957 | */ | |
958 | } else if (iov_iter_is_bvec(i)) { | |
27c0e374 AV |
959 | const struct bio_vec *bvec = i->bvec; |
960 | while (1) { | |
961 | size_t n = (--bvec)->bv_len; | |
962 | i->nr_segs++; | |
963 | if (unroll <= n) { | |
964 | i->bvec = bvec; | |
965 | i->iov_offset = n - unroll; | |
966 | return; | |
967 | } | |
968 | unroll -= n; | |
969 | } | |
970 | } else { /* same logics for iovec and kvec */ | |
971 | const struct iovec *iov = i->iov; | |
972 | while (1) { | |
973 | size_t n = (--iov)->iov_len; | |
974 | i->nr_segs++; | |
975 | if (unroll <= n) { | |
976 | i->iov = iov; | |
977 | i->iov_offset = n - unroll; | |
978 | return; | |
979 | } | |
980 | unroll -= n; | |
981 | } | |
982 | } | |
983 | } | |
984 | EXPORT_SYMBOL(iov_iter_revert); | |
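/*
 * Editor's sketch (not part of this file): iov_iter_revert() lets a caller
 * hand back the part of a copy that a downstream consumer did not accept, so
 * the iterator's position matches the bytes actually used.  The helper name
 * and the consume() callback are illustrative assumptions.
 */
static size_t copy_for_consumer(void *dst, size_t want, struct iov_iter *from,
				size_t (*consume)(const void *, size_t))
{
	size_t copied = copy_from_iter(dst, want, from);
	size_t used = consume(dst, copied);

	if (used < copied)
		/* hand back the tail the consumer did not take */
		iov_iter_revert(from, copied - used);
	return used;
}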
985 | ||
62a8067a AV |
986 | /* |
987 | * Return the count of just the current iov_iter segment. | |
988 | */ | |
989 | size_t iov_iter_single_seg_count(const struct iov_iter *i) | |
990 | { | |
28f38db7 AV |
991 | if (i->nr_segs > 1) { |
992 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) | |
993 | return min(i->count, i->iov->iov_len - i->iov_offset); | |
994 | if (iov_iter_is_bvec(i)) | |
995 | return min(i->count, i->bvec->bv_len - i->iov_offset); | |
996 | } | |
997 | return i->count; | |
62a8067a AV |
998 | } |
999 | EXPORT_SYMBOL(iov_iter_single_seg_count); | |
1000 | ||
aa563d7b | 1001 | void iov_iter_kvec(struct iov_iter *i, unsigned int direction, |
05afcb77 | 1002 | const struct kvec *kvec, unsigned long nr_segs, |
abb78f87 AV |
1003 | size_t count) |
1004 | { | |
aa563d7b | 1005 | WARN_ON(direction & ~(READ | WRITE)); |
8cd54c1c AV |
1006 | *i = (struct iov_iter){ |
1007 | .iter_type = ITER_KVEC, | |
1008 | .data_source = direction, | |
1009 | .kvec = kvec, | |
1010 | .nr_segs = nr_segs, | |
1011 | .iov_offset = 0, | |
1012 | .count = count | |
1013 | }; | |
abb78f87 AV |
1014 | } |
1015 | EXPORT_SYMBOL(iov_iter_kvec); | |
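/*
 * Editor's sketch (not part of this file): ITER_KVEC wraps plain kernel
 * memory.  Here one kernel buffer is presented as a WRITE (source) iterator
 * and drained with copy_from_iter().  Names are illustrative.
 */
static size_t drain_kernel_buf(void *dst, void *src, size_t len)
{
	struct kvec kv = { .iov_base = src, .iov_len = len };
	struct iov_iter iter;

	/* WRITE: the iterator is the data source. */
	iov_iter_kvec(&iter, WRITE, &kv, 1, len);
	return copy_from_iter(dst, len, &iter);
}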
1016 | ||
aa563d7b | 1017 | void iov_iter_bvec(struct iov_iter *i, unsigned int direction, |
05afcb77 AV |
1018 | const struct bio_vec *bvec, unsigned long nr_segs, |
1019 | size_t count) | |
1020 | { | |
aa563d7b | 1021 | WARN_ON(direction & ~(READ | WRITE)); |
8cd54c1c AV |
1022 | *i = (struct iov_iter){ |
1023 | .iter_type = ITER_BVEC, | |
1024 | .data_source = direction, | |
1025 | .bvec = bvec, | |
1026 | .nr_segs = nr_segs, | |
1027 | .iov_offset = 0, | |
1028 | .count = count | |
1029 | }; | |
05afcb77 AV |
1030 | } |
1031 | EXPORT_SYMBOL(iov_iter_bvec); | |
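/*
 * Editor's sketch (not part of this file): ITER_BVEC describes pages rather
 * than virtual addresses; a single page fragment is wrapped as a READ
 * (destination) iterator and filled from a kernel buffer.  Names are
 * illustrative.
 */
static size_t fill_page_fragment(struct page *page, unsigned int off,
				 unsigned int len, const void *src)
{
	struct bio_vec bv = {
		.bv_page   = page,
		.bv_offset = off,
		.bv_len    = len,
	};
	struct iov_iter iter;

	iov_iter_bvec(&iter, READ, &bv, 1, len);
	return copy_to_iter(src, len, &iter);
}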
1032 | ||
aa563d7b | 1033 | void iov_iter_pipe(struct iov_iter *i, unsigned int direction, |
241699cd AV |
1034 | struct pipe_inode_info *pipe, |
1035 | size_t count) | |
1036 | { | |
aa563d7b | 1037 | BUG_ON(direction != READ); |
8cefc107 | 1038 | WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size)); |
8cd54c1c AV |
1039 | *i = (struct iov_iter){ |
1040 | .iter_type = ITER_PIPE, | |
1041 | .data_source = false, | |
1042 | .pipe = pipe, | |
1043 | .head = pipe->head, | |
1044 | .start_head = pipe->head, | |
1045 | .iov_offset = 0, | |
1046 | .count = count | |
1047 | }; | |
241699cd AV |
1048 | } |
1049 | EXPORT_SYMBOL(iov_iter_pipe); | |
1050 | ||
7ff50620 DH |
1051 | /** |
1052 | * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray | |
1053 | * @i: The iterator to initialise. | |
1054 | * @direction: The direction of the transfer. | |
1055 | * @xarray: The xarray to access. | |
1056 | * @start: The start file position. | |
1057 | * @count: The size of the I/O buffer in bytes. | |
1058 | * | |
1059 | * Set up an I/O iterator to either draw data out of the pages attached to an | |
1060 | * inode or to inject data into those pages. The pages *must* be prevented | |
1061 | * from evaporation: the caller must either hold a reference to them or
1062 | * keep them locked.
1063 | */ | |
1064 | void iov_iter_xarray(struct iov_iter *i, unsigned int direction, | |
1065 | struct xarray *xarray, loff_t start, size_t count) | |
1066 | { | |
1067 | BUG_ON(direction & ~1); | |
8cd54c1c AV |
1068 | *i = (struct iov_iter) { |
1069 | .iter_type = ITER_XARRAY, | |
1070 | .data_source = direction, | |
1071 | .xarray = xarray, | |
1072 | .xarray_start = start, | |
1073 | .count = count, | |
1074 | .iov_offset = 0 | |
1075 | }; | |
7ff50620 DH |
1076 | } |
1077 | EXPORT_SYMBOL(iov_iter_xarray); | |
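/*
 * Editor's sketch (not part of this file): ITER_XARRAY is aimed at pagecache
 * ranges; the iterator walks mapping->i_pages directly, and the caller is
 * responsible for keeping those pages present (referenced or locked) as the
 * comment above requires.  The helper name is an illustrative assumption.
 */
static size_t zero_cached_range(struct address_space *mapping,
				loff_t start, size_t len)
{
	struct iov_iter iter;

	iov_iter_xarray(&iter, READ, &mapping->i_pages, start, len);
	return iov_iter_zero(len, &iter);
}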
1078 | ||
9ea9ce04 DH |
1079 | /** |
1080 | * iov_iter_discard - Initialise an I/O iterator that discards data | |
1081 | * @i: The iterator to initialise. | |
1082 | * @direction: The direction of the transfer. | |
1083 | * @count: The size of the I/O buffer in bytes. | |
1084 | * | |
1085 | * Set up an I/O iterator that just discards everything that's written to it. | |
1086 | * It's only available as a READ iterator. | |
1087 | */ | |
1088 | void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) | |
1089 | { | |
1090 | BUG_ON(direction != READ); | |
8cd54c1c AV |
1091 | *i = (struct iov_iter){ |
1092 | .iter_type = ITER_DISCARD, | |
1093 | .data_source = false, | |
1094 | .count = count, | |
1095 | .iov_offset = 0 | |
1096 | }; | |
9ea9ce04 DH |
1097 | } |
1098 | EXPORT_SYMBOL(iov_iter_discard); | |
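/*
 * Editor's sketch (not part of this file): ITER_DISCARD is a destination
 * that throws data away, which is handy when a producer insists on writing
 * but the caller only needs to skip @len bytes.  The helper name is an
 * illustrative assumption.
 */
static void discard_bytes(const void *src, size_t len)
{
	struct iov_iter sink;

	iov_iter_discard(&sink, READ, len);
	/* the "copy" only decrements sink.count; nothing is stored */
	WARN_ON(copy_to_iter(src, len, &sink) != len);
}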
1099 | ||
cfa320f7 KB |
1100 | static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask, |
1101 | unsigned len_mask) | |
1102 | { | |
1103 | size_t size = i->count; | |
1104 | size_t skip = i->iov_offset; | |
1105 | unsigned k; | |
1106 | ||
1107 | for (k = 0; k < i->nr_segs; k++, skip = 0) { | |
1108 | size_t len = i->iov[k].iov_len - skip; | |
1109 | ||
1110 | if (len > size) | |
1111 | len = size; | |
1112 | if (len & len_mask) | |
1113 | return false; | |
1114 | if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask) | |
1115 | return false; | |
1116 | ||
1117 | size -= len; | |
1118 | if (!size) | |
1119 | break; | |
1120 | } | |
1121 | return true; | |
1122 | } | |
1123 | ||
1124 | static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask, | |
1125 | unsigned len_mask) | |
1126 | { | |
1127 | size_t size = i->count; | |
1128 | unsigned skip = i->iov_offset; | |
1129 | unsigned k; | |
1130 | ||
1131 | for (k = 0; k < i->nr_segs; k++, skip = 0) { | |
1132 | size_t len = i->bvec[k].bv_len - skip; | |
1133 | ||
1134 | if (len > size) | |
1135 | len = size; | |
1136 | if (len & len_mask) | |
1137 | return false; | |
1138 | if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask) | |
1139 | return false; | |
1140 | ||
1141 | size -= len; | |
1142 | if (!size) | |
1143 | break; | |
1144 | } | |
1145 | return true; | |
1146 | } | |
1147 | ||
1148 | /** | |
1149 | * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
1150 | * are aligned to the parameters.
1151 | *
1152 | * @i: &struct iov_iter to check
1153 | * @addr_mask: bit mask to check against the iov element's addresses | |
1154 | * @len_mask: bit mask to check against the iov element's lengths | |
1155 | * | |
1156 | * Return: false if any addresses or lengths intersect with the provided masks | |
1157 | */ | |
1158 | bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask, | |
1159 | unsigned len_mask) | |
1160 | { | |
1161 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) | |
1162 | return iov_iter_aligned_iovec(i, addr_mask, len_mask); | |
1163 | ||
1164 | if (iov_iter_is_bvec(i)) | |
1165 | return iov_iter_aligned_bvec(i, addr_mask, len_mask); | |
1166 | ||
1167 | if (iov_iter_is_pipe(i)) { | |
1168 | unsigned int p_mask = i->pipe->ring_size - 1; | |
1169 | size_t size = i->count; | |
1170 | ||
1171 | if (size & len_mask) | |
1172 | return false; | |
1173 | if (size && allocated(&i->pipe->bufs[i->head & p_mask])) { | |
1174 | if (i->iov_offset & addr_mask) | |
1175 | return false; | |
1176 | } | |
1177 | ||
1178 | return true; | |
1179 | } | |
1180 | ||
1181 | if (iov_iter_is_xarray(i)) { | |
1182 | if (i->count & len_mask) | |
1183 | return false; | |
1184 | if ((i->xarray_start + i->iov_offset) & addr_mask) | |
1185 | return false; | |
1186 | } | |
1187 | ||
1188 | return true; | |
1189 | } | |
1190 | EXPORT_SYMBOL_GPL(iov_iter_is_aligned); | |
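/*
 * Editor's sketch (not part of this file): a direct-I/O style caller can
 * reject a request up front unless every segment starts and ends on a
 * logical-block boundary.  @lbs below is an illustrative power-of-two size
 * (e.g. 512 or 4096).
 */
static bool dio_iter_ok(const struct iov_iter *i, unsigned int lbs)
{
	return iov_iter_is_aligned(i, lbs - 1, lbs - 1);
}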
1191 | ||
9221d2e3 | 1192 | static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) |
62a8067a | 1193 | { |
04a31165 AV |
1194 | unsigned long res = 0; |
1195 | size_t size = i->count; | |
9221d2e3 AV |
1196 | size_t skip = i->iov_offset; |
1197 | unsigned k; | |
1198 | ||
1199 | for (k = 0; k < i->nr_segs; k++, skip = 0) { | |
1200 | size_t len = i->iov[k].iov_len - skip; | |
1201 | if (len) { | |
1202 | res |= (unsigned long)i->iov[k].iov_base + skip; | |
1203 | if (len > size) | |
1204 | len = size; | |
1205 | res |= len; | |
1206 | size -= len; | |
1207 | if (!size) | |
1208 | break; | |
1209 | } | |
1210 | } | |
1211 | return res; | |
1212 | } | |
04a31165 | 1213 | |
9221d2e3 AV |
1214 | static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i) |
1215 | { | |
1216 | unsigned res = 0; | |
1217 | size_t size = i->count; | |
1218 | unsigned skip = i->iov_offset; | |
1219 | unsigned k; | |
1220 | ||
1221 | for (k = 0; k < i->nr_segs; k++, skip = 0) { | |
1222 | size_t len = i->bvec[k].bv_len - skip; | |
1223 | res |= (unsigned long)i->bvec[k].bv_offset + skip; | |
1224 | if (len > size) | |
1225 | len = size; | |
1226 | res |= len; | |
1227 | size -= len; | |
1228 | if (!size) | |
1229 | break; | |
1230 | } | |
1231 | return res; | |
1232 | } | |
1233 | ||
1234 | unsigned long iov_iter_alignment(const struct iov_iter *i) | |
1235 | { | |
1236 | /* iovec and kvec have identical layouts */ | |
1237 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) | |
1238 | return iov_iter_alignment_iovec(i); | |
1239 | ||
1240 | if (iov_iter_is_bvec(i)) | |
1241 | return iov_iter_alignment_bvec(i); | |
1242 | ||
1243 | if (iov_iter_is_pipe(i)) { | |
e0ff126e | 1244 | unsigned int p_mask = i->pipe->ring_size - 1; |
9221d2e3 | 1245 | size_t size = i->count; |
e0ff126e | 1246 | |
8cefc107 | 1247 | if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask])) |
241699cd AV |
1248 | return size | i->iov_offset; |
1249 | return size; | |
1250 | } | |
9221d2e3 AV |
1251 | |
1252 | if (iov_iter_is_xarray(i)) | |
3d14ec1f | 1253 | return (i->xarray_start + i->iov_offset) | i->count; |
9221d2e3 AV |
1254 | |
1255 | return 0; | |
62a8067a AV |
1256 | } |
1257 | EXPORT_SYMBOL(iov_iter_alignment); | |
1258 | ||
357f435d AV |
1259 | unsigned long iov_iter_gap_alignment(const struct iov_iter *i) |
1260 | { | |
33844e66 | 1261 | unsigned long res = 0; |
610c7a71 | 1262 | unsigned long v = 0; |
357f435d | 1263 | size_t size = i->count; |
610c7a71 | 1264 | unsigned k; |
357f435d | 1265 | |
610c7a71 | 1266 | if (WARN_ON(!iter_is_iovec(i))) |
241699cd | 1267 | return ~0U; |
241699cd | 1268 | |
610c7a71 AV |
1269 | for (k = 0; k < i->nr_segs; k++) { |
1270 | if (i->iov[k].iov_len) { | |
1271 | unsigned long base = (unsigned long)i->iov[k].iov_base; | |
1272 | if (v) // if not the first one | |
1273 | res |= base | v; // this start | previous end | |
1274 | v = base + i->iov[k].iov_len; | |
1275 | if (size <= i->iov[k].iov_len) | |
1276 | break; | |
1277 | size -= i->iov[k].iov_len; | |
1278 | } | |
1279 | } | |
33844e66 | 1280 | return res; |
357f435d AV |
1281 | } |
1282 | EXPORT_SYMBOL(iov_iter_gap_alignment); | |
1283 | ||
e76b6312 | 1284 | static inline ssize_t __pipe_get_pages(struct iov_iter *i, |
241699cd AV |
1285 | size_t maxsize, |
1286 | struct page **pages, | |
8cefc107 | 1287 | int iter_head, |
241699cd AV |
1288 | size_t *start) |
1289 | { | |
1290 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
1291 | unsigned int p_mask = pipe->ring_size - 1; |
1292 | ssize_t n = push_pipe(i, maxsize, &iter_head, start); | |
241699cd AV |
1293 | if (!n) |
1294 | return -EFAULT; | |
1295 | ||
1296 | maxsize = n; | |
1297 | n += *start; | |
1689c73a | 1298 | while (n > 0) { |
8cefc107 DH |
1299 | get_page(*pages++ = pipe->bufs[iter_head & p_mask].page); |
1300 | iter_head++; | |
241699cd AV |
1301 | n -= PAGE_SIZE; |
1302 | } | |
1303 | ||
1304 | return maxsize; | |
1305 | } | |
1306 | ||
1307 | static ssize_t pipe_get_pages(struct iov_iter *i, | |
1308 | struct page **pages, size_t maxsize, unsigned maxpages, | |
1309 | size_t *start) | |
1310 | { | |
8cefc107 | 1311 | unsigned int iter_head, npages; |
241699cd | 1312 | size_t capacity; |
241699cd AV |
1313 | |
1314 | if (!sanity(i)) | |
1315 | return -EFAULT; | |
1316 | ||
8cefc107 DH |
1317 | data_start(i, &iter_head, start); |
1318 | /* Amount of free space: some of this one + all after this one */ | |
1319 | npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); | |
1320 | capacity = min(npages, maxpages) * PAGE_SIZE - *start; | |
241699cd | 1321 | |
8cefc107 | 1322 | return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start); |
241699cd AV |
1323 | } |
1324 | ||
7ff50620 DH |
1325 | static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, |
1326 | pgoff_t index, unsigned int nr_pages) | |
1327 | { | |
1328 | XA_STATE(xas, xa, index); | |
1329 | struct page *page; | |
1330 | unsigned int ret = 0; | |
1331 | ||
1332 | rcu_read_lock(); | |
1333 | for (page = xas_load(&xas); page; page = xas_next(&xas)) { | |
1334 | if (xas_retry(&xas, page)) | |
1335 | continue; | |
1336 | ||
1337 | /* Has the page moved or been split? */ | |
1338 | if (unlikely(page != xas_reload(&xas))) { | |
1339 | xas_reset(&xas); | |
1340 | continue; | |
1341 | } | |
1342 | ||
1343 | pages[ret] = find_subpage(page, xas.xa_index); | |
1344 | get_page(pages[ret]); | |
1345 | if (++ret == nr_pages) | |
1346 | break; | |
1347 | } | |
1348 | rcu_read_unlock(); | |
1349 | return ret; | |
1350 | } | |
1351 | ||
1352 | static ssize_t iter_xarray_get_pages(struct iov_iter *i, | |
1353 | struct page **pages, size_t maxsize, | |
1354 | unsigned maxpages, size_t *_start_offset) | |
1355 | { | |
1356 | unsigned nr, offset; | |
1357 | pgoff_t index, count; | |
6c776766 | 1358 | size_t size = maxsize; |
7ff50620 DH |
1359 | loff_t pos; |
1360 | ||
1361 | if (!size || !maxpages) | |
1362 | return 0; | |
1363 | ||
1364 | pos = i->xarray_start + i->iov_offset; | |
1365 | index = pos >> PAGE_SHIFT; | |
1366 | offset = pos & ~PAGE_MASK; | |
1367 | *_start_offset = offset; | |
1368 | ||
1369 | count = 1; | |
1370 | if (size > PAGE_SIZE - offset) { | |
1371 | size -= PAGE_SIZE - offset; | |
1372 | count += size >> PAGE_SHIFT; | |
1373 | size &= ~PAGE_MASK; | |
1374 | if (size) | |
1375 | count++; | |
1376 | } | |
1377 | ||
1378 | if (count > maxpages) | |
1379 | count = maxpages; | |
1380 | ||
1381 | nr = iter_xarray_populate_pages(pages, i->xarray, index, count); | |
1382 | if (nr == 0) | |
1383 | return 0; | |
1384 | ||
1c27f1fc | 1385 | return min_t(size_t, nr * PAGE_SIZE - offset, maxsize); |
7ff50620 DH |
1386 | } |
1387 | ||
3d671ca6 | 1388 | /* must be done on non-empty ITER_IOVEC one */ |
dd45ab9d | 1389 | static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size) |
3d671ca6 AV |
1390 | { |
1391 | size_t skip; | |
1392 | long k; | |
1393 | ||
1394 | for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) { | |
3d671ca6 AV |
1395 | size_t len = i->iov[k].iov_len - skip; |
1396 | ||
1397 | if (unlikely(!len)) | |
1398 | continue; | |
59dbd7d0 AV |
1399 | if (*size > len) |
1400 | *size = len; | |
dd45ab9d | 1401 | return (unsigned long)i->iov[k].iov_base + skip; |
3d671ca6 AV |
1402 | } |
1403 | BUG(); // if it had been empty, we wouldn't get called | |
1404 | } | |
1405 | ||
1406 | /* must be done on non-empty ITER_BVEC one */ | |
1407 | static struct page *first_bvec_segment(const struct iov_iter *i, | |
59dbd7d0 | 1408 | size_t *size, size_t *start) |
3d671ca6 AV |
1409 | { |
1410 | struct page *page; | |
1411 | size_t skip = i->iov_offset, len; | |
1412 | ||
1413 | len = i->bvec->bv_len - skip; | |
59dbd7d0 AV |
1414 | if (*size > len) |
1415 | *size = len; | |
3d671ca6 AV |
1416 | skip += i->bvec->bv_offset; |
1417 | page = i->bvec->bv_page + skip / PAGE_SIZE; | |
dda8e5d1 | 1418 | *start = skip % PAGE_SIZE; |
3d671ca6 AV |
1419 | return page; |
1420 | } | |
1421 | ||
62a8067a | 1422 | ssize_t iov_iter_get_pages(struct iov_iter *i, |
2c80929c | 1423 | struct page **pages, size_t maxsize, unsigned maxpages, |
62a8067a AV |
1424 | size_t *start) |
1425 | { | |
3d671ca6 AV |
1426 | int n, res; |
1427 | ||
e5393fae AV |
1428 | if (maxsize > i->count) |
1429 | maxsize = i->count; | |
3d671ca6 AV |
1430 | if (!maxsize) |
1431 | return 0; | |
7392ed17 AV |
1432 | if (maxsize > MAX_RW_COUNT) |
1433 | maxsize = MAX_RW_COUNT; | |
e5393fae | 1434 | |
3d671ca6 | 1435 | if (likely(iter_is_iovec(i))) { |
3337ab08 | 1436 | unsigned int gup_flags = 0; |
3d671ca6 | 1437 | unsigned long addr; |
e5393fae | 1438 | |
3337ab08 AG |
1439 | if (iov_iter_rw(i) != WRITE) |
1440 | gup_flags |= FOLL_WRITE; | |
1441 | if (i->nofault) | |
1442 | gup_flags |= FOLL_NOFAULT; | |
1443 | ||
dd45ab9d AV |
1444 | addr = first_iovec_segment(i, &maxsize); |
1445 | *start = addr % PAGE_SIZE; | |
1446 | addr &= PAGE_MASK; | |
59dbd7d0 | 1447 | n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); |
dda8e5d1 AV |
1448 | if (n > maxpages) |
1449 | n = maxpages; | |
3337ab08 | 1450 | res = get_user_pages_fast(addr, n, gup_flags, pages); |
814a6674 | 1451 | if (unlikely(res <= 0)) |
e5393fae | 1452 | return res; |
59dbd7d0 | 1453 | return min_t(size_t, maxsize, res * PAGE_SIZE - *start); |
3d671ca6 AV |
1454 | } |
1455 | if (iov_iter_is_bvec(i)) { | |
1456 | struct page *page; | |
1457 | ||
59dbd7d0 AV |
1458 | page = first_bvec_segment(i, &maxsize, start); |
1459 | n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); | |
dda8e5d1 AV |
1460 | if (n > maxpages) |
1461 | n = maxpages; | |
1462 | for (int k = 0; k < n; k++) | |
3d671ca6 | 1463 | get_page(*pages++ = page++); |
59dbd7d0 | 1464 | return min_t(size_t, maxsize, n * PAGE_SIZE - *start); |
3d671ca6 AV |
1465 | } |
1466 | if (iov_iter_is_pipe(i)) | |
1467 | return pipe_get_pages(i, pages, maxsize, maxpages, start); | |
1468 | if (iov_iter_is_xarray(i)) | |
1469 | return iter_xarray_get_pages(i, pages, maxsize, maxpages, start); | |
1470 | return -EFAULT; | |
62a8067a AV |
1471 | } |
1472 | EXPORT_SYMBOL(iov_iter_get_pages); | |
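/*
 * Editor's sketch (not part of this file): iov_iter_get_pages() does not
 * advance the iterator, so a caller typically advances by the number of
 * bytes it actually mapped and later drops the page references.  The helper
 * name is an illustrative assumption.
 */
static ssize_t pin_front_pages(struct iov_iter *i, struct page **pages,
			       unsigned int maxpages, size_t *offset)
{
	ssize_t bytes = iov_iter_get_pages(i, pages, MAX_RW_COUNT, maxpages, offset);

	if (bytes > 0)
		iov_iter_advance(i, bytes);	/* consume what was mapped */
	return bytes;	/* caller must put_page() each page when done */
}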
1473 | ||
1b17f1f2 AV |
1474 | static struct page **get_pages_array(size_t n) |
1475 | { | |
752ade68 | 1476 | return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL); |
1b17f1f2 AV |
1477 | } |
1478 | ||
241699cd AV |
1479 | static ssize_t pipe_get_pages_alloc(struct iov_iter *i, |
1480 | struct page ***pages, size_t maxsize, | |
1481 | size_t *start) | |
1482 | { | |
1483 | struct page **p; | |
8cefc107 | 1484 | unsigned int iter_head, npages; |
d7760d63 | 1485 | ssize_t n; |
241699cd AV |
1486 | |
1487 | if (!sanity(i)) | |
1488 | return -EFAULT; | |
1489 | ||
8cefc107 DH |
1490 | data_start(i, &iter_head, start); |
1491 | /* Amount of free space: some of this one + all after this one */ | |
1492 | npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); | |
241699cd AV |
1493 | n = npages * PAGE_SIZE - *start; |
1494 | if (maxsize > n) | |
1495 | maxsize = n; | |
1496 | else | |
1497 | npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); | |
1498 | p = get_pages_array(npages); | |
1499 | if (!p) | |
1500 | return -ENOMEM; | |
8cefc107 | 1501 | n = __pipe_get_pages(i, maxsize, p, iter_head, start); |
241699cd AV |
1502 | if (n > 0) |
1503 | *pages = p; | |
1504 | else | |
1505 | kvfree(p); | |
1506 | return n; | |
1507 | } | |
1508 | ||
7ff50620 DH |
1509 | static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i, |
1510 | struct page ***pages, size_t maxsize, | |
1511 | size_t *_start_offset) | |
1512 | { | |
1513 | struct page **p; | |
1514 | unsigned nr, offset; | |
1515 | pgoff_t index, count; | |
6c776766 | 1516 | size_t size = maxsize; |
7ff50620 DH |
1517 | loff_t pos; |
1518 | ||
1519 | if (!size) | |
1520 | return 0; | |
1521 | ||
1522 | pos = i->xarray_start + i->iov_offset; | |
1523 | index = pos >> PAGE_SHIFT; | |
1524 | offset = pos & ~PAGE_MASK; | |
1525 | *_start_offset = offset; | |
1526 | ||
1527 | count = 1; | |
1528 | if (size > PAGE_SIZE - offset) { | |
1529 | size -= PAGE_SIZE - offset; | |
1530 | count += size >> PAGE_SHIFT; | |
1531 | size &= ~PAGE_MASK; | |
1532 | if (size) | |
1533 | count++; | |
1534 | } | |
1535 | ||
1536 | p = get_pages_array(count); | |
1537 | if (!p) | |
1538 | return -ENOMEM; | |
1539 | *pages = p; | |
1540 | ||
1541 | nr = iter_xarray_populate_pages(p, i->xarray, index, count); | |
1542 | if (nr == 0) | |
1543 | return 0; | |
1544 | ||
1c27f1fc | 1545 | return min_t(size_t, nr * PAGE_SIZE - offset, maxsize); |
7ff50620 DH |
1546 | } |
1547 | ||
62a8067a AV |
1548 | ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, |
1549 | struct page ***pages, size_t maxsize, | |
1550 | size_t *start) | |
1551 | { | |
1b17f1f2 | 1552 | struct page **p; |
3d671ca6 | 1553 | int n, res; |
1b17f1f2 AV |
1554 | |
1555 | if (maxsize > i->count) | |
1556 | maxsize = i->count; | |
3d671ca6 AV |
1557 | if (!maxsize) |
1558 | return 0; | |
7392ed17 AV |
1559 | if (maxsize > MAX_RW_COUNT) |
1560 | maxsize = MAX_RW_COUNT; | |
1b17f1f2 | 1561 | |
3d671ca6 | 1562 | if (likely(iter_is_iovec(i))) { |
3337ab08 | 1563 | unsigned int gup_flags = 0; |
3d671ca6 | 1564 | unsigned long addr; |
1b17f1f2 | 1565 | |
3337ab08 AG |
1566 | if (iov_iter_rw(i) != WRITE) |
1567 | gup_flags |= FOLL_WRITE; | |
1568 | if (i->nofault) | |
1569 | gup_flags |= FOLL_NOFAULT; | |
1570 | ||
dd45ab9d AV |
1571 | addr = first_iovec_segment(i, &maxsize); |
1572 | *start = addr % PAGE_SIZE; | |
1573 | addr &= PAGE_MASK; | |
59dbd7d0 | 1574 | n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); |
1b17f1f2 AV |
1575 | p = get_pages_array(n); |
1576 | if (!p) | |
1577 | return -ENOMEM; | |
3337ab08 | 1578 | res = get_user_pages_fast(addr, n, gup_flags, p); |
814a6674 | 1579 | if (unlikely(res <= 0)) { |
1b17f1f2 | 1580 | kvfree(p); |
814a6674 | 1581 | *pages = NULL; |
1b17f1f2 AV |
1582 | return res; |
1583 | } | |
1584 | *pages = p; | |
59dbd7d0 | 1585 | return min_t(size_t, maxsize, res * PAGE_SIZE - *start); |
3d671ca6 AV |
1586 | } |
1587 | if (iov_iter_is_bvec(i)) { | |
1588 | struct page *page; | |
1589 | ||
59dbd7d0 AV |
1590 | page = first_bvec_segment(i, &maxsize, start); |
1591 | n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); | |
3d671ca6 | 1592 | *pages = p = get_pages_array(n); |
1b17f1f2 AV |
1593 | if (!p) |
1594 | return -ENOMEM; | |
dda8e5d1 | 1595 | for (int k = 0; k < n; k++) |
3d671ca6 | 1596 | get_page(*p++ = page++); |
59dbd7d0 | 1597 | return min_t(size_t, maxsize, n * PAGE_SIZE - *start); |
3d671ca6 AV |
1598 | } |
1599 | if (iov_iter_is_pipe(i)) | |
1600 | return pipe_get_pages_alloc(i, pages, maxsize, start); | |
1601 | if (iov_iter_is_xarray(i)) | |
1602 | return iter_xarray_get_pages_alloc(i, pages, maxsize, start); | |
1603 | return -EFAULT; | |
62a8067a AV |
1604 | } |
1605 | EXPORT_SYMBOL(iov_iter_get_pages_alloc); | |
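/*
 * Illustrative sketch, not part of iov_iter.c itself (hypothetical names):
 * the _alloc variant allocates the page array with kvmalloc_array(), so a
 * caller releases the array with kvfree() and drops each page reference
 * with put_page() when the transfer is finished.
 */
static ssize_t example_pin_alloc(struct iov_iter *iter, size_t want)
{
	struct page **pages;
	size_t offset;
	ssize_t bytes;
	int k, npages;

	bytes = iov_iter_get_pages_alloc(iter, &pages, want, &offset);
	if (bytes <= 0)
		return bytes;
	npages = DIV_ROUND_UP(offset + bytes, PAGE_SIZE);
	/* ... hand {pages, offset, bytes} to the I/O path ... */
	for (k = 0; k < npages; k++)
		put_page(pages[k]);
	kvfree(pages);
	return bytes;
}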
1606 | ||
a604ec7e AV |
1607 | size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, |
1608 | struct iov_iter *i) | |
1609 | { | |
a604ec7e | 1610 | __wsum sum, next; |
a604ec7e | 1611 | sum = *csum; |
9ea9ce04 | 1612 | if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { |
241699cd AV |
1613 | WARN_ON(1); |
1614 | return 0; | |
1615 | } | |
7baa5099 AV |
1616 | iterate_and_advance(i, bytes, base, len, off, ({ |
1617 | next = csum_and_copy_from_user(base, addr + off, len); | |
2495bdcc | 1618 | sum = csum_block_add(sum, next, off); |
7baa5099 | 1619 | next ? 0 : len; |
a604ec7e | 1620 | }), ({ |
7baa5099 | 1621 | sum = csum_and_memcpy(addr + off, base, len, sum, off); |
a604ec7e AV |
1622 | }) |
1623 | ) | |
1624 | *csum = sum; | |
1625 | return bytes; | |
1626 | } | |
1627 | EXPORT_SYMBOL(csum_and_copy_from_iter); | |
1628 | ||
52cbd23a | 1629 | size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, |
a604ec7e AV |
1630 | struct iov_iter *i) |
1631 | { | |
52cbd23a | 1632 | struct csum_state *csstate = _csstate; |
a604ec7e | 1633 | __wsum sum, next; |
78e1f386 | 1634 | |
78e1f386 | 1635 | if (unlikely(iov_iter_is_discard(i))) { |
241699cd AV |
1636 | WARN_ON(1); /* for now */ |
1637 | return 0; | |
1638 | } | |
6852df12 AV |
1639 | |
1640 | sum = csum_shift(csstate->csum, csstate->off); | |
1641 | if (unlikely(iov_iter_is_pipe(i))) | |
1642 | bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum); | |
1643 | else iterate_and_advance(i, bytes, base, len, off, ({ | |
7baa5099 | 1644 | next = csum_and_copy_to_user(addr + off, base, len); |
2495bdcc | 1645 | sum = csum_block_add(sum, next, off); |
7baa5099 | 1646 | next ? 0 : len; |
a604ec7e | 1647 | }), ({ |
7baa5099 | 1648 | sum = csum_and_memcpy(base, addr + off, len, sum, off); |
a604ec7e AV |
1649 | }) |
1650 | ) | |
594e450b AV |
1651 | csstate->csum = csum_shift(sum, csstate->off); |
1652 | csstate->off += bytes; | |
a604ec7e AV |
1653 | return bytes; |
1654 | } | |
1655 | EXPORT_SYMBOL(csum_and_copy_to_iter); | |
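/*
 * Illustrative sketch, not part of iov_iter.c itself (hypothetical names):
 * the caller keeps a struct csum_state across calls so the running checksum
 * stays correctly aligned when the data is emitted in several chunks.  Here
 * it is seeded with zero for simplicity; real users typically start from a
 * partial sum such as a pseudo-header checksum.
 */
static size_t example_csum_copy_out(const void *buf, size_t len,
				    struct iov_iter *to)
{
	struct csum_state csstate = { .csum = 0, .off = 0 };
	size_t copied;

	copied = csum_and_copy_to_iter(buf, len, &csstate, to);
	/* csstate.csum now covers the bytes actually copied */
	return copied;
}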
1656 | ||
d05f4435 SG |
1657 | size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, |
1658 | struct iov_iter *i) | |
1659 | { | |
7999096f | 1660 | #ifdef CONFIG_CRYPTO_HASH |
d05f4435 SG |
1661 | struct ahash_request *hash = hashp; |
1662 | struct scatterlist sg; | |
1663 | size_t copied; | |
1664 | ||
1665 | copied = copy_to_iter(addr, bytes, i); | |
1666 | sg_init_one(&sg, addr, copied); | |
1667 | ahash_request_set_crypt(hash, &sg, NULL, copied); | |
1668 | crypto_ahash_update(hash); | |
1669 | return copied; | |
27fad74a Y |
1670 | #else |
1671 | return 0; | |
1672 | #endif | |
d05f4435 SG |
1673 | } |
1674 | EXPORT_SYMBOL(hash_and_copy_to_iter); | |
1675 | ||
66531c65 | 1676 | static int iov_npages(const struct iov_iter *i, int maxpages) |
62a8067a | 1677 | { |
66531c65 AV |
1678 | size_t skip = i->iov_offset, size = i->count; |
1679 | const struct iovec *p; | |
e0f2dc40 AV |
1680 | int npages = 0; |
1681 | ||
66531c65 AV |
1682 | for (p = i->iov; size; skip = 0, p++) { |
1683 | unsigned offs = offset_in_page(p->iov_base + skip); | |
1684 | size_t len = min(p->iov_len - skip, size); | |
e0f2dc40 | 1685 | |
66531c65 AV |
1686 | if (len) { |
1687 | size -= len; | |
1688 | npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); | |
1689 | if (unlikely(npages > maxpages)) | |
1690 | return maxpages; | |
1691 | } | |
1692 | } | |
1693 | return npages; | |
1694 | } | |
1695 | ||
1696 | static int bvec_npages(const struct iov_iter *i, int maxpages) | |
1697 | { | |
1698 | size_t skip = i->iov_offset, size = i->count; | |
1699 | const struct bio_vec *p; | |
1700 | int npages = 0; | |
1701 | ||
1702 | for (p = i->bvec; size; skip = 0, p++) { | |
1703 | unsigned offs = (p->bv_offset + skip) % PAGE_SIZE; | |
1704 | size_t len = min(p->bv_len - skip, size); | |
1705 | ||
1706 | size -= len; | |
1707 | npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); | |
1708 | if (unlikely(npages > maxpages)) | |
1709 | return maxpages; | |
1710 | } | |
1711 | return npages; | |
1712 | } | |
1713 | ||
1714 | int iov_iter_npages(const struct iov_iter *i, int maxpages) | |
1715 | { | |
1716 | if (unlikely(!i->count)) | |
1717 | return 0; | |
1718 | /* iovec and kvec have identical layouts */ | |
1719 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) | |
1720 | return iov_npages(i, maxpages); | |
1721 | if (iov_iter_is_bvec(i)) | |
1722 | return bvec_npages(i, maxpages); | |
1723 | if (iov_iter_is_pipe(i)) { | |
8cefc107 | 1724 | unsigned int iter_head; |
66531c65 | 1725 | int npages; |
241699cd | 1726 | size_t off; |
241699cd AV |
1727 | |
1728 | if (!sanity(i)) | |
1729 | return 0; | |
1730 | ||
8cefc107 | 1731 | data_start(i, &iter_head, &off); |
241699cd | 1732 | /* some of this one + all after this one */ |
66531c65 AV |
1733 | npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); |
1734 | return min(npages, maxpages); | |
1735 | } | |
1736 | if (iov_iter_is_xarray(i)) { | |
e4f8df86 AV |
1737 | unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; |
1738 | int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); | |
66531c65 AV |
1739 | return min(npages, maxpages); |
1740 | } | |
1741 | return 0; | |
62a8067a | 1742 | } |
f67da30c | 1743 | EXPORT_SYMBOL(iov_iter_npages); |
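/*
 * Illustrative sketch, not part of iov_iter.c itself (hypothetical names):
 * iov_iter_npages() is typically used to size an allocation before mapping
 * the iterator; the result is already capped at the limit the caller can
 * handle.
 */
static struct page **example_size_page_array(const struct iov_iter *iter,
					     int max, int *npages)
{
	*npages = iov_iter_npages(iter, max);
	if (!*npages)
		return NULL;
	return kvmalloc_array(*npages, sizeof(struct page *), GFP_KERNEL);
}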
4b8164b9 AV |
1744 | |
1745 | const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) | |
1746 | { | |
1747 | *new = *old; | |
00e23707 | 1748 | if (unlikely(iov_iter_is_pipe(new))) { |
241699cd AV |
1749 | WARN_ON(1); |
1750 | return NULL; | |
1751 | } | |
7ff50620 | 1752 | if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new))) |
9ea9ce04 | 1753 | return NULL; |
00e23707 | 1754 | if (iov_iter_is_bvec(new)) |
4b8164b9 AV |
1755 | return new->bvec = kmemdup(new->bvec, |
1756 | new->nr_segs * sizeof(struct bio_vec), | |
1757 | flags); | |
1758 | else | |
1759 | /* iovec and kvec have identical layout */ | |
1760 | return new->iov = kmemdup(new->iov, | |
1761 | new->nr_segs * sizeof(struct iovec), | |
1762 | flags); | |
1763 | } | |
1764 | EXPORT_SYMBOL(dup_iter); | |
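/*
 * Illustrative sketch, not part of iov_iter.c itself (hypothetical names):
 * dup_iter() copies the iterator and duplicates its segment array; the
 * returned pointer is that duplicate, kept only so the caller can kfree()
 * it once the copied iterator is no longer needed.  kfree(NULL) is a no-op,
 * which also covers the discard/xarray cases where nothing is duplicated.
 */
static void example_dup_and_drop(struct iov_iter *src)
{
	struct iov_iter copy;
	const void *dup;

	dup = dup_iter(&copy, src, GFP_KERNEL);
	/* ... use 'copy' while 'src' may be advanced elsewhere ... */
	kfree(dup);
}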
bc917be8 | 1765 | |
bfdc5970 CH |
1766 | static int copy_compat_iovec_from_user(struct iovec *iov, |
1767 | const struct iovec __user *uvec, unsigned long nr_segs) | |
1768 | { | |
1769 | const struct compat_iovec __user *uiov = | |
1770 | (const struct compat_iovec __user *)uvec; | |
1771 | int ret = -EFAULT, i; | |
1772 | ||
a959a978 | 1773 | if (!user_access_begin(uiov, nr_segs * sizeof(*uiov))) |
bfdc5970 CH |
1774 | return -EFAULT; |
1775 | ||
1776 | for (i = 0; i < nr_segs; i++) { | |
1777 | compat_uptr_t buf; | |
1778 | compat_ssize_t len; | |
1779 | ||
1780 | unsafe_get_user(len, &uiov[i].iov_len, uaccess_end); | |
1781 | unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end); | |
1782 | ||
1783 | /* check for compat_size_t not fitting in compat_ssize_t .. */ | |
1784 | if (len < 0) { | |
1785 | ret = -EINVAL; | |
1786 | goto uaccess_end; | |
1787 | } | |
1788 | iov[i].iov_base = compat_ptr(buf); | |
1789 | iov[i].iov_len = len; | |
1790 | } | |
1791 | ||
1792 | ret = 0; | |
1793 | uaccess_end: | |
1794 | user_access_end(); | |
1795 | return ret; | |
1796 | } | |
1797 | ||
1798 | static int copy_iovec_from_user(struct iovec *iov, | |
1799 | const struct iovec __user *uvec, unsigned long nr_segs) | |
fb041b59 DL |
1800 | { |
1801 | unsigned long seg; | |
fb041b59 | 1802 | |
bfdc5970 CH |
1803 | if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec))) |
1804 | return -EFAULT; | |
1805 | for (seg = 0; seg < nr_segs; seg++) { | |
1806 | if ((ssize_t)iov[seg].iov_len < 0) | |
1807 | return -EINVAL; | |
fb041b59 DL |
1808 | } |
1809 | ||
bfdc5970 CH |
1810 | return 0; |
1811 | } | |
1812 | ||
1813 | struct iovec *iovec_from_user(const struct iovec __user *uvec, | |
1814 | unsigned long nr_segs, unsigned long fast_segs, | |
1815 | struct iovec *fast_iov, bool compat) | |
1816 | { | |
1817 | struct iovec *iov = fast_iov; | |
1818 | int ret; | |
1819 | ||
fb041b59 | 1820 | /* |
bfdc5970 CH |
1821 | * SuS says "The readv() function *may* fail if the iovcnt argument was |
1822 | * less than or equal to 0, or greater than {IOV_MAX}." Linux has | |
1823 | * traditionally returned zero for zero segments, so... | |
fb041b59 | 1824 | */ |
bfdc5970 CH |
1825 | if (nr_segs == 0) |
1826 | return iov; | |
1827 | if (nr_segs > UIO_MAXIOV) | |
1828 | return ERR_PTR(-EINVAL); | |
fb041b59 DL |
1829 | if (nr_segs > fast_segs) { |
1830 | iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL); | |
bfdc5970 CH |
1831 | if (!iov) |
1832 | return ERR_PTR(-ENOMEM); | |
fb041b59 | 1833 | } |
bfdc5970 CH |
1834 | |
1835 | if (compat) | |
1836 | ret = copy_compat_iovec_from_user(iov, uvec, nr_segs); | |
1837 | else | |
1838 | ret = copy_iovec_from_user(iov, uvec, nr_segs); | |
1839 | if (ret) { | |
1840 | if (iov != fast_iov) | |
1841 | kfree(iov); | |
1842 | return ERR_PTR(ret); | |
1843 | } | |
1844 | ||
1845 | return iov; | |
1846 | } | |
1847 | ||
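/*
 * Illustrative sketch, not part of iov_iter.c itself (hypothetical names):
 * the usual calling convention for iovec_from_user() is to offer a small
 * on-stack array (UIO_FASTIOV entries) and free the result only when a
 * heap copy was actually made.
 */
static int example_fetch_user_iovec(const struct iovec __user *uvec,
				    unsigned long nr_segs)
{
	struct iovec stack_iov[UIO_FASTIOV], *iov;

	iov = iovec_from_user(uvec, nr_segs, UIO_FASTIOV, stack_iov, false);
	if (IS_ERR(iov))
		return PTR_ERR(iov);
	/* ... validate and consume iov[0 .. nr_segs) ... */
	if (iov != stack_iov)
		kfree(iov);
	return 0;
}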
1848 | ssize_t __import_iovec(int type, const struct iovec __user *uvec, | |
1849 | unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, | |
1850 | struct iov_iter *i, bool compat) | |
1851 | { | |
1852 | ssize_t total_len = 0; | |
1853 | unsigned long seg; | |
1854 | struct iovec *iov; | |
1855 | ||
1856 | iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat); | |
1857 | if (IS_ERR(iov)) { | |
1858 | *iovp = NULL; | |
1859 | return PTR_ERR(iov); | |
fb041b59 DL |
1860 | } |
1861 | ||
1862 | /* | |
bfdc5970 CH |
1863 | * According to the Single Unix Specification we should return EINVAL if |
1864 | * an element length is < 0 when cast to ssize_t or if the total length | |
1865 | * would overflow the ssize_t return value of the system call. | |
fb041b59 DL |
1866 | * |
1867 | * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the | |
1868 | * overflow case. | |
1869 | */ | |
fb041b59 | 1870 | for (seg = 0; seg < nr_segs; seg++) { |
fb041b59 DL |
1871 | ssize_t len = (ssize_t)iov[seg].iov_len; |
1872 | ||
bfdc5970 CH |
1873 | if (!access_ok(iov[seg].iov_base, len)) { |
1874 | if (iov != *iovp) | |
1875 | kfree(iov); | |
1876 | *iovp = NULL; | |
1877 | return -EFAULT; | |
fb041b59 | 1878 | } |
bfdc5970 CH |
1879 | |
1880 | if (len > MAX_RW_COUNT - total_len) { | |
1881 | len = MAX_RW_COUNT - total_len; | |
fb041b59 DL |
1882 | iov[seg].iov_len = len; |
1883 | } | |
bfdc5970 | 1884 | total_len += len; |
fb041b59 | 1885 | } |
bfdc5970 CH |
1886 | |
1887 | iov_iter_init(i, type, iov, nr_segs, total_len); | |
1888 | if (iov == *iovp) | |
1889 | *iovp = NULL; | |
1890 | else | |
1891 | *iovp = iov; | |
1892 | return total_len; | |
fb041b59 DL |
1893 | } |
1894 | ||
ffecee4f VN |
1895 | /** |
1896 | * import_iovec() - Copy an array of &struct iovec from userspace | |
1897 | * into the kernel, check that it is valid, and initialize a new | |
1898 | * &struct iov_iter iterator to access it. | |
1899 | * | |
1900 | * @type: One of %READ or %WRITE. | |
bfdc5970 | 1901 | * @uvec: Pointer to the userspace array. |
ffecee4f VN |
1902 | * @nr_segs: Number of elements in userspace array. |
1903 | * @fast_segs: Number of elements in *@iovp. | |
bfdc5970 | 1904 | * @iovp: (input and output parameter) Pointer to pointer to (usually small |
ffecee4f VN |
1905 | * on-stack) kernel array. |
1906 | * @i: Pointer to iterator that will be initialized on success. | |
1907 | * | |
1908 | * If the array pointed to by *@iovp is large enough to hold all @nr_segs, |
1909 | * then this function places %NULL in *@iovp on return. Otherwise, a new | |
1910 | * array will be allocated and the result placed in *@iovp. This means that | |
1911 | * the caller may call kfree() on *@iovp regardless of whether the small | |
1912 | * on-stack array was used or not (and regardless of whether this function | |
1913 | * returns an error or not). | |
1914 | * | |
87e5e6da | 1915 | * Return: Negative error code on error, bytes imported on success |
ffecee4f | 1916 | */ |
bfdc5970 | 1917 | ssize_t import_iovec(int type, const struct iovec __user *uvec, |
bc917be8 | 1918 | unsigned nr_segs, unsigned fast_segs, |
bfdc5970 | 1919 | struct iovec **iovp, struct iov_iter *i) |
bc917be8 | 1920 | { |
89cd35c5 CH |
1921 | return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i, |
1922 | in_compat_syscall()); | |
bc917be8 AV |
1923 | } |
1924 | EXPORT_SYMBOL(import_iovec); | |
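/*
 * Illustrative sketch, not part of iov_iter.c itself (hypothetical names):
 * a readv()-style caller lets import_iovec() fill the iterator and can
 * kfree(iov) unconditionally afterwards, since *iovp is set to %NULL
 * whenever the on-stack array was used.
 */
static ssize_t example_readv_setup(const struct iovec __user *uvec,
				   unsigned long nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;
	/* ... pass &iter to the actual read path; ret holds the byte count ... */
	kfree(iov);
	return ret;
}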
1925 | ||
bc917be8 AV |
1926 | int import_single_range(int rw, void __user *buf, size_t len, |
1927 | struct iovec *iov, struct iov_iter *i) | |
1928 | { | |
1929 | if (len > MAX_RW_COUNT) | |
1930 | len = MAX_RW_COUNT; | |
96d4f267 | 1931 | if (unlikely(!access_ok(buf, len))) |
bc917be8 AV |
1932 | return -EFAULT; |
1933 | ||
1934 | iov->iov_base = buf; | |
1935 | iov->iov_len = len; | |
1936 | iov_iter_init(i, rw, iov, 1, len); | |
1937 | return 0; | |
1938 | } | |
e1267585 | 1939 | EXPORT_SYMBOL(import_single_range); |
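/*
 * Illustrative sketch, not part of iov_iter.c itself (hypothetical names):
 * import_single_range() is the single-buffer counterpart used by plain
 * read()/write()-style paths; the iovec must stay alive for as long as the
 * iterator is in use.  This example assumes 'src' holds at least the
 * requested number of bytes.
 */
static ssize_t example_single_read(void __user *buf, size_t len,
				   const void *src)
{
	struct iovec iov;
	struct iov_iter iter;
	int ret;

	ret = import_single_range(READ, buf, len, &iov, &iter);
	if (ret)
		return ret;
	return copy_to_iter(src, iov_iter_count(&iter), &iter);
}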
8fb0f47a JA |
1940 | |
1941 | /** | |
1942 | * iov_iter_restore() - Restore a &struct iov_iter to the same state as when | |
1943 | * iov_iter_save_state() was called. | |
1944 | * | |
1945 | * @i: &struct iov_iter to restore | |
1946 | * @state: state to restore from | |
1947 | * | |
1948 | * Used after iov_iter_save_state() to restore @i, if operations may | |
1949 | * have advanced it. | |
1950 | * | |
1951 | * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC | |
1952 | */ | |
1953 | void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state) | |
1954 | { | |
1955 | if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) && | |
1956 | !iov_iter_is_kvec(i))) | |
1957 | return; | |
1958 | i->iov_offset = state->iov_offset; | |
1959 | i->count = state->count; | |
1960 | /* | |
1961 | * For the *vec iters, nr_segs + iov is constant - if we increment | |
1962 | * the vec, then we also decrement the nr_segs count. Hence we don't | |
1963 | * need to track both of these, just one is enough and we can deduct | |
1964 | * the other from that. ITER_KVEC and ITER_IOVEC are the same struct | |
1965 | * size, so we can just increment the iov pointer as they are unionized. | |
1966 | * ITER_BVEC _may_ be the same size on some archs, but on others it is | |
1967 | * not. Be safe and handle it separately. | |
1968 | */ | |
1969 | BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec)); | |
1970 | if (iov_iter_is_bvec(i)) | |
1971 | i->bvec -= state->nr_segs - i->nr_segs; | |
1972 | else | |
1973 | i->iov -= state->nr_segs - i->nr_segs; | |
1974 | i->nr_segs = state->nr_segs; | |
1975 | } |
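/*
 * Illustrative sketch, not part of iov_iter.c itself (hypothetical names):
 * the usual pairing with iov_iter_save_state() lets a caller roll the
 * iterator back when a transfer comes up short and has to be retried from
 * the start.
 */
static size_t example_copy_all_or_nothing(void *dst, size_t len,
					  struct iov_iter *i)
{
	struct iov_iter_state state;
	size_t copied;

	iov_iter_save_state(i, &state);
	copied = copy_from_iter(dst, len, i);
	if (copied != len) {
		/* put the iterator back exactly where it was */
		iov_iter_restore(i, &state);
		return 0;
	}
	return copied;
}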