Commit | Line | Data |
---|---|---|
457c8996 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
7999096f | 2 | #include <crypto/hash.h> |
4f18cd31 | 3 | #include <linux/export.h> |
2f8b5444 | 4 | #include <linux/bvec.h> |
4d0e9df5 | 5 | #include <linux/fault-inject-usercopy.h> |
4f18cd31 AV | 6 | #include <linux/uio.h> |
| 7 | #include <linux/pagemap.h> |
28961998 | 8 | #include <linux/highmem.h> |
91f79c43 AV | 9 | #include <linux/slab.h> |
| 10 | #include <linux/vmalloc.h> |
241699cd | 11 | #include <linux/splice.h> |
bfdc5970 | 12 | #include <linux/compat.h> |
a604ec7e | 13 | #include <net/checksum.h> |
d05f4435 | 14 | #include <linux/scatterlist.h> |
d0ef4c36 | 15 | #include <linux/instrumented.h> |
4f18cd31 | 16 | |
241699cd AV | 17 | #define PIPE_PARANOIA /* for now */ |
| 18 | |
5c67aa90 | 19 | /* covers iovec and kvec alike */ |
a6e4ec7b | 20 | #define iterate_iovec(i, n, base, len, off, __p, STEP) { \ |
7baa5099 | 21 | size_t off = 0; \ |
a6e4ec7b | 22 | size_t skip = i->iov_offset; \ |
7a1bcb5d | 23 | do { \ |
7baa5099 AV | 24 | len = min(n, __p->iov_len - skip); \ |
| 25 | if (likely(len)) { \ |
| 26 | base = __p->iov_base + skip; \ |
| 27 | len -= (STEP); \ |
| 28 | off += len; \ |
| 29 | skip += len; \ |
| 30 | n -= len; \ |
7a1bcb5d AV | 31 | if (skip < __p->iov_len) \ |
| 32 | break; \ |
| 33 | } \ |
| 34 | __p++; \ |
| 35 | skip = 0; \ |
| 36 | } while (n); \ |
a6e4ec7b | 37 | i->iov_offset = skip; \ |
7baa5099 | 38 | n = off; \ |
04a31165 AV | 39 | } |
| 40 | |
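The iterate_iovec() macro above walks the iovec array from i->iov_offset, clamps each chunk to what is left of the current segment, and stops early when STEP reports untouched bytes. The sketch below mirrors that bookkeeping in plain C so the flow is easier to follow; it is illustrative only (walk_iovec is not a kernel function) and assumes the segments hold at least n bytes past skip.

```c
#include <string.h>
#include <sys/uio.h>	/* struct iovec */

/* Illustrative only: copy n bytes from src into an iovec array,
 * starting skip bytes into the first segment, the way the kernel's
 * iterate_iovec() macro advances through segments. */
static size_t walk_iovec(const struct iovec *iov, size_t skip,
			 const char *src, size_t n)
{
	size_t done = 0;

	while (n) {
		size_t len = iov->iov_len - skip;

		if (len > n)
			len = n;
		if (len) {
			memcpy((char *)iov->iov_base + skip, src + done, len);
			done += len;
			skip += len;
			n -= len;
			if (skip < iov->iov_len)
				break;	/* request ended inside this segment */
		}
		iov++;
		skip = 0;
	}
	return done;
}
```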
a6e4ec7b | 41 | #define iterate_bvec(i, n, base, len, off, p, STEP) { \ |
7baa5099 | 42 | size_t off = 0; \ |
a6e4ec7b | 43 | unsigned skip = i->iov_offset; \ |
7491a2bf AV | 44 | while (n) { \ |
| 45 | unsigned offset = p->bv_offset + skip; \ |
1b4fb5ff | 46 | unsigned left; \ |
21b56c84 AV | 47 | void *kaddr = kmap_local_page(p->bv_page + \ |
| 48 | offset / PAGE_SIZE); \ |
7baa5099 | 49 | base = kaddr + offset % PAGE_SIZE; \ |
a6e4ec7b | 50 | len = min(min(n, (size_t)(p->bv_len - skip)), \ |
7491a2bf | 51 | (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \ |
1b4fb5ff | 52 | left = (STEP); \ |
21b56c84 | 53 | kunmap_local(kaddr); \ |
7baa5099 AV | 54 | len -= left; \ |
| 55 | off += len; \ |
| 56 | skip += len; \ |
7491a2bf AV | 57 | if (skip == p->bv_len) { \ |
| 58 | skip = 0; \ |
| 59 | p++; \ |
| 60 | } \ |
7baa5099 | 61 | n -= len; \ |
1b4fb5ff AV | 62 | if (left) \ |
| 63 | break; \ |
7491a2bf | 64 | } \ |
a6e4ec7b | 65 | i->iov_offset = skip; \ |
7baa5099 | 66 | n = off; \ |
04a31165 AV | 67 | } |
68 | ||
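iterate_bvec() has to map each page before STEP can touch it, because a bio_vec may reference highmem pages. Below is a hedged kernel-style sketch of that map/copy/unmap pattern for a single bio_vec; bvec_copy_from() is a made-up helper name, not an existing API.

```c
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/minmax.h>
#include <linux/string.h>

/* Illustrative helper (not a kernel API): copy len bytes into one
 * bio_vec at offset skip, one page at a time, as iterate_bvec() does. */
static size_t bvec_copy_from(const struct bio_vec *bv, unsigned int skip,
			     const char *src, size_t len)
{
	size_t done = 0;

	len = min_t(size_t, len, bv->bv_len - skip);
	while (len) {
		unsigned int offset = bv->bv_offset + skip + done;
		size_t chunk = min_t(size_t, len,
				     PAGE_SIZE - offset % PAGE_SIZE);
		void *kaddr = kmap_local_page(bv->bv_page + offset / PAGE_SIZE);

		memcpy(kaddr + offset % PAGE_SIZE, src + done, chunk);
		kunmap_local(kaddr);
		done += chunk;
		len -= chunk;
	}
	return done;
}
```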
a6e4ec7b | 69 | #define iterate_xarray(i, n, base, len, __off, STEP) { \ |
1b4fb5ff | 70 | __label__ __out; \ |
622838f3 | 71 | size_t __off = 0; \ |
7ff50620 | 72 | struct page *head = NULL; \ |
7baa5099 | 73 | size_t offset; \ |
a6e4ec7b | 74 | loff_t start = i->xarray_start + i->iov_offset; \ |
7ff50620 DH | 75 | pgoff_t index = start >> PAGE_SHIFT; \ |
| 76 | int j; \ |
| 77 | \ |
| 78 | XA_STATE(xas, i->xarray, index); \ |
| 79 | \ |
7baa5099 AV | 80 | rcu_read_lock(); \ |
| 81 | xas_for_each(&xas, head, ULONG_MAX) { \ |
| 82 | unsigned left; \ |
| 83 | if (xas_retry(&xas, head)) \ |
| 84 | continue; \ |
| 85 | if (WARN_ON(xa_is_value(head))) \ |
| 86 | break; \ |
| 87 | if (WARN_ON(PageHuge(head))) \ |
| 88 | break; \ |
7ff50620 | 89 | for (j = (head->index < index) ? index - head->index : 0; \ |
7baa5099 | 90 | j < thp_nr_pages(head); j++) { \ |
21b56c84 | 91 | void *kaddr = kmap_local_page(head + j); \ |
7baa5099 AV | 92 | offset = (start + __off) % PAGE_SIZE; \ |
| 93 | base = kaddr + offset; \ |
| 94 | len = PAGE_SIZE - offset; \ |
| 95 | len = min(n, len); \ |
| 96 | left = (STEP); \ |
| 97 | kunmap_local(kaddr); \ |
| 98 | len -= left; \ |
| 99 | __off += len; \ |
| 100 | n -= len; \ |
| 101 | if (left || n == 0) \ |
| 102 | goto __out; \ |
| 103 | } \ |
7ff50620 | 104 | } \ |
1b4fb5ff | 105 | __out: \ |
7ff50620 | 106 | rcu_read_unlock(); \ |
a6e4ec7b | 107 | i->iov_offset += __off; \ |
622838f3 | 108 | n = __off; \ |
7ff50620 DH | 109 | } |
110 | ||
7baa5099 | 111 | #define __iterate_and_advance(i, n, base, len, off, I, K) { \ |
dd254f5a AV |
112 | if (unlikely(i->count < n)) \ |
113 | n = i->count; \ | |
f5da8354 | 114 | if (likely(n)) { \ |
28f38db7 | 115 | if (likely(iter_is_iovec(i))) { \ |
5c67aa90 | 116 | const struct iovec *iov = i->iov; \ |
7baa5099 AV |
117 | void __user *base; \ |
118 | size_t len; \ | |
119 | iterate_iovec(i, n, base, len, off, \ | |
a6e4ec7b | 120 | iov, (I)) \ |
28f38db7 AV |
121 | i->nr_segs -= iov - i->iov; \ |
122 | i->iov = iov; \ | |
123 | } else if (iov_iter_is_bvec(i)) { \ | |
1bdc76ae | 124 | const struct bio_vec *bvec = i->bvec; \ |
7baa5099 AV |
125 | void *base; \ |
126 | size_t len; \ | |
127 | iterate_bvec(i, n, base, len, off, \ | |
a6e4ec7b | 128 | bvec, (K)) \ |
7491a2bf AV |
129 | i->nr_segs -= bvec - i->bvec; \ |
130 | i->bvec = bvec; \ | |
28f38db7 | 131 | } else if (iov_iter_is_kvec(i)) { \ |
5c67aa90 | 132 | const struct kvec *kvec = i->kvec; \ |
7baa5099 AV |
133 | void *base; \ |
134 | size_t len; \ | |
135 | iterate_iovec(i, n, base, len, off, \ | |
a6e4ec7b | 136 | kvec, (K)) \ |
dd254f5a AV |
137 | i->nr_segs -= kvec - i->kvec; \ |
138 | i->kvec = kvec; \ | |
28f38db7 | 139 | } else if (iov_iter_is_xarray(i)) { \ |
7baa5099 AV |
140 | void *base; \ |
141 | size_t len; \ | |
142 | iterate_xarray(i, n, base, len, off, \ | |
a6e4ec7b | 143 | (K)) \ |
7ce2a91e | 144 | } \ |
dd254f5a | 145 | i->count -= n; \ |
7ce2a91e | 146 | } \ |
7ce2a91e | 147 | } |
7baa5099 AV |
148 | #define iterate_and_advance(i, n, base, len, off, I, K) \ |
149 | __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0)) | |
7ce2a91e | 150 | |
09fc68dc AV |
151 | static int copyout(void __user *to, const void *from, size_t n) |
152 | { | |
4d0e9df5 AL |
153 | if (should_fail_usercopy()) |
154 | return n; | |
96d4f267 | 155 | if (access_ok(to, n)) { |
d0ef4c36 | 156 | instrument_copy_to_user(to, from, n); |
09fc68dc AV |
157 | n = raw_copy_to_user(to, from, n); |
158 | } | |
159 | return n; | |
160 | } | |
161 | ||
162 | static int copyin(void *to, const void __user *from, size_t n) | |
163 | { | |
4d0e9df5 AL |
164 | if (should_fail_usercopy()) |
165 | return n; | |
96d4f267 | 166 | if (access_ok(from, n)) { |
d0ef4c36 | 167 | instrument_copy_from_user(to, from, n); |
09fc68dc AV |
168 | n = raw_copy_from_user(to, from, n); |
169 | } | |
170 | return n; | |
171 | } | |
172 | ||
62a8067a | 173 | static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes, |
4f18cd31 AV |
174 | struct iov_iter *i) |
175 | { | |
176 | size_t skip, copy, left, wanted; | |
177 | const struct iovec *iov; | |
178 | char __user *buf; | |
179 | void *kaddr, *from; | |
180 | ||
181 | if (unlikely(bytes > i->count)) | |
182 | bytes = i->count; | |
183 | ||
184 | if (unlikely(!bytes)) | |
185 | return 0; | |
186 | ||
09fc68dc | 187 | might_fault(); |
4f18cd31 AV |
188 | wanted = bytes; |
189 | iov = i->iov; | |
190 | skip = i->iov_offset; | |
191 | buf = iov->iov_base + skip; | |
192 | copy = min(bytes, iov->iov_len - skip); | |
193 | ||
3fa6c507 | 194 | if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) { |
4f18cd31 AV |
195 | kaddr = kmap_atomic(page); |
196 | from = kaddr + offset; | |
197 | ||
198 | /* first chunk, usually the only one */ | |
09fc68dc | 199 | left = copyout(buf, from, copy); |
4f18cd31 AV |
200 | copy -= left; |
201 | skip += copy; | |
202 | from += copy; | |
203 | bytes -= copy; | |
204 | ||
205 | while (unlikely(!left && bytes)) { | |
206 | iov++; | |
207 | buf = iov->iov_base; | |
208 | copy = min(bytes, iov->iov_len); | |
09fc68dc | 209 | left = copyout(buf, from, copy); |
4f18cd31 AV |
210 | copy -= left; |
211 | skip = copy; | |
212 | from += copy; | |
213 | bytes -= copy; | |
214 | } | |
215 | if (likely(!bytes)) { | |
216 | kunmap_atomic(kaddr); | |
217 | goto done; | |
218 | } | |
219 | offset = from - kaddr; | |
220 | buf += copy; | |
221 | kunmap_atomic(kaddr); | |
222 | copy = min(bytes, iov->iov_len - skip); | |
223 | } | |
224 | /* Too bad - revert to non-atomic kmap */ | |
3fa6c507 | 225 | |
4f18cd31 AV |
226 | kaddr = kmap(page); |
227 | from = kaddr + offset; | |
09fc68dc | 228 | left = copyout(buf, from, copy); |
4f18cd31 AV |
229 | copy -= left; |
230 | skip += copy; | |
231 | from += copy; | |
232 | bytes -= copy; | |
233 | while (unlikely(!left && bytes)) { | |
234 | iov++; | |
235 | buf = iov->iov_base; | |
236 | copy = min(bytes, iov->iov_len); | |
09fc68dc | 237 | left = copyout(buf, from, copy); |
4f18cd31 AV |
238 | copy -= left; |
239 | skip = copy; | |
240 | from += copy; | |
241 | bytes -= copy; | |
242 | } | |
243 | kunmap(page); | |
3fa6c507 | 244 | |
4f18cd31 | 245 | done: |
81055e58 AV |
246 | if (skip == iov->iov_len) { |
247 | iov++; | |
248 | skip = 0; | |
249 | } | |
4f18cd31 AV |
250 | i->count -= wanted - bytes; |
251 | i->nr_segs -= iov - i->iov; | |
252 | i->iov = iov; | |
253 | i->iov_offset = skip; | |
254 | return wanted - bytes; | |
255 | } | |
4f18cd31 | 256 | |
62a8067a | 257 | static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes, |
f0d1bec9 AV |
258 | struct iov_iter *i) |
259 | { | |
260 | size_t skip, copy, left, wanted; | |
261 | const struct iovec *iov; | |
262 | char __user *buf; | |
263 | void *kaddr, *to; | |
264 | ||
265 | if (unlikely(bytes > i->count)) | |
266 | bytes = i->count; | |
267 | ||
268 | if (unlikely(!bytes)) | |
269 | return 0; | |
270 | ||
09fc68dc | 271 | might_fault(); |
f0d1bec9 AV |
272 | wanted = bytes; |
273 | iov = i->iov; | |
274 | skip = i->iov_offset; | |
275 | buf = iov->iov_base + skip; | |
276 | copy = min(bytes, iov->iov_len - skip); | |
277 | ||
3fa6c507 | 278 | if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) { |
f0d1bec9 AV |
279 | kaddr = kmap_atomic(page); |
280 | to = kaddr + offset; | |
281 | ||
282 | /* first chunk, usually the only one */ | |
09fc68dc | 283 | left = copyin(to, buf, copy); |
f0d1bec9 AV |
284 | copy -= left; |
285 | skip += copy; | |
286 | to += copy; | |
287 | bytes -= copy; | |
288 | ||
289 | while (unlikely(!left && bytes)) { | |
290 | iov++; | |
291 | buf = iov->iov_base; | |
292 | copy = min(bytes, iov->iov_len); | |
09fc68dc | 293 | left = copyin(to, buf, copy); |
f0d1bec9 AV |
294 | copy -= left; |
295 | skip = copy; | |
296 | to += copy; | |
297 | bytes -= copy; | |
298 | } | |
299 | if (likely(!bytes)) { | |
300 | kunmap_atomic(kaddr); | |
301 | goto done; | |
302 | } | |
303 | offset = to - kaddr; | |
304 | buf += copy; | |
305 | kunmap_atomic(kaddr); | |
306 | copy = min(bytes, iov->iov_len - skip); | |
307 | } | |
308 | /* Too bad - revert to non-atomic kmap */ | |
3fa6c507 | 309 | |
f0d1bec9 AV |
310 | kaddr = kmap(page); |
311 | to = kaddr + offset; | |
09fc68dc | 312 | left = copyin(to, buf, copy); |
f0d1bec9 AV |
313 | copy -= left; |
314 | skip += copy; | |
315 | to += copy; | |
316 | bytes -= copy; | |
317 | while (unlikely(!left && bytes)) { | |
318 | iov++; | |
319 | buf = iov->iov_base; | |
320 | copy = min(bytes, iov->iov_len); | |
09fc68dc | 321 | left = copyin(to, buf, copy); |
f0d1bec9 AV |
322 | copy -= left; |
323 | skip = copy; | |
324 | to += copy; | |
325 | bytes -= copy; | |
326 | } | |
327 | kunmap(page); | |
3fa6c507 | 328 | |
f0d1bec9 | 329 | done: |
81055e58 AV |
330 | if (skip == iov->iov_len) { |
331 | iov++; | |
332 | skip = 0; | |
333 | } | |
f0d1bec9 AV |
334 | i->count -= wanted - bytes; |
335 | i->nr_segs -= iov - i->iov; | |
336 | i->iov = iov; | |
337 | i->iov_offset = skip; | |
338 | return wanted - bytes; | |
339 | } | |
f0d1bec9 | 340 | |
241699cd AV |
341 | #ifdef PIPE_PARANOIA |
342 | static bool sanity(const struct iov_iter *i) | |
343 | { | |
344 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
345 | unsigned int p_head = pipe->head; |
346 | unsigned int p_tail = pipe->tail; | |
347 | unsigned int p_mask = pipe->ring_size - 1; | |
348 | unsigned int p_occupancy = pipe_occupancy(p_head, p_tail); | |
349 | unsigned int i_head = i->head; | |
350 | unsigned int idx; | |
351 | ||
241699cd AV |
352 | if (i->iov_offset) { |
353 | struct pipe_buffer *p; | |
8cefc107 | 354 | if (unlikely(p_occupancy == 0)) |
241699cd | 355 | goto Bad; // pipe must be non-empty |
8cefc107 | 356 | if (unlikely(i_head != p_head - 1)) |
241699cd AV |
357 | goto Bad; // must be at the last buffer... |
358 | ||
8cefc107 | 359 | p = &pipe->bufs[i_head & p_mask]; |
241699cd AV |
360 | if (unlikely(p->offset + p->len != i->iov_offset)) |
361 | goto Bad; // ... at the end of segment | |
362 | } else { | |
8cefc107 | 363 | if (i_head != p_head) |
241699cd AV |
364 | goto Bad; // must be right after the last buffer |
365 | } | |
366 | return true; | |
367 | Bad: | |
8cefc107 DH |
368 | printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset); |
369 | printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n", | |
370 | p_head, p_tail, pipe->ring_size); | |
371 | for (idx = 0; idx < pipe->ring_size; idx++) | |
241699cd AV |
372 | printk(KERN_ERR "[%p %p %d %d]\n", |
373 | pipe->bufs[idx].ops, | |
374 | pipe->bufs[idx].page, | |
375 | pipe->bufs[idx].offset, | |
376 | pipe->bufs[idx].len); | |
377 | WARN_ON(1); | |
378 | return false; | |
379 | } | |
380 | #else | |
381 | #define sanity(i) true | |
382 | #endif | |
383 | ||
241699cd AV |
384 | static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes, |
385 | struct iov_iter *i) | |
386 | { | |
387 | struct pipe_inode_info *pipe = i->pipe; | |
388 | struct pipe_buffer *buf; | |
8cefc107 DH |
389 | unsigned int p_tail = pipe->tail; |
390 | unsigned int p_mask = pipe->ring_size - 1; | |
391 | unsigned int i_head = i->head; | |
241699cd | 392 | size_t off; |
241699cd AV |
393 | |
394 | if (unlikely(bytes > i->count)) | |
395 | bytes = i->count; | |
396 | ||
397 | if (unlikely(!bytes)) | |
398 | return 0; | |
399 | ||
400 | if (!sanity(i)) | |
401 | return 0; | |
402 | ||
403 | off = i->iov_offset; | |
8cefc107 | 404 | buf = &pipe->bufs[i_head & p_mask]; |
241699cd AV |
405 | if (off) { |
406 | if (offset == off && buf->page == page) { | |
407 | /* merge with the last one */ | |
408 | buf->len += bytes; | |
409 | i->iov_offset += bytes; | |
410 | goto out; | |
411 | } | |
8cefc107 DH |
412 | i_head++; |
413 | buf = &pipe->bufs[i_head & p_mask]; | |
241699cd | 414 | } |
6718b6f8 | 415 | if (pipe_full(i_head, p_tail, pipe->max_usage)) |
241699cd | 416 | return 0; |
8cefc107 | 417 | |
241699cd | 418 | buf->ops = &page_cache_pipe_buf_ops; |
8cefc107 DH |
419 | get_page(page); |
420 | buf->page = page; | |
241699cd AV |
421 | buf->offset = offset; |
422 | buf->len = bytes; | |
8cefc107 DH |
423 | |
424 | pipe->head = i_head + 1; | |
241699cd | 425 | i->iov_offset = offset + bytes; |
8cefc107 | 426 | i->head = i_head; |
241699cd AV |
427 | out: |
428 | i->count -= bytes; | |
429 | return bytes; | |
430 | } | |
431 | ||
171a0203 AA |
432 | /* |
433 | * Fault in one or more iovecs of the given iov_iter, to a maximum length of | |
434 | * bytes. For each iovec, fault in each page that constitutes the iovec. | |
435 | * | |
436 | * Return 0 on success, or non-zero if the memory could not be accessed (i.e. | |
437 | * because it is an invalid address). | |
438 | */ | |
8409a0d2 | 439 | int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes) |
171a0203 | 440 | { |
0e8f0d67 | 441 | if (iter_is_iovec(i)) { |
8409a0d2 AV |
442 | const struct iovec *p; |
443 | size_t skip; | |
444 | ||
445 | if (bytes > i->count) | |
446 | bytes = i->count; | |
447 | for (p = i->iov, skip = i->iov_offset; bytes; p++, skip = 0) { | |
448 | size_t len = min(bytes, p->iov_len - skip); | |
449 | int err; | |
450 | ||
451 | if (unlikely(!len)) | |
452 | continue; | |
453 | err = fault_in_pages_readable(p->iov_base + skip, len); | |
171a0203 | 454 | if (unlikely(err)) |
8409a0d2 AV |
455 | return err; |
456 | bytes -= len; | |
457 | } | |
171a0203 AA |
458 | } |
459 | return 0; | |
460 | } | |
d4690f1e | 461 | EXPORT_SYMBOL(iov_iter_fault_in_readable); |
171a0203 | 462 | |
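Callers normally pair iov_iter_fault_in_readable() with a copy that cannot fault: touch the user pages while sleeping is still allowed, then copy with page faults disabled and retry if the pages were reclaimed in between. A hedged sketch of that pattern, modelled loosely on the generic write path (names and error handling simplified):

```c
#include <linux/uio.h>

/* Sketch only: fault the source pages in, then copy atomically into a
 * pagecache page; retry if nothing could be copied because the pages
 * went away again between the two steps. */
static ssize_t copy_one_chunk(struct page *page, unsigned int offset,
			      size_t bytes, struct iov_iter *i)
{
	size_t copied;

	do {
		if (unlikely(iov_iter_fault_in_readable(i, bytes)))
			return -EFAULT;
		/* copy_page_from_iter_atomic() maps the page with
		 * kmap_atomic(), so it will not fault or sleep */
		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
	} while (unlikely(!copied));

	return copied;
}
```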
aa563d7b | 463 | void iov_iter_init(struct iov_iter *i, unsigned int direction, |
71d8e532 AV |
464 | const struct iovec *iov, unsigned long nr_segs, |
465 | size_t count) | |
466 | { | |
aa563d7b | 467 | WARN_ON(direction & ~(READ | WRITE)); |
8cd54c1c AV |
468 | WARN_ON_ONCE(uaccess_kernel()); |
469 | *i = (struct iov_iter) { | |
470 | .iter_type = ITER_IOVEC, | |
471 | .data_source = direction, | |
472 | .iov = iov, | |
473 | .nr_segs = nr_segs, | |
474 | .iov_offset = 0, | |
475 | .count = count | |
476 | }; | |
71d8e532 AV |
477 | } |
478 | EXPORT_SYMBOL(iov_iter_init); | |
7b2c99d1 | 479 | |
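A minimal usage sketch for iov_iter_init(): wrap an array of user-space iovecs, then let generic helpers such as copy_to_iter() handle the segment walking. Direction READ means the iterator is the destination of a read-style transfer. The surrounding function and names are invented for illustration.

```c
#include <linux/uio.h>

/* Sketch only: hand "len" bytes from a kernel buffer to a user pointer
 * by wrapping it in a single-segment ITER_IOVEC. */
static ssize_t give_to_user(void __user *ubuf, size_t len,
			    const void *kernel_buf)
{
	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
	struct iov_iter iter;
	size_t copied;

	iov_iter_init(&iter, READ, &iov, 1, len);
	copied = copy_to_iter(kernel_buf, len, &iter);
	return copied ? copied : -EFAULT;
}
```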
241699cd AV |
480 | static inline bool allocated(struct pipe_buffer *buf) |
481 | { | |
482 | return buf->ops == &default_pipe_buf_ops; | |
483 | } | |
484 | ||
8cefc107 DH |
485 | static inline void data_start(const struct iov_iter *i, |
486 | unsigned int *iter_headp, size_t *offp) | |
241699cd | 487 | { |
8cefc107 DH |
488 | unsigned int p_mask = i->pipe->ring_size - 1; |
489 | unsigned int iter_head = i->head; | |
241699cd | 490 | size_t off = i->iov_offset; |
8cefc107 DH |
491 | |
492 | if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) || | |
493 | off == PAGE_SIZE)) { | |
494 | iter_head++; | |
241699cd AV |
495 | off = 0; |
496 | } | |
8cefc107 | 497 | *iter_headp = iter_head; |
241699cd AV |
498 | *offp = off; |
499 | } | |
500 | ||
501 | static size_t push_pipe(struct iov_iter *i, size_t size, | |
8cefc107 | 502 | int *iter_headp, size_t *offp) |
241699cd AV |
503 | { |
504 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
505 | unsigned int p_tail = pipe->tail; |
506 | unsigned int p_mask = pipe->ring_size - 1; | |
507 | unsigned int iter_head; | |
241699cd | 508 | size_t off; |
241699cd AV |
509 | ssize_t left; |
510 | ||
511 | if (unlikely(size > i->count)) | |
512 | size = i->count; | |
513 | if (unlikely(!size)) | |
514 | return 0; | |
515 | ||
516 | left = size; | |
8cefc107 DH |
517 | data_start(i, &iter_head, &off); |
518 | *iter_headp = iter_head; | |
241699cd AV |
519 | *offp = off; |
520 | if (off) { | |
521 | left -= PAGE_SIZE - off; | |
522 | if (left <= 0) { | |
8cefc107 | 523 | pipe->bufs[iter_head & p_mask].len += size; |
241699cd AV |
524 | return size; |
525 | } | |
8cefc107 DH |
526 | pipe->bufs[iter_head & p_mask].len = PAGE_SIZE; |
527 | iter_head++; | |
241699cd | 528 | } |
6718b6f8 | 529 | while (!pipe_full(iter_head, p_tail, pipe->max_usage)) { |
8cefc107 | 530 | struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask]; |
241699cd AV |
531 | struct page *page = alloc_page(GFP_USER); |
532 | if (!page) | |
533 | break; | |
8cefc107 DH |
534 | |
535 | buf->ops = &default_pipe_buf_ops; | |
536 | buf->page = page; | |
537 | buf->offset = 0; | |
538 | buf->len = min_t(ssize_t, left, PAGE_SIZE); | |
539 | left -= buf->len; | |
540 | iter_head++; | |
541 | pipe->head = iter_head; | |
542 | ||
543 | if (left == 0) | |
241699cd | 544 | return size; |
241699cd AV |
545 | } |
546 | return size - left; | |
547 | } | |
548 | ||
549 | static size_t copy_pipe_to_iter(const void *addr, size_t bytes, | |
550 | struct iov_iter *i) | |
551 | { | |
552 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
553 | unsigned int p_mask = pipe->ring_size - 1; |
554 | unsigned int i_head; | |
241699cd | 555 | size_t n, off; |
241699cd AV |
556 | |
557 | if (!sanity(i)) | |
558 | return 0; | |
559 | ||
8cefc107 | 560 | bytes = n = push_pipe(i, bytes, &i_head, &off); |
241699cd AV |
561 | if (unlikely(!n)) |
562 | return 0; | |
8cefc107 | 563 | do { |
241699cd | 564 | size_t chunk = min_t(size_t, n, PAGE_SIZE - off); |
8cefc107 DH |
565 | memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk); |
566 | i->head = i_head; | |
241699cd AV |
567 | i->iov_offset = off + chunk; |
568 | n -= chunk; | |
569 | addr += chunk; | |
8cefc107 DH |
570 | off = 0; |
571 | i_head++; | |
572 | } while (n); | |
241699cd AV |
573 | i->count -= bytes; |
574 | return bytes; | |
575 | } | |
576 | ||
f9152895 AV |
577 | static __wsum csum_and_memcpy(void *to, const void *from, size_t len, |
578 | __wsum sum, size_t off) | |
579 | { | |
cc44c17b | 580 | __wsum next = csum_partial_copy_nocheck(from, to, len); |
f9152895 AV |
581 | return csum_block_add(sum, next, off); |
582 | } | |
583 | ||
78e1f386 | 584 | static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, |
52cbd23a WB |
585 | struct csum_state *csstate, |
586 | struct iov_iter *i) | |
78e1f386 AV |
587 | { |
588 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 | 589 | unsigned int p_mask = pipe->ring_size - 1; |
52cbd23a WB |
590 | __wsum sum = csstate->csum; |
591 | size_t off = csstate->off; | |
8cefc107 | 592 | unsigned int i_head; |
78e1f386 | 593 | size_t n, r; |
78e1f386 AV |
594 | |
595 | if (!sanity(i)) | |
596 | return 0; | |
597 | ||
8cefc107 | 598 | bytes = n = push_pipe(i, bytes, &i_head, &r); |
78e1f386 AV |
599 | if (unlikely(!n)) |
600 | return 0; | |
8cefc107 | 601 | do { |
78e1f386 | 602 | size_t chunk = min_t(size_t, n, PAGE_SIZE - r); |
8cefc107 | 603 | char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page); |
f9152895 | 604 | sum = csum_and_memcpy(p + r, addr, chunk, sum, off); |
78e1f386 | 605 | kunmap_atomic(p); |
8cefc107 | 606 | i->head = i_head; |
78e1f386 AV |
607 | i->iov_offset = r + chunk; |
608 | n -= chunk; | |
609 | off += chunk; | |
610 | addr += chunk; | |
8cefc107 DH |
611 | r = 0; |
612 | i_head++; | |
613 | } while (n); | |
78e1f386 | 614 | i->count -= bytes; |
52cbd23a WB |
615 | csstate->csum = sum; |
616 | csstate->off = off; | |
78e1f386 AV |
617 | return bytes; |
618 | } | |
619 | ||
aa28de27 | 620 | size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) |
62a8067a | 621 | { |
00e23707 | 622 | if (unlikely(iov_iter_is_pipe(i))) |
241699cd | 623 | return copy_pipe_to_iter(addr, bytes, i); |
09fc68dc AV |
624 | if (iter_is_iovec(i)) |
625 | might_fault(); | |
7baa5099 AV |
626 | iterate_and_advance(i, bytes, base, len, off, |
627 | copyout(base, addr + off, len), | |
628 | memcpy(base, addr + off, len) | |
3d4d3e48 | 629 | ) |
62a8067a | 630 | |
3d4d3e48 | 631 | return bytes; |
c35e0248 | 632 | } |
aa28de27 | 633 | EXPORT_SYMBOL(_copy_to_iter); |
c35e0248 | 634 | |
ec6347bb DW |
635 | #ifdef CONFIG_ARCH_HAS_COPY_MC |
636 | static int copyout_mc(void __user *to, const void *from, size_t n) | |
8780356e | 637 | { |
96d4f267 | 638 | if (access_ok(to, n)) { |
d0ef4c36 | 639 | instrument_copy_to_user(to, from, n); |
ec6347bb | 640 | n = copy_mc_to_user((__force void *) to, from, n); |
8780356e DW |
641 | } |
642 | return n; | |
643 | } | |
644 | ||
ec6347bb | 645 | static unsigned long copy_mc_to_page(struct page *page, size_t offset, |
8780356e DW |
646 | const char *from, size_t len) |
647 | { | |
648 | unsigned long ret; | |
649 | char *to; | |
650 | ||
651 | to = kmap_atomic(page); | |
ec6347bb | 652 | ret = copy_mc_to_kernel(to + offset, from, len); |
8780356e DW |
653 | kunmap_atomic(to); |
654 | ||
655 | return ret; | |
656 | } | |
657 | ||
ec6347bb | 658 | static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, |
ca146f6f DW |
659 | struct iov_iter *i) |
660 | { | |
661 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
662 | unsigned int p_mask = pipe->ring_size - 1; |
663 | unsigned int i_head; | |
ca146f6f | 664 | size_t n, off, xfer = 0; |
ca146f6f DW |
665 | |
666 | if (!sanity(i)) | |
667 | return 0; | |
668 | ||
8cefc107 | 669 | bytes = n = push_pipe(i, bytes, &i_head, &off); |
ca146f6f DW |
670 | if (unlikely(!n)) |
671 | return 0; | |
8cefc107 | 672 | do { |
ca146f6f DW |
673 | size_t chunk = min_t(size_t, n, PAGE_SIZE - off); |
674 | unsigned long rem; | |
675 | ||
ec6347bb | 676 | rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page, |
8cefc107 DH |
677 | off, addr, chunk); |
678 | i->head = i_head; | |
ca146f6f DW |
679 | i->iov_offset = off + chunk - rem; |
680 | xfer += chunk - rem; | |
681 | if (rem) | |
682 | break; | |
683 | n -= chunk; | |
684 | addr += chunk; | |
8cefc107 DH |
685 | off = 0; |
686 | i_head++; | |
687 | } while (n); | |
ca146f6f DW |
688 | i->count -= xfer; |
689 | return xfer; | |
690 | } | |
691 | ||
bf3eeb9b | 692 | /** |
ec6347bb | 693 | * _copy_mc_to_iter - copy to iter with source memory error exception handling |
bf3eeb9b DW |
694 | * @addr: source kernel address |
695 | * @bytes: total transfer length | |
696 | * @iter: destination iterator | |
697 | * | |
ec6347bb DW |
698 | * The pmem driver deploys this for the dax operation |
699 | * (dax_copy_to_iter()) for dax reads (bypass page-cache and the | |
700 | * block-layer). Upon a machine check (#MC), read(2) aborts and returns |
701 | * EIO or the number of bytes successfully copied. |
bf3eeb9b | 702 | * |
ec6347bb | 703 | * The main differences between this and typical _copy_to_iter() are: |
bf3eeb9b DW |
704 | * |
705 | * * Typical tail/residue handling after a fault retries the copy | |
706 | * byte-by-byte until the fault happens again. Re-triggering machine | |
707 | * checks is potentially fatal so the implementation uses source | |
708 | * alignment and poison alignment assumptions to avoid re-triggering | |
709 | * hardware exceptions. | |
710 | * | |
711 | * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies. | |
712 | * Compare to copy_to_iter() where only ITER_IOVEC attempts might return | |
713 | * a short copy. | |
bf3eeb9b | 714 | */ |
ec6347bb | 715 | size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) |
8780356e | 716 | { |
00e23707 | 717 | if (unlikely(iov_iter_is_pipe(i))) |
ec6347bb | 718 | return copy_mc_pipe_to_iter(addr, bytes, i); |
8780356e DW |
719 | if (iter_is_iovec(i)) |
720 | might_fault(); | |
7baa5099 AV |
721 | __iterate_and_advance(i, bytes, base, len, off, |
722 | copyout_mc(base, addr + off, len), | |
723 | copy_mc_to_kernel(base, addr + off, len) | |
8780356e DW |
724 | ) |
725 | ||
726 | return bytes; | |
727 | } | |
ec6347bb DW |
728 | EXPORT_SYMBOL_GPL(_copy_mc_to_iter); |
729 | #endif /* CONFIG_ARCH_HAS_COPY_MC */ | |
8780356e | 730 | |
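A hedged sketch of how a pmem/dax-style read path might consume the return value (assuming CONFIG_ARCH_HAS_COPY_MC): the copy stops at the first poisoned source, so anything short of the clamped request signals a media error rather than a plain fault. pmem_style_read() is an invented name.

```c
#include <linux/uio.h>

/* Sketch only: read from (possibly poisoned) persistent memory into a
 * user iterator, reporting how far the copy got. */
static size_t pmem_style_read(const void *kaddr, size_t len,
			      struct iov_iter *i)
{
	size_t want = min(len, iov_iter_count(i));
	size_t done = _copy_mc_to_iter(kaddr, len, i);

	if (done < want)
		pr_debug("copy stopped after %zu of %zu bytes\n", done, want);
	return done;
}
```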
aa28de27 | 731 | size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) |
c35e0248 | 732 | { |
00e23707 | 733 | if (unlikely(iov_iter_is_pipe(i))) { |
241699cd AV |
734 | WARN_ON(1); |
735 | return 0; | |
736 | } | |
09fc68dc AV |
737 | if (iter_is_iovec(i)) |
738 | might_fault(); | |
7baa5099 AV |
739 | iterate_and_advance(i, bytes, base, len, off, |
740 | copyin(addr + off, base, len), | |
741 | memcpy(addr + off, base, len) | |
0dbca9a4 AV |
742 | ) |
743 | ||
744 | return bytes; | |
c35e0248 | 745 | } |
aa28de27 | 746 | EXPORT_SYMBOL(_copy_from_iter); |
c35e0248 | 747 | |
aa28de27 | 748 | size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) |
aa583096 | 749 | { |
00e23707 | 750 | if (unlikely(iov_iter_is_pipe(i))) { |
241699cd AV |
751 | WARN_ON(1); |
752 | return 0; | |
753 | } | |
7baa5099 AV |
754 | iterate_and_advance(i, bytes, base, len, off, |
755 | __copy_from_user_inatomic_nocache(addr + off, base, len), | |
756 | memcpy(addr + off, base, len) | |
aa583096 AV |
757 | ) |
758 | ||
759 | return bytes; | |
760 | } | |
aa28de27 | 761 | EXPORT_SYMBOL(_copy_from_iter_nocache); |
aa583096 | 762 | |
0aed55af | 763 | #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE |
abd08d7d DW |
764 | /** |
765 | * _copy_from_iter_flushcache - write destination through cpu cache | |
766 | * @addr: destination kernel address | |
767 | * @bytes: total transfer length | |
768 | * @iter: source iterator | |
769 | * | |
770 | * The pmem driver arranges for filesystem-dax to use this facility via | |
771 | * dax_copy_from_iter() for ensuring that writes to persistent memory | |
772 | * are flushed through the CPU cache. It is differentiated from | |
773 | * _copy_from_iter_nocache() in that it guarantees all data is flushed for |
774 | * all iterator types. The _copy_from_iter_nocache() only attempts to | |
775 | * bypass the cache for the ITER_IOVEC case, and on some archs may use | |
776 | * instructions that strand dirty-data in the cache. | |
777 | */ | |
6a37e940 | 778 | size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) |
0aed55af | 779 | { |
00e23707 | 780 | if (unlikely(iov_iter_is_pipe(i))) { |
0aed55af DW |
781 | WARN_ON(1); |
782 | return 0; | |
783 | } | |
7baa5099 AV |
784 | iterate_and_advance(i, bytes, base, len, off, |
785 | __copy_from_user_flushcache(addr + off, base, len), | |
786 | memcpy_flushcache(addr + off, base, len) | |
0aed55af DW |
787 | ) |
788 | ||
789 | return bytes; | |
790 | } | |
6a37e940 | 791 | EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); |
0aed55af DW |
792 | #endif |
793 | ||
72e809ed AV |
794 | static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) |
795 | { | |
6daef95b ED |
796 | struct page *head; |
797 | size_t v = n + offset; | |
798 | ||
799 | /* | |
800 | * The general case needs to access the page order in order | |
801 | * to compute the page size. | |
802 | * However, we mostly deal with order-0 pages and thus can | |
803 | * avoid a possible cache line miss for requests that fit all | |
804 | * page orders. | |
805 | */ | |
806 | if (n <= v && v <= PAGE_SIZE) | |
807 | return true; | |
808 | ||
809 | head = compound_head(page); | |
810 | v += (page - head) << PAGE_SHIFT; | |
a90bcb86 | 811 | |
a50b854e | 812 | if (likely(n <= v && v <= (page_size(head)))) |
72e809ed AV |
813 | return true; |
814 | WARN_ON(1); | |
815 | return false; | |
816 | } | |
cbbd26b8 | 817 | |
08aa6479 | 818 | static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes, |
62a8067a AV |
819 | struct iov_iter *i) |
820 | { | |
28f38db7 AV |
821 | if (likely(iter_is_iovec(i))) |
822 | return copy_page_to_iter_iovec(page, offset, bytes, i); | |
823 | if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) { | |
d271524a AV |
824 | void *kaddr = kmap_atomic(page); |
825 | size_t wanted = copy_to_iter(kaddr + offset, bytes, i); | |
826 | kunmap_atomic(kaddr); | |
827 | return wanted; | |
28f38db7 AV |
828 | } |
829 | if (iov_iter_is_pipe(i)) | |
830 | return copy_page_to_iter_pipe(page, offset, bytes, i); | |
831 | if (unlikely(iov_iter_is_discard(i))) { | |
a506abc7 AV |
832 | if (unlikely(i->count < bytes)) |
833 | bytes = i->count; | |
834 | i->count -= bytes; | |
9ea9ce04 | 835 | return bytes; |
28f38db7 AV |
836 | } |
837 | WARN_ON(1); | |
838 | return 0; | |
62a8067a | 839 | } |
08aa6479 AV |
840 | |
841 | size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, | |
842 | struct iov_iter *i) | |
843 | { | |
844 | size_t res = 0; | |
845 | if (unlikely(!page_copy_sane(page, offset, bytes))) | |
846 | return 0; | |
847 | page += offset / PAGE_SIZE; // first subpage | |
848 | offset %= PAGE_SIZE; | |
849 | while (1) { | |
850 | size_t n = __copy_page_to_iter(page, offset, | |
851 | min(bytes, (size_t)PAGE_SIZE - offset), i); | |
852 | res += n; | |
853 | bytes -= n; | |
854 | if (!bytes || !n) | |
855 | break; | |
856 | offset += n; | |
857 | if (offset == PAGE_SIZE) { | |
858 | page++; | |
859 | offset = 0; | |
860 | } | |
861 | } | |
862 | return res; | |
863 | } | |
62a8067a AV |
864 | EXPORT_SYMBOL(copy_page_to_iter); |
865 | ||
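A hedged usage sketch: a read path that has a pagecache (or other) page in hand only needs to clamp the request to the page and let copy_page_to_iter() pick the right per-type copy above. read_one_page() is an invented helper.

```c
#include <linux/mm.h>
#include <linux/uio.h>

/* Sketch only: copy the part of "page" that backs file position "pos"
 * into the iterator, at most "want" bytes. */
static size_t read_one_page(struct page *page, loff_t pos, size_t want,
			    struct iov_iter *to)
{
	size_t offset = offset_in_page(pos);
	size_t n = min_t(size_t, want, PAGE_SIZE - offset);

	return copy_page_to_iter(page, offset, n, to);
}
```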
866 | size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, | |
867 | struct iov_iter *i) | |
868 | { | |
72e809ed AV |
869 | if (unlikely(!page_copy_sane(page, offset, bytes))) |
870 | return 0; | |
28f38db7 AV |
871 | if (likely(iter_is_iovec(i))) |
872 | return copy_page_from_iter_iovec(page, offset, bytes, i); | |
873 | if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) { | |
d271524a | 874 | void *kaddr = kmap_atomic(page); |
aa28de27 | 875 | size_t wanted = _copy_from_iter(kaddr + offset, bytes, i); |
d271524a AV |
876 | kunmap_atomic(kaddr); |
877 | return wanted; | |
28f38db7 AV |
878 | } |
879 | WARN_ON(1); | |
880 | return 0; | |
62a8067a AV |
881 | } |
882 | EXPORT_SYMBOL(copy_page_from_iter); | |
883 | ||
241699cd AV |
884 | static size_t pipe_zero(size_t bytes, struct iov_iter *i) |
885 | { | |
886 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
887 | unsigned int p_mask = pipe->ring_size - 1; |
888 | unsigned int i_head; | |
241699cd | 889 | size_t n, off; |
241699cd AV |
890 | |
891 | if (!sanity(i)) | |
892 | return 0; | |
893 | ||
8cefc107 | 894 | bytes = n = push_pipe(i, bytes, &i_head, &off); |
241699cd AV |
895 | if (unlikely(!n)) |
896 | return 0; | |
897 | ||
8cefc107 | 898 | do { |
241699cd | 899 | size_t chunk = min_t(size_t, n, PAGE_SIZE - off); |
8cefc107 DH |
900 | memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk); |
901 | i->head = i_head; | |
241699cd AV |
902 | i->iov_offset = off + chunk; |
903 | n -= chunk; | |
8cefc107 DH |
904 | off = 0; |
905 | i_head++; | |
906 | } while (n); | |
241699cd AV |
907 | i->count -= bytes; |
908 | return bytes; | |
909 | } | |
910 | ||
c35e0248 MW |
911 | size_t iov_iter_zero(size_t bytes, struct iov_iter *i) |
912 | { | |
00e23707 | 913 | if (unlikely(iov_iter_is_pipe(i))) |
241699cd | 914 | return pipe_zero(bytes, i); |
7baa5099 AV |
915 | iterate_and_advance(i, bytes, base, len, count, |
916 | clear_user(base, len), | |
917 | memset(base, 0, len) | |
8442fa46 AV |
918 | ) |
919 | ||
920 | return bytes; | |
c35e0248 MW |
921 | } |
922 | EXPORT_SYMBOL(iov_iter_zero); | |
923 | ||
f0b65f39 AV |
924 | size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes, |
925 | struct iov_iter *i) | |
62a8067a | 926 | { |
04a31165 | 927 | char *kaddr = kmap_atomic(page), *p = kaddr + offset; |
72e809ed AV |
928 | if (unlikely(!page_copy_sane(page, offset, bytes))) { |
929 | kunmap_atomic(kaddr); | |
930 | return 0; | |
931 | } | |
9ea9ce04 | 932 | if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { |
241699cd AV |
933 | kunmap_atomic(kaddr); |
934 | WARN_ON(1); | |
935 | return 0; | |
936 | } | |
7baa5099 AV |
937 | iterate_and_advance(i, bytes, base, len, off, |
938 | copyin(p + off, base, len), | |
939 | memcpy(p + off, base, len) | |
04a31165 AV |
940 | ) |
941 | kunmap_atomic(kaddr); | |
942 | return bytes; | |
62a8067a | 943 | } |
f0b65f39 | 944 | EXPORT_SYMBOL(copy_page_from_iter_atomic); |
62a8067a | 945 | |
b9dc6f65 AV |
946 | static inline void pipe_truncate(struct iov_iter *i) |
947 | { | |
948 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
949 | unsigned int p_tail = pipe->tail; |
950 | unsigned int p_head = pipe->head; | |
951 | unsigned int p_mask = pipe->ring_size - 1; | |
952 | ||
953 | if (!pipe_empty(p_head, p_tail)) { | |
954 | struct pipe_buffer *buf; | |
955 | unsigned int i_head = i->head; | |
b9dc6f65 | 956 | size_t off = i->iov_offset; |
8cefc107 | 957 | |
b9dc6f65 | 958 | if (off) { |
8cefc107 DH |
959 | buf = &pipe->bufs[i_head & p_mask]; |
960 | buf->len = off - buf->offset; | |
961 | i_head++; | |
b9dc6f65 | 962 | } |
8cefc107 DH |
963 | while (p_head != i_head) { |
964 | p_head--; | |
965 | pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]); | |
b9dc6f65 | 966 | } |
8cefc107 DH |
967 | |
968 | pipe->head = p_head; | |
b9dc6f65 AV |
969 | } |
970 | } | |
971 | ||
241699cd AV |
972 | static void pipe_advance(struct iov_iter *i, size_t size) |
973 | { | |
974 | struct pipe_inode_info *pipe = i->pipe; | |
241699cd | 975 | if (size) { |
b9dc6f65 | 976 | struct pipe_buffer *buf; |
8cefc107 DH |
977 | unsigned int p_mask = pipe->ring_size - 1; |
978 | unsigned int i_head = i->head; | |
b9dc6f65 | 979 | size_t off = i->iov_offset, left = size; |
8cefc107 | 980 | |
241699cd | 981 | if (off) /* make it relative to the beginning of buffer */ |
8cefc107 | 982 | left += off - pipe->bufs[i_head & p_mask].offset; |
241699cd | 983 | while (1) { |
8cefc107 | 984 | buf = &pipe->bufs[i_head & p_mask]; |
b9dc6f65 | 985 | if (left <= buf->len) |
241699cd | 986 | break; |
b9dc6f65 | 987 | left -= buf->len; |
8cefc107 | 988 | i_head++; |
241699cd | 989 | } |
8cefc107 | 990 | i->head = i_head; |
b9dc6f65 | 991 | i->iov_offset = buf->offset + left; |
241699cd | 992 | } |
b9dc6f65 AV |
993 | i->count -= size; |
994 | /* ... and discard everything past that point */ | |
995 | pipe_truncate(i); | |
241699cd AV |
996 | } |
997 | ||
54c8195b PB |
998 | static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) |
999 | { | |
1000 | struct bvec_iter bi; | |
1001 | ||
1002 | bi.bi_size = i->count; | |
1003 | bi.bi_bvec_done = i->iov_offset; | |
1004 | bi.bi_idx = 0; | |
1005 | bvec_iter_advance(i->bvec, &bi, size); | |
1006 | ||
1007 | i->bvec += bi.bi_idx; | |
1008 | i->nr_segs -= bi.bi_idx; | |
1009 | i->count = bi.bi_size; | |
1010 | i->iov_offset = bi.bi_bvec_done; | |
1011 | } | |
1012 | ||
185ac4d4 AV |
1013 | static void iov_iter_iovec_advance(struct iov_iter *i, size_t size) |
1014 | { | |
1015 | const struct iovec *iov, *end; | |
1016 | ||
1017 | if (!i->count) | |
1018 | return; | |
1019 | i->count -= size; | |
1020 | ||
1021 | size += i->iov_offset; // from beginning of current segment | |
1022 | for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) { | |
1023 | if (likely(size < iov->iov_len)) | |
1024 | break; | |
1025 | size -= iov->iov_len; | |
1026 | } | |
1027 | i->iov_offset = size; | |
1028 | i->nr_segs -= iov - i->iov; | |
1029 | i->iov = iov; | |
1030 | } | |
1031 | ||
62a8067a AV |
1032 | void iov_iter_advance(struct iov_iter *i, size_t size) |
1033 | { | |
3b3fc051 AV |
1034 | if (unlikely(i->count < size)) |
1035 | size = i->count; | |
185ac4d4 AV |
1036 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) { |
1037 | /* iovec and kvec have identical layouts */ | |
1038 | iov_iter_iovec_advance(i, size); | |
1039 | } else if (iov_iter_is_bvec(i)) { | |
1040 | iov_iter_bvec_advance(i, size); | |
1041 | } else if (iov_iter_is_pipe(i)) { | |
241699cd | 1042 | pipe_advance(i, size); |
185ac4d4 | 1043 | } else if (unlikely(iov_iter_is_xarray(i))) { |
7ff50620 DH |
1044 | i->iov_offset += size; |
1045 | i->count -= size; | |
185ac4d4 AV |
1046 | } else if (iov_iter_is_discard(i)) { |
1047 | i->count -= size; | |
54c8195b | 1048 | } |
62a8067a AV |
1049 | } |
1050 | EXPORT_SYMBOL(iov_iter_advance); | |
1051 | ||
27c0e374 AV |
1052 | void iov_iter_revert(struct iov_iter *i, size_t unroll) |
1053 | { | |
1054 | if (!unroll) | |
1055 | return; | |
5b47d59a AV |
1056 | if (WARN_ON(unroll > MAX_RW_COUNT)) |
1057 | return; | |
27c0e374 | 1058 | i->count += unroll; |
00e23707 | 1059 | if (unlikely(iov_iter_is_pipe(i))) { |
27c0e374 | 1060 | struct pipe_inode_info *pipe = i->pipe; |
8cefc107 DH |
1061 | unsigned int p_mask = pipe->ring_size - 1; |
1062 | unsigned int i_head = i->head; | |
27c0e374 AV |
1063 | size_t off = i->iov_offset; |
1064 | while (1) { | |
8cefc107 DH |
1065 | struct pipe_buffer *b = &pipe->bufs[i_head & p_mask]; |
1066 | size_t n = off - b->offset; | |
27c0e374 | 1067 | if (unroll < n) { |
4fa55cef | 1068 | off -= unroll; |
27c0e374 AV |
1069 | break; |
1070 | } | |
1071 | unroll -= n; | |
8cefc107 | 1072 | if (!unroll && i_head == i->start_head) { |
27c0e374 AV |
1073 | off = 0; |
1074 | break; | |
1075 | } | |
8cefc107 DH |
1076 | i_head--; |
1077 | b = &pipe->bufs[i_head & p_mask]; | |
1078 | off = b->offset + b->len; | |
27c0e374 AV |
1079 | } |
1080 | i->iov_offset = off; | |
8cefc107 | 1081 | i->head = i_head; |
27c0e374 AV |
1082 | pipe_truncate(i); |
1083 | return; | |
1084 | } | |
9ea9ce04 DH |
1085 | if (unlikely(iov_iter_is_discard(i))) |
1086 | return; | |
27c0e374 AV |
1087 | if (unroll <= i->iov_offset) { |
1088 | i->iov_offset -= unroll; | |
1089 | return; | |
1090 | } | |
1091 | unroll -= i->iov_offset; | |
7ff50620 DH |
1092 | if (iov_iter_is_xarray(i)) { |
1093 | BUG(); /* We should never go beyond the start of the specified | |
1094 | * range since we might then be straying into pages that | |
1095 | * aren't pinned. | |
1096 | */ | |
1097 | } else if (iov_iter_is_bvec(i)) { | |
27c0e374 AV |
1098 | const struct bio_vec *bvec = i->bvec; |
1099 | while (1) { | |
1100 | size_t n = (--bvec)->bv_len; | |
1101 | i->nr_segs++; | |
1102 | if (unroll <= n) { | |
1103 | i->bvec = bvec; | |
1104 | i->iov_offset = n - unroll; | |
1105 | return; | |
1106 | } | |
1107 | unroll -= n; | |
1108 | } | |
1109 | } else { /* same logics for iovec and kvec */ | |
1110 | const struct iovec *iov = i->iov; | |
1111 | while (1) { | |
1112 | size_t n = (--iov)->iov_len; | |
1113 | i->nr_segs++; | |
1114 | if (unroll <= n) { | |
1115 | i->iov = iov; | |
1116 | i->iov_offset = n - unroll; | |
1117 | return; | |
1118 | } | |
1119 | unroll -= n; | |
1120 | } | |
1121 | } | |
1122 | } | |
1123 | EXPORT_SYMBOL(iov_iter_revert); | |
1124 | ||
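iov_iter_revert() exists for the "consumed but then failed" case: a caller that pulled data out of the iterator and could not use it must put the count back so a retry sees the same bytes. A hedged sketch; hypothetical_transmit() is a made-up stand-in for whatever consumes the buffer.

```c
#include <linux/uio.h>

extern ssize_t hypothetical_transmit(const void *buf, size_t len); /* invented */

/* Sketch only: copy from the iterator, try to send, and undo the
 * iterator advance if the send fails so the caller can retry. */
static ssize_t send_once(void *buf, size_t len, struct iov_iter *from)
{
	size_t copied = copy_from_iter(buf, len, from);
	ssize_t ret = hypothetical_transmit(buf, copied);

	if (ret < 0) {
		iov_iter_revert(from, copied);	/* give the bytes back */
		return ret;
	}
	return copied;
}
```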
62a8067a AV |
1125 | /* |
1126 | * Return the count of just the current iov_iter segment. | |
1127 | */ | |
1128 | size_t iov_iter_single_seg_count(const struct iov_iter *i) | |
1129 | { | |
28f38db7 AV |
1130 | if (i->nr_segs > 1) { |
1131 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) | |
1132 | return min(i->count, i->iov->iov_len - i->iov_offset); | |
1133 | if (iov_iter_is_bvec(i)) | |
1134 | return min(i->count, i->bvec->bv_len - i->iov_offset); | |
1135 | } | |
1136 | return i->count; | |
62a8067a AV |
1137 | } |
1138 | EXPORT_SYMBOL(iov_iter_single_seg_count); | |
1139 | ||
aa563d7b | 1140 | void iov_iter_kvec(struct iov_iter *i, unsigned int direction, |
05afcb77 | 1141 | const struct kvec *kvec, unsigned long nr_segs, |
abb78f87 AV |
1142 | size_t count) |
1143 | { | |
aa563d7b | 1144 | WARN_ON(direction & ~(READ | WRITE)); |
8cd54c1c AV |
1145 | *i = (struct iov_iter){ |
1146 | .iter_type = ITER_KVEC, | |
1147 | .data_source = direction, | |
1148 | .kvec = kvec, | |
1149 | .nr_segs = nr_segs, | |
1150 | .iov_offset = 0, | |
1151 | .count = count | |
1152 | }; | |
abb78f87 AV |
1153 | } |
1154 | EXPORT_SYMBOL(iov_iter_kvec); | |
1155 | ||
aa563d7b | 1156 | void iov_iter_bvec(struct iov_iter *i, unsigned int direction, |
05afcb77 AV |
1157 | const struct bio_vec *bvec, unsigned long nr_segs, |
1158 | size_t count) | |
1159 | { | |
aa563d7b | 1160 | WARN_ON(direction & ~(READ | WRITE)); |
8cd54c1c AV |
1161 | *i = (struct iov_iter){ |
1162 | .iter_type = ITER_BVEC, | |
1163 | .data_source = direction, | |
1164 | .bvec = bvec, | |
1165 | .nr_segs = nr_segs, | |
1166 | .iov_offset = 0, | |
1167 | .count = count | |
1168 | }; | |
05afcb77 AV |
1169 | } |
1170 | EXPORT_SYMBOL(iov_iter_bvec); | |
1171 | ||
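Usage sketch for the kernel-space constructors above: wrap a kernel buffer in an ITER_KVEC so code that only speaks iov_iter can fill it, much as kernel_read()-style helpers do. The helper name is invented.

```c
#include <linux/uio.h>

/* Sketch only: let an iov_iter-based producer fill a plain kernel
 * buffer through a one-segment ITER_KVEC. */
static size_t fill_kernel_buf(void *dst, size_t len, const void *src)
{
	struct kvec kv = { .iov_base = dst, .iov_len = len };
	struct iov_iter iter;

	/* READ: the iterator is the destination of the transfer */
	iov_iter_kvec(&iter, READ, &kv, 1, len);
	return copy_to_iter(src, len, &iter);
}
```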
aa563d7b | 1172 | void iov_iter_pipe(struct iov_iter *i, unsigned int direction, |
241699cd AV |
1173 | struct pipe_inode_info *pipe, |
1174 | size_t count) | |
1175 | { | |
aa563d7b | 1176 | BUG_ON(direction != READ); |
8cefc107 | 1177 | WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size)); |
8cd54c1c AV |
1178 | *i = (struct iov_iter){ |
1179 | .iter_type = ITER_PIPE, | |
1180 | .data_source = false, | |
1181 | .pipe = pipe, | |
1182 | .head = pipe->head, | |
1183 | .start_head = pipe->head, | |
1184 | .iov_offset = 0, | |
1185 | .count = count | |
1186 | }; | |
241699cd AV |
1187 | } |
1188 | EXPORT_SYMBOL(iov_iter_pipe); | |
1189 | ||
7ff50620 DH |
1190 | /** |
1191 | * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray | |
1192 | * @i: The iterator to initialise. | |
1193 | * @direction: The direction of the transfer. | |
1194 | * @xarray: The xarray to access. | |
1195 | * @start: The start file position. | |
1196 | * @count: The size of the I/O buffer in bytes. | |
1197 | * | |
1198 | * Set up an I/O iterator to either draw data out of the pages attached to an | |
1199 | * inode or to inject data into those pages. The pages *must* be prevented | |
1200 | * from evaporation, either by taking a ref on them or locking them by the | |
1201 | * caller. | |
1202 | */ | |
1203 | void iov_iter_xarray(struct iov_iter *i, unsigned int direction, | |
1204 | struct xarray *xarray, loff_t start, size_t count) | |
1205 | { | |
1206 | BUG_ON(direction & ~1); | |
8cd54c1c AV |
1207 | *i = (struct iov_iter) { |
1208 | .iter_type = ITER_XARRAY, | |
1209 | .data_source = direction, | |
1210 | .xarray = xarray, | |
1211 | .xarray_start = start, | |
1212 | .count = count, | |
1213 | .iov_offset = 0 | |
1214 | }; | |
7ff50620 DH |
1215 | } |
1216 | EXPORT_SYMBOL(iov_iter_xarray); | |
1217 | ||
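A hedged sketch of the intended use described in the comment above: a network filesystem pointing an iterator straight at an inode's pagecache (the i_pages xarray) as the source of a write-out, with the pages already pinned or locked by the caller. init_pagecache_iter() is an invented wrapper.

```c
#include <linux/fs.h>
#include <linux/uio.h>

/* Sketch only: iterate over the pagecache pages backing [start, start+len)
 * as the *source* of a write-out; the caller must keep them pinned. */
static void init_pagecache_iter(struct iov_iter *iter, struct inode *inode,
				loff_t start, size_t len)
{
	iov_iter_xarray(iter, WRITE, &inode->i_mapping->i_pages, start, len);
}
```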
9ea9ce04 DH |
1218 | /** |
1219 | * iov_iter_discard - Initialise an I/O iterator that discards data | |
1220 | * @i: The iterator to initialise. | |
1221 | * @direction: The direction of the transfer. | |
1222 | * @count: The size of the I/O buffer in bytes. | |
1223 | * | |
1224 | * Set up an I/O iterator that just discards everything that's written to it. | |
1225 | * It's only available as a READ iterator. | |
1226 | */ | |
1227 | void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) | |
1228 | { | |
1229 | BUG_ON(direction != READ); | |
8cd54c1c AV |
1230 | *i = (struct iov_iter){ |
1231 | .iter_type = ITER_DISCARD, | |
1232 | .data_source = false, | |
1233 | .count = count, | |
1234 | .iov_offset = 0 | |
1235 | }; | |
9ea9ce04 DH |
1236 | } |
1237 | EXPORT_SYMBOL(iov_iter_discard); | |
1238 | ||
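Usage sketch: a discard iterator is a convenient sink when a producer insists on writing somewhere but the caller just wants the bytes gone (for example, skipping over part of a stream). drain_bytes() and the producer callback are invented for illustration.

```c
#include <linux/uio.h>

/* Sketch only: run an iov_iter-based producer into a sink that simply
 * counts the bytes and throws them away. */
static void drain_bytes(size_t len, size_t (*produce)(struct iov_iter *to))
{
	struct iov_iter sink;

	iov_iter_discard(&sink, READ, len);
	produce(&sink);		/* everything "copied" here is dropped */
}
```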
9221d2e3 | 1239 | static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) |
62a8067a | 1240 | { |
04a31165 AV |
1241 | unsigned long res = 0; |
1242 | size_t size = i->count; | |
9221d2e3 AV |
1243 | size_t skip = i->iov_offset; |
1244 | unsigned k; | |
1245 | ||
1246 | for (k = 0; k < i->nr_segs; k++, skip = 0) { | |
1247 | size_t len = i->iov[k].iov_len - skip; | |
1248 | if (len) { | |
1249 | res |= (unsigned long)i->iov[k].iov_base + skip; | |
1250 | if (len > size) | |
1251 | len = size; | |
1252 | res |= len; | |
1253 | size -= len; | |
1254 | if (!size) | |
1255 | break; | |
1256 | } | |
1257 | } | |
1258 | return res; | |
1259 | } | |
04a31165 | 1260 | |
9221d2e3 AV |
1261 | static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i) |
1262 | { | |
1263 | unsigned res = 0; | |
1264 | size_t size = i->count; | |
1265 | unsigned skip = i->iov_offset; | |
1266 | unsigned k; | |
1267 | ||
1268 | for (k = 0; k < i->nr_segs; k++, skip = 0) { | |
1269 | size_t len = i->bvec[k].bv_len - skip; | |
1270 | res |= (unsigned long)i->bvec[k].bv_offset + skip; | |
1271 | if (len > size) | |
1272 | len = size; | |
1273 | res |= len; | |
1274 | size -= len; | |
1275 | if (!size) | |
1276 | break; | |
1277 | } | |
1278 | return res; | |
1279 | } | |
1280 | ||
1281 | unsigned long iov_iter_alignment(const struct iov_iter *i) | |
1282 | { | |
1283 | /* iovec and kvec have identical layouts */ | |
1284 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) | |
1285 | return iov_iter_alignment_iovec(i); | |
1286 | ||
1287 | if (iov_iter_is_bvec(i)) | |
1288 | return iov_iter_alignment_bvec(i); | |
1289 | ||
1290 | if (iov_iter_is_pipe(i)) { | |
e0ff126e | 1291 | unsigned int p_mask = i->pipe->ring_size - 1; |
9221d2e3 | 1292 | size_t size = i->count; |
e0ff126e | 1293 | |
8cefc107 | 1294 | if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask])) |
241699cd AV |
1295 | return size | i->iov_offset; |
1296 | return size; | |
1297 | } | |
9221d2e3 AV |
1298 | |
1299 | if (iov_iter_is_xarray(i)) | |
3d14ec1f | 1300 | return (i->xarray_start + i->iov_offset) | i->count; |
9221d2e3 AV |
1301 | |
1302 | return 0; | |
62a8067a AV |
1303 | } |
1304 | EXPORT_SYMBOL(iov_iter_alignment); | |
1305 | ||
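The returned value ORs together every segment address and length, so masking it against a block-size mask answers "is everything aligned?" in one test. A hedged sketch of the direct-I/O-style check this enables; dio_aligned() is an invented name.

```c
#include <linux/uio.h>

/* Sketch only: true if the file position and every segment of the
 * iterator are aligned to the (power-of-two) logical block size. */
static bool dio_aligned(const struct iov_iter *iter, loff_t pos,
			unsigned int blksize)
{
	return !((pos | iov_iter_alignment(iter)) & (blksize - 1));
}
```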
357f435d AV |
1306 | unsigned long iov_iter_gap_alignment(const struct iov_iter *i) |
1307 | { | |
33844e66 | 1308 | unsigned long res = 0; |
610c7a71 | 1309 | unsigned long v = 0; |
357f435d | 1310 | size_t size = i->count; |
610c7a71 | 1311 | unsigned k; |
357f435d | 1312 | |
610c7a71 | 1313 | if (WARN_ON(!iter_is_iovec(i))) |
241699cd | 1314 | return ~0U; |
241699cd | 1315 | |
610c7a71 AV |
1316 | for (k = 0; k < i->nr_segs; k++) { |
1317 | if (i->iov[k].iov_len) { | |
1318 | unsigned long base = (unsigned long)i->iov[k].iov_base; | |
1319 | if (v) // if not the first one | |
1320 | res |= base | v; // this start | previous end | |
1321 | v = base + i->iov[k].iov_len; | |
1322 | if (size <= i->iov[k].iov_len) | |
1323 | break; | |
1324 | size -= i->iov[k].iov_len; | |
1325 | } | |
1326 | } | |
33844e66 | 1327 | return res; |
357f435d AV |
1328 | } |
1329 | EXPORT_SYMBOL(iov_iter_gap_alignment); | |
1330 | ||
e76b6312 | 1331 | static inline ssize_t __pipe_get_pages(struct iov_iter *i, |
241699cd AV |
1332 | size_t maxsize, |
1333 | struct page **pages, | |
8cefc107 | 1334 | int iter_head, |
241699cd AV |
1335 | size_t *start) |
1336 | { | |
1337 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
1338 | unsigned int p_mask = pipe->ring_size - 1; |
1339 | ssize_t n = push_pipe(i, maxsize, &iter_head, start); | |
241699cd AV |
1340 | if (!n) |
1341 | return -EFAULT; | |
1342 | ||
1343 | maxsize = n; | |
1344 | n += *start; | |
1689c73a | 1345 | while (n > 0) { |
8cefc107 DH |
1346 | get_page(*pages++ = pipe->bufs[iter_head & p_mask].page); |
1347 | iter_head++; | |
241699cd AV |
1348 | n -= PAGE_SIZE; |
1349 | } | |
1350 | ||
1351 | return maxsize; | |
1352 | } | |
1353 | ||
1354 | static ssize_t pipe_get_pages(struct iov_iter *i, | |
1355 | struct page **pages, size_t maxsize, unsigned maxpages, | |
1356 | size_t *start) | |
1357 | { | |
8cefc107 | 1358 | unsigned int iter_head, npages; |
241699cd | 1359 | size_t capacity; |
241699cd AV |
1360 | |
1361 | if (!sanity(i)) | |
1362 | return -EFAULT; | |
1363 | ||
8cefc107 DH |
1364 | data_start(i, &iter_head, start); |
1365 | /* Amount of free space: some of this one + all after this one */ | |
1366 | npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); | |
1367 | capacity = min(npages, maxpages) * PAGE_SIZE - *start; | |
241699cd | 1368 | |
8cefc107 | 1369 | return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start); |
241699cd AV |
1370 | } |
1371 | ||
7ff50620 DH |
1372 | static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, |
1373 | pgoff_t index, unsigned int nr_pages) | |
1374 | { | |
1375 | XA_STATE(xas, xa, index); | |
1376 | struct page *page; | |
1377 | unsigned int ret = 0; | |
1378 | ||
1379 | rcu_read_lock(); | |
1380 | for (page = xas_load(&xas); page; page = xas_next(&xas)) { | |
1381 | if (xas_retry(&xas, page)) | |
1382 | continue; | |
1383 | ||
1384 | /* Has the page moved or been split? */ | |
1385 | if (unlikely(page != xas_reload(&xas))) { | |
1386 | xas_reset(&xas); | |
1387 | continue; | |
1388 | } | |
1389 | ||
1390 | pages[ret] = find_subpage(page, xas.xa_index); | |
1391 | get_page(pages[ret]); | |
1392 | if (++ret == nr_pages) | |
1393 | break; | |
1394 | } | |
1395 | rcu_read_unlock(); | |
1396 | return ret; | |
1397 | } | |
1398 | ||
1399 | static ssize_t iter_xarray_get_pages(struct iov_iter *i, | |
1400 | struct page **pages, size_t maxsize, | |
1401 | unsigned maxpages, size_t *_start_offset) | |
1402 | { | |
1403 | unsigned nr, offset; | |
1404 | pgoff_t index, count; | |
1405 | size_t size = maxsize, actual; | |
1406 | loff_t pos; | |
1407 | ||
1408 | if (!size || !maxpages) | |
1409 | return 0; | |
1410 | ||
1411 | pos = i->xarray_start + i->iov_offset; | |
1412 | index = pos >> PAGE_SHIFT; | |
1413 | offset = pos & ~PAGE_MASK; | |
1414 | *_start_offset = offset; | |
1415 | ||
1416 | count = 1; | |
1417 | if (size > PAGE_SIZE - offset) { | |
1418 | size -= PAGE_SIZE - offset; | |
1419 | count += size >> PAGE_SHIFT; | |
1420 | size &= ~PAGE_MASK; | |
1421 | if (size) | |
1422 | count++; | |
1423 | } | |
1424 | ||
1425 | if (count > maxpages) | |
1426 | count = maxpages; | |
1427 | ||
1428 | nr = iter_xarray_populate_pages(pages, i->xarray, index, count); | |
1429 | if (nr == 0) | |
1430 | return 0; | |
1431 | ||
1432 | actual = PAGE_SIZE * nr; | |
1433 | actual -= offset; | |
1434 | if (nr == count && size > 0) { | |
1435 | unsigned last_offset = (nr > 1) ? 0 : offset; | |
1436 | actual -= PAGE_SIZE - (last_offset + size); | |
1437 | } | |
1438 | return actual; | |
1439 | } | |
1440 | ||
3d671ca6 AV |
1441 | /* must be done on non-empty ITER_IOVEC one */ |
1442 | static unsigned long first_iovec_segment(const struct iov_iter *i, | |
1443 | size_t *size, size_t *start, | |
1444 | size_t maxsize, unsigned maxpages) | |
1445 | { | |
1446 | size_t skip; | |
1447 | long k; | |
1448 | ||
1449 | for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) { | |
1450 | unsigned long addr = (unsigned long)i->iov[k].iov_base + skip; | |
1451 | size_t len = i->iov[k].iov_len - skip; | |
1452 | ||
1453 | if (unlikely(!len)) | |
1454 | continue; | |
1455 | if (len > maxsize) | |
1456 | len = maxsize; | |
1457 | len += (*start = addr % PAGE_SIZE); | |
1458 | if (len > maxpages * PAGE_SIZE) | |
1459 | len = maxpages * PAGE_SIZE; | |
1460 | *size = len; | |
1461 | return addr & PAGE_MASK; | |
1462 | } | |
1463 | BUG(); // if it had been empty, we wouldn't get called | |
1464 | } | |
1465 | ||
1466 | /* must be done on non-empty ITER_BVEC one */ | |
1467 | static struct page *first_bvec_segment(const struct iov_iter *i, | |
1468 | size_t *size, size_t *start, | |
1469 | size_t maxsize, unsigned maxpages) | |
1470 | { | |
1471 | struct page *page; | |
1472 | size_t skip = i->iov_offset, len; | |
1473 | ||
1474 | len = i->bvec->bv_len - skip; | |
1475 | if (len > maxsize) | |
1476 | len = maxsize; | |
1477 | skip += i->bvec->bv_offset; | |
1478 | page = i->bvec->bv_page + skip / PAGE_SIZE; | |
1479 | len += (*start = skip % PAGE_SIZE); | |
1480 | if (len > maxpages * PAGE_SIZE) | |
1481 | len = maxpages * PAGE_SIZE; | |
1482 | *size = len; | |
1483 | return page; | |
1484 | } | |
1485 | ||
62a8067a | 1486 | ssize_t iov_iter_get_pages(struct iov_iter *i, |
2c80929c | 1487 | struct page **pages, size_t maxsize, unsigned maxpages, |
62a8067a AV |
1488 | size_t *start) |
1489 | { | |
3d671ca6 AV |
1490 | size_t len; |
1491 | int n, res; | |
1492 | ||
e5393fae AV |
1493 | if (maxsize > i->count) |
1494 | maxsize = i->count; | |
3d671ca6 AV |
1495 | if (!maxsize) |
1496 | return 0; | |
e5393fae | 1497 | |
3d671ca6 AV |
1498 | if (likely(iter_is_iovec(i))) { |
1499 | unsigned long addr; | |
e5393fae | 1500 | |
3d671ca6 | 1501 | addr = first_iovec_segment(i, &len, start, maxsize, maxpages); |
e5393fae | 1502 | n = DIV_ROUND_UP(len, PAGE_SIZE); |
73b0140b IW |
1503 | res = get_user_pages_fast(addr, n, |
1504 | iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, | |
1505 | pages); | |
e5393fae AV |
1506 | if (unlikely(res < 0)) |
1507 | return res; | |
1508 | return (res == n ? len : res * PAGE_SIZE) - *start; | |
3d671ca6 AV |
1509 | } |
1510 | if (iov_iter_is_bvec(i)) { | |
1511 | struct page *page; | |
1512 | ||
1513 | page = first_bvec_segment(i, &len, start, maxsize, maxpages); | |
1514 | n = DIV_ROUND_UP(len, PAGE_SIZE); | |
1515 | while (n--) | |
1516 | get_page(*pages++ = page++); | |
1517 | return len - *start; | |
1518 | } | |
1519 | if (iov_iter_is_pipe(i)) | |
1520 | return pipe_get_pages(i, pages, maxsize, maxpages, start); | |
1521 | if (iov_iter_is_xarray(i)) | |
1522 | return iter_xarray_get_pages(i, pages, maxsize, maxpages, start); | |
1523 | return -EFAULT; | |
62a8067a AV |
1524 | } |
1525 | EXPORT_SYMBOL(iov_iter_get_pages); | |
1526 | ||
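iov_iter_get_pages() pins pages without advancing the iterator, so the caller drops its references and advances by hand once the I/O is set up. A hedged sketch of that lifecycle (buffer size and helper name invented):

```c
#include <linux/mm.h>
#include <linux/uio.h>

/* Sketch only: pin up to 16 pages backing the iterator, pretend to use
 * them, then release the references and consume the bytes. */
static ssize_t use_iter_pages(struct iov_iter *i, size_t maxsize)
{
	struct page *pages[16];
	size_t start;	/* offset into pages[0] on return */
	ssize_t n = iov_iter_get_pages(i, pages, maxsize, 16, &start);
	int k;

	if (n <= 0)
		return n;
	/* ... hand pages[] plus "start" to the device here ... */
	for (k = 0; k < DIV_ROUND_UP(n + start, PAGE_SIZE); k++)
		put_page(pages[k]);
	iov_iter_advance(i, n);		/* get_pages does not advance */
	return n;
}
```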
1b17f1f2 AV |
1527 | static struct page **get_pages_array(size_t n) |
1528 | { | |
752ade68 | 1529 | return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL); |
1b17f1f2 AV |
1530 | } |
1531 | ||
241699cd AV |
1532 | static ssize_t pipe_get_pages_alloc(struct iov_iter *i, |
1533 | struct page ***pages, size_t maxsize, | |
1534 | size_t *start) | |
1535 | { | |
1536 | struct page **p; | |
8cefc107 | 1537 | unsigned int iter_head, npages; |
d7760d63 | 1538 | ssize_t n; |
241699cd AV |
1539 | |
1540 | if (!sanity(i)) | |
1541 | return -EFAULT; | |
1542 | ||
8cefc107 DH |
1543 | data_start(i, &iter_head, start); |
1544 | /* Amount of free space: some of this one + all after this one */ | |
1545 | npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); | |
241699cd AV |
1546 | n = npages * PAGE_SIZE - *start; |
1547 | if (maxsize > n) | |
1548 | maxsize = n; | |
1549 | else | |
1550 | npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); | |
1551 | p = get_pages_array(npages); | |
1552 | if (!p) | |
1553 | return -ENOMEM; | |
8cefc107 | 1554 | n = __pipe_get_pages(i, maxsize, p, iter_head, start); |
241699cd AV |
1555 | if (n > 0) |
1556 | *pages = p; | |
1557 | else | |
1558 | kvfree(p); | |
1559 | return n; | |
1560 | } | |
1561 | ||
7ff50620 DH |
1562 | static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i, |
1563 | struct page ***pages, size_t maxsize, | |
1564 | size_t *_start_offset) | |
1565 | { | |
1566 | struct page **p; | |
1567 | unsigned nr, offset; | |
1568 | pgoff_t index, count; | |
1569 | size_t size = maxsize, actual; | |
1570 | loff_t pos; | |
1571 | ||
1572 | if (!size) | |
1573 | return 0; | |
1574 | ||
1575 | pos = i->xarray_start + i->iov_offset; | |
1576 | index = pos >> PAGE_SHIFT; | |
1577 | offset = pos & ~PAGE_MASK; | |
1578 | *_start_offset = offset; | |
1579 | ||
1580 | count = 1; | |
1581 | if (size > PAGE_SIZE - offset) { | |
1582 | size -= PAGE_SIZE - offset; | |
1583 | count += size >> PAGE_SHIFT; | |
1584 | size &= ~PAGE_MASK; | |
1585 | if (size) | |
1586 | count++; | |
1587 | } | |
1588 | ||
1589 | p = get_pages_array(count); | |
1590 | if (!p) | |
1591 | return -ENOMEM; | |
1592 | *pages = p; | |
1593 | ||
1594 | nr = iter_xarray_populate_pages(p, i->xarray, index, count); | |
1595 | if (nr == 0) | |
1596 | return 0; | |
1597 | ||
1598 | actual = PAGE_SIZE * nr; | |
1599 | actual -= offset; | |
1600 | if (nr == count && size > 0) { | |
1601 | unsigned last_offset = (nr > 1) ? 0 : offset; | |
1602 | actual -= PAGE_SIZE - (last_offset + size); | |
1603 | } | |
1604 | return actual; | |
1605 | } | |
1606 | ||
62a8067a AV |
1607 | ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, |
1608 | struct page ***pages, size_t maxsize, | |
1609 | size_t *start) | |
1610 | { | |
1b17f1f2 | 1611 | struct page **p; |
3d671ca6 AV |
1612 | size_t len; |
1613 | int n, res; | |
1b17f1f2 AV |
1614 | |
1615 | if (maxsize > i->count) | |
1616 | maxsize = i->count; | |
3d671ca6 AV |
1617 | if (!maxsize) |
1618 | return 0; | |
1b17f1f2 | 1619 | |
3d671ca6 AV |
1620 | if (likely(iter_is_iovec(i))) { |
1621 | unsigned long addr; | |
1b17f1f2 | 1622 | |
3d671ca6 | 1623 | addr = first_iovec_segment(i, &len, start, maxsize, ~0U); |
1b17f1f2 AV |
1624 | n = DIV_ROUND_UP(len, PAGE_SIZE); |
1625 | p = get_pages_array(n); | |
1626 | if (!p) | |
1627 | return -ENOMEM; | |
73b0140b IW |
1628 | res = get_user_pages_fast(addr, n, |
1629 | iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p); | |
1b17f1f2 AV |
1630 | if (unlikely(res < 0)) { |
1631 | kvfree(p); | |
1632 | return res; | |
1633 | } | |
1634 | *pages = p; | |
1635 | return (res == n ? len : res * PAGE_SIZE) - *start; | |
3d671ca6 AV |
1636 | } |
1637 | if (iov_iter_is_bvec(i)) { | |
1638 | struct page *page; | |
1639 | ||
1640 | page = first_bvec_segment(i, &len, start, maxsize, ~0U); | |
1641 | n = DIV_ROUND_UP(len, PAGE_SIZE); | |
1642 | *pages = p = get_pages_array(n); | |
1b17f1f2 AV |
1643 | if (!p) |
1644 | return -ENOMEM; | |
3d671ca6 AV |
1645 | while (n--) |
1646 | get_page(*p++ = page++); | |
1647 | return len - *start; | |
1648 | } | |
1649 | if (iov_iter_is_pipe(i)) | |
1650 | return pipe_get_pages_alloc(i, pages, maxsize, start); | |
1651 | if (iov_iter_is_xarray(i)) | |
1652 | return iter_xarray_get_pages_alloc(i, pages, maxsize, start); | |
1653 | return -EFAULT; | |
62a8067a AV |
1654 | } |
1655 | EXPORT_SYMBOL(iov_iter_get_pages_alloc); | |
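/*
 * Example (illustrative sketch, not taken from this file): the _alloc
 * variant sizes and allocates the page array itself, so on success the
 * caller owns both the page references and the array, which must be
 * released with kvfree():
 *
 *	struct page **pages;
 *	size_t off;
 *	int n;
 *	ssize_t bytes = iov_iter_get_pages_alloc(i, &pages, maxsize, &off);
 *
 *	if (bytes <= 0)
 *		return bytes;
 *	n = DIV_ROUND_UP(off + bytes, PAGE_SIZE);
 *	while (n--)
 *		put_page(pages[n]);
 *	kvfree(pages);
 */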
1656 | ||
a604ec7e AV |
1657 | size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, |
1658 | struct iov_iter *i) | |
1659 | { | |
a604ec7e | 1660 | __wsum sum, next; |
a604ec7e | 1661 | sum = *csum; |
9ea9ce04 | 1662 | if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { |
241699cd AV |
1663 | WARN_ON(1); |
1664 | return 0; | |
1665 | } | |
7baa5099 AV |
1666 | iterate_and_advance(i, bytes, base, len, off, ({ |
1667 | next = csum_and_copy_from_user(base, addr + off, len); | |
622838f3 | 1668 | if (next) |
a604ec7e | 1669 | sum = csum_block_add(sum, next, off); |
7baa5099 | 1670 | next ? 0 : len; |
a604ec7e | 1671 | }), ({ |
7baa5099 | 1672 | sum = csum_and_memcpy(addr + off, base, len, sum, off); |
a604ec7e AV |
1673 | }) |
1674 | ) | |
1675 | *csum = sum; | |
1676 | return bytes; | |
1677 | } | |
1678 | EXPORT_SYMBOL(csum_and_copy_from_iter); | |
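/*
 * Example (illustrative sketch, not taken from this file): the caller
 * supplies the running checksum seed and gets the data plus an
 * Internet-style checksum in a single pass over the user buffers; a short
 * return value means a fault was hit part-way through:
 *
 *	__wsum csum = 0;	(seed chosen by the caller)
 *	size_t n = csum_and_copy_from_iter(kbuf, len, &csum, from);
 *
 *	if (n != len)
 *		return -EFAULT;	(short copy: fault in userspace)
 *
 * After a full copy, csum covers the len bytes now sitting in kbuf.
 */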
1679 | ||
52cbd23a | 1680 | size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, |
a604ec7e AV |
1681 | struct iov_iter *i) |
1682 | { | |
52cbd23a | 1683 | struct csum_state *csstate = _csstate; |
a604ec7e | 1684 | __wsum sum, next; |
78e1f386 AV |
1685 | |
1686 | if (unlikely(iov_iter_is_pipe(i))) | |
52cbd23a | 1687 | return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i); |
78e1f386 | 1688 | |
594e450b | 1689 | sum = csum_shift(csstate->csum, csstate->off); |
78e1f386 | 1690 | if (unlikely(iov_iter_is_discard(i))) { |
241699cd AV |
1691 | WARN_ON(1); /* for now */ |
1692 | return 0; | |
1693 | } | |
7baa5099 AV |
1694 | iterate_and_advance(i, bytes, base, len, off, ({ |
1695 | next = csum_and_copy_to_user(addr + off, base, len); | |
622838f3 | 1696 | if (next) |
a604ec7e | 1697 | sum = csum_block_add(sum, next, off); |
7baa5099 | 1698 | next ? 0 : len; |
a604ec7e | 1699 | }), ({ |
7baa5099 | 1700 | sum = csum_and_memcpy(base, addr + off, len, sum, off); |
a604ec7e AV |
1701 | }) |
1702 | ) | |
594e450b AV |
1703 | csstate->csum = csum_shift(sum, csstate->off); |
1704 | csstate->off += bytes; | |
a604ec7e AV |
1705 | return bytes; |
1706 | } | |
1707 | EXPORT_SYMBOL(csum_and_copy_to_iter); | |
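/*
 * Example (illustrative sketch, not taken from this file): _csstate points
 * to a struct csum_state (declared in <linux/uio.h>), which carries both
 * the folded checksum and the running byte offset, so one logical stream
 * can be checksummed across several calls even when the pieces have odd
 * lengths:
 *
 *	struct csum_state csstate = { .csum = 0, .off = 0 };
 *
 *	csum_and_copy_to_iter(hdr, hdrlen, &csstate, to);
 *	csum_and_copy_to_iter(payload, paylen, &csstate, to);
 *
 * Afterwards csstate.csum covers hdr followed by payload and csstate.off
 * equals hdrlen + paylen, assuming neither copy came up short.
 */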
1708 | ||
d05f4435 SG |
1709 | size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, |
1710 | struct iov_iter *i) | |
1711 | { | |
7999096f | 1712 | #ifdef CONFIG_CRYPTO_HASH |
d05f4435 SG |
1713 | struct ahash_request *hash = hashp; |
1714 | struct scatterlist sg; | |
1715 | size_t copied; | |
1716 | ||
1717 | copied = copy_to_iter(addr, bytes, i); | |
1718 | sg_init_one(&sg, addr, copied); | |
1719 | ahash_request_set_crypt(hash, &sg, NULL, copied); | |
1720 | crypto_ahash_update(hash); | |
1721 | return copied; | |
27fad74a Y |
1722 | #else |
1723 | return 0; | |
1724 | #endif | |
d05f4435 SG |
1725 | } |
1726 | EXPORT_SYMBOL(hash_and_copy_to_iter); | |
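/*
 * Note (editorial sketch, not from this file): @hashp is an
 * already-initialised struct ahash_request.  Each call copies the bytes to
 * the iterator and folds the same bytes into the hash state, so after the
 * last chunk the caller sets a result buffer on the request and finalises
 * the digest with crypto_ahash_final().  With CONFIG_CRYPTO_HASH disabled
 * the function is a stub that returns 0.
 */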
1727 | ||
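/*
 * Page-counting helpers: iov_npages() and bvec_npages() below walk the
 * remaining segments and count how many pages the first i->count bytes
 * touch, stopping early once the caller's cap is reached;
 * iov_iter_npages() dispatches to them, or computes the answer directly
 * for pipe- and xarray-backed iterators.
 */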
66531c65 | 1728 | static int iov_npages(const struct iov_iter *i, int maxpages) |
62a8067a | 1729 | { |
66531c65 AV |
1730 | size_t skip = i->iov_offset, size = i->count; |
1731 | const struct iovec *p; | |
e0f2dc40 AV |
1732 | int npages = 0; |
1733 | ||
66531c65 AV |
1734 | for (p = i->iov; size; skip = 0, p++) { |
1735 | unsigned offs = offset_in_page(p->iov_base + skip); | |
1736 | size_t len = min(p->iov_len - skip, size); | |
e0f2dc40 | 1737 | |
66531c65 AV |
1738 | if (len) { |
1739 | size -= len; | |
1740 | npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); | |
1741 | if (unlikely(npages > maxpages)) | |
1742 | return maxpages; | |
1743 | } | |
1744 | } | |
1745 | return npages; | |
1746 | } | |
1747 | ||
1748 | static int bvec_npages(const struct iov_iter *i, int maxpages) | |
1749 | { | |
1750 | size_t skip = i->iov_offset, size = i->count; | |
1751 | const struct bio_vec *p; | |
1752 | int npages = 0; | |
1753 | ||
1754 | for (p = i->bvec; size; skip = 0, p++) { | |
1755 | unsigned offs = (p->bv_offset + skip) % PAGE_SIZE; | |
1756 | size_t len = min(p->bv_len - skip, size); | |
1757 | ||
1758 | size -= len; | |
1759 | npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); | |
1760 | if (unlikely(npages > maxpages)) | |
1761 | return maxpages; | |
1762 | } | |
1763 | return npages; | |
1764 | } | |
1765 | ||
1766 | int iov_iter_npages(const struct iov_iter *i, int maxpages) | |
1767 | { | |
1768 | if (unlikely(!i->count)) | |
1769 | return 0; | |
1770 | /* iovec and kvec have identical layouts */ | |
1771 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) | |
1772 | return iov_npages(i, maxpages); | |
1773 | if (iov_iter_is_bvec(i)) | |
1774 | return bvec_npages(i, maxpages); | |
1775 | if (iov_iter_is_pipe(i)) { | |
8cefc107 | 1776 | unsigned int iter_head; |
66531c65 | 1777 | int npages; |
241699cd | 1778 | size_t off; |
241699cd AV |
1779 | |
1780 | if (!sanity(i)) | |
1781 | return 0; | |
1782 | ||
8cefc107 | 1783 | data_start(i, &iter_head, &off); |
241699cd | 1784 | /* some of this one + all after this one */ |
66531c65 AV |
1785 | npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); |
1786 | return min(npages, maxpages); | |
1787 | } | |
1788 | if (iov_iter_is_xarray(i)) { | |
e4f8df86 AV |
1789 | unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; |
1790 | int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); | |
66531c65 AV |
1791 | return min(npages, maxpages); |
1792 | } | |
1793 | return 0; | |
62a8067a | 1794 | } |
f67da30c | 1795 | EXPORT_SYMBOL(iov_iter_npages); |
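/*
 * Example (illustrative sketch, not taken from this file; MY_MAX_PAGES is
 * a placeholder cap): the usual reason to ask is to size a page array or
 * bio before pinning pages:
 *
 *	int npages = iov_iter_npages(iter, MY_MAX_PAGES);
 *	struct page **pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
 *
 *	if (!pages)
 *		return -ENOMEM;
 */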
4b8164b9 AV |
1796 | |
1797 | const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) | |
1798 | { | |
1799 | *new = *old; | |
00e23707 | 1800 | if (unlikely(iov_iter_is_pipe(new))) { |
241699cd AV |
1801 | WARN_ON(1); |
1802 | return NULL; | |
1803 | } | |
7ff50620 | 1804 | if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new))) |
9ea9ce04 | 1805 | return NULL; |
00e23707 | 1806 | if (iov_iter_is_bvec(new)) |
4b8164b9 AV |
1807 | return new->bvec = kmemdup(new->bvec, |
1808 | new->nr_segs * sizeof(struct bio_vec), | |
1809 | flags); | |
1810 | else | |
1811 | /* iovec and kvec have identical layout */ | |
1812 | return new->iov = kmemdup(new->iov, | |
1813 | new->nr_segs * sizeof(struct iovec), | |
1814 | flags); | |
1815 | } | |
1816 | EXPORT_SYMBOL(dup_iter); | |
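/*
 * Example (illustrative sketch, not taken from this file): dup_iter()
 * copies the iterator and kmemdup()s its iovec/kvec/bvec array, so the
 * clone remains usable after the original's array goes away.  The returned
 * pointer is the duplicated array and is what must eventually be freed;
 * NULL means the duplication failed or the iterator type is unsupported:
 *
 *	struct iov_iter copy;
 *	const void *dup = dup_iter(&copy, &orig, GFP_KERNEL);
 *
 *	if (!dup)
 *		return -ENOMEM;
 *	... use copy independently of orig ...
 *	kfree(dup);
 */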
bc917be8 | 1817 | |
bfdc5970 CH |
1818 | static int copy_compat_iovec_from_user(struct iovec *iov, |
1819 | const struct iovec __user *uvec, unsigned long nr_segs) | |
1820 | { | |
1821 | const struct compat_iovec __user *uiov = | |
1822 | (const struct compat_iovec __user *)uvec; | |
1823 | int ret = -EFAULT, i; | |
1824 | ||
a959a978 | 1825 | if (!user_access_begin(uiov, nr_segs * sizeof(*uiov))) |
bfdc5970 CH |
1826 | return -EFAULT; |
1827 | ||
1828 | for (i = 0; i < nr_segs; i++) { | |
1829 | compat_uptr_t buf; | |
1830 | compat_ssize_t len; | |
1831 | ||
1832 | unsafe_get_user(len, &uiov[i].iov_len, uaccess_end); | |
1833 | unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end); | |
1834 | ||
1835 | /* check for compat_size_t not fitting in compat_ssize_t .. */ | |
1836 | if (len < 0) { | |
1837 | ret = -EINVAL; | |
1838 | goto uaccess_end; | |
1839 | } | |
1840 | iov[i].iov_base = compat_ptr(buf); | |
1841 | iov[i].iov_len = len; | |
1842 | } | |
1843 | ||
1844 | ret = 0; | |
1845 | uaccess_end: | |
1846 | user_access_end(); | |
1847 | return ret; | |
1848 | } | |
1849 | ||
1850 | static int copy_iovec_from_user(struct iovec *iov, | |
1851 | const struct iovec __user *uvec, unsigned long nr_segs) | |
fb041b59 DL |
1852 | { |
1853 | unsigned long seg; | |
fb041b59 | 1854 | |
bfdc5970 CH |
1855 | if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec))) |
1856 | return -EFAULT; | |
1857 | for (seg = 0; seg < nr_segs; seg++) { | |
1858 | if ((ssize_t)iov[seg].iov_len < 0) | |
1859 | return -EINVAL; | |
fb041b59 DL |
1860 | } |
1861 | ||
bfdc5970 CH |
1862 | return 0; |
1863 | } | |
1864 | ||
1865 | struct iovec *iovec_from_user(const struct iovec __user *uvec, | |
1866 | unsigned long nr_segs, unsigned long fast_segs, | |
1867 | struct iovec *fast_iov, bool compat) | |
1868 | { | |
1869 | struct iovec *iov = fast_iov; | |
1870 | int ret; | |
1871 | ||
fb041b59 | 1872 | /* |
bfdc5970 CH |
1873 | * SuS says "The readv() function *may* fail if the iovcnt argument was | |
1874 | * less than or equal to 0, or greater than {IOV_MAX}." Linux has | |
1875 | * traditionally returned zero for zero segments, so... | |
fb041b59 | 1876 | */ |
bfdc5970 CH |
1877 | if (nr_segs == 0) |
1878 | return iov; | |
1879 | if (nr_segs > UIO_MAXIOV) | |
1880 | return ERR_PTR(-EINVAL); | |
fb041b59 DL |
1881 | if (nr_segs > fast_segs) { |
1882 | iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL); | |
bfdc5970 CH |
1883 | if (!iov) |
1884 | return ERR_PTR(-ENOMEM); | |
fb041b59 | 1885 | } |
bfdc5970 CH |
1886 | |
1887 | if (compat) | |
1888 | ret = copy_compat_iovec_from_user(iov, uvec, nr_segs); | |
1889 | else | |
1890 | ret = copy_iovec_from_user(iov, uvec, nr_segs); | |
1891 | if (ret) { | |
1892 | if (iov != fast_iov) | |
1893 | kfree(iov); | |
1894 | return ERR_PTR(ret); | |
1895 | } | |
1896 | ||
1897 | return iov; | |
1898 | } | |
1899 | ||
1900 | ssize_t __import_iovec(int type, const struct iovec __user *uvec, | |
1901 | unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, | |
1902 | struct iov_iter *i, bool compat) | |
1903 | { | |
1904 | ssize_t total_len = 0; | |
1905 | unsigned long seg; | |
1906 | struct iovec *iov; | |
1907 | ||
1908 | iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat); | |
1909 | if (IS_ERR(iov)) { | |
1910 | *iovp = NULL; | |
1911 | return PTR_ERR(iov); | |
fb041b59 DL |
1912 | } |
1913 | ||
1914 | /* | |
bfdc5970 CH |
1915 | * According to the Single Unix Specification we should return EINVAL if |
1916 | * an element length is < 0 when cast to ssize_t or if the total length | |
1917 | * would overflow the ssize_t return value of the system call. | |
fb041b59 DL |
1918 | * |
1919 | * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the | |
1920 | * overflow case. | |
1921 | */ | |
fb041b59 | 1922 | for (seg = 0; seg < nr_segs; seg++) { |
fb041b59 DL |
1923 | ssize_t len = (ssize_t)iov[seg].iov_len; |
1924 | ||
bfdc5970 CH |
1925 | if (!access_ok(iov[seg].iov_base, len)) { |
1926 | if (iov != *iovp) | |
1927 | kfree(iov); | |
1928 | *iovp = NULL; | |
1929 | return -EFAULT; | |
fb041b59 | 1930 | } |
bfdc5970 CH |
1931 | |
1932 | if (len > MAX_RW_COUNT - total_len) { | |
1933 | len = MAX_RW_COUNT - total_len; | |
fb041b59 DL |
1934 | iov[seg].iov_len = len; |
1935 | } | |
bfdc5970 | 1936 | total_len += len; |
fb041b59 | 1937 | } |
bfdc5970 CH |
1938 | |
1939 | iov_iter_init(i, type, iov, nr_segs, total_len); | |
1940 | if (iov == *iovp) | |
1941 | *iovp = NULL; | |
1942 | else | |
1943 | *iovp = iov; | |
1944 | return total_len; | |
fb041b59 DL |
1945 | } |
1946 | ||
ffecee4f VN |
1947 | /** |
1948 | * import_iovec() - Copy an array of &struct iovec from userspace | |
1949 | * into the kernel, check that it is valid, and initialize a new | |
1950 | * &struct iov_iter iterator to access it. | |
1951 | * | |
1952 | * @type: One of %READ or %WRITE. | |
bfdc5970 | 1953 | * @uvec: Pointer to the userspace array. |
ffecee4f VN |
1954 | * @nr_segs: Number of elements in userspace array. |
1955 | * @fast_segs: Number of elements in the array referenced by @iovp. | |
bfdc5970 | 1956 | * @iovp: (input and output parameter) Pointer to pointer to (usually small |
ffecee4f VN |
1957 | * on-stack) kernel array. |
1958 | * @i: Pointer to iterator that will be initialized on success. | |
1959 | * | |
1960 | * If the array pointed to by *@iovp is large enough to hold all @nr_segs, | |
1961 | * then this function places %NULL in *@iovp on return. Otherwise, a new | |
1962 | * array will be allocated and the result placed in *@iovp. This means that | |
1963 | * the caller may call kfree() on *@iovp regardless of whether the small | |
1964 | * on-stack array was used or not (and regardless of whether this function | |
1965 | * returns an error or not). | |
1966 | * | |
87e5e6da | 1967 | * Return: Negative error code on error, bytes imported on success |
ffecee4f | 1968 | */ |
bfdc5970 | 1969 | ssize_t import_iovec(int type, const struct iovec __user *uvec, |
bc917be8 | 1970 | unsigned nr_segs, unsigned fast_segs, |
bfdc5970 | 1971 | struct iovec **iovp, struct iov_iter *i) |
bc917be8 | 1972 | { |
89cd35c5 CH |
1973 | return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i, |
1974 | in_compat_syscall()); | |
bc917be8 AV |
1975 | } |
1976 | EXPORT_SYMBOL(import_iovec); | |
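/*
 * Example (illustrative sketch, not taken from this file; do_read_iter()
 * stands in for whatever actually consumes the iterator): the usual
 * calling pattern for a readv()-style syscall, relying on the kfree()
 * guarantee described in the kernel-doc above:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_read_iter(file, &iter);
 *	kfree(iov);	(a no-op when the on-stack array was used: iov is NULL)
 *	return ret;
 */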
1977 | ||
bc917be8 AV |
1978 | int import_single_range(int rw, void __user *buf, size_t len, |
1979 | struct iovec *iov, struct iov_iter *i) | |
1980 | { | |
1981 | if (len > MAX_RW_COUNT) | |
1982 | len = MAX_RW_COUNT; | |
96d4f267 | 1983 | if (unlikely(!access_ok(buf, len))) |
bc917be8 AV |
1984 | return -EFAULT; |
1985 | ||
1986 | iov->iov_base = buf; | |
1987 | iov->iov_len = len; | |
1988 | iov_iter_init(i, rw, iov, 1, len); | |
1989 | return 0; | |
1990 | } | |
e1267585 | 1991 | EXPORT_SYMBOL(import_single_range); |
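/*
 * Example (illustrative sketch, not taken from this file): the single-range
 * variant backs plain read()/write()-style paths where there is just one
 * user buffer; the length is clamped to MAX_RW_COUNT and the caller keeps
 * @iov alive for as long as the iterator is in use:
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *	int ret = import_single_range(WRITE, ubuf, len, &iov, &iter);
 *
 *	if (ret)
 *		return ret;
 *	... feed &iter to ->write_iter() or a similar consumer ...
 */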