Commit | Line | Data |
---|---|---|
457c8996 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
7999096f | 2 | #include <crypto/hash.h> |
4f18cd31 | 3 | #include <linux/export.h> |
2f8b5444 | 4 | #include <linux/bvec.h> |
4d0e9df5 | 5 | #include <linux/fault-inject-usercopy.h> |
4f18cd31 AV |
6 | #include <linux/uio.h> |
7 | #include <linux/pagemap.h> | |
28961998 | 8 | #include <linux/highmem.h> |
91f79c43 AV |
9 | #include <linux/slab.h> |
10 | #include <linux/vmalloc.h> | |
241699cd | 11 | #include <linux/splice.h> |
bfdc5970 | 12 | #include <linux/compat.h> |
a604ec7e | 13 | #include <net/checksum.h> |
d05f4435 | 14 | #include <linux/scatterlist.h> |
d0ef4c36 | 15 | #include <linux/instrumented.h> |
4f18cd31 | 16 | |
241699cd AV |
17 | #define PIPE_PARANOIA /* for now */ |
18 | ||
5c67aa90 | 19 | /* covers iovec and kvec alike */ |
a6e4ec7b | 20 | #define iterate_iovec(i, n, base, len, off, __p, STEP) { \ |
7baa5099 | 21 | size_t off = 0; \ |
a6e4ec7b | 22 | size_t skip = i->iov_offset; \ |
7a1bcb5d | 23 | do { \ |
7baa5099 AV |
24 | len = min(n, __p->iov_len - skip); \ |
25 | if (likely(len)) { \ | |
26 | base = __p->iov_base + skip; \ | |
27 | len -= (STEP); \ | |
28 | off += len; \ | |
29 | skip += len; \ | |
30 | n -= len; \ | |
7a1bcb5d AV |
31 | if (skip < __p->iov_len) \ |
32 | break; \ | |
33 | } \ | |
34 | __p++; \ | |
35 | skip = 0; \ | |
36 | } while (n); \ | |
a6e4ec7b | 37 | i->iov_offset = skip; \ |
7baa5099 | 38 | n = off; \ |
04a31165 AV |
39 | } |
40 | ||
a6e4ec7b | 41 | #define iterate_bvec(i, n, base, len, off, p, STEP) { \ |
7baa5099 | 42 | size_t off = 0; \ |
a6e4ec7b | 43 | unsigned skip = i->iov_offset; \ |
7491a2bf AV |
44 | while (n) { \ |
45 | unsigned offset = p->bv_offset + skip; \ | |
1b4fb5ff | 46 | unsigned left; \ |
21b56c84 AV |
47 | void *kaddr = kmap_local_page(p->bv_page + \ |
48 | offset / PAGE_SIZE); \ | |
7baa5099 | 49 | base = kaddr + offset % PAGE_SIZE; \ |
a6e4ec7b | 50 | len = min(min(n, (size_t)(p->bv_len - skip)), \ |
7491a2bf | 51 | (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \ |
1b4fb5ff | 52 | left = (STEP); \ |
21b56c84 | 53 | kunmap_local(kaddr); \ |
7baa5099 AV |
54 | len -= left; \ |
55 | off += len; \ | |
56 | skip += len; \ | |
7491a2bf AV |
57 | if (skip == p->bv_len) { \ |
58 | skip = 0; \ | |
59 | p++; \ | |
60 | } \ | |
7baa5099 | 61 | n -= len; \ |
1b4fb5ff AV |
62 | if (left) \ |
63 | break; \ | |
7491a2bf | 64 | } \ |
a6e4ec7b | 65 | i->iov_offset = skip; \ |
7baa5099 | 66 | n = off; \ |
04a31165 AV |
67 | } |
68 | ||
a6e4ec7b | 69 | #define iterate_xarray(i, n, base, len, __off, STEP) { \ |
1b4fb5ff | 70 | __label__ __out; \ |
622838f3 | 71 | size_t __off = 0; \ |
7ff50620 | 72 | struct page *head = NULL; \ |
a6e4ec7b | 73 | loff_t start = i->xarray_start + i->iov_offset; \ |
4b179e9a AV |
74 | unsigned offset = start % PAGE_SIZE; \ |
75 | pgoff_t index = start / PAGE_SIZE; \ | |
7ff50620 DH |
76 | int j; \ |
77 | \ | |
78 | XA_STATE(xas, i->xarray, index); \ | |
79 | \ | |
7baa5099 AV |
80 | rcu_read_lock(); \ |
81 | xas_for_each(&xas, head, ULONG_MAX) { \ | |
82 | unsigned left; \ | |
83 | if (xas_retry(&xas, head)) \ | |
84 | continue; \ | |
85 | if (WARN_ON(xa_is_value(head))) \ | |
86 | break; \ | |
87 | if (WARN_ON(PageHuge(head))) \ | |
88 | break; \ | |
7ff50620 | 89 | for (j = (head->index < index) ? index - head->index : 0; \ |
7baa5099 | 90 | j < thp_nr_pages(head); j++) { \ |
21b56c84 | 91 | void *kaddr = kmap_local_page(head + j); \ |
7baa5099 AV |
92 | base = kaddr + offset; \ |
93 | len = PAGE_SIZE - offset; \ | |
94 | len = min(n, len); \ | |
95 | left = (STEP); \ | |
96 | kunmap_local(kaddr); \ | |
97 | len -= left; \ | |
98 | __off += len; \ | |
99 | n -= len; \ | |
100 | if (left || n == 0) \ | |
101 | goto __out; \ | |
4b179e9a | 102 | offset = 0; \ |
7baa5099 | 103 | } \ |
7ff50620 | 104 | } \ |
1b4fb5ff | 105 | __out: \ |
7ff50620 | 106 | rcu_read_unlock(); \ |
a6e4ec7b | 107 | i->iov_offset += __off; \ |
622838f3 | 108 | n = __off; \ |
7ff50620 DH |
109 | } |
110 | ||
7baa5099 | 111 | #define __iterate_and_advance(i, n, base, len, off, I, K) { \ |
dd254f5a AV |
112 | if (unlikely(i->count < n)) \ |
113 | n = i->count; \ | |
f5da8354 | 114 | if (likely(n)) { \ |
28f38db7 | 115 | if (likely(iter_is_iovec(i))) { \ |
5c67aa90 | 116 | const struct iovec *iov = i->iov; \ |
7baa5099 AV |
117 | void __user *base; \ |
118 | size_t len; \ | |
119 | iterate_iovec(i, n, base, len, off, \ | |
a6e4ec7b | 120 | iov, (I)) \ |
28f38db7 AV |
121 | i->nr_segs -= iov - i->iov; \ |
122 | i->iov = iov; \ | |
123 | } else if (iov_iter_is_bvec(i)) { \ | |
1bdc76ae | 124 | const struct bio_vec *bvec = i->bvec; \ |
7baa5099 AV |
125 | void *base; \ |
126 | size_t len; \ | |
127 | iterate_bvec(i, n, base, len, off, \ | |
a6e4ec7b | 128 | bvec, (K)) \ |
7491a2bf AV |
129 | i->nr_segs -= bvec - i->bvec; \ |
130 | i->bvec = bvec; \ | |
28f38db7 | 131 | } else if (iov_iter_is_kvec(i)) { \ |
5c67aa90 | 132 | const struct kvec *kvec = i->kvec; \ |
7baa5099 AV |
133 | void *base; \ |
134 | size_t len; \ | |
135 | iterate_iovec(i, n, base, len, off, \ | |
a6e4ec7b | 136 | kvec, (K)) \ |
dd254f5a AV |
137 | i->nr_segs -= kvec - i->kvec; \ |
138 | i->kvec = kvec; \ | |
28f38db7 | 139 | } else if (iov_iter_is_xarray(i)) { \ |
7baa5099 AV |
140 | void *base; \ |
141 | size_t len; \ | |
142 | iterate_xarray(i, n, base, len, off, \ | |
a6e4ec7b | 143 | (K)) \ |
7ce2a91e | 144 | } \ |
dd254f5a | 145 | i->count -= n; \ |
7ce2a91e | 146 | } \ |
7ce2a91e | 147 | } |
7baa5099 AV |
148 | #define iterate_and_advance(i, n, base, len, off, I, K) \ |
149 | __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0)) | |
7ce2a91e | 150 | |
09fc68dc AV |
151 | static int copyout(void __user *to, const void *from, size_t n) |
152 | { | |
4d0e9df5 AL |
153 | if (should_fail_usercopy()) |
154 | return n; | |
96d4f267 | 155 | if (access_ok(to, n)) { |
d0ef4c36 | 156 | instrument_copy_to_user(to, from, n); |
09fc68dc AV |
157 | n = raw_copy_to_user(to, from, n); |
158 | } | |
159 | return n; | |
160 | } | |
161 | ||
162 | static int copyin(void *to, const void __user *from, size_t n) | |
163 | { | |
4d0e9df5 AL |
164 | if (should_fail_usercopy()) |
165 | return n; | |
96d4f267 | 166 | if (access_ok(from, n)) { |
d0ef4c36 | 167 | instrument_copy_from_user(to, from, n); |
09fc68dc AV |
168 | n = raw_copy_from_user(to, from, n); |
169 | } | |
170 | return n; | |
171 | } | |
172 | ||
62a8067a | 173 | static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes, |
4f18cd31 AV |
174 | struct iov_iter *i) |
175 | { | |
176 | size_t skip, copy, left, wanted; | |
177 | const struct iovec *iov; | |
178 | char __user *buf; | |
179 | void *kaddr, *from; | |
180 | ||
181 | if (unlikely(bytes > i->count)) | |
182 | bytes = i->count; | |
183 | ||
184 | if (unlikely(!bytes)) | |
185 | return 0; | |
186 | ||
09fc68dc | 187 | might_fault(); |
4f18cd31 AV |
188 | wanted = bytes; |
189 | iov = i->iov; | |
190 | skip = i->iov_offset; | |
191 | buf = iov->iov_base + skip; | |
192 | copy = min(bytes, iov->iov_len - skip); | |
193 | ||
bb523b40 | 194 | if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) { |
4f18cd31 AV |
195 | kaddr = kmap_atomic(page); |
196 | from = kaddr + offset; | |
197 | ||
198 | /* first chunk, usually the only one */ | |
09fc68dc | 199 | left = copyout(buf, from, copy); |
4f18cd31 AV |
200 | copy -= left; |
201 | skip += copy; | |
202 | from += copy; | |
203 | bytes -= copy; | |
204 | ||
205 | while (unlikely(!left && bytes)) { | |
206 | iov++; | |
207 | buf = iov->iov_base; | |
208 | copy = min(bytes, iov->iov_len); | |
09fc68dc | 209 | left = copyout(buf, from, copy); |
4f18cd31 AV |
210 | copy -= left; |
211 | skip = copy; | |
212 | from += copy; | |
213 | bytes -= copy; | |
214 | } | |
215 | if (likely(!bytes)) { | |
216 | kunmap_atomic(kaddr); | |
217 | goto done; | |
218 | } | |
219 | offset = from - kaddr; | |
220 | buf += copy; | |
221 | kunmap_atomic(kaddr); | |
222 | copy = min(bytes, iov->iov_len - skip); | |
223 | } | |
224 | /* Too bad - revert to non-atomic kmap */ | |
3fa6c507 | 225 | |
4f18cd31 AV |
226 | kaddr = kmap(page); |
227 | from = kaddr + offset; | |
09fc68dc | 228 | left = copyout(buf, from, copy); |
4f18cd31 AV |
229 | copy -= left; |
230 | skip += copy; | |
231 | from += copy; | |
232 | bytes -= copy; | |
233 | while (unlikely(!left && bytes)) { | |
234 | iov++; | |
235 | buf = iov->iov_base; | |
236 | copy = min(bytes, iov->iov_len); | |
09fc68dc | 237 | left = copyout(buf, from, copy); |
4f18cd31 AV |
238 | copy -= left; |
239 | skip = copy; | |
240 | from += copy; | |
241 | bytes -= copy; | |
242 | } | |
243 | kunmap(page); | |
3fa6c507 | 244 | |
4f18cd31 | 245 | done: |
81055e58 AV |
246 | if (skip == iov->iov_len) { |
247 | iov++; | |
248 | skip = 0; | |
249 | } | |
4f18cd31 AV |
250 | i->count -= wanted - bytes; |
251 | i->nr_segs -= iov - i->iov; | |
252 | i->iov = iov; | |
253 | i->iov_offset = skip; | |
254 | return wanted - bytes; | |
255 | } | |
4f18cd31 | 256 | |
62a8067a | 257 | static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes, |
f0d1bec9 AV |
258 | struct iov_iter *i) |
259 | { | |
260 | size_t skip, copy, left, wanted; | |
261 | const struct iovec *iov; | |
262 | char __user *buf; | |
263 | void *kaddr, *to; | |
264 | ||
265 | if (unlikely(bytes > i->count)) | |
266 | bytes = i->count; | |
267 | ||
268 | if (unlikely(!bytes)) | |
269 | return 0; | |
270 | ||
09fc68dc | 271 | might_fault(); |
f0d1bec9 AV |
272 | wanted = bytes; |
273 | iov = i->iov; | |
274 | skip = i->iov_offset; | |
275 | buf = iov->iov_base + skip; | |
276 | copy = min(bytes, iov->iov_len - skip); | |
277 | ||
bb523b40 | 278 | if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) { |
f0d1bec9 AV |
279 | kaddr = kmap_atomic(page); |
280 | to = kaddr + offset; | |
281 | ||
282 | /* first chunk, usually the only one */ | |
09fc68dc | 283 | left = copyin(to, buf, copy); |
f0d1bec9 AV |
284 | copy -= left; |
285 | skip += copy; | |
286 | to += copy; | |
287 | bytes -= copy; | |
288 | ||
289 | while (unlikely(!left && bytes)) { | |
290 | iov++; | |
291 | buf = iov->iov_base; | |
292 | copy = min(bytes, iov->iov_len); | |
09fc68dc | 293 | left = copyin(to, buf, copy); |
f0d1bec9 AV |
294 | copy -= left; |
295 | skip = copy; | |
296 | to += copy; | |
297 | bytes -= copy; | |
298 | } | |
299 | if (likely(!bytes)) { | |
300 | kunmap_atomic(kaddr); | |
301 | goto done; | |
302 | } | |
303 | offset = to - kaddr; | |
304 | buf += copy; | |
305 | kunmap_atomic(kaddr); | |
306 | copy = min(bytes, iov->iov_len - skip); | |
307 | } | |
308 | /* Too bad - revert to non-atomic kmap */ | |
3fa6c507 | 309 | |
f0d1bec9 AV |
310 | kaddr = kmap(page); |
311 | to = kaddr + offset; | |
09fc68dc | 312 | left = copyin(to, buf, copy); |
f0d1bec9 AV |
313 | copy -= left; |
314 | skip += copy; | |
315 | to += copy; | |
316 | bytes -= copy; | |
317 | while (unlikely(!left && bytes)) { | |
318 | iov++; | |
319 | buf = iov->iov_base; | |
320 | copy = min(bytes, iov->iov_len); | |
09fc68dc | 321 | left = copyin(to, buf, copy); |
f0d1bec9 AV |
322 | copy -= left; |
323 | skip = copy; | |
324 | to += copy; | |
325 | bytes -= copy; | |
326 | } | |
327 | kunmap(page); | |
3fa6c507 | 328 | |
f0d1bec9 | 329 | done: |
81055e58 AV |
330 | if (skip == iov->iov_len) { |
331 | iov++; | |
332 | skip = 0; | |
333 | } | |
f0d1bec9 AV |
334 | i->count -= wanted - bytes; |
335 | i->nr_segs -= iov - i->iov; | |
336 | i->iov = iov; | |
337 | i->iov_offset = skip; | |
338 | return wanted - bytes; | |
339 | } | |
f0d1bec9 | 340 | |
241699cd AV |
341 | #ifdef PIPE_PARANOIA |
342 | static bool sanity(const struct iov_iter *i) | |
343 | { | |
344 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
345 | unsigned int p_head = pipe->head; |
346 | unsigned int p_tail = pipe->tail; | |
347 | unsigned int p_mask = pipe->ring_size - 1; | |
348 | unsigned int p_occupancy = pipe_occupancy(p_head, p_tail); | |
349 | unsigned int i_head = i->head; | |
350 | unsigned int idx; | |
351 | ||
241699cd AV |
352 | if (i->iov_offset) { |
353 | struct pipe_buffer *p; | |
8cefc107 | 354 | if (unlikely(p_occupancy == 0)) |
241699cd | 355 | goto Bad; // pipe must be non-empty |
8cefc107 | 356 | if (unlikely(i_head != p_head - 1)) |
241699cd AV |
357 | goto Bad; // must be at the last buffer... |
358 | ||
8cefc107 | 359 | p = &pipe->bufs[i_head & p_mask]; |
241699cd AV |
360 | if (unlikely(p->offset + p->len != i->iov_offset)) |
361 | goto Bad; // ... at the end of segment | |
362 | } else { | |
8cefc107 | 363 | if (i_head != p_head) |
241699cd AV |
364 | goto Bad; // must be right after the last buffer |
365 | } | |
366 | return true; | |
367 | Bad: | |
8cefc107 DH |
368 | printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset); |
369 | printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n", | |
370 | p_head, p_tail, pipe->ring_size); | |
371 | for (idx = 0; idx < pipe->ring_size; idx++) | |
241699cd AV |
372 | printk(KERN_ERR "[%p %p %d %d]\n", |
373 | pipe->bufs[idx].ops, | |
374 | pipe->bufs[idx].page, | |
375 | pipe->bufs[idx].offset, | |
376 | pipe->bufs[idx].len); | |
377 | WARN_ON(1); | |
378 | return false; | |
379 | } | |
380 | #else | |
381 | #define sanity(i) true | |
382 | #endif | |
383 | ||
241699cd AV |
384 | static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes, |
385 | struct iov_iter *i) | |
386 | { | |
387 | struct pipe_inode_info *pipe = i->pipe; | |
388 | struct pipe_buffer *buf; | |
8cefc107 DH |
389 | unsigned int p_tail = pipe->tail; |
390 | unsigned int p_mask = pipe->ring_size - 1; | |
391 | unsigned int i_head = i->head; | |
241699cd | 392 | size_t off; |
241699cd AV |
393 | |
394 | if (unlikely(bytes > i->count)) | |
395 | bytes = i->count; | |
396 | ||
397 | if (unlikely(!bytes)) | |
398 | return 0; | |
399 | ||
400 | if (!sanity(i)) | |
401 | return 0; | |
402 | ||
403 | off = i->iov_offset; | |
8cefc107 | 404 | buf = &pipe->bufs[i_head & p_mask]; |
241699cd AV |
405 | if (off) { |
406 | if (offset == off && buf->page == page) { | |
407 | /* merge with the last one */ | |
408 | buf->len += bytes; | |
409 | i->iov_offset += bytes; | |
410 | goto out; | |
411 | } | |
8cefc107 DH |
412 | i_head++; |
413 | buf = &pipe->bufs[i_head & p_mask]; | |
241699cd | 414 | } |
6718b6f8 | 415 | if (pipe_full(i_head, p_tail, pipe->max_usage)) |
241699cd | 416 | return 0; |
8cefc107 | 417 | |
241699cd | 418 | buf->ops = &page_cache_pipe_buf_ops; |
8cefc107 DH |
419 | get_page(page); |
420 | buf->page = page; | |
241699cd AV |
421 | buf->offset = offset; |
422 | buf->len = bytes; | |
8cefc107 DH |
423 | |
424 | pipe->head = i_head + 1; | |
241699cd | 425 | i->iov_offset = offset + bytes; |
8cefc107 | 426 | i->head = i_head; |
241699cd AV |
427 | out: |
428 | i->count -= bytes; | |
429 | return bytes; | |
430 | } | |
431 | ||
171a0203 | 432 | /* |
a6294593 AG |
433 | * fault_in_iov_iter_readable - fault in iov iterator for reading |
434 | * @i: iterator | |
435 | * @size: maximum length | |
436 | * | |
171a0203 | 437 | * Fault in one or more iovecs of the given iov_iter, to a maximum length of |
a6294593 AG |
438 | * @size. For each iovec, fault in each page that constitutes the iovec. |
439 | * | |
440 | * Returns the number of bytes not faulted in (like copy_to_user() and | |
441 | * copy_from_user()). | |
171a0203 | 442 | * |
a6294593 | 443 | * Always returns 0 for non-userspace iterators. |
171a0203 | 444 | */ |
a6294593 | 445 | size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size) |
171a0203 | 446 | { |
0e8f0d67 | 447 | if (iter_is_iovec(i)) { |
a6294593 | 448 | size_t count = min(size, iov_iter_count(i)); |
8409a0d2 AV |
449 | const struct iovec *p; |
450 | size_t skip; | |
451 | ||
a6294593 AG |
452 | size -= count; |
453 | for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { | |
454 | size_t len = min(count, p->iov_len - skip); | |
455 | size_t ret; | |
8409a0d2 AV |
456 | |
457 | if (unlikely(!len)) | |
458 | continue; | |
a6294593 AG |
459 | ret = fault_in_readable(p->iov_base + skip, len); |
460 | count -= len - ret; | |
461 | if (ret) | |
462 | break; | |
8409a0d2 | 463 | } |
a6294593 | 464 | return count + size; |
171a0203 AA |
465 | } |
466 | return 0; | |
467 | } | |
a6294593 | 468 | EXPORT_SYMBOL(fault_in_iov_iter_readable); |
171a0203 | 469 | |
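fault_in_iov_iter_readable() is typically paired with a copy that runs under constraints which forbid taking page faults (for example while holding a page lock). Below is a minimal sketch of that pre-fault/retry pattern, assuming such a path; `do_copy_chunk()` is a hypothetical stand-in for the real locked copy step and is not part of this file.

```c
static ssize_t write_from_iter(struct iov_iter *i)
{
	ssize_t copied = 0;

	while (iov_iter_count(i)) {
		size_t chunk = min_t(size_t, iov_iter_count(i), PAGE_SIZE);

		/* fault_in_iov_iter_readable() returns bytes NOT faulted in */
		if (fault_in_iov_iter_readable(i, chunk) == chunk)
			return copied ? copied : -EFAULT;

		/* hypothetical helper: copies under the real lock and advances i */
		copied += do_copy_chunk(i, chunk);
	}
	return copied;
}
```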
aa563d7b | 470 | void iov_iter_init(struct iov_iter *i, unsigned int direction, |
71d8e532 AV |
471 | const struct iovec *iov, unsigned long nr_segs, |
472 | size_t count) | |
473 | { | |
aa563d7b | 474 | WARN_ON(direction & ~(READ | WRITE)); |
8cd54c1c AV |
475 | *i = (struct iov_iter) { |
476 | .iter_type = ITER_IOVEC, | |
477 | .data_source = direction, | |
478 | .iov = iov, | |
479 | .nr_segs = nr_segs, | |
480 | .iov_offset = 0, | |
481 | .count = count | |
482 | }; | |
71d8e532 AV |
483 | } |
484 | EXPORT_SYMBOL(iov_iter_init); | |
7b2c99d1 | 485 | |
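A minimal usage sketch, assuming a driver that wants to hand a kernel buffer back to a single user-supplied buffer: wrap the user pointer in a one-element iovec, initialise an ITER_IOVEC iterator in the READ direction (data flows towards userspace), and let copy_to_iter() do the checked copy. The function name and parameters are illustrative only.

```c
static ssize_t copy_result_to_user(void __user *ubuf, size_t len,
				   const void *kbuf)
{
	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
	struct iov_iter iter;

	iov_iter_init(&iter, READ, &iov, 1, len);
	/* returns the number of bytes copied; may be short on a bad page */
	return copy_to_iter(kbuf, len, &iter);
}
```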
241699cd AV |
486 | static inline bool allocated(struct pipe_buffer *buf) |
487 | { | |
488 | return buf->ops == &default_pipe_buf_ops; | |
489 | } | |
490 | ||
8cefc107 DH |
491 | static inline void data_start(const struct iov_iter *i, |
492 | unsigned int *iter_headp, size_t *offp) | |
241699cd | 493 | { |
8cefc107 DH |
494 | unsigned int p_mask = i->pipe->ring_size - 1; |
495 | unsigned int iter_head = i->head; | |
241699cd | 496 | size_t off = i->iov_offset; |
8cefc107 DH |
497 | |
498 | if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) || | |
499 | off == PAGE_SIZE)) { | |
500 | iter_head++; | |
241699cd AV |
501 | off = 0; |
502 | } | |
8cefc107 | 503 | *iter_headp = iter_head; |
241699cd AV |
504 | *offp = off; |
505 | } | |
506 | ||
507 | static size_t push_pipe(struct iov_iter *i, size_t size, | |
8cefc107 | 508 | int *iter_headp, size_t *offp) |
241699cd AV |
509 | { |
510 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
511 | unsigned int p_tail = pipe->tail; |
512 | unsigned int p_mask = pipe->ring_size - 1; | |
513 | unsigned int iter_head; | |
241699cd | 514 | size_t off; |
241699cd AV |
515 | ssize_t left; |
516 | ||
517 | if (unlikely(size > i->count)) | |
518 | size = i->count; | |
519 | if (unlikely(!size)) | |
520 | return 0; | |
521 | ||
522 | left = size; | |
8cefc107 DH |
523 | data_start(i, &iter_head, &off); |
524 | *iter_headp = iter_head; | |
241699cd AV |
525 | *offp = off; |
526 | if (off) { | |
527 | left -= PAGE_SIZE - off; | |
528 | if (left <= 0) { | |
8cefc107 | 529 | pipe->bufs[iter_head & p_mask].len += size; |
241699cd AV |
530 | return size; |
531 | } | |
8cefc107 DH |
532 | pipe->bufs[iter_head & p_mask].len = PAGE_SIZE; |
533 | iter_head++; | |
241699cd | 534 | } |
6718b6f8 | 535 | while (!pipe_full(iter_head, p_tail, pipe->max_usage)) { |
8cefc107 | 536 | struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask]; |
241699cd AV |
537 | struct page *page = alloc_page(GFP_USER); |
538 | if (!page) | |
539 | break; | |
8cefc107 DH |
540 | |
541 | buf->ops = &default_pipe_buf_ops; | |
542 | buf->page = page; | |
543 | buf->offset = 0; | |
544 | buf->len = min_t(ssize_t, left, PAGE_SIZE); | |
545 | left -= buf->len; | |
546 | iter_head++; | |
547 | pipe->head = iter_head; | |
548 | ||
549 | if (left == 0) | |
241699cd | 550 | return size; |
241699cd AV |
551 | } |
552 | return size - left; | |
553 | } | |
554 | ||
555 | static size_t copy_pipe_to_iter(const void *addr, size_t bytes, | |
556 | struct iov_iter *i) | |
557 | { | |
558 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
559 | unsigned int p_mask = pipe->ring_size - 1; |
560 | unsigned int i_head; | |
241699cd | 561 | size_t n, off; |
241699cd AV |
562 | |
563 | if (!sanity(i)) | |
564 | return 0; | |
565 | ||
8cefc107 | 566 | bytes = n = push_pipe(i, bytes, &i_head, &off); |
241699cd AV |
567 | if (unlikely(!n)) |
568 | return 0; | |
8cefc107 | 569 | do { |
241699cd | 570 | size_t chunk = min_t(size_t, n, PAGE_SIZE - off); |
8cefc107 DH |
571 | memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk); |
572 | i->head = i_head; | |
241699cd AV |
573 | i->iov_offset = off + chunk; |
574 | n -= chunk; | |
575 | addr += chunk; | |
8cefc107 DH |
576 | off = 0; |
577 | i_head++; | |
578 | } while (n); | |
241699cd AV |
579 | i->count -= bytes; |
580 | return bytes; | |
581 | } | |
582 | ||
f9152895 AV |
583 | static __wsum csum_and_memcpy(void *to, const void *from, size_t len, |
584 | __wsum sum, size_t off) | |
585 | { | |
cc44c17b | 586 | __wsum next = csum_partial_copy_nocheck(from, to, len); |
f9152895 AV |
587 | return csum_block_add(sum, next, off); |
588 | } | |
589 | ||
78e1f386 | 590 | static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, |
6852df12 | 591 | struct iov_iter *i, __wsum *sump) |
78e1f386 AV |
592 | { |
593 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 | 594 | unsigned int p_mask = pipe->ring_size - 1; |
6852df12 AV |
595 | __wsum sum = *sump; |
596 | size_t off = 0; | |
8cefc107 | 597 | unsigned int i_head; |
6852df12 | 598 | size_t r; |
78e1f386 AV |
599 | |
600 | if (!sanity(i)) | |
601 | return 0; | |
602 | ||
6852df12 AV |
603 | bytes = push_pipe(i, bytes, &i_head, &r); |
604 | while (bytes) { | |
605 | size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r); | |
2495bdcc | 606 | char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page); |
6852df12 | 607 | sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off); |
2495bdcc | 608 | kunmap_local(p); |
8cefc107 | 609 | i->head = i_head; |
78e1f386 | 610 | i->iov_offset = r + chunk; |
6852df12 | 611 | bytes -= chunk; |
78e1f386 | 612 | off += chunk; |
8cefc107 DH |
613 | r = 0; |
614 | i_head++; | |
6852df12 AV |
615 | } |
616 | *sump = sum; | |
617 | i->count -= off; | |
618 | return off; | |
78e1f386 AV |
619 | } |
620 | ||
aa28de27 | 621 | size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) |
62a8067a | 622 | { |
00e23707 | 623 | if (unlikely(iov_iter_is_pipe(i))) |
241699cd | 624 | return copy_pipe_to_iter(addr, bytes, i); |
09fc68dc AV |
625 | if (iter_is_iovec(i)) |
626 | might_fault(); | |
7baa5099 AV |
627 | iterate_and_advance(i, bytes, base, len, off, |
628 | copyout(base, addr + off, len), | |
629 | memcpy(base, addr + off, len) | |
3d4d3e48 | 630 | ) |
62a8067a | 631 | |
3d4d3e48 | 632 | return bytes; |
c35e0248 | 633 | } |
aa28de27 | 634 | EXPORT_SYMBOL(_copy_to_iter); |
c35e0248 | 635 | |
ec6347bb DW |
636 | #ifdef CONFIG_ARCH_HAS_COPY_MC |
637 | static int copyout_mc(void __user *to, const void *from, size_t n) | |
8780356e | 638 | { |
96d4f267 | 639 | if (access_ok(to, n)) { |
d0ef4c36 | 640 | instrument_copy_to_user(to, from, n); |
ec6347bb | 641 | n = copy_mc_to_user((__force void *) to, from, n); |
8780356e DW |
642 | } |
643 | return n; | |
644 | } | |
645 | ||
ec6347bb | 646 | static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, |
ca146f6f DW |
647 | struct iov_iter *i) |
648 | { | |
649 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
650 | unsigned int p_mask = pipe->ring_size - 1; |
651 | unsigned int i_head; | |
ca146f6f | 652 | size_t n, off, xfer = 0; |
ca146f6f DW |
653 | |
654 | if (!sanity(i)) | |
655 | return 0; | |
656 | ||
2a510a74 AV |
657 | n = push_pipe(i, bytes, &i_head, &off); |
658 | while (n) { | |
ca146f6f | 659 | size_t chunk = min_t(size_t, n, PAGE_SIZE - off); |
2a510a74 | 660 | char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page); |
ca146f6f | 661 | unsigned long rem; |
2a510a74 AV |
662 | rem = copy_mc_to_kernel(p + off, addr + xfer, chunk); |
663 | chunk -= rem; | |
664 | kunmap_local(p); | |
8cefc107 | 665 | i->head = i_head; |
2a510a74 AV |
666 | i->iov_offset = off + chunk; |
667 | xfer += chunk; | |
ca146f6f DW |
668 | if (rem) |
669 | break; | |
670 | n -= chunk; | |
8cefc107 DH |
671 | off = 0; |
672 | i_head++; | |
2a510a74 | 673 | } |
ca146f6f DW |
674 | i->count -= xfer; |
675 | return xfer; | |
676 | } | |
677 | ||
bf3eeb9b | 678 | /** |
ec6347bb | 679 | * _copy_mc_to_iter - copy to iter with source memory error exception handling |
bf3eeb9b DW |
680 | * @addr: source kernel address |
681 | * @bytes: total transfer length | |
44e55997 | 682 | * @i: destination iterator |
bf3eeb9b | 683 | * |
ec6347bb DW |
684 | * The pmem driver deploys this for the dax operation |
685 | * (dax_copy_to_iter()) for dax reads (bypass page-cache and the | |
686 | * block-layer). Upon #MC, read(2) aborts and returns EIO or the number | |
687 | * of bytes successfully copied. | |
bf3eeb9b | 688 | * |
ec6347bb | 689 | * The main differences between this and typical _copy_to_iter() are: |
bf3eeb9b DW |
690 | * |
691 | * * Typical tail/residue handling after a fault retries the copy | |
692 | * byte-by-byte until the fault happens again. Re-triggering machine | |
693 | * checks is potentially fatal so the implementation uses source | |
694 | * alignment and poison alignment assumptions to avoid re-triggering | |
695 | * hardware exceptions. | |
696 | * | |
697 | * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies. | |
698 | * Compare to copy_to_iter() where only ITER_IOVEC attempts might return | |
699 | * a short copy. | |
44e55997 RD |
700 | * |
701 | * Return: number of bytes copied (may be %0) | |
bf3eeb9b | 702 | */ |
ec6347bb | 703 | size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) |
8780356e | 704 | { |
00e23707 | 705 | if (unlikely(iov_iter_is_pipe(i))) |
ec6347bb | 706 | return copy_mc_pipe_to_iter(addr, bytes, i); |
8780356e DW |
707 | if (iter_is_iovec(i)) |
708 | might_fault(); | |
7baa5099 AV |
709 | __iterate_and_advance(i, bytes, base, len, off, |
710 | copyout_mc(base, addr + off, len), | |
711 | copy_mc_to_kernel(base, addr + off, len) | |
8780356e DW |
712 | ) |
713 | ||
714 | return bytes; | |
715 | } | |
ec6347bb DW |
716 | EXPORT_SYMBOL_GPL(_copy_mc_to_iter); |
717 | #endif /* CONFIG_ARCH_HAS_COPY_MC */ | |
8780356e | 718 | |
aa28de27 | 719 | size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) |
c35e0248 | 720 | { |
00e23707 | 721 | if (unlikely(iov_iter_is_pipe(i))) { |
241699cd AV |
722 | WARN_ON(1); |
723 | return 0; | |
724 | } | |
09fc68dc AV |
725 | if (iter_is_iovec(i)) |
726 | might_fault(); | |
7baa5099 AV |
727 | iterate_and_advance(i, bytes, base, len, off, |
728 | copyin(addr + off, base, len), | |
729 | memcpy(addr + off, base, len) | |
0dbca9a4 AV |
730 | ) |
731 | ||
732 | return bytes; | |
c35e0248 | 733 | } |
aa28de27 | 734 | EXPORT_SYMBOL(_copy_from_iter); |
c35e0248 | 735 | |
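For the opposite direction, a write-style path typically drains the caller's iterator into a kernel buffer with copy_from_iter() (the inline wrapper around _copy_from_iter()) and treats a short copy as -EFAULT. A hedged sketch with illustrative names:

```c
static ssize_t gather_write_payload(void *kbuf, size_t len,
				    struct iov_iter *from)
{
	size_t copied = copy_from_iter(kbuf, len, from);

	/* a short copy means part of the source buffer was unreadable */
	if (copied != len)
		return -EFAULT;
	return copied;
}
```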
aa28de27 | 736 | size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) |
aa583096 | 737 | { |
00e23707 | 738 | if (unlikely(iov_iter_is_pipe(i))) { |
241699cd AV |
739 | WARN_ON(1); |
740 | return 0; | |
741 | } | |
7baa5099 AV |
742 | iterate_and_advance(i, bytes, base, len, off, |
743 | __copy_from_user_inatomic_nocache(addr + off, base, len), | |
744 | memcpy(addr + off, base, len) | |
aa583096 AV |
745 | ) |
746 | ||
747 | return bytes; | |
748 | } | |
aa28de27 | 749 | EXPORT_SYMBOL(_copy_from_iter_nocache); |
aa583096 | 750 | |
0aed55af | 751 | #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE |
abd08d7d DW |
752 | /** |
753 | * _copy_from_iter_flushcache - write destination through cpu cache | |
754 | * @addr: destination kernel address | |
755 | * @bytes: total transfer length | |
44e55997 | 756 | * @i: source iterator |
abd08d7d DW |
757 | * |
758 | * The pmem driver arranges for filesystem-dax to use this facility via | |
759 | * dax_copy_from_iter() for ensuring that writes to persistent memory | |
760 | * are flushed through the CPU cache. It is differentiated from | |
761 | * _copy_from_iter_nocache() in that it guarantees all data is flushed | |
762 | * for all iterator types. _copy_from_iter_nocache() only attempts to | |
763 | * bypass the cache for the ITER_IOVEC case, and on some archs may use | |
764 | * instructions that strand dirty data in the cache. | |
44e55997 RD |
765 | * |
766 | * Return: number of bytes copied (may be %0) | |
abd08d7d | 767 | */ |
6a37e940 | 768 | size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) |
0aed55af | 769 | { |
00e23707 | 770 | if (unlikely(iov_iter_is_pipe(i))) { |
0aed55af DW |
771 | WARN_ON(1); |
772 | return 0; | |
773 | } | |
7baa5099 AV |
774 | iterate_and_advance(i, bytes, base, len, off, |
775 | __copy_from_user_flushcache(addr + off, base, len), | |
776 | memcpy_flushcache(addr + off, base, len) | |
0aed55af DW |
777 | ) |
778 | ||
779 | return bytes; | |
780 | } | |
6a37e940 | 781 | EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); |
0aed55af DW |
782 | #endif |
783 | ||
72e809ed AV |
784 | static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) |
785 | { | |
6daef95b ED |
786 | struct page *head; |
787 | size_t v = n + offset; | |
788 | ||
789 | /* | |
790 | * The general case needs to access the page order in order | |
791 | * to compute the page size. | |
792 | * However, we mostly deal with order-0 pages and thus can | |
793 | * avoid a possible cache line miss for requests that fit all | |
794 | * page orders. | |
795 | */ | |
796 | if (n <= v && v <= PAGE_SIZE) | |
797 | return true; | |
798 | ||
799 | head = compound_head(page); | |
800 | v += (page - head) << PAGE_SHIFT; | |
a90bcb86 | 801 | |
a50b854e | 802 | if (likely(n <= v && v <= (page_size(head)))) |
72e809ed AV |
803 | return true; |
804 | WARN_ON(1); | |
805 | return false; | |
806 | } | |
cbbd26b8 | 807 | |
08aa6479 | 808 | static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes, |
62a8067a AV |
809 | struct iov_iter *i) |
810 | { | |
28f38db7 AV |
811 | if (likely(iter_is_iovec(i))) |
812 | return copy_page_to_iter_iovec(page, offset, bytes, i); | |
813 | if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) { | |
c1d4d6a9 AV |
814 | void *kaddr = kmap_local_page(page); |
815 | size_t wanted = _copy_to_iter(kaddr + offset, bytes, i); | |
816 | kunmap_local(kaddr); | |
d271524a | 817 | return wanted; |
28f38db7 AV |
818 | } |
819 | if (iov_iter_is_pipe(i)) | |
820 | return copy_page_to_iter_pipe(page, offset, bytes, i); | |
821 | if (unlikely(iov_iter_is_discard(i))) { | |
a506abc7 AV |
822 | if (unlikely(i->count < bytes)) |
823 | bytes = i->count; | |
824 | i->count -= bytes; | |
9ea9ce04 | 825 | return bytes; |
28f38db7 AV |
826 | } |
827 | WARN_ON(1); | |
828 | return 0; | |
62a8067a | 829 | } |
08aa6479 AV |
830 | |
831 | size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, | |
832 | struct iov_iter *i) | |
833 | { | |
834 | size_t res = 0; | |
835 | if (unlikely(!page_copy_sane(page, offset, bytes))) | |
836 | return 0; | |
837 | page += offset / PAGE_SIZE; // first subpage | |
838 | offset %= PAGE_SIZE; | |
839 | while (1) { | |
840 | size_t n = __copy_page_to_iter(page, offset, | |
841 | min(bytes, (size_t)PAGE_SIZE - offset), i); | |
842 | res += n; | |
843 | bytes -= n; | |
844 | if (!bytes || !n) | |
845 | break; | |
846 | offset += n; | |
847 | if (offset == PAGE_SIZE) { | |
848 | page++; | |
849 | offset = 0; | |
850 | } | |
851 | } | |
852 | return res; | |
853 | } | |
62a8067a AV |
854 | EXPORT_SYMBOL(copy_page_to_iter); |
855 | ||
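A sketch of how a read path typically uses copy_page_to_iter(): given a page that backs file position @pos, copy the part of it the caller asked for into their iterator. offset_in_page() and the clamping are the usual boilerplate; the function name is illustrative.

```c
static size_t send_cached_page(struct page *page, loff_t pos, size_t len,
			       struct iov_iter *to)
{
	size_t offset = offset_in_page(pos);

	/* never cross the page boundary; the caller loops over pages */
	len = min(len, (size_t)PAGE_SIZE - offset);
	return copy_page_to_iter(page, offset, len, to);
}
```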
856 | size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, | |
857 | struct iov_iter *i) | |
858 | { | |
72e809ed AV |
859 | if (unlikely(!page_copy_sane(page, offset, bytes))) |
860 | return 0; | |
28f38db7 AV |
861 | if (likely(iter_is_iovec(i))) |
862 | return copy_page_from_iter_iovec(page, offset, bytes, i); | |
863 | if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) { | |
55ca375c | 864 | void *kaddr = kmap_local_page(page); |
aa28de27 | 865 | size_t wanted = _copy_from_iter(kaddr + offset, bytes, i); |
55ca375c | 866 | kunmap_local(kaddr); |
d271524a | 867 | return wanted; |
28f38db7 AV |
868 | } |
869 | WARN_ON(1); | |
870 | return 0; | |
62a8067a AV |
871 | } |
872 | EXPORT_SYMBOL(copy_page_from_iter); | |
873 | ||
241699cd AV |
874 | static size_t pipe_zero(size_t bytes, struct iov_iter *i) |
875 | { | |
876 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
877 | unsigned int p_mask = pipe->ring_size - 1; |
878 | unsigned int i_head; | |
241699cd | 879 | size_t n, off; |
241699cd AV |
880 | |
881 | if (!sanity(i)) | |
882 | return 0; | |
883 | ||
8cefc107 | 884 | bytes = n = push_pipe(i, bytes, &i_head, &off); |
241699cd AV |
885 | if (unlikely(!n)) |
886 | return 0; | |
887 | ||
8cefc107 | 888 | do { |
241699cd | 889 | size_t chunk = min_t(size_t, n, PAGE_SIZE - off); |
893839fd AV |
890 | char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page); |
891 | memset(p + off, 0, chunk); | |
892 | kunmap_local(p); | |
8cefc107 | 893 | i->head = i_head; |
241699cd AV |
894 | i->iov_offset = off + chunk; |
895 | n -= chunk; | |
8cefc107 DH |
896 | off = 0; |
897 | i_head++; | |
898 | } while (n); | |
241699cd AV |
899 | i->count -= bytes; |
900 | return bytes; | |
901 | } | |
902 | ||
c35e0248 MW |
903 | size_t iov_iter_zero(size_t bytes, struct iov_iter *i) |
904 | { | |
00e23707 | 905 | if (unlikely(iov_iter_is_pipe(i))) |
241699cd | 906 | return pipe_zero(bytes, i); |
7baa5099 AV |
907 | iterate_and_advance(i, bytes, base, len, count, |
908 | clear_user(base, len), | |
909 | memset(base, 0, len) | |
8442fa46 AV |
910 | ) |
911 | ||
912 | return bytes; | |
c35e0248 MW |
913 | } |
914 | EXPORT_SYMBOL(iov_iter_zero); | |
915 | ||
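iov_iter_zero() is commonly used to pad the tail of a short read (for example a hole or a range past EOF) so the caller still sees the full requested length. A minimal sketch with illustrative names:

```c
static size_t pad_short_read(struct iov_iter *to, size_t requested,
			     size_t actually_read)
{
	if (actually_read < requested)
		actually_read += iov_iter_zero(requested - actually_read, to);
	return actually_read;
}
```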
f0b65f39 AV |
916 | size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes, |
917 | struct iov_iter *i) | |
62a8067a | 918 | { |
04a31165 | 919 | char *kaddr = kmap_atomic(page), *p = kaddr + offset; |
72e809ed AV |
920 | if (unlikely(!page_copy_sane(page, offset, bytes))) { |
921 | kunmap_atomic(kaddr); | |
922 | return 0; | |
923 | } | |
9ea9ce04 | 924 | if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { |
241699cd AV |
925 | kunmap_atomic(kaddr); |
926 | WARN_ON(1); | |
927 | return 0; | |
928 | } | |
7baa5099 AV |
929 | iterate_and_advance(i, bytes, base, len, off, |
930 | copyin(p + off, base, len), | |
931 | memcpy(p + off, base, len) | |
04a31165 AV |
932 | ) |
933 | kunmap_atomic(kaddr); | |
934 | return bytes; | |
62a8067a | 935 | } |
f0b65f39 | 936 | EXPORT_SYMBOL(copy_page_from_iter_atomic); |
62a8067a | 937 | |
b9dc6f65 AV |
938 | static inline void pipe_truncate(struct iov_iter *i) |
939 | { | |
940 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
941 | unsigned int p_tail = pipe->tail; |
942 | unsigned int p_head = pipe->head; | |
943 | unsigned int p_mask = pipe->ring_size - 1; | |
944 | ||
945 | if (!pipe_empty(p_head, p_tail)) { | |
946 | struct pipe_buffer *buf; | |
947 | unsigned int i_head = i->head; | |
b9dc6f65 | 948 | size_t off = i->iov_offset; |
8cefc107 | 949 | |
b9dc6f65 | 950 | if (off) { |
8cefc107 DH |
951 | buf = &pipe->bufs[i_head & p_mask]; |
952 | buf->len = off - buf->offset; | |
953 | i_head++; | |
b9dc6f65 | 954 | } |
8cefc107 DH |
955 | while (p_head != i_head) { |
956 | p_head--; | |
957 | pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]); | |
b9dc6f65 | 958 | } |
8cefc107 DH |
959 | |
960 | pipe->head = p_head; | |
b9dc6f65 AV |
961 | } |
962 | } | |
963 | ||
241699cd AV |
964 | static void pipe_advance(struct iov_iter *i, size_t size) |
965 | { | |
966 | struct pipe_inode_info *pipe = i->pipe; | |
241699cd | 967 | if (size) { |
b9dc6f65 | 968 | struct pipe_buffer *buf; |
8cefc107 DH |
969 | unsigned int p_mask = pipe->ring_size - 1; |
970 | unsigned int i_head = i->head; | |
b9dc6f65 | 971 | size_t off = i->iov_offset, left = size; |
8cefc107 | 972 | |
241699cd | 973 | if (off) /* make it relative to the beginning of buffer */ |
8cefc107 | 974 | left += off - pipe->bufs[i_head & p_mask].offset; |
241699cd | 975 | while (1) { |
8cefc107 | 976 | buf = &pipe->bufs[i_head & p_mask]; |
b9dc6f65 | 977 | if (left <= buf->len) |
241699cd | 978 | break; |
b9dc6f65 | 979 | left -= buf->len; |
8cefc107 | 980 | i_head++; |
241699cd | 981 | } |
8cefc107 | 982 | i->head = i_head; |
b9dc6f65 | 983 | i->iov_offset = buf->offset + left; |
241699cd | 984 | } |
b9dc6f65 AV |
985 | i->count -= size; |
986 | /* ... and discard everything past that point */ | |
987 | pipe_truncate(i); | |
241699cd AV |
988 | } |
989 | ||
54c8195b PB |
990 | static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) |
991 | { | |
992 | struct bvec_iter bi; | |
993 | ||
994 | bi.bi_size = i->count; | |
995 | bi.bi_bvec_done = i->iov_offset; | |
996 | bi.bi_idx = 0; | |
997 | bvec_iter_advance(i->bvec, &bi, size); | |
998 | ||
999 | i->bvec += bi.bi_idx; | |
1000 | i->nr_segs -= bi.bi_idx; | |
1001 | i->count = bi.bi_size; | |
1002 | i->iov_offset = bi.bi_bvec_done; | |
1003 | } | |
1004 | ||
185ac4d4 AV |
1005 | static void iov_iter_iovec_advance(struct iov_iter *i, size_t size) |
1006 | { | |
1007 | const struct iovec *iov, *end; | |
1008 | ||
1009 | if (!i->count) | |
1010 | return; | |
1011 | i->count -= size; | |
1012 | ||
1013 | size += i->iov_offset; // from beginning of current segment | |
1014 | for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) { | |
1015 | if (likely(size < iov->iov_len)) | |
1016 | break; | |
1017 | size -= iov->iov_len; | |
1018 | } | |
1019 | i->iov_offset = size; | |
1020 | i->nr_segs -= iov - i->iov; | |
1021 | i->iov = iov; | |
1022 | } | |
1023 | ||
62a8067a AV |
1024 | void iov_iter_advance(struct iov_iter *i, size_t size) |
1025 | { | |
3b3fc051 AV |
1026 | if (unlikely(i->count < size)) |
1027 | size = i->count; | |
185ac4d4 AV |
1028 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) { |
1029 | /* iovec and kvec have identical layouts */ | |
1030 | iov_iter_iovec_advance(i, size); | |
1031 | } else if (iov_iter_is_bvec(i)) { | |
1032 | iov_iter_bvec_advance(i, size); | |
1033 | } else if (iov_iter_is_pipe(i)) { | |
241699cd | 1034 | pipe_advance(i, size); |
185ac4d4 | 1035 | } else if (unlikely(iov_iter_is_xarray(i))) { |
7ff50620 DH |
1036 | i->iov_offset += size; |
1037 | i->count -= size; | |
185ac4d4 AV |
1038 | } else if (iov_iter_is_discard(i)) { |
1039 | i->count -= size; | |
54c8195b | 1040 | } |
62a8067a AV |
1041 | } |
1042 | EXPORT_SYMBOL(iov_iter_advance); | |
1043 | ||
27c0e374 AV |
1044 | void iov_iter_revert(struct iov_iter *i, size_t unroll) |
1045 | { | |
1046 | if (!unroll) | |
1047 | return; | |
5b47d59a AV |
1048 | if (WARN_ON(unroll > MAX_RW_COUNT)) |
1049 | return; | |
27c0e374 | 1050 | i->count += unroll; |
00e23707 | 1051 | if (unlikely(iov_iter_is_pipe(i))) { |
27c0e374 | 1052 | struct pipe_inode_info *pipe = i->pipe; |
8cefc107 DH |
1053 | unsigned int p_mask = pipe->ring_size - 1; |
1054 | unsigned int i_head = i->head; | |
27c0e374 AV |
1055 | size_t off = i->iov_offset; |
1056 | while (1) { | |
8cefc107 DH |
1057 | struct pipe_buffer *b = &pipe->bufs[i_head & p_mask]; |
1058 | size_t n = off - b->offset; | |
27c0e374 | 1059 | if (unroll < n) { |
4fa55cef | 1060 | off -= unroll; |
27c0e374 AV |
1061 | break; |
1062 | } | |
1063 | unroll -= n; | |
8cefc107 | 1064 | if (!unroll && i_head == i->start_head) { |
27c0e374 AV |
1065 | off = 0; |
1066 | break; | |
1067 | } | |
8cefc107 DH |
1068 | i_head--; |
1069 | b = &pipe->bufs[i_head & p_mask]; | |
1070 | off = b->offset + b->len; | |
27c0e374 AV |
1071 | } |
1072 | i->iov_offset = off; | |
8cefc107 | 1073 | i->head = i_head; |
27c0e374 AV |
1074 | pipe_truncate(i); |
1075 | return; | |
1076 | } | |
9ea9ce04 DH |
1077 | if (unlikely(iov_iter_is_discard(i))) |
1078 | return; | |
27c0e374 AV |
1079 | if (unroll <= i->iov_offset) { |
1080 | i->iov_offset -= unroll; | |
1081 | return; | |
1082 | } | |
1083 | unroll -= i->iov_offset; | |
7ff50620 DH |
1084 | if (iov_iter_is_xarray(i)) { |
1085 | BUG(); /* We should never go beyond the start of the specified | |
1086 | * range since we might then be straying into pages that | |
1087 | * aren't pinned. | |
1088 | */ | |
1089 | } else if (iov_iter_is_bvec(i)) { | |
27c0e374 AV |
1090 | const struct bio_vec *bvec = i->bvec; |
1091 | while (1) { | |
1092 | size_t n = (--bvec)->bv_len; | |
1093 | i->nr_segs++; | |
1094 | if (unroll <= n) { | |
1095 | i->bvec = bvec; | |
1096 | i->iov_offset = n - unroll; | |
1097 | return; | |
1098 | } | |
1099 | unroll -= n; | |
1100 | } | |
1101 | } else { /* same logics for iovec and kvec */ | |
1102 | const struct iovec *iov = i->iov; | |
1103 | while (1) { | |
1104 | size_t n = (--iov)->iov_len; | |
1105 | i->nr_segs++; | |
1106 | if (unroll <= n) { | |
1107 | i->iov = iov; | |
1108 | i->iov_offset = n - unroll; | |
1109 | return; | |
1110 | } | |
1111 | unroll -= n; | |
1112 | } | |
1113 | } | |
1114 | } | |
1115 | EXPORT_SYMBOL(iov_iter_revert); | |
1116 | ||
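iov_iter_advance() and iov_iter_revert() are designed to pair up: a caller may consume iterator space optimistically and hand it back if the operation fails, leaving the iterator exactly where it started. A hedged sketch; do_something_that_can_fail() is a hypothetical placeholder.

```c
static int reserve_then_maybe_fail(struct iov_iter *i, size_t len)
{
	iov_iter_advance(i, len);

	if (do_something_that_can_fail()) {	/* hypothetical */
		/* undo the advance so the caller sees an untouched iterator */
		iov_iter_revert(i, len);
		return -EIO;
	}
	return 0;
}
```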
62a8067a AV |
1117 | /* |
1118 | * Return the count of just the current iov_iter segment. | |
1119 | */ | |
1120 | size_t iov_iter_single_seg_count(const struct iov_iter *i) | |
1121 | { | |
28f38db7 AV |
1122 | if (i->nr_segs > 1) { |
1123 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) | |
1124 | return min(i->count, i->iov->iov_len - i->iov_offset); | |
1125 | if (iov_iter_is_bvec(i)) | |
1126 | return min(i->count, i->bvec->bv_len - i->iov_offset); | |
1127 | } | |
1128 | return i->count; | |
62a8067a AV |
1129 | } |
1130 | EXPORT_SYMBOL(iov_iter_single_seg_count); | |
1131 | ||
aa563d7b | 1132 | void iov_iter_kvec(struct iov_iter *i, unsigned int direction, |
05afcb77 | 1133 | const struct kvec *kvec, unsigned long nr_segs, |
abb78f87 AV |
1134 | size_t count) |
1135 | { | |
aa563d7b | 1136 | WARN_ON(direction & ~(READ | WRITE)); |
8cd54c1c AV |
1137 | *i = (struct iov_iter){ |
1138 | .iter_type = ITER_KVEC, | |
1139 | .data_source = direction, | |
1140 | .kvec = kvec, | |
1141 | .nr_segs = nr_segs, | |
1142 | .iov_offset = 0, | |
1143 | .count = count | |
1144 | }; | |
abb78f87 AV |
1145 | } |
1146 | EXPORT_SYMBOL(iov_iter_kvec); | |
1147 | ||
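An ITER_KVEC iterator describes purely kernel memory. A minimal sketch, gathering two kernel buffers into one WRITE-direction (data source) iterator and draining it with copy_from_iter(); names are illustrative.

```c
static size_t gather_two_buffers(void *dst, void *a, size_t alen,
				 void *b, size_t blen)
{
	struct kvec vec[2] = {
		{ .iov_base = a, .iov_len = alen },
		{ .iov_base = b, .iov_len = blen },
	};
	struct iov_iter iter;

	iov_iter_kvec(&iter, WRITE, vec, 2, alen + blen);
	return copy_from_iter(dst, alen + blen, &iter);
}
```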
aa563d7b | 1148 | void iov_iter_bvec(struct iov_iter *i, unsigned int direction, |
05afcb77 AV |
1149 | const struct bio_vec *bvec, unsigned long nr_segs, |
1150 | size_t count) | |
1151 | { | |
aa563d7b | 1152 | WARN_ON(direction & ~(READ | WRITE)); |
8cd54c1c AV |
1153 | *i = (struct iov_iter){ |
1154 | .iter_type = ITER_BVEC, | |
1155 | .data_source = direction, | |
1156 | .bvec = bvec, | |
1157 | .nr_segs = nr_segs, | |
1158 | .iov_offset = 0, | |
1159 | .count = count | |
1160 | }; | |
05afcb77 AV |
1161 | } |
1162 | EXPORT_SYMBOL(iov_iter_bvec); | |
1163 | ||
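An ITER_BVEC iterator walks pages directly, which is what block-layer and loopback-style paths use. A minimal sketch presenting a single page (assuming len <= PAGE_SIZE) as a READ-direction destination iterator; names are illustrative.

```c
static size_t copy_into_page(struct page *page, const void *src, size_t len)
{
	struct bio_vec bv = {
		.bv_page   = page,
		.bv_offset = 0,
		.bv_len    = len,
	};
	struct iov_iter iter;

	iov_iter_bvec(&iter, READ, &bv, 1, len);
	return copy_to_iter(src, len, &iter);
}
```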
aa563d7b | 1164 | void iov_iter_pipe(struct iov_iter *i, unsigned int direction, |
241699cd AV |
1165 | struct pipe_inode_info *pipe, |
1166 | size_t count) | |
1167 | { | |
aa563d7b | 1168 | BUG_ON(direction != READ); |
8cefc107 | 1169 | WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size)); |
8cd54c1c AV |
1170 | *i = (struct iov_iter){ |
1171 | .iter_type = ITER_PIPE, | |
1172 | .data_source = false, | |
1173 | .pipe = pipe, | |
1174 | .head = pipe->head, | |
1175 | .start_head = pipe->head, | |
1176 | .iov_offset = 0, | |
1177 | .count = count | |
1178 | }; | |
241699cd AV |
1179 | } |
1180 | EXPORT_SYMBOL(iov_iter_pipe); | |
1181 | ||
7ff50620 DH |
1182 | /** |
1183 | * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray | |
1184 | * @i: The iterator to initialise. | |
1185 | * @direction: The direction of the transfer. | |
1186 | * @xarray: The xarray to access. | |
1187 | * @start: The start file position. | |
1188 | * @count: The size of the I/O buffer in bytes. | |
1189 | * | |
1190 | * Set up an I/O iterator to either draw data out of the pages attached to an | |
1191 | * inode or to inject data into those pages. The caller *must* prevent the | |
1192 | * pages from evaporating, either by taking a ref on them or by locking | |
1193 | * them. | |
1194 | */ | |
1195 | void iov_iter_xarray(struct iov_iter *i, unsigned int direction, | |
1196 | struct xarray *xarray, loff_t start, size_t count) | |
1197 | { | |
1198 | BUG_ON(direction & ~1); | |
8cd54c1c AV |
1199 | *i = (struct iov_iter) { |
1200 | .iter_type = ITER_XARRAY, | |
1201 | .data_source = direction, | |
1202 | .xarray = xarray, | |
1203 | .xarray_start = start, | |
1204 | .count = count, | |
1205 | .iov_offset = 0 | |
1206 | }; | |
7ff50620 DH |
1207 | } |
1208 | EXPORT_SYMBOL(iov_iter_xarray); | |
1209 | ||
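A sketch of the intended use, assuming the caller has already pinned or locked every page in the range (as the comment above requires): expose part of an inode's page cache (mapping->i_pages) as a WRITE-direction source iterator and copy it out. Names are illustrative.

```c
static size_t read_cached_range(struct address_space *mapping,
				loff_t pos, size_t len, void *dst)
{
	struct iov_iter iter;

	/* caller guarantees the pages in [pos, pos + len) cannot go away */
	iov_iter_xarray(&iter, WRITE, &mapping->i_pages, pos, len);
	return copy_from_iter(dst, len, &iter);
}
```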
9ea9ce04 DH |
1210 | /** |
1211 | * iov_iter_discard - Initialise an I/O iterator that discards data | |
1212 | * @i: The iterator to initialise. | |
1213 | * @direction: The direction of the transfer. | |
1214 | * @count: The size of the I/O buffer in bytes. | |
1215 | * | |
1216 | * Set up an I/O iterator that just discards everything that's written to it. | |
1217 | * It's only available as a READ iterator. | |
1218 | */ | |
1219 | void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) | |
1220 | { | |
1221 | BUG_ON(direction != READ); | |
8cd54c1c AV |
1222 | *i = (struct iov_iter){ |
1223 | .iter_type = ITER_DISCARD, | |
1224 | .data_source = false, | |
1225 | .count = count, | |
1226 | .iov_offset = 0 | |
1227 | }; | |
9ea9ce04 DH |
1228 | } |
1229 | EXPORT_SYMBOL(iov_iter_discard); | |
1230 | ||
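A minimal sketch of a discard iterator in action: it accepts copies like any other destination but simply drops the data and decrements its count, which is handy for skipping over unwanted bytes. Names are illustrative.

```c
static void skip_payload(const void *buf, size_t len)
{
	struct iov_iter iter;

	iov_iter_discard(&iter, READ, len);
	copy_to_iter(buf, len, &iter);	/* advances the iterator, data is dropped */
}
```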
9221d2e3 | 1231 | static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) |
62a8067a | 1232 | { |
04a31165 AV |
1233 | unsigned long res = 0; |
1234 | size_t size = i->count; | |
9221d2e3 AV |
1235 | size_t skip = i->iov_offset; |
1236 | unsigned k; | |
1237 | ||
1238 | for (k = 0; k < i->nr_segs; k++, skip = 0) { | |
1239 | size_t len = i->iov[k].iov_len - skip; | |
1240 | if (len) { | |
1241 | res |= (unsigned long)i->iov[k].iov_base + skip; | |
1242 | if (len > size) | |
1243 | len = size; | |
1244 | res |= len; | |
1245 | size -= len; | |
1246 | if (!size) | |
1247 | break; | |
1248 | } | |
1249 | } | |
1250 | return res; | |
1251 | } | |
04a31165 | 1252 | |
9221d2e3 AV |
1253 | static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i) |
1254 | { | |
1255 | unsigned res = 0; | |
1256 | size_t size = i->count; | |
1257 | unsigned skip = i->iov_offset; | |
1258 | unsigned k; | |
1259 | ||
1260 | for (k = 0; k < i->nr_segs; k++, skip = 0) { | |
1261 | size_t len = i->bvec[k].bv_len - skip; | |
1262 | res |= (unsigned long)i->bvec[k].bv_offset + skip; | |
1263 | if (len > size) | |
1264 | len = size; | |
1265 | res |= len; | |
1266 | size -= len; | |
1267 | if (!size) | |
1268 | break; | |
1269 | } | |
1270 | return res; | |
1271 | } | |
1272 | ||
1273 | unsigned long iov_iter_alignment(const struct iov_iter *i) | |
1274 | { | |
1275 | /* iovec and kvec have identical layouts */ | |
1276 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) | |
1277 | return iov_iter_alignment_iovec(i); | |
1278 | ||
1279 | if (iov_iter_is_bvec(i)) | |
1280 | return iov_iter_alignment_bvec(i); | |
1281 | ||
1282 | if (iov_iter_is_pipe(i)) { | |
e0ff126e | 1283 | unsigned int p_mask = i->pipe->ring_size - 1; |
9221d2e3 | 1284 | size_t size = i->count; |
e0ff126e | 1285 | |
8cefc107 | 1286 | if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask])) |
241699cd AV |
1287 | return size | i->iov_offset; |
1288 | return size; | |
1289 | } | |
9221d2e3 AV |
1290 | |
1291 | if (iov_iter_is_xarray(i)) | |
3d14ec1f | 1292 | return (i->xarray_start + i->iov_offset) | i->count; |
9221d2e3 AV |
1293 | |
1294 | return 0; | |
62a8067a AV |
1295 | } |
1296 | EXPORT_SYMBOL(iov_iter_alignment); | |
1297 | ||
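Direct-I/O paths use the combined address/length alignment mask returned by iov_iter_alignment() to reject buffers the device cannot handle. A hedged sketch against a block device's logical block size; the function name is illustrative.

```c
static int check_dio_alignment(struct block_device *bdev,
			       const struct iov_iter *iter)
{
	unsigned int mask = bdev_logical_block_size(bdev) - 1;

	/* any misaligned segment start or length sets a low bit in the result */
	if (iov_iter_alignment(iter) & mask)
		return -EINVAL;
	return 0;
}
```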
357f435d AV |
1298 | unsigned long iov_iter_gap_alignment(const struct iov_iter *i) |
1299 | { | |
33844e66 | 1300 | unsigned long res = 0; |
610c7a71 | 1301 | unsigned long v = 0; |
357f435d | 1302 | size_t size = i->count; |
610c7a71 | 1303 | unsigned k; |
357f435d | 1304 | |
610c7a71 | 1305 | if (WARN_ON(!iter_is_iovec(i))) |
241699cd | 1306 | return ~0U; |
241699cd | 1307 | |
610c7a71 AV |
1308 | for (k = 0; k < i->nr_segs; k++) { |
1309 | if (i->iov[k].iov_len) { | |
1310 | unsigned long base = (unsigned long)i->iov[k].iov_base; | |
1311 | if (v) // if not the first one | |
1312 | res |= base | v; // this start | previous end | |
1313 | v = base + i->iov[k].iov_len; | |
1314 | if (size <= i->iov[k].iov_len) | |
1315 | break; | |
1316 | size -= i->iov[k].iov_len; | |
1317 | } | |
1318 | } | |
33844e66 | 1319 | return res; |
357f435d AV |
1320 | } |
1321 | EXPORT_SYMBOL(iov_iter_gap_alignment); | |
1322 | ||
e76b6312 | 1323 | static inline ssize_t __pipe_get_pages(struct iov_iter *i, |
241699cd AV |
1324 | size_t maxsize, |
1325 | struct page **pages, | |
8cefc107 | 1326 | int iter_head, |
241699cd AV |
1327 | size_t *start) |
1328 | { | |
1329 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
1330 | unsigned int p_mask = pipe->ring_size - 1; |
1331 | ssize_t n = push_pipe(i, maxsize, &iter_head, start); | |
241699cd AV |
1332 | if (!n) |
1333 | return -EFAULT; | |
1334 | ||
1335 | maxsize = n; | |
1336 | n += *start; | |
1689c73a | 1337 | while (n > 0) { |
8cefc107 DH |
1338 | get_page(*pages++ = pipe->bufs[iter_head & p_mask].page); |
1339 | iter_head++; | |
241699cd AV |
1340 | n -= PAGE_SIZE; |
1341 | } | |
1342 | ||
1343 | return maxsize; | |
1344 | } | |
1345 | ||
1346 | static ssize_t pipe_get_pages(struct iov_iter *i, | |
1347 | struct page **pages, size_t maxsize, unsigned maxpages, | |
1348 | size_t *start) | |
1349 | { | |
8cefc107 | 1350 | unsigned int iter_head, npages; |
241699cd | 1351 | size_t capacity; |
241699cd AV |
1352 | |
1353 | if (!sanity(i)) | |
1354 | return -EFAULT; | |
1355 | ||
8cefc107 DH |
1356 | data_start(i, &iter_head, start); |
1357 | /* Amount of free space: some of this one + all after this one */ | |
1358 | npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); | |
1359 | capacity = min(npages, maxpages) * PAGE_SIZE - *start; | |
241699cd | 1360 | |
8cefc107 | 1361 | return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start); |
241699cd AV |
1362 | } |
1363 | ||
7ff50620 DH |
1364 | static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, |
1365 | pgoff_t index, unsigned int nr_pages) | |
1366 | { | |
1367 | XA_STATE(xas, xa, index); | |
1368 | struct page *page; | |
1369 | unsigned int ret = 0; | |
1370 | ||
1371 | rcu_read_lock(); | |
1372 | for (page = xas_load(&xas); page; page = xas_next(&xas)) { | |
1373 | if (xas_retry(&xas, page)) | |
1374 | continue; | |
1375 | ||
1376 | /* Has the page moved or been split? */ | |
1377 | if (unlikely(page != xas_reload(&xas))) { | |
1378 | xas_reset(&xas); | |
1379 | continue; | |
1380 | } | |
1381 | ||
1382 | pages[ret] = find_subpage(page, xas.xa_index); | |
1383 | get_page(pages[ret]); | |
1384 | if (++ret == nr_pages) | |
1385 | break; | |
1386 | } | |
1387 | rcu_read_unlock(); | |
1388 | return ret; | |
1389 | } | |
1390 | ||
1391 | static ssize_t iter_xarray_get_pages(struct iov_iter *i, | |
1392 | struct page **pages, size_t maxsize, | |
1393 | unsigned maxpages, size_t *_start_offset) | |
1394 | { | |
1395 | unsigned nr, offset; | |
1396 | pgoff_t index, count; | |
1397 | size_t size = maxsize, actual; | |
1398 | loff_t pos; | |
1399 | ||
1400 | if (!size || !maxpages) | |
1401 | return 0; | |
1402 | ||
1403 | pos = i->xarray_start + i->iov_offset; | |
1404 | index = pos >> PAGE_SHIFT; | |
1405 | offset = pos & ~PAGE_MASK; | |
1406 | *_start_offset = offset; | |
1407 | ||
1408 | count = 1; | |
1409 | if (size > PAGE_SIZE - offset) { | |
1410 | size -= PAGE_SIZE - offset; | |
1411 | count += size >> PAGE_SHIFT; | |
1412 | size &= ~PAGE_MASK; | |
1413 | if (size) | |
1414 | count++; | |
1415 | } | |
1416 | ||
1417 | if (count > maxpages) | |
1418 | count = maxpages; | |
1419 | ||
1420 | nr = iter_xarray_populate_pages(pages, i->xarray, index, count); | |
1421 | if (nr == 0) | |
1422 | return 0; | |
1423 | ||
1424 | actual = PAGE_SIZE * nr; | |
1425 | actual -= offset; | |
1426 | if (nr == count && size > 0) { | |
1427 | unsigned last_offset = (nr > 1) ? 0 : offset; | |
1428 | actual -= PAGE_SIZE - (last_offset + size); | |
1429 | } | |
1430 | return actual; | |
1431 | } | |
1432 | ||
3d671ca6 AV |
1433 | /* must only be called on a non-empty ITER_IOVEC iterator */ |
1434 | static unsigned long first_iovec_segment(const struct iov_iter *i, | |
1435 | size_t *size, size_t *start, | |
1436 | size_t maxsize, unsigned maxpages) | |
1437 | { | |
1438 | size_t skip; | |
1439 | long k; | |
1440 | ||
1441 | for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) { | |
1442 | unsigned long addr = (unsigned long)i->iov[k].iov_base + skip; | |
1443 | size_t len = i->iov[k].iov_len - skip; | |
1444 | ||
1445 | if (unlikely(!len)) | |
1446 | continue; | |
1447 | if (len > maxsize) | |
1448 | len = maxsize; | |
1449 | len += (*start = addr % PAGE_SIZE); | |
1450 | if (len > maxpages * PAGE_SIZE) | |
1451 | len = maxpages * PAGE_SIZE; | |
1452 | *size = len; | |
1453 | return addr & PAGE_MASK; | |
1454 | } | |
1455 | BUG(); // if it had been empty, we wouldn't get called | |
1456 | } | |
1457 | ||
1458 | /* must only be called on a non-empty ITER_BVEC iterator */ | |
1459 | static struct page *first_bvec_segment(const struct iov_iter *i, | |
1460 | size_t *size, size_t *start, | |
1461 | size_t maxsize, unsigned maxpages) | |
1462 | { | |
1463 | struct page *page; | |
1464 | size_t skip = i->iov_offset, len; | |
1465 | ||
1466 | len = i->bvec->bv_len - skip; | |
1467 | if (len > maxsize) | |
1468 | len = maxsize; | |
1469 | skip += i->bvec->bv_offset; | |
1470 | page = i->bvec->bv_page + skip / PAGE_SIZE; | |
1471 | len += (*start = skip % PAGE_SIZE); | |
1472 | if (len > maxpages * PAGE_SIZE) | |
1473 | len = maxpages * PAGE_SIZE; | |
1474 | *size = len; | |
1475 | return page; | |
1476 | } | |
1477 | ||
62a8067a | 1478 | ssize_t iov_iter_get_pages(struct iov_iter *i, |
2c80929c | 1479 | struct page **pages, size_t maxsize, unsigned maxpages, |
62a8067a AV |
1480 | size_t *start) |
1481 | { | |
3d671ca6 AV |
1482 | size_t len; |
1483 | int n, res; | |
1484 | ||
e5393fae AV |
1485 | if (maxsize > i->count) |
1486 | maxsize = i->count; | |
3d671ca6 AV |
1487 | if (!maxsize) |
1488 | return 0; | |
e5393fae | 1489 | |
3d671ca6 AV |
1490 | if (likely(iter_is_iovec(i))) { |
1491 | unsigned long addr; | |
e5393fae | 1492 | |
3d671ca6 | 1493 | addr = first_iovec_segment(i, &len, start, maxsize, maxpages); |
e5393fae | 1494 | n = DIV_ROUND_UP(len, PAGE_SIZE); |
73b0140b IW |
1495 | res = get_user_pages_fast(addr, n, |
1496 | iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, | |
1497 | pages); | |
814a6674 | 1498 | if (unlikely(res <= 0)) |
e5393fae AV |
1499 | return res; |
1500 | return (res == n ? len : res * PAGE_SIZE) - *start; | |
3d671ca6 AV |
1501 | } |
1502 | if (iov_iter_is_bvec(i)) { | |
1503 | struct page *page; | |
1504 | ||
1505 | page = first_bvec_segment(i, &len, start, maxsize, maxpages); | |
1506 | n = DIV_ROUND_UP(len, PAGE_SIZE); | |
1507 | while (n--) | |
1508 | get_page(*pages++ = page++); | |
1509 | return len - *start; | |
1510 | } | |
1511 | if (iov_iter_is_pipe(i)) | |
1512 | return pipe_get_pages(i, pages, maxsize, maxpages, start); | |
1513 | if (iov_iter_is_xarray(i)) | |
1514 | return iter_xarray_get_pages(i, pages, maxsize, maxpages, start); | |
1515 | return -EFAULT; | |
62a8067a AV |
1516 | } |
1517 | EXPORT_SYMBOL(iov_iter_get_pages); | |
1518 | ||
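A sketch of the usual pin/use/release cycle, assuming up to 16 pages at a time. Note that iov_iter_get_pages() does not advance the iterator; callers normally iov_iter_advance() by the returned byte count once the pages have been consumed. Names are illustrative.

```c
static ssize_t pin_and_release(struct iov_iter *i)
{
	struct page *pages[16];
	size_t start;
	ssize_t bytes;
	int n, k;

	bytes = iov_iter_get_pages(i, pages, 16 * PAGE_SIZE, 16, &start);
	if (bytes <= 0)
		return bytes;

	n = DIV_ROUND_UP(bytes + start, PAGE_SIZE);
	/* ... hand pages[0..n-1] (data starts at @start in the first) to a bio ... */
	for (k = 0; k < n; k++)
		put_page(pages[k]);

	iov_iter_advance(i, bytes);
	return bytes;
}
```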
1b17f1f2 AV |
1519 | static struct page **get_pages_array(size_t n) |
1520 | { | |
752ade68 | 1521 | return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL); |
1b17f1f2 AV |
1522 | } |
1523 | ||
241699cd AV |
1524 | static ssize_t pipe_get_pages_alloc(struct iov_iter *i, |
1525 | struct page ***pages, size_t maxsize, | |
1526 | size_t *start) | |
1527 | { | |
1528 | struct page **p; | |
8cefc107 | 1529 | unsigned int iter_head, npages; |
d7760d63 | 1530 | ssize_t n; |
241699cd AV |
1531 | |
1532 | if (!sanity(i)) | |
1533 | return -EFAULT; | |
1534 | ||
8cefc107 DH |
1535 | data_start(i, &iter_head, start); |
1536 | /* Amount of free space: some of this one + all after this one */ | |
1537 | npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); | |
241699cd AV |
1538 | n = npages * PAGE_SIZE - *start; |
1539 | if (maxsize > n) | |
1540 | maxsize = n; | |
1541 | else | |
1542 | npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); | |
1543 | p = get_pages_array(npages); | |
1544 | if (!p) | |
1545 | return -ENOMEM; | |
8cefc107 | 1546 | n = __pipe_get_pages(i, maxsize, p, iter_head, start); |
241699cd AV |
1547 | if (n > 0) |
1548 | *pages = p; | |
1549 | else | |
1550 | kvfree(p); | |
1551 | return n; | |
1552 | } | |
1553 | ||
7ff50620 DH |
1554 | static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i, |
1555 | struct page ***pages, size_t maxsize, | |
1556 | size_t *_start_offset) | |
1557 | { | |
1558 | struct page **p; | |
1559 | unsigned nr, offset; | |
1560 | pgoff_t index, count; | |
1561 | size_t size = maxsize, actual; | |
1562 | loff_t pos; | |
1563 | ||
1564 | if (!size) | |
1565 | return 0; | |
1566 | ||
1567 | pos = i->xarray_start + i->iov_offset; | |
1568 | index = pos >> PAGE_SHIFT; | |
1569 | offset = pos & ~PAGE_MASK; | |
1570 | *_start_offset = offset; | |
1571 | ||
1572 | count = 1; | |
1573 | if (size > PAGE_SIZE - offset) { | |
1574 | size -= PAGE_SIZE - offset; | |
1575 | count += size >> PAGE_SHIFT; | |
1576 | size &= ~PAGE_MASK; | |
1577 | if (size) | |
1578 | count++; | |
1579 | } | |
1580 | ||
1581 | p = get_pages_array(count); | |
1582 | if (!p) | |
1583 | return -ENOMEM; | |
1584 | *pages = p; | |
1585 | ||
1586 | nr = iter_xarray_populate_pages(p, i->xarray, index, count); | |
1587 | if (nr == 0) | |
1588 | return 0; | |
1589 | ||
1590 | actual = PAGE_SIZE * nr; | |
1591 | actual -= offset; | |
1592 | if (nr == count && size > 0) { | |
1593 | unsigned last_offset = (nr > 1) ? 0 : offset; | |
1594 | actual -= PAGE_SIZE - (last_offset + size); | |
1595 | } | |
1596 | return actual; | |
1597 | } | |
1598 | ||
62a8067a AV |
1599 | ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, |
1600 | struct page ***pages, size_t maxsize, | |
1601 | size_t *start) | |
1602 | { | |
1b17f1f2 | 1603 | struct page **p; |
3d671ca6 AV |
1604 | size_t len; |
1605 | int n, res; | |
1b17f1f2 AV |
1606 | |
1607 | if (maxsize > i->count) | |
1608 | maxsize = i->count; | |
3d671ca6 AV |
1609 | if (!maxsize) |
1610 | return 0; | |
1b17f1f2 | 1611 | |
3d671ca6 AV |
1612 | if (likely(iter_is_iovec(i))) { |
1613 | unsigned long addr; | |
1b17f1f2 | 1614 | |
3d671ca6 | 1615 | addr = first_iovec_segment(i, &len, start, maxsize, ~0U); |
1b17f1f2 AV |
1616 | n = DIV_ROUND_UP(len, PAGE_SIZE); |
1617 | p = get_pages_array(n); | |
1618 | if (!p) | |
1619 | return -ENOMEM; | |
73b0140b IW |
1620 | res = get_user_pages_fast(addr, n, |
1621 | iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p); | |
814a6674 | 1622 | if (unlikely(res <= 0)) { |
1b17f1f2 | 1623 | kvfree(p); |
814a6674 | 1624 | *pages = NULL; |
1b17f1f2 AV |
1625 | return res; |
1626 | } | |
1627 | *pages = p; | |
1628 | return (res == n ? len : res * PAGE_SIZE) - *start; | |
3d671ca6 AV |
1629 | } |
1630 | if (iov_iter_is_bvec(i)) { | |
1631 | struct page *page; | |
1632 | ||
1633 | page = first_bvec_segment(i, &len, start, maxsize, ~0U); | |
1634 | n = DIV_ROUND_UP(len, PAGE_SIZE); | |
1635 | *pages = p = get_pages_array(n); | |
1b17f1f2 AV |
1636 | if (!p) |
1637 | return -ENOMEM; | |
3d671ca6 AV |
1638 | while (n--) |
1639 | get_page(*p++ = page++); | |
1640 | return len - *start; | |
1641 | } | |
1642 | if (iov_iter_is_pipe(i)) | |
1643 | return pipe_get_pages_alloc(i, pages, maxsize, start); | |
1644 | if (iov_iter_is_xarray(i)) | |
1645 | return iter_xarray_get_pages_alloc(i, pages, maxsize, start); | |
1646 | return -EFAULT; | |
62a8067a AV |
1647 | } |
1648 | EXPORT_SYMBOL(iov_iter_get_pages_alloc); | |
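/*
 * Illustrative sketch only: the _alloc variant sizes and allocates the page
 * array itself (possibly via kvmalloc), so the caller frees it with kvfree()
 * after dropping the page references.  Helper name is arbitrary.
 */
static ssize_t pin_front_pages_alloc_sketch(struct iov_iter *iter, size_t max)
{
	struct page **pages;
	size_t offset;
	ssize_t bytes;
	int n, j;

	bytes = iov_iter_get_pages_alloc(iter, &pages, max, &offset);
	if (bytes <= 0)
		return bytes;
	n = DIV_ROUND_UP(offset + bytes, PAGE_SIZE);
	for (j = 0; j < n; j++)
		put_page(pages[j]);
	kvfree(pages);
	return bytes;
}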
1649 | ||
a604ec7e AV |
1650 | size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, |
1651 | struct iov_iter *i) | |
1652 | { | |
a604ec7e | 1653 | __wsum sum, next; |
a604ec7e | 1654 | sum = *csum; |
9ea9ce04 | 1655 | if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { |
241699cd AV |
1656 | WARN_ON(1); |
1657 | return 0; | |
1658 | } | |
7baa5099 AV |
1659 | iterate_and_advance(i, bytes, base, len, off, ({ |
1660 | next = csum_and_copy_from_user(base, addr + off, len); | |
2495bdcc | 1661 | sum = csum_block_add(sum, next, off); |
7baa5099 | 1662 | next ? 0 : len; |
a604ec7e | 1663 | }), ({ |
7baa5099 | 1664 | sum = csum_and_memcpy(addr + off, base, len, sum, off); |
a604ec7e AV |
1665 | }) |
1666 | ) | |
1667 | *csum = sum; | |
1668 | return bytes; | |
1669 | } | |
1670 | EXPORT_SYMBOL(csum_and_copy_from_iter); | |
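/*
 * Illustrative sketch only: pull "bytes" of data out of an iterator into a
 * kernel buffer while folding it into a running checksum.  A short return
 * value from csum_and_copy_from_iter() means a fault was hit part-way
 * through the user copy.
 */
static bool copy_from_iter_csum_sketch(void *buf, size_t bytes,
				       __wsum *csum, struct iov_iter *from)
{
	return csum_and_copy_from_iter(buf, bytes, csum, from) == bytes;
}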
1671 | ||
52cbd23a | 1672 | size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, |
a604ec7e AV |
1673 | struct iov_iter *i) |
1674 | { | |
52cbd23a | 1675 | struct csum_state *csstate = _csstate; |
a604ec7e | 1676 | __wsum sum, next; |
78e1f386 | 1677 | |
78e1f386 | 1678 | if (unlikely(iov_iter_is_discard(i))) { |
241699cd AV |
1679 | WARN_ON(1); /* for now */ |
1680 | return 0; | |
1681 | } | |
6852df12 AV |
1682 | |
1683 | sum = csum_shift(csstate->csum, csstate->off); | |
1684 | if (unlikely(iov_iter_is_pipe(i))) | |
1685 | bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum); | |
1686 | else iterate_and_advance(i, bytes, base, len, off, ({ | |
7baa5099 | 1687 | next = csum_and_copy_to_user(addr + off, base, len); |
2495bdcc | 1688 | sum = csum_block_add(sum, next, off); |
7baa5099 | 1689 | next ? 0 : len; |
a604ec7e | 1690 | }), ({ |
7baa5099 | 1691 | sum = csum_and_memcpy(base, addr + off, len, sum, off); |
a604ec7e AV |
1692 | }) |
1693 | ) | |
594e450b AV |
1694 | csstate->csum = csum_shift(sum, csstate->off); |
1695 | csstate->off += bytes; | |
a604ec7e AV |
1696 | return bytes; |
1697 | } | |
1698 | EXPORT_SYMBOL(csum_and_copy_to_iter); | |
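/*
 * Illustrative sketch only: the struct csum_state argument carries both the
 * running checksum and the byte offset reached so far, so odd-length chunks
 * still fold correctly across multiple calls.  Helper and chunking are
 * hypothetical.
 */
static __wsum copy_chunks_csum_sketch(const void *buf, size_t len,
				      size_t chunk, struct iov_iter *to)
{
	struct csum_state csstate = { .csum = 0, .off = 0 };
	size_t done = 0;

	while (done < len) {
		size_t n = min(chunk, len - done);

		if (csum_and_copy_to_iter(buf + done, n, &csstate, to) != n)
			break;		/* iterator ran out of space */
		done += n;
	}
	return csstate.csum;
}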
1699 | ||
d05f4435 SG |
1700 | size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, |
1701 | struct iov_iter *i) | |
1702 | { | |
7999096f | 1703 | #ifdef CONFIG_CRYPTO_HASH |
d05f4435 SG |
1704 | struct ahash_request *hash = hashp; |
1705 | struct scatterlist sg; | |
1706 | size_t copied; | |
1707 | ||
1708 | copied = copy_to_iter(addr, bytes, i); | |
1709 | sg_init_one(&sg, addr, copied); | |
1710 | ahash_request_set_crypt(hash, &sg, NULL, copied); | |
1711 | crypto_ahash_update(hash); | |
1712 | return copied; | |
27fad74a Y |
1713 | #else |
1714 | return 0; | |
1715 | #endif | |
d05f4435 SG |
1716 | } |
1717 | EXPORT_SYMBOL(hash_and_copy_to_iter); | |
1718 | ||
66531c65 | 1719 | static int iov_npages(const struct iov_iter *i, int maxpages) |
62a8067a | 1720 | { |
66531c65 AV |
1721 | size_t skip = i->iov_offset, size = i->count; |
1722 | const struct iovec *p; | |
e0f2dc40 AV |
1723 | int npages = 0; |
1724 | ||
66531c65 AV |
1725 | for (p = i->iov; size; skip = 0, p++) { |
1726 | unsigned offs = offset_in_page(p->iov_base + skip); | |
1727 | size_t len = min(p->iov_len - skip, size); | |
e0f2dc40 | 1728 | |
66531c65 AV |
1729 | if (len) { |
1730 | size -= len; | |
1731 | npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); | |
1732 | if (unlikely(npages > maxpages)) | |
1733 | return maxpages; | |
1734 | } | |
1735 | } | |
1736 | return npages; | |
1737 | } | |
1738 | ||
1739 | static int bvec_npages(const struct iov_iter *i, int maxpages) | |
1740 | { | |
1741 | size_t skip = i->iov_offset, size = i->count; | |
1742 | const struct bio_vec *p; | |
1743 | int npages = 0; | |
1744 | ||
1745 | for (p = i->bvec; size; skip = 0, p++) { | |
1746 | unsigned offs = (p->bv_offset + skip) % PAGE_SIZE; | |
1747 | size_t len = min(p->bv_len - skip, size); | |
1748 | ||
1749 | size -= len; | |
1750 | npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); | |
1751 | if (unlikely(npages > maxpages)) | |
1752 | return maxpages; | |
1753 | } | |
1754 | return npages; | |
1755 | } | |
1756 | ||
1757 | int iov_iter_npages(const struct iov_iter *i, int maxpages) | |
1758 | { | |
1759 | if (unlikely(!i->count)) | |
1760 | return 0; | |
1761 | /* iovec and kvec have identical layouts */ | |
1762 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) | |
1763 | return iov_npages(i, maxpages); | |
1764 | if (iov_iter_is_bvec(i)) | |
1765 | return bvec_npages(i, maxpages); | |
1766 | if (iov_iter_is_pipe(i)) { | |
8cefc107 | 1767 | unsigned int iter_head; |
66531c65 | 1768 | int npages; |
241699cd | 1769 | size_t off; |
241699cd AV |
1770 | |
1771 | if (!sanity(i)) | |
1772 | return 0; | |
1773 | ||
8cefc107 | 1774 | data_start(i, &iter_head, &off); |
241699cd | 1775 | /* some of this one + all after this one */ |
66531c65 AV |
1776 | npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); |
1777 | return min(npages, maxpages); | |
1778 | } | |
1779 | if (iov_iter_is_xarray(i)) { | |
e4f8df86 AV |
1780 | unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; |
1781 | int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); | |
66531c65 AV |
1782 | return min(npages, maxpages); |
1783 | } | |
1784 | return 0; | |
62a8067a | 1785 | } |
f67da30c | 1786 | EXPORT_SYMBOL(iov_iter_npages); |
4b8164b9 AV |
1787 | |
1788 | const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) | |
1789 | { | |
1790 | *new = *old; | |
00e23707 | 1791 | if (unlikely(iov_iter_is_pipe(new))) { |
241699cd AV |
1792 | WARN_ON(1); |
1793 | return NULL; | |
1794 | } | |
7ff50620 | 1795 | if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new))) |
9ea9ce04 | 1796 | return NULL; |
00e23707 | 1797 | if (iov_iter_is_bvec(new)) |
4b8164b9 AV |
1798 | return new->bvec = kmemdup(new->bvec, |
1799 | new->nr_segs * sizeof(struct bio_vec), | |
1800 | flags); | |
1801 | else | |
1802 | /* iovec and kvec have identical layout */ | |
1803 | return new->iov = kmemdup(new->iov, | |
1804 | new->nr_segs * sizeof(struct iovec), | |
1805 | flags); | |
1806 | } | |
1807 | EXPORT_SYMBOL(dup_iter); | |
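/*
 * Illustrative sketch only: take a private copy of an iovec/kvec/bvec
 * iterator so it can be replayed later; the duplicated segment array is
 * owned by the caller and must be kfree()d once the copy is finished with.
 */
static const void *stash_iter_sketch(struct iov_iter *dst, struct iov_iter *src)
{
	const void *segs = dup_iter(dst, src, GFP_KERNEL);

	/* for these iterator types a NULL return means the kmemdup() failed */
	if (!segs)
		return NULL;
	/* ... replay *dst as needed, then: kfree(segs); */
	return segs;
}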
bc917be8 | 1808 | |
bfdc5970 CH |
1809 | static int copy_compat_iovec_from_user(struct iovec *iov, |
1810 | const struct iovec __user *uvec, unsigned long nr_segs) | |
1811 | { | |
1812 | const struct compat_iovec __user *uiov = | |
1813 | (const struct compat_iovec __user *)uvec; | |
1814 | int ret = -EFAULT, i; | |
1815 | ||
a959a978 | 1816 | if (!user_access_begin(uiov, nr_segs * sizeof(*uiov))) |
bfdc5970 CH |
1817 | return -EFAULT; |
1818 | ||
1819 | for (i = 0; i < nr_segs; i++) { | |
1820 | compat_uptr_t buf; | |
1821 | compat_ssize_t len; | |
1822 | ||
1823 | unsafe_get_user(len, &uiov[i].iov_len, uaccess_end); | |
1824 | unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end); | |
1825 | ||
1826 | /* check for compat_size_t not fitting in compat_ssize_t .. */ | |
1827 | if (len < 0) { | |
1828 | ret = -EINVAL; | |
1829 | goto uaccess_end; | |
1830 | } | |
1831 | iov[i].iov_base = compat_ptr(buf); | |
1832 | iov[i].iov_len = len; | |
1833 | } | |
1834 | ||
1835 | ret = 0; | |
1836 | uaccess_end: | |
1837 | user_access_end(); | |
1838 | return ret; | |
1839 | } | |
1840 | ||
1841 | static int copy_iovec_from_user(struct iovec *iov, | |
1842 | const struct iovec __user *uvec, unsigned long nr_segs) | |
fb041b59 DL |
1843 | { |
1844 | unsigned long seg; | |
fb041b59 | 1845 | |
bfdc5970 CH |
1846 | if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec))) |
1847 | return -EFAULT; | |
1848 | for (seg = 0; seg < nr_segs; seg++) { | |
1849 | if ((ssize_t)iov[seg].iov_len < 0) | |
1850 | return -EINVAL; | |
fb041b59 DL |
1851 | } |
1852 | ||
bfdc5970 CH |
1853 | return 0; |
1854 | } | |
1855 | ||
1856 | struct iovec *iovec_from_user(const struct iovec __user *uvec, | |
1857 | unsigned long nr_segs, unsigned long fast_segs, | |
1858 | struct iovec *fast_iov, bool compat) | |
1859 | { | |
1860 | struct iovec *iov = fast_iov; | |
1861 | int ret; | |
1862 | ||
fb041b59 | 1863 | /* |
bfdc5970 CH |
1864 | * SuS says "The readv() function *may* fail if the iovcnt argument was |
1865 | * less than or equal to 0, or greater than {IOV_MAX}." Linux has | |
1866 | * traditionally returned zero for zero segments, so... | |
fb041b59 | 1867 | */ |
bfdc5970 CH |
1868 | if (nr_segs == 0) |
1869 | return iov; | |
1870 | if (nr_segs > UIO_MAXIOV) | |
1871 | return ERR_PTR(-EINVAL); | |
fb041b59 DL |
1872 | if (nr_segs > fast_segs) { |
1873 | iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL); | |
bfdc5970 CH |
1874 | if (!iov) |
1875 | return ERR_PTR(-ENOMEM); | |
fb041b59 | 1876 | } |
bfdc5970 CH |
1877 | |
1878 | if (compat) | |
1879 | ret = copy_compat_iovec_from_user(iov, uvec, nr_segs); | |
1880 | else | |
1881 | ret = copy_iovec_from_user(iov, uvec, nr_segs); | |
1882 | if (ret) { | |
1883 | if (iov != fast_iov) | |
1884 | kfree(iov); | |
1885 | return ERR_PTR(ret); | |
1886 | } | |
1887 | ||
1888 | return iov; | |
1889 | } | |
1890 | ||
1891 | ssize_t __import_iovec(int type, const struct iovec __user *uvec, | |
1892 | unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, | |
1893 | struct iov_iter *i, bool compat) | |
1894 | { | |
1895 | ssize_t total_len = 0; | |
1896 | unsigned long seg; | |
1897 | struct iovec *iov; | |
1898 | ||
1899 | iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat); | |
1900 | if (IS_ERR(iov)) { | |
1901 | *iovp = NULL; | |
1902 | return PTR_ERR(iov); | |
fb041b59 DL |
1903 | } |
1904 | ||
1905 | /* | |
bfdc5970 CH |
1906 | * According to the Single Unix Specification we should return EINVAL if |
1907 | * an element length is < 0 when cast to ssize_t or if the total length | |
1908 | * would overflow the ssize_t return value of the system call. | |
fb041b59 DL |
1909 | * |
1910 | * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the | |
1911 | * overflow case. | |
1912 | */ | |
fb041b59 | 1913 | for (seg = 0; seg < nr_segs; seg++) { |
fb041b59 DL |
1914 | ssize_t len = (ssize_t)iov[seg].iov_len; |
1915 | ||
bfdc5970 CH |
1916 | if (!access_ok(iov[seg].iov_base, len)) { |
1917 | if (iov != *iovp) | |
1918 | kfree(iov); | |
1919 | *iovp = NULL; | |
1920 | return -EFAULT; | |
fb041b59 | 1921 | } |
bfdc5970 CH |
1922 | |
1923 | if (len > MAX_RW_COUNT - total_len) { | |
1924 | len = MAX_RW_COUNT - total_len; | |
fb041b59 DL |
1925 | iov[seg].iov_len = len; |
1926 | } | |
bfdc5970 | 1927 | total_len += len; |
fb041b59 | 1928 | } |
bfdc5970 CH |
1929 | |
1930 | iov_iter_init(i, type, iov, nr_segs, total_len); | |
1931 | if (iov == *iovp) | |
1932 | *iovp = NULL; | |
1933 | else | |
1934 | *iovp = iov; | |
1935 | return total_len; | |
fb041b59 DL |
1936 | } |
1937 | ||
ffecee4f VN |
1938 | /** |
1939 | * import_iovec() - Copy an array of &struct iovec from userspace | |
1940 | * into the kernel, check that it is valid, and initialize a new | |
1941 | * &struct iov_iter iterator to access it. | |
1942 | * | |
1943 | * @type: One of %READ or %WRITE. | |
bfdc5970 | 1944 | * @uvec: Pointer to the userspace array. |
ffecee4f VN |
1945 | * @nr_segs: Number of elements in userspace array. |
1946 | * @fast_segs: Number of elements in *@iovp. | |
bfdc5970 | 1947 | * @iovp: (input and output parameter) Pointer to pointer to (usually small |
ffecee4f VN |
1948 | * on-stack) kernel array. |
1949 | * @i: Pointer to iterator that will be initialized on success. | |
1950 | * | |
1951 | * If the array pointed to by *@iovp is large enough to hold all @nr_segs, | |
1952 | * then this function places %NULL in *@iovp on return. Otherwise, a new | |
1953 | * array will be allocated and the result placed in *@iovp. This means that | |
1954 | * the caller may call kfree() on *@iovp regardless of whether the small | |
1955 | * on-stack array was used or not (and regardless of whether this function | |
1956 | * returns an error or not). | |
1957 | * | |
87e5e6da | 1958 | * Return: Negative error code on error, bytes imported on success |
ffecee4f | 1959 | */ |
bfdc5970 | 1960 | ssize_t import_iovec(int type, const struct iovec __user *uvec, |
bc917be8 | 1961 | unsigned nr_segs, unsigned fast_segs, |
bfdc5970 | 1962 | struct iovec **iovp, struct iov_iter *i) |
bc917be8 | 1963 | { |
89cd35c5 CH |
1964 | return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i, |
1965 | in_compat_syscall()); | |
bc917be8 AV |
1966 | } |
1967 | EXPORT_SYMBOL(import_iovec); | |
1968 | ||
bc917be8 AV |
1969 | int import_single_range(int rw, void __user *buf, size_t len, |
1970 | struct iovec *iov, struct iov_iter *i) | |
1971 | { | |
1972 | if (len > MAX_RW_COUNT) | |
1973 | len = MAX_RW_COUNT; | |
96d4f267 | 1974 | if (unlikely(!access_ok(buf, len))) |
bc917be8 AV |
1975 | return -EFAULT; |
1976 | ||
1977 | iov->iov_base = buf; | |
1978 | iov->iov_len = len; | |
1979 | iov_iter_init(i, rw, iov, 1, len); | |
1980 | return 0; | |
1981 | } | |
e1267585 | 1982 | EXPORT_SYMBOL(import_single_range); |
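/*
 * Illustrative sketch only: wrap a single user buffer in an iterator, the way
 * a plain write()-style entry point typically does before calling into code
 * that only understands iov_iter.
 */
static ssize_t write_sketch(const char __user *buf, size_t len)
{
	struct iovec iov;
	struct iov_iter iter;
	int ret;

	ret = import_single_range(WRITE, (void __user *)buf, len, &iov, &iter);
	if (ret)
		return ret;
	/* ... hand "iter" to ->write_iter() or copy_from_iter() ... */
	return iov_iter_count(&iter);
}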
8fb0f47a JA |
1983 | |
1984 | /** | |
1985 | * iov_iter_restore() - Restore a &struct iov_iter to the same state as when | |
1986 | * iov_iter_save_state() was called. | |
1987 | * | |
1988 | * @i: &struct iov_iter to restore | |
1989 | * @state: state to restore from | |
1990 | * | |
1991 | * Used after iov_iter_save_state() to restore @i, if operations may | |
1992 | * have advanced it. | |
1993 | * | |
1994 | * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC | |
1995 | */ | |
1996 | void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state) | |
1997 | { | |
1998 | if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) && | |
1999 | !iov_iter_is_kvec(i))) | |
2000 | return; | |
2001 | i->iov_offset = state->iov_offset; | |
2002 | i->count = state->count; | |
2003 | /* | |
2004 | * For the *vec iters, nr_segs + iov is constant - if we increment | |
2005 | * the vec, then we also decrement the nr_segs count. Hence we don't | |
2006 | * need to track both of these, just one is enough and we can derive | |
2007 | * the other from that. ITER_KVEC and ITER_IOVEC are the same struct | |
2008 | * size, so we can just increment the iov pointer as they are in a union. | |
2009 | * ITER_BVEC _may_ be the same size on some archs, but on others it is | |
2010 | * not. Be safe and handle it separately. | |
2011 | */ | |
2012 | BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec)); | |
2013 | if (iov_iter_is_bvec(i)) | |
2014 | i->bvec -= state->nr_segs - i->nr_segs; | |
2015 | else | |
2016 | i->iov -= state->nr_segs - i->nr_segs; | |
2017 | i->nr_segs = state->nr_segs; | |
2018 | } |
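/*
 * Illustrative sketch only: pair iov_iter_restore() with
 * iov_iter_save_state() (its companion helper declared in <linux/uio.h>).
 * do_one_attempt() is a hypothetical callback standing in for e.g. a
 * ->read_iter() invocation that may consume part of the iterator and then
 * ask to be retried.
 */
static ssize_t retry_sketch(struct iov_iter *iter,
			    ssize_t (*do_one_attempt)(struct iov_iter *))
{
	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(iter, &state);
	ret = do_one_attempt(iter);
	if (ret == -EAGAIN)
		iov_iter_restore(iter, &state);	/* rewind for the retry */
	return ret;
}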