Commit | Line | Data |
---|---|---|
457c8996 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
7999096f | 2 | #include <crypto/hash.h> |
4f18cd31 | 3 | #include <linux/export.h> |
2f8b5444 | 4 | #include <linux/bvec.h> |
4d0e9df5 | 5 | #include <linux/fault-inject-usercopy.h> |
4f18cd31 AV |
6 | #include <linux/uio.h> |
7 | #include <linux/pagemap.h> | |
28961998 | 8 | #include <linux/highmem.h> |
91f79c43 AV |
9 | #include <linux/slab.h> |
10 | #include <linux/vmalloc.h> | |
241699cd | 11 | #include <linux/splice.h> |
bfdc5970 | 12 | #include <linux/compat.h> |
a604ec7e | 13 | #include <net/checksum.h> |
d05f4435 | 14 | #include <linux/scatterlist.h> |
d0ef4c36 | 15 | #include <linux/instrumented.h> |
4f18cd31 | 16 | |
241699cd AV |
17 | #define PIPE_PARANOIA /* for now */ |
18 | ||
5c67aa90 | 19 | /* covers iovec and kvec alike */ |
a6e4ec7b | 20 | #define iterate_iovec(i, n, base, len, off, __p, STEP) { \ |
7baa5099 | 21 | size_t off = 0; \ |
a6e4ec7b | 22 | size_t skip = i->iov_offset; \ |
7a1bcb5d | 23 | do { \ |
7baa5099 AV |
24 | len = min(n, __p->iov_len - skip); \ |
25 | if (likely(len)) { \ | |
26 | base = __p->iov_base + skip; \ | |
27 | len -= (STEP); \ | |
28 | off += len; \ | |
29 | skip += len; \ | |
30 | n -= len; \ | |
7a1bcb5d AV |
31 | if (skip < __p->iov_len) \ |
32 | break; \ | |
33 | } \ | |
34 | __p++; \ | |
35 | skip = 0; \ | |
36 | } while (n); \ | |
a6e4ec7b | 37 | i->iov_offset = skip; \ |
7baa5099 | 38 | n = off; \ |
04a31165 AV |
39 | } |
40 | ||
a6e4ec7b | 41 | #define iterate_bvec(i, n, base, len, off, p, STEP) { \ |
7baa5099 | 42 | size_t off = 0; \ |
a6e4ec7b | 43 | unsigned skip = i->iov_offset; \ |
7491a2bf AV |
44 | while (n) { \ |
45 | unsigned offset = p->bv_offset + skip; \ | |
1b4fb5ff | 46 | unsigned left; \ |
21b56c84 AV |
47 | void *kaddr = kmap_local_page(p->bv_page + \ |
48 | offset / PAGE_SIZE); \ | |
7baa5099 | 49 | base = kaddr + offset % PAGE_SIZE; \ |
a6e4ec7b | 50 | len = min(min(n, (size_t)(p->bv_len - skip)), \ |
7491a2bf | 51 | (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \ |
1b4fb5ff | 52 | left = (STEP); \ |
21b56c84 | 53 | kunmap_local(kaddr); \ |
7baa5099 AV |
54 | len -= left; \ |
55 | off += len; \ | |
56 | skip += len; \ | |
7491a2bf AV |
57 | if (skip == p->bv_len) { \ |
58 | skip = 0; \ | |
59 | p++; \ | |
60 | } \ | |
7baa5099 | 61 | n -= len; \ |
1b4fb5ff AV |
62 | if (left) \ |
63 | break; \ | |
7491a2bf | 64 | } \ |
a6e4ec7b | 65 | i->iov_offset = skip; \ |
7baa5099 | 66 | n = off; \ |
04a31165 AV |
67 | } |
68 | ||
a6e4ec7b | 69 | #define iterate_xarray(i, n, base, len, __off, STEP) { \ |
1b4fb5ff | 70 | __label__ __out; \ |
622838f3 | 71 | size_t __off = 0; \ |
821979f5 | 72 | struct folio *folio; \ |
a6e4ec7b | 73 | loff_t start = i->xarray_start + i->iov_offset; \ |
4b179e9a | 74 | pgoff_t index = start / PAGE_SIZE; \ |
7ff50620 DH |
75 | XA_STATE(xas, i->xarray, index); \ |
76 | \ | |
821979f5 | 77 | len = PAGE_SIZE - offset_in_page(start); \ |
7baa5099 | 78 | rcu_read_lock(); \ |
821979f5 | 79 | xas_for_each(&xas, folio, ULONG_MAX) { \ |
7baa5099 | 80 | unsigned left; \ |
821979f5 MWO |
81 | size_t offset; \ |
82 | if (xas_retry(&xas, folio)) \ | |
7baa5099 | 83 | continue; \ |
821979f5 | 84 | if (WARN_ON(xa_is_value(folio))) \ |
7baa5099 | 85 | break; \ |
821979f5 | 86 | if (WARN_ON(folio_test_hugetlb(folio))) \ |
7baa5099 | 87 | break; \ |
821979f5 MWO |
88 | offset = offset_in_folio(folio, start + __off); \ |
89 | while (offset < folio_size(folio)) { \ | |
90 | base = kmap_local_folio(folio, offset); \ | |
7baa5099 AV |
91 | len = min(n, len); \ |
92 | left = (STEP); \ | |
821979f5 | 93 | kunmap_local(base); \ |
7baa5099 AV |
94 | len -= left; \ |
95 | __off += len; \ | |
96 | n -= len; \ | |
97 | if (left || n == 0) \ | |
98 | goto __out; \ | |
821979f5 MWO |
99 | offset += len; \ |
100 | len = PAGE_SIZE; \ | |
7baa5099 | 101 | } \ |
7ff50620 | 102 | } \ |
1b4fb5ff | 103 | __out: \ |
7ff50620 | 104 | rcu_read_unlock(); \ |
821979f5 | 105 | i->iov_offset += __off; \ |
622838f3 | 106 | n = __off; \ |
7ff50620 DH |
107 | } |
108 | ||
7baa5099 | 109 | #define __iterate_and_advance(i, n, base, len, off, I, K) { \ |
dd254f5a AV |
110 | if (unlikely(i->count < n)) \ |
111 | n = i->count; \ | |
f5da8354 | 112 | if (likely(n)) { \ |
28f38db7 | 113 | if (likely(iter_is_iovec(i))) { \ |
5c67aa90 | 114 | const struct iovec *iov = i->iov; \ |
7baa5099 AV |
115 | void __user *base; \ |
116 | size_t len; \ | |
117 | iterate_iovec(i, n, base, len, off, \ | |
a6e4ec7b | 118 | iov, (I)) \ |
28f38db7 AV |
119 | i->nr_segs -= iov - i->iov; \ |
120 | i->iov = iov; \ | |
121 | } else if (iov_iter_is_bvec(i)) { \ | |
1bdc76ae | 122 | const struct bio_vec *bvec = i->bvec; \ |
7baa5099 AV |
123 | void *base; \ |
124 | size_t len; \ | |
125 | iterate_bvec(i, n, base, len, off, \ | |
a6e4ec7b | 126 | bvec, (K)) \ |
7491a2bf AV |
127 | i->nr_segs -= bvec - i->bvec; \ |
128 | i->bvec = bvec; \ | |
28f38db7 | 129 | } else if (iov_iter_is_kvec(i)) { \ |
5c67aa90 | 130 | const struct kvec *kvec = i->kvec; \ |
7baa5099 AV |
131 | void *base; \ |
132 | size_t len; \ | |
133 | iterate_iovec(i, n, base, len, off, \ | |
a6e4ec7b | 134 | kvec, (K)) \ |
dd254f5a AV |
135 | i->nr_segs -= kvec - i->kvec; \ |
136 | i->kvec = kvec; \ | |
28f38db7 | 137 | } else if (iov_iter_is_xarray(i)) { \ |
7baa5099 AV |
138 | void *base; \ |
139 | size_t len; \ | |
140 | iterate_xarray(i, n, base, len, off, \ | |
a6e4ec7b | 141 | (K)) \ |
7ce2a91e | 142 | } \ |
dd254f5a | 143 | i->count -= n; \ |
7ce2a91e | 144 | } \ |
7ce2a91e | 145 | } |
7baa5099 AV |
146 | #define iterate_and_advance(i, n, base, len, off, I, K) \ |
147 | __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0)) | |
7ce2a91e | 148 | |
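The three iterate_* helpers above share one shape: walk the segments, hand each contiguous chunk to the caller-supplied STEP expression, and stop early once STEP reports bytes it could not process. As a reading aid, here is a plain-function sketch of the iovec case (illustrative only, not part of this file; `step` stands in for STEP and returns the count of unprocessed bytes):

static size_t iterate_iovec_sketch(const struct iovec *p, size_t skip, size_t n,
				   size_t (*step)(void __user *base, size_t len))
{
	size_t off = 0;				/* bytes processed so far */

	do {
		size_t len = min(n, p->iov_len - skip);

		if (len) {
			len -= step(p->iov_base + skip, len);
			off += len;
			skip += len;
			n -= len;
			/* segment not fully consumed: short step, or n ran out */
			if (skip < p->iov_len)
				break;
		}
		p++;
		skip = 0;
	} while (n);
	return off;
}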
09fc68dc AV |
149 | static int copyout(void __user *to, const void *from, size_t n) |
150 | { | |
4d0e9df5 AL |
151 | if (should_fail_usercopy()) |
152 | return n; | |
96d4f267 | 153 | if (access_ok(to, n)) { |
d0ef4c36 | 154 | instrument_copy_to_user(to, from, n); |
09fc68dc AV |
155 | n = raw_copy_to_user(to, from, n); |
156 | } | |
157 | return n; | |
158 | } | |
159 | ||
160 | static int copyin(void *to, const void __user *from, size_t n) | |
161 | { | |
4d0e9df5 AL |
162 | if (should_fail_usercopy()) |
163 | return n; | |
96d4f267 | 164 | if (access_ok(from, n)) { |
d0ef4c36 | 165 | instrument_copy_from_user(to, from, n); |
09fc68dc AV |
166 | n = raw_copy_from_user(to, from, n); |
167 | } | |
168 | return n; | |
169 | } | |
170 | ||
241699cd AV |
171 | #ifdef PIPE_PARANOIA |
172 | static bool sanity(const struct iov_iter *i) | |
173 | { | |
174 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
175 | unsigned int p_head = pipe->head; |
176 | unsigned int p_tail = pipe->tail; | |
177 | unsigned int p_mask = pipe->ring_size - 1; | |
178 | unsigned int p_occupancy = pipe_occupancy(p_head, p_tail); | |
179 | unsigned int i_head = i->head; | |
180 | unsigned int idx; | |
181 | ||
241699cd AV |
182 | if (i->iov_offset) { |
183 | struct pipe_buffer *p; | |
8cefc107 | 184 | if (unlikely(p_occupancy == 0)) |
241699cd | 185 | goto Bad; // pipe must be non-empty |
8cefc107 | 186 | if (unlikely(i_head != p_head - 1)) |
241699cd AV |
187 | goto Bad; // must be at the last buffer... |
188 | ||
8cefc107 | 189 | p = &pipe->bufs[i_head & p_mask]; |
241699cd AV |
190 | if (unlikely(p->offset + p->len != i->iov_offset)) |
191 | goto Bad; // ... at the end of segment | |
192 | } else { | |
8cefc107 | 193 | if (i_head != p_head) |
241699cd AV |
194 | goto Bad; // must be right after the last buffer |
195 | } | |
196 | return true; | |
197 | Bad: | |
8cefc107 DH |
198 | printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset); |
199 | printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n", | |
200 | p_head, p_tail, pipe->ring_size); | |
201 | for (idx = 0; idx < pipe->ring_size; idx++) | |
241699cd AV |
202 | printk(KERN_ERR "[%p %p %d %d]\n", |
203 | pipe->bufs[idx].ops, | |
204 | pipe->bufs[idx].page, | |
205 | pipe->bufs[idx].offset, | |
206 | pipe->bufs[idx].len); | |
207 | WARN_ON(1); | |
208 | return false; | |
209 | } | |
210 | #else | |
211 | #define sanity(i) true | |
212 | #endif | |
213 | ||
241699cd AV |
214 | static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes, |
215 | struct iov_iter *i) | |
216 | { | |
217 | struct pipe_inode_info *pipe = i->pipe; | |
218 | struct pipe_buffer *buf; | |
8cefc107 DH |
219 | unsigned int p_tail = pipe->tail; |
220 | unsigned int p_mask = pipe->ring_size - 1; | |
221 | unsigned int i_head = i->head; | |
241699cd | 222 | size_t off; |
241699cd AV |
223 | |
224 | if (unlikely(bytes > i->count)) | |
225 | bytes = i->count; | |
226 | ||
227 | if (unlikely(!bytes)) | |
228 | return 0; | |
229 | ||
230 | if (!sanity(i)) | |
231 | return 0; | |
232 | ||
233 | off = i->iov_offset; | |
8cefc107 | 234 | buf = &pipe->bufs[i_head & p_mask]; |
241699cd AV |
235 | if (off) { |
236 | if (offset == off && buf->page == page) { | |
237 | /* merge with the last one */ | |
238 | buf->len += bytes; | |
239 | i->iov_offset += bytes; | |
240 | goto out; | |
241 | } | |
8cefc107 DH |
242 | i_head++; |
243 | buf = &pipe->bufs[i_head & p_mask]; | |
241699cd | 244 | } |
6718b6f8 | 245 | if (pipe_full(i_head, p_tail, pipe->max_usage)) |
241699cd | 246 | return 0; |
8cefc107 | 247 | |
241699cd | 248 | buf->ops = &page_cache_pipe_buf_ops; |
9d2231c5 | 249 | buf->flags = 0; |
8cefc107 DH |
250 | get_page(page); |
251 | buf->page = page; | |
241699cd AV |
252 | buf->offset = offset; |
253 | buf->len = bytes; | |
8cefc107 DH |
254 | |
255 | pipe->head = i_head + 1; | |
241699cd | 256 | i->iov_offset = offset + bytes; |
8cefc107 | 257 | i->head = i_head; |
241699cd AV |
258 | out: |
259 | i->count -= bytes; | |
260 | return bytes; | |
261 | } | |
262 | ||
171a0203 | 263 | /* |
a6294593 AG |
264 | * fault_in_iov_iter_readable - fault in iov iterator for reading |
265 | * @i: iterator | |
266 | * @size: maximum length | |
267 | * | |
171a0203 | 268 | * Fault in one or more iovecs of the given iov_iter, to a maximum length of |
a6294593 AG |
269 | * @size. For each iovec, fault in each page that constitutes the iovec. |
270 | * | |
271 | * Returns the number of bytes not faulted in (like copy_to_user() and | |
272 | * copy_from_user()). | |
171a0203 | 273 | * |
a6294593 | 274 | * Always returns 0 for non-userspace iterators. |
171a0203 | 275 | */ |
a6294593 | 276 | size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size) |
171a0203 | 277 | { |
0e8f0d67 | 278 | if (iter_is_iovec(i)) { |
a6294593 | 279 | size_t count = min(size, iov_iter_count(i)); |
8409a0d2 AV |
280 | const struct iovec *p; |
281 | size_t skip; | |
282 | ||
a6294593 AG |
283 | size -= count; |
284 | for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { | |
285 | size_t len = min(count, p->iov_len - skip); | |
286 | size_t ret; | |
8409a0d2 AV |
287 | |
288 | if (unlikely(!len)) | |
289 | continue; | |
a6294593 AG |
290 | ret = fault_in_readable(p->iov_base + skip, len); |
291 | count -= len - ret; | |
292 | if (ret) | |
293 | break; | |
8409a0d2 | 294 | } |
a6294593 | 295 | return count + size; |
171a0203 AA |
296 | } |
297 | return 0; | |
298 | } | |
a6294593 | 299 | EXPORT_SYMBOL(fault_in_iov_iter_readable); |
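A minimal caller sketch (hypothetical helper, assuming a kernel build with <linux/uio.h>): filesystems typically pre-fault the source pages before taking locks under which a copy must not fault, and treat a return equal to @size as no progress being possible:

static int prefault_readable(struct iov_iter *from, size_t size)
{
	/* fault_in_iov_iter_readable() returns the bytes NOT faulted in */
	if (fault_in_iov_iter_readable(from, size) == size)
		return -EFAULT;
	return 0;
}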
171a0203 | 300 | |
cdd591fc AG |
301 | /* |
302 | * fault_in_iov_iter_writeable - fault in iov iterator for writing | |
303 | * @i: iterator | |
304 | * @size: maximum length | |
305 | * | |
306 | * Faults in the iterator using get_user_pages(), i.e., without triggering | |
307 | * hardware page faults. This is primarily useful when we already know that | |
308 | * some or all of the pages in @i aren't in memory. | |
309 | * | |
310 | * Returns the number of bytes not faulted in, like copy_to_user() and | |
311 | * copy_from_user(). | |
312 | * | |
313 | * Always returns 0 for non-user-space iterators. | |
314 | */ | |
315 | size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size) | |
316 | { | |
317 | if (iter_is_iovec(i)) { | |
318 | size_t count = min(size, iov_iter_count(i)); | |
319 | const struct iovec *p; | |
320 | size_t skip; | |
321 | ||
322 | size -= count; | |
323 | for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { | |
324 | size_t len = min(count, p->iov_len - skip); | |
325 | size_t ret; | |
326 | ||
327 | if (unlikely(!len)) | |
328 | continue; | |
329 | ret = fault_in_safe_writeable(p->iov_base + skip, len); | |
330 | count -= len - ret; | |
331 | if (ret) | |
332 | break; | |
333 | } | |
334 | return count + size; | |
335 | } | |
336 | return 0; | |
337 | } | |
338 | EXPORT_SYMBOL(fault_in_iov_iter_writeable); | |
339 | ||
aa563d7b | 340 | void iov_iter_init(struct iov_iter *i, unsigned int direction, |
71d8e532 AV |
341 | const struct iovec *iov, unsigned long nr_segs, |
342 | size_t count) | |
343 | { | |
aa563d7b | 344 | WARN_ON(direction & ~(READ | WRITE)); |
8cd54c1c AV |
345 | *i = (struct iov_iter) { |
346 | .iter_type = ITER_IOVEC, | |
3337ab08 | 347 | .nofault = false, |
8cd54c1c AV |
348 | .data_source = direction, |
349 | .iov = iov, | |
350 | .nr_segs = nr_segs, | |
351 | .iov_offset = 0, | |
352 | .count = count | |
353 | }; | |
71d8e532 AV |
354 | } |
355 | EXPORT_SYMBOL(iov_iter_init); | |
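A minimal usage sketch (illustrative, not part of this file): wrap a single user buffer for a read-style transfer; READ here means the described memory is the destination of the data:

static void init_single_user_buf(struct iov_iter *iter, struct iovec *iov,
				 void __user *buf, size_t len)
{
	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(iter, READ, iov, 1, len);
}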
7b2c99d1 | 356 | |
241699cd AV |
357 | static inline bool allocated(struct pipe_buffer *buf) |
358 | { | |
359 | return buf->ops == &default_pipe_buf_ops; | |
360 | } | |
361 | ||
8cefc107 DH |
362 | static inline void data_start(const struct iov_iter *i, |
363 | unsigned int *iter_headp, size_t *offp) | |
241699cd | 364 | { |
8cefc107 DH |
365 | unsigned int p_mask = i->pipe->ring_size - 1; |
366 | unsigned int iter_head = i->head; | |
241699cd | 367 | size_t off = i->iov_offset; |
8cefc107 DH |
368 | |
369 | if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) || | |
370 | off == PAGE_SIZE)) { | |
371 | iter_head++; | |
241699cd AV |
372 | off = 0; |
373 | } | |
8cefc107 | 374 | *iter_headp = iter_head; |
241699cd AV |
375 | *offp = off; |
376 | } | |
377 | ||
378 | static size_t push_pipe(struct iov_iter *i, size_t size, | |
8cefc107 | 379 | int *iter_headp, size_t *offp) |
241699cd AV |
380 | { |
381 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
382 | unsigned int p_tail = pipe->tail; |
383 | unsigned int p_mask = pipe->ring_size - 1; | |
384 | unsigned int iter_head; | |
241699cd | 385 | size_t off; |
241699cd AV |
386 | ssize_t left; |
387 | ||
388 | if (unlikely(size > i->count)) | |
389 | size = i->count; | |
390 | if (unlikely(!size)) | |
391 | return 0; | |
392 | ||
393 | left = size; | |
8cefc107 DH |
394 | data_start(i, &iter_head, &off); |
395 | *iter_headp = iter_head; | |
241699cd AV |
396 | *offp = off; |
397 | if (off) { | |
398 | left -= PAGE_SIZE - off; | |
399 | if (left <= 0) { | |
8cefc107 | 400 | pipe->bufs[iter_head & p_mask].len += size; |
241699cd AV |
401 | return size; |
402 | } | |
8cefc107 DH |
403 | pipe->bufs[iter_head & p_mask].len = PAGE_SIZE; |
404 | iter_head++; | |
241699cd | 405 | } |
6718b6f8 | 406 | while (!pipe_full(iter_head, p_tail, pipe->max_usage)) { |
8cefc107 | 407 | struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask]; |
241699cd AV |
408 | struct page *page = alloc_page(GFP_USER); |
409 | if (!page) | |
410 | break; | |
8cefc107 DH |
411 | |
412 | buf->ops = &default_pipe_buf_ops; | |
9d2231c5 | 413 | buf->flags = 0; |
8cefc107 DH |
414 | buf->page = page; |
415 | buf->offset = 0; | |
416 | buf->len = min_t(ssize_t, left, PAGE_SIZE); | |
417 | left -= buf->len; | |
418 | iter_head++; | |
419 | pipe->head = iter_head; | |
420 | ||
421 | if (left == 0) | |
241699cd | 422 | return size; |
241699cd AV |
423 | } |
424 | return size - left; | |
425 | } | |
426 | ||
427 | static size_t copy_pipe_to_iter(const void *addr, size_t bytes, | |
428 | struct iov_iter *i) | |
429 | { | |
430 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
431 | unsigned int p_mask = pipe->ring_size - 1; |
432 | unsigned int i_head; | |
241699cd | 433 | size_t n, off; |
241699cd AV |
434 | |
435 | if (!sanity(i)) | |
436 | return 0; | |
437 | ||
8cefc107 | 438 | bytes = n = push_pipe(i, bytes, &i_head, &off); |
241699cd AV |
439 | if (unlikely(!n)) |
440 | return 0; | |
8cefc107 | 441 | do { |
241699cd | 442 | size_t chunk = min_t(size_t, n, PAGE_SIZE - off); |
8cefc107 DH |
443 | memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk); |
444 | i->head = i_head; | |
241699cd AV |
445 | i->iov_offset = off + chunk; |
446 | n -= chunk; | |
447 | addr += chunk; | |
8cefc107 DH |
448 | off = 0; |
449 | i_head++; | |
450 | } while (n); | |
241699cd AV |
451 | i->count -= bytes; |
452 | return bytes; | |
453 | } | |
454 | ||
f9152895 AV |
455 | static __wsum csum_and_memcpy(void *to, const void *from, size_t len, |
456 | __wsum sum, size_t off) | |
457 | { | |
cc44c17b | 458 | __wsum next = csum_partial_copy_nocheck(from, to, len); |
f9152895 AV |
459 | return csum_block_add(sum, next, off); |
460 | } | |
461 | ||
78e1f386 | 462 | static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, |
6852df12 | 463 | struct iov_iter *i, __wsum *sump) |
78e1f386 AV |
464 | { |
465 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 | 466 | unsigned int p_mask = pipe->ring_size - 1; |
6852df12 AV |
467 | __wsum sum = *sump; |
468 | size_t off = 0; | |
8cefc107 | 469 | unsigned int i_head; |
6852df12 | 470 | size_t r; |
78e1f386 AV |
471 | |
472 | if (!sanity(i)) | |
473 | return 0; | |
474 | ||
6852df12 AV |
475 | bytes = push_pipe(i, bytes, &i_head, &r); |
476 | while (bytes) { | |
477 | size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r); | |
2495bdcc | 478 | char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page); |
6852df12 | 479 | sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off); |
2495bdcc | 480 | kunmap_local(p); |
8cefc107 | 481 | i->head = i_head; |
78e1f386 | 482 | i->iov_offset = r + chunk; |
6852df12 | 483 | bytes -= chunk; |
78e1f386 | 484 | off += chunk; |
8cefc107 DH |
485 | r = 0; |
486 | i_head++; | |
6852df12 AV |
487 | } |
488 | *sump = sum; | |
489 | i->count -= off; | |
490 | return off; | |
78e1f386 AV |
491 | } |
492 | ||
aa28de27 | 493 | size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) |
62a8067a | 494 | { |
00e23707 | 495 | if (unlikely(iov_iter_is_pipe(i))) |
241699cd | 496 | return copy_pipe_to_iter(addr, bytes, i); |
09fc68dc AV |
497 | if (iter_is_iovec(i)) |
498 | might_fault(); | |
7baa5099 AV |
499 | iterate_and_advance(i, bytes, base, len, off, |
500 | copyout(base, addr + off, len), | |
501 | memcpy(base, addr + off, len) | |
3d4d3e48 | 502 | ) |
62a8067a | 503 | |
3d4d3e48 | 504 | return bytes; |
c35e0248 | 505 | } |
aa28de27 | 506 | EXPORT_SYMBOL(_copy_to_iter); |
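Return-value handling sketch (hypothetical caller): _copy_to_iter() reports how many bytes actually landed, which may be short on a user fault or a full pipe; one common policy for a fixed-size record is to fold a short copy into -EFAULT:

static ssize_t emit_record(const void *rec, size_t len, struct iov_iter *to)
{
	size_t copied = _copy_to_iter(rec, len, to);

	return copied == len ? (ssize_t)copied : -EFAULT;
}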
c35e0248 | 507 | |
ec6347bb DW |
508 | #ifdef CONFIG_ARCH_HAS_COPY_MC |
509 | static int copyout_mc(void __user *to, const void *from, size_t n) | |
8780356e | 510 | { |
96d4f267 | 511 | if (access_ok(to, n)) { |
d0ef4c36 | 512 | instrument_copy_to_user(to, from, n); |
ec6347bb | 513 | n = copy_mc_to_user((__force void *) to, from, n); |
8780356e DW |
514 | } |
515 | return n; | |
516 | } | |
517 | ||
ec6347bb | 518 | static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, |
ca146f6f DW |
519 | struct iov_iter *i) |
520 | { | |
521 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
522 | unsigned int p_mask = pipe->ring_size - 1; |
523 | unsigned int i_head; | |
ca146f6f | 524 | size_t n, off, xfer = 0; |
ca146f6f DW |
525 | |
526 | if (!sanity(i)) | |
527 | return 0; | |
528 | ||
2a510a74 AV |
529 | n = push_pipe(i, bytes, &i_head, &off); |
530 | while (n) { | |
ca146f6f | 531 | size_t chunk = min_t(size_t, n, PAGE_SIZE - off); |
2a510a74 | 532 | char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page); |
ca146f6f | 533 | unsigned long rem; |
2a510a74 AV |
534 | rem = copy_mc_to_kernel(p + off, addr + xfer, chunk); |
535 | chunk -= rem; | |
536 | kunmap_local(p); | |
8cefc107 | 537 | i->head = i_head; |
2a510a74 AV |
538 | i->iov_offset = off + chunk; |
539 | xfer += chunk; | |
ca146f6f DW |
540 | if (rem) |
541 | break; | |
542 | n -= chunk; | |
8cefc107 DH |
543 | off = 0; |
544 | i_head++; | |
2a510a74 | 545 | } |
ca146f6f DW |
546 | i->count -= xfer; |
547 | return xfer; | |
548 | } | |
549 | ||
bf3eeb9b | 550 | /** |
ec6347bb | 551 | * _copy_mc_to_iter - copy to iter with source memory error exception handling |
bf3eeb9b DW |
552 | * @addr: source kernel address |
553 | * @bytes: total transfer length | |
44e55997 | 554 | * @i: destination iterator |
bf3eeb9b | 555 | * |
ec6347bb DW |
556 | * The pmem driver deploys this for the dax operation |
557 | * (dax_copy_to_iter()) for dax reads (bypass page-cache and the | |
558 | * block-layer). Upon #MC, read(2) aborts and returns EIO or the bytes |
559 | * successfully copied. | |
bf3eeb9b | 560 | * |
ec6347bb | 561 | * The main differences between this and typical _copy_to_iter() are: |
bf3eeb9b DW |
562 | * |
563 | * * Typical tail/residue handling after a fault retries the copy | |
564 | * byte-by-byte until the fault happens again. Re-triggering machine | |
565 | * checks is potentially fatal so the implementation uses source | |
566 | * alignment and poison alignment assumptions to avoid re-triggering | |
567 | * hardware exceptions. | |
568 | * | |
569 | * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies. | |
570 | * Compare to copy_to_iter() where only ITER_IOVEC attempts might return | |
571 | * a short copy. | |
44e55997 RD |
572 | * |
573 | * Return: number of bytes copied (may be %0) | |
bf3eeb9b | 574 | */ |
ec6347bb | 575 | size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) |
8780356e | 576 | { |
00e23707 | 577 | if (unlikely(iov_iter_is_pipe(i))) |
ec6347bb | 578 | return copy_mc_pipe_to_iter(addr, bytes, i); |
8780356e DW |
579 | if (iter_is_iovec(i)) |
580 | might_fault(); | |
7baa5099 AV |
581 | __iterate_and_advance(i, bytes, base, len, off, |
582 | copyout_mc(base, addr + off, len), | |
583 | copy_mc_to_kernel(base, addr + off, len) | |
8780356e DW |
584 | ) |
585 | ||
586 | return bytes; | |
587 | } | |
ec6347bb DW |
588 | EXPORT_SYMBOL_GPL(_copy_mc_to_iter); |
589 | #endif /* CONFIG_ARCH_HAS_COPY_MC */ | |
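Hypothetical consumer sketch: on a CONFIG_ARCH_HAS_COPY_MC arch, a dax read path can rely on the (possibly short) return to learn how much of the source was readable before poison was hit:

static ssize_t dax_read_sketch(const void *kaddr, size_t len,
			       struct iov_iter *to)
{
	size_t done = _copy_mc_to_iter(kaddr, len, to);

	if (!done && len)
		return -EIO;	/* poisoned (or faulted) at the very start */
	return done;		/* possibly short: bytes copied before #MC */
}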
8780356e | 590 | |
aa28de27 | 591 | size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) |
c35e0248 | 592 | { |
00e23707 | 593 | if (unlikely(iov_iter_is_pipe(i))) { |
241699cd AV |
594 | WARN_ON(1); |
595 | return 0; | |
596 | } | |
09fc68dc AV |
597 | if (iter_is_iovec(i)) |
598 | might_fault(); | |
7baa5099 AV |
599 | iterate_and_advance(i, bytes, base, len, off, |
600 | copyin(addr + off, base, len), | |
601 | memcpy(addr + off, base, len) | |
0dbca9a4 AV |
602 | ) |
603 | ||
604 | return bytes; | |
c35e0248 | 605 | } |
aa28de27 | 606 | EXPORT_SYMBOL(_copy_from_iter); |
c35e0248 | 607 | |
aa28de27 | 608 | size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) |
aa583096 | 609 | { |
00e23707 | 610 | if (unlikely(iov_iter_is_pipe(i))) { |
241699cd AV |
611 | WARN_ON(1); |
612 | return 0; | |
613 | } | |
7baa5099 AV |
614 | iterate_and_advance(i, bytes, base, len, off, |
615 | __copy_from_user_inatomic_nocache(addr + off, base, len), | |
616 | memcpy(addr + off, base, len) | |
aa583096 AV |
617 | ) |
618 | ||
619 | return bytes; | |
620 | } | |
aa28de27 | 621 | EXPORT_SYMBOL(_copy_from_iter_nocache); |
aa583096 | 622 | |
0aed55af | 623 | #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE |
abd08d7d DW |
624 | /** |
625 | * _copy_from_iter_flushcache - write destination through cpu cache | |
626 | * @addr: destination kernel address | |
627 | * @bytes: total transfer length | |
44e55997 | 628 | * @i: source iterator |
abd08d7d DW |
629 | * |
630 | * The pmem driver arranges for filesystem-dax to use this facility via | |
631 | * dax_copy_from_iter() for ensuring that writes to persistent memory | |
632 | * are flushed through the CPU cache. It is differentiated from | |
633 | * _copy_from_iter_nocache() in that it guarantees all data is flushed for |
634 | * all iterator types. _copy_from_iter_nocache() only attempts to |
635 | * bypass the cache for the ITER_IOVEC case, and on some archs may use | |
636 | * instructions that strand dirty-data in the cache. | |
44e55997 RD |
637 | * |
638 | * Return: number of bytes copied (may be %0) | |
abd08d7d | 639 | */ |
6a37e940 | 640 | size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) |
0aed55af | 641 | { |
00e23707 | 642 | if (unlikely(iov_iter_is_pipe(i))) { |
0aed55af DW |
643 | WARN_ON(1); |
644 | return 0; | |
645 | } | |
7baa5099 AV |
646 | iterate_and_advance(i, bytes, base, len, off, |
647 | __copy_from_user_flushcache(addr + off, base, len), | |
648 | memcpy_flushcache(addr + off, base, len) | |
0aed55af DW |
649 | ) |
650 | ||
651 | return bytes; | |
652 | } | |
6a37e940 | 653 | EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); |
0aed55af DW |
654 | #endif |
655 | ||
72e809ed AV |
656 | static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) |
657 | { | |
6daef95b ED |
658 | struct page *head; |
659 | size_t v = n + offset; | |
660 | ||
661 | /* | |
662 | * The general case needs to access the page order to |
663 | * compute the page size. |
664 | * However, we mostly deal with order-0 pages and thus can | |
665 | * avoid a possible cache line miss for requests that fit all | |
666 | * page orders. | |
667 | */ | |
668 | if (n <= v && v <= PAGE_SIZE) | |
669 | return true; | |
670 | ||
671 | head = compound_head(page); | |
672 | v += (page - head) << PAGE_SHIFT; | |
a90bcb86 | 673 | |
a50b854e | 674 | if (likely(n <= v && v <= (page_size(head)))) |
72e809ed AV |
675 | return true; |
676 | WARN_ON(1); | |
677 | return false; | |
678 | } | |
cbbd26b8 | 679 | |
08aa6479 | 680 | static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes, |
62a8067a AV |
681 | struct iov_iter *i) |
682 | { | |
59bb69c6 AV |
683 | if (unlikely(iov_iter_is_pipe(i))) { |
684 | return copy_page_to_iter_pipe(page, offset, bytes, i); | |
685 | } else { | |
c1d4d6a9 AV |
686 | void *kaddr = kmap_local_page(page); |
687 | size_t wanted = _copy_to_iter(kaddr + offset, bytes, i); | |
688 | kunmap_local(kaddr); | |
d271524a | 689 | return wanted; |
28f38db7 | 690 | } |
62a8067a | 691 | } |
08aa6479 AV |
692 | |
693 | size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, | |
694 | struct iov_iter *i) | |
695 | { | |
696 | size_t res = 0; | |
697 | if (unlikely(!page_copy_sane(page, offset, bytes))) | |
698 | return 0; | |
699 | page += offset / PAGE_SIZE; // first subpage | |
700 | offset %= PAGE_SIZE; | |
701 | while (1) { | |
702 | size_t n = __copy_page_to_iter(page, offset, | |
703 | min(bytes, (size_t)PAGE_SIZE - offset), i); | |
704 | res += n; | |
705 | bytes -= n; | |
706 | if (!bytes || !n) | |
707 | break; | |
708 | offset += n; | |
709 | if (offset == PAGE_SIZE) { | |
710 | page++; | |
711 | offset = 0; | |
712 | } | |
713 | } | |
714 | return res; | |
715 | } | |
62a8067a AV |
716 | EXPORT_SYMBOL(copy_page_to_iter); |
717 | ||
718 | size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, | |
719 | struct iov_iter *i) | |
720 | { | |
59bb69c6 | 721 | if (page_copy_sane(page, offset, bytes)) { |
55ca375c | 722 | void *kaddr = kmap_local_page(page); |
aa28de27 | 723 | size_t wanted = _copy_from_iter(kaddr + offset, bytes, i); |
55ca375c | 724 | kunmap_local(kaddr); |
d271524a | 725 | return wanted; |
28f38db7 | 726 | } |
28f38db7 | 727 | return 0; |
62a8067a AV |
728 | } |
729 | EXPORT_SYMBOL(copy_page_from_iter); | |
730 | ||
241699cd AV |
731 | static size_t pipe_zero(size_t bytes, struct iov_iter *i) |
732 | { | |
733 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
734 | unsigned int p_mask = pipe->ring_size - 1; |
735 | unsigned int i_head; | |
241699cd | 736 | size_t n, off; |
241699cd AV |
737 | |
738 | if (!sanity(i)) | |
739 | return 0; | |
740 | ||
8cefc107 | 741 | bytes = n = push_pipe(i, bytes, &i_head, &off); |
241699cd AV |
742 | if (unlikely(!n)) |
743 | return 0; | |
744 | ||
8cefc107 | 745 | do { |
241699cd | 746 | size_t chunk = min_t(size_t, n, PAGE_SIZE - off); |
893839fd AV |
747 | char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page); |
748 | memset(p + off, 0, chunk); | |
749 | kunmap_local(p); | |
8cefc107 | 750 | i->head = i_head; |
241699cd AV |
751 | i->iov_offset = off + chunk; |
752 | n -= chunk; | |
8cefc107 DH |
753 | off = 0; |
754 | i_head++; | |
755 | } while (n); | |
241699cd AV |
756 | i->count -= bytes; |
757 | return bytes; | |
758 | } | |
759 | ||
c35e0248 MW |
760 | size_t iov_iter_zero(size_t bytes, struct iov_iter *i) |
761 | { | |
00e23707 | 762 | if (unlikely(iov_iter_is_pipe(i))) |
241699cd | 763 | return pipe_zero(bytes, i); |
7baa5099 AV |
764 | iterate_and_advance(i, bytes, base, len, count, |
765 | clear_user(base, len), | |
766 | memset(base, 0, len) | |
8442fa46 AV |
767 | ) |
768 | ||
769 | return bytes; | |
c35e0248 MW |
770 | } |
771 | EXPORT_SYMBOL(iov_iter_zero); | |
772 | ||
f0b65f39 AV |
773 | size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes, |
774 | struct iov_iter *i) | |
62a8067a | 775 | { |
04a31165 | 776 | char *kaddr = kmap_atomic(page), *p = kaddr + offset; |
72e809ed AV |
777 | if (unlikely(!page_copy_sane(page, offset, bytes))) { |
778 | kunmap_atomic(kaddr); | |
779 | return 0; | |
780 | } | |
9ea9ce04 | 781 | if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { |
241699cd AV |
782 | kunmap_atomic(kaddr); |
783 | WARN_ON(1); | |
784 | return 0; | |
785 | } | |
7baa5099 AV |
786 | iterate_and_advance(i, bytes, base, len, off, |
787 | copyin(p + off, base, len), | |
788 | memcpy(p + off, base, len) | |
04a31165 AV |
789 | ) |
790 | kunmap_atomic(kaddr); | |
791 | return bytes; | |
62a8067a | 792 | } |
f0b65f39 | 793 | EXPORT_SYMBOL(copy_page_from_iter_atomic); |
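Sketch of the retry pattern this primitive exists for, modeled loosely on generic_perform_write() (names illustrative): fault the source in outside the atomic section, copy without sleeping, and retry if the copy still comes up empty:

static ssize_t fill_page_sketch(struct page *page, unsigned int offset,
				size_t bytes, struct iov_iter *from)
{
	size_t copied;

again:
	if (unlikely(fault_in_iov_iter_readable(from, bytes) == bytes))
		return -EFAULT;		/* nothing could be faulted in */
	copied = copy_page_from_iter_atomic(page, offset, bytes, from);
	if (unlikely(!copied))
		goto again;		/* lost the race; fault in and retry */
	return copied;
}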
62a8067a | 794 | |
b9dc6f65 AV |
795 | static inline void pipe_truncate(struct iov_iter *i) |
796 | { | |
797 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
798 | unsigned int p_tail = pipe->tail; |
799 | unsigned int p_head = pipe->head; | |
800 | unsigned int p_mask = pipe->ring_size - 1; | |
801 | ||
802 | if (!pipe_empty(p_head, p_tail)) { | |
803 | struct pipe_buffer *buf; | |
804 | unsigned int i_head = i->head; | |
b9dc6f65 | 805 | size_t off = i->iov_offset; |
8cefc107 | 806 | |
b9dc6f65 | 807 | if (off) { |
8cefc107 DH |
808 | buf = &pipe->bufs[i_head & p_mask]; |
809 | buf->len = off - buf->offset; | |
810 | i_head++; | |
b9dc6f65 | 811 | } |
8cefc107 DH |
812 | while (p_head != i_head) { |
813 | p_head--; | |
814 | pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]); | |
b9dc6f65 | 815 | } |
8cefc107 DH |
816 | |
817 | pipe->head = p_head; | |
b9dc6f65 AV |
818 | } |
819 | } | |
820 | ||
241699cd AV |
821 | static void pipe_advance(struct iov_iter *i, size_t size) |
822 | { | |
823 | struct pipe_inode_info *pipe = i->pipe; | |
241699cd | 824 | if (size) { |
b9dc6f65 | 825 | struct pipe_buffer *buf; |
8cefc107 DH |
826 | unsigned int p_mask = pipe->ring_size - 1; |
827 | unsigned int i_head = i->head; | |
b9dc6f65 | 828 | size_t off = i->iov_offset, left = size; |
8cefc107 | 829 | |
241699cd | 830 | if (off) /* make it relative to the beginning of buffer */ |
8cefc107 | 831 | left += off - pipe->bufs[i_head & p_mask].offset; |
241699cd | 832 | while (1) { |
8cefc107 | 833 | buf = &pipe->bufs[i_head & p_mask]; |
b9dc6f65 | 834 | if (left <= buf->len) |
241699cd | 835 | break; |
b9dc6f65 | 836 | left -= buf->len; |
8cefc107 | 837 | i_head++; |
241699cd | 838 | } |
8cefc107 | 839 | i->head = i_head; |
b9dc6f65 | 840 | i->iov_offset = buf->offset + left; |
241699cd | 841 | } |
b9dc6f65 AV |
842 | i->count -= size; |
843 | /* ... and discard everything past that point */ | |
844 | pipe_truncate(i); | |
241699cd AV |
845 | } |
846 | ||
54c8195b PB |
847 | static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) |
848 | { | |
18fa9af7 | 849 | const struct bio_vec *bvec, *end; |
54c8195b | 850 | |
18fa9af7 AV |
851 | if (!i->count) |
852 | return; | |
853 | i->count -= size; | |
854 | ||
855 | size += i->iov_offset; | |
54c8195b | 856 | |
18fa9af7 AV |
857 | for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) { |
858 | if (likely(size < bvec->bv_len)) | |
859 | break; | |
860 | size -= bvec->bv_len; | |
861 | } | |
862 | i->iov_offset = size; | |
863 | i->nr_segs -= bvec - i->bvec; | |
864 | i->bvec = bvec; | |
54c8195b PB |
865 | } |
866 | ||
185ac4d4 AV |
867 | static void iov_iter_iovec_advance(struct iov_iter *i, size_t size) |
868 | { | |
869 | const struct iovec *iov, *end; | |
870 | ||
871 | if (!i->count) | |
872 | return; | |
873 | i->count -= size; | |
874 | ||
875 | size += i->iov_offset; // from beginning of current segment | |
876 | for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) { | |
877 | if (likely(size < iov->iov_len)) | |
878 | break; | |
879 | size -= iov->iov_len; | |
880 | } | |
881 | i->iov_offset = size; | |
882 | i->nr_segs -= iov - i->iov; | |
883 | i->iov = iov; | |
884 | } | |
885 | ||
62a8067a AV |
886 | void iov_iter_advance(struct iov_iter *i, size_t size) |
887 | { | |
3b3fc051 AV |
888 | if (unlikely(i->count < size)) |
889 | size = i->count; | |
185ac4d4 AV |
890 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) { |
891 | /* iovec and kvec have identical layouts */ | |
892 | iov_iter_iovec_advance(i, size); | |
893 | } else if (iov_iter_is_bvec(i)) { | |
894 | iov_iter_bvec_advance(i, size); | |
895 | } else if (iov_iter_is_pipe(i)) { | |
241699cd | 896 | pipe_advance(i, size); |
185ac4d4 | 897 | } else if (unlikely(iov_iter_is_xarray(i))) { |
7ff50620 DH |
898 | i->iov_offset += size; |
899 | i->count -= size; | |
185ac4d4 AV |
900 | } else if (iov_iter_is_discard(i)) { |
901 | i->count -= size; | |
54c8195b | 902 | } |
62a8067a AV |
903 | } |
904 | EXPORT_SYMBOL(iov_iter_advance); | |
905 | ||
27c0e374 AV |
906 | void iov_iter_revert(struct iov_iter *i, size_t unroll) |
907 | { | |
908 | if (!unroll) | |
909 | return; | |
5b47d59a AV |
910 | if (WARN_ON(unroll > MAX_RW_COUNT)) |
911 | return; | |
27c0e374 | 912 | i->count += unroll; |
00e23707 | 913 | if (unlikely(iov_iter_is_pipe(i))) { |
27c0e374 | 914 | struct pipe_inode_info *pipe = i->pipe; |
8cefc107 DH |
915 | unsigned int p_mask = pipe->ring_size - 1; |
916 | unsigned int i_head = i->head; | |
27c0e374 AV |
917 | size_t off = i->iov_offset; |
918 | while (1) { | |
8cefc107 DH |
919 | struct pipe_buffer *b = &pipe->bufs[i_head & p_mask]; |
920 | size_t n = off - b->offset; | |
27c0e374 | 921 | if (unroll < n) { |
4fa55cef | 922 | off -= unroll; |
27c0e374 AV |
923 | break; |
924 | } | |
925 | unroll -= n; | |
8cefc107 | 926 | if (!unroll && i_head == i->start_head) { |
27c0e374 AV |
927 | off = 0; |
928 | break; | |
929 | } | |
8cefc107 DH |
930 | i_head--; |
931 | b = &pipe->bufs[i_head & p_mask]; | |
932 | off = b->offset + b->len; | |
27c0e374 AV |
933 | } |
934 | i->iov_offset = off; | |
8cefc107 | 935 | i->head = i_head; |
27c0e374 AV |
936 | pipe_truncate(i); |
937 | return; | |
938 | } | |
9ea9ce04 DH |
939 | if (unlikely(iov_iter_is_discard(i))) |
940 | return; | |
27c0e374 AV |
941 | if (unroll <= i->iov_offset) { |
942 | i->iov_offset -= unroll; | |
943 | return; | |
944 | } | |
945 | unroll -= i->iov_offset; | |
7ff50620 DH |
946 | if (iov_iter_is_xarray(i)) { |
947 | BUG(); /* We should never go beyond the start of the specified | |
948 | * range since we might then be straying into pages that | |
949 | * aren't pinned. | |
950 | */ | |
951 | } else if (iov_iter_is_bvec(i)) { | |
27c0e374 AV |
952 | const struct bio_vec *bvec = i->bvec; |
953 | while (1) { | |
954 | size_t n = (--bvec)->bv_len; | |
955 | i->nr_segs++; | |
956 | if (unroll <= n) { | |
957 | i->bvec = bvec; | |
958 | i->iov_offset = n - unroll; | |
959 | return; | |
960 | } | |
961 | unroll -= n; | |
962 | } | |
963 | } else { /* same logics for iovec and kvec */ | |
964 | const struct iovec *iov = i->iov; | |
965 | while (1) { | |
966 | size_t n = (--iov)->iov_len; | |
967 | i->nr_segs++; | |
968 | if (unroll <= n) { | |
969 | i->iov = iov; | |
970 | i->iov_offset = n - unroll; | |
971 | return; | |
972 | } | |
973 | unroll -= n; | |
974 | } | |
975 | } | |
976 | } | |
977 | EXPORT_SYMBOL(iov_iter_revert); | |
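A minimal sketch of the advance/revert pairing (hypothetical caller): after a failed attempt, rewinding by the amount actually consumed restores the iterator for a fallback path:

static ssize_t attempt_op(struct iov_iter *from, size_t chunk)
{
	size_t count = iov_iter_count(from);
	ssize_t status = -EAGAIN;		/* pretend the op failed */

	iov_iter_advance(from, chunk);		/* stand-in for a partial transfer */
	if (status < 0)
		iov_iter_revert(from, count - iov_iter_count(from));
	return status;
}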
978 | ||
62a8067a AV |
979 | /* |
980 | * Return the count of just the current iov_iter segment. | |
981 | */ | |
982 | size_t iov_iter_single_seg_count(const struct iov_iter *i) | |
983 | { | |
28f38db7 AV |
984 | if (i->nr_segs > 1) { |
985 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) | |
986 | return min(i->count, i->iov->iov_len - i->iov_offset); | |
987 | if (iov_iter_is_bvec(i)) | |
988 | return min(i->count, i->bvec->bv_len - i->iov_offset); | |
989 | } | |
990 | return i->count; | |
62a8067a AV |
991 | } |
992 | EXPORT_SYMBOL(iov_iter_single_seg_count); | |
993 | ||
aa563d7b | 994 | void iov_iter_kvec(struct iov_iter *i, unsigned int direction, |
05afcb77 | 995 | const struct kvec *kvec, unsigned long nr_segs, |
abb78f87 AV |
996 | size_t count) |
997 | { | |
aa563d7b | 998 | WARN_ON(direction & ~(READ | WRITE)); |
8cd54c1c AV |
999 | *i = (struct iov_iter){ |
1000 | .iter_type = ITER_KVEC, | |
1001 | .data_source = direction, | |
1002 | .kvec = kvec, | |
1003 | .nr_segs = nr_segs, | |
1004 | .iov_offset = 0, | |
1005 | .count = count | |
1006 | }; | |
abb78f87 AV |
1007 | } |
1008 | EXPORT_SYMBOL(iov_iter_kvec); | |
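Minimal sketch (illustrative): kernel-space buffers travel as ITER_KVEC; the layout matches iovec, but iov_base is a kernel pointer, so a kernel_write()-style path does:

static void init_kernel_buf(struct iov_iter *iter, struct kvec *kv,
			    void *buf, size_t len)
{
	kv->iov_base = buf;
	kv->iov_len = len;
	iov_iter_kvec(iter, WRITE, kv, 1, len);	/* buf is the data source */
}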
1009 | ||
aa563d7b | 1010 | void iov_iter_bvec(struct iov_iter *i, unsigned int direction, |
05afcb77 AV |
1011 | const struct bio_vec *bvec, unsigned long nr_segs, |
1012 | size_t count) | |
1013 | { | |
aa563d7b | 1014 | WARN_ON(direction & ~(READ | WRITE)); |
8cd54c1c AV |
1015 | *i = (struct iov_iter){ |
1016 | .iter_type = ITER_BVEC, | |
1017 | .data_source = direction, | |
1018 | .bvec = bvec, | |
1019 | .nr_segs = nr_segs, | |
1020 | .iov_offset = 0, | |
1021 | .count = count | |
1022 | }; | |
05afcb77 AV |
1023 | } |
1024 | EXPORT_SYMBOL(iov_iter_bvec); | |
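Minimal sketch (illustrative): block and network code describe page fragments with bio_vecs; a single fragment becomes an iterator like so:

static void init_page_frag(struct iov_iter *iter, struct bio_vec *bv,
			   struct page *page, unsigned int off, unsigned int len)
{
	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;
	iov_iter_bvec(iter, READ, bv, 1, len);	/* the page is the destination */
}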
1025 | ||
aa563d7b | 1026 | void iov_iter_pipe(struct iov_iter *i, unsigned int direction, |
241699cd AV |
1027 | struct pipe_inode_info *pipe, |
1028 | size_t count) | |
1029 | { | |
aa563d7b | 1030 | BUG_ON(direction != READ); |
8cefc107 | 1031 | WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size)); |
8cd54c1c AV |
1032 | *i = (struct iov_iter){ |
1033 | .iter_type = ITER_PIPE, | |
1034 | .data_source = false, | |
1035 | .pipe = pipe, | |
1036 | .head = pipe->head, | |
1037 | .start_head = pipe->head, | |
1038 | .iov_offset = 0, | |
1039 | .count = count | |
1040 | }; | |
241699cd AV |
1041 | } |
1042 | EXPORT_SYMBOL(iov_iter_pipe); | |
1043 | ||
7ff50620 DH |
1044 | /** |
1045 | * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray | |
1046 | * @i: The iterator to initialise. | |
1047 | * @direction: The direction of the transfer. | |
1048 | * @xarray: The xarray to access. | |
1049 | * @start: The start file position. | |
1050 | * @count: The size of the I/O buffer in bytes. | |
1051 | * | |
1052 | * Set up an I/O iterator to either draw data out of the pages attached to an | |
1053 | * inode or to inject data into those pages. The caller *must* prevent the |
1054 | * pages from being evicted, either by taking a ref on them or by locking |
1055 | * them. |
1056 | */ | |
1057 | void iov_iter_xarray(struct iov_iter *i, unsigned int direction, | |
1058 | struct xarray *xarray, loff_t start, size_t count) | |
1059 | { | |
1060 | BUG_ON(direction & ~1); | |
8cd54c1c AV |
1061 | *i = (struct iov_iter) { |
1062 | .iter_type = ITER_XARRAY, | |
1063 | .data_source = direction, | |
1064 | .xarray = xarray, | |
1065 | .xarray_start = start, | |
1066 | .count = count, | |
1067 | .iov_offset = 0 | |
1068 | }; | |
7ff50620 DH |
1069 | } |
1070 | EXPORT_SYMBOL(iov_iter_xarray); | |
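Minimal sketch (illustrative, netfs-style): an inode's page cache lives in mapping->i_pages, which is an xarray, so a cached file range can be handed straight to an iterator:

static void init_cache_range(struct iov_iter *iter,
			     struct address_space *mapping,
			     loff_t start, size_t count)
{
	iov_iter_xarray(iter, READ, &mapping->i_pages, start, count);
}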
1071 | ||
9ea9ce04 DH |
1072 | /** |
1073 | * iov_iter_discard - Initialise an I/O iterator that discards data | |
1074 | * @i: The iterator to initialise. | |
1075 | * @direction: The direction of the transfer. | |
1076 | * @count: The size of the I/O buffer in bytes. | |
1077 | * | |
1078 | * Set up an I/O iterator that just discards everything that's written to it. | |
1079 | * It's only available as a READ iterator. | |
1080 | */ | |
1081 | void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) | |
1082 | { | |
1083 | BUG_ON(direction != READ); | |
8cd54c1c AV |
1084 | *i = (struct iov_iter){ |
1085 | .iter_type = ITER_DISCARD, | |
1086 | .data_source = false, | |
1087 | .count = count, | |
1088 | .iov_offset = 0 | |
1089 | }; | |
9ea9ce04 DH |
1090 | } |
1091 | EXPORT_SYMBOL(iov_iter_discard); | |
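Minimal sketch (illustrative): a discard iterator drains data nobody wants, e.g. skipping @count bytes of a stream through a ->read_iter()-style consumer:

static void drain_bytes(struct iov_iter *trash, size_t count)
{
	iov_iter_discard(trash, READ, count);
	/* hand @trash to whatever produces the data; it is all dropped */
}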
1092 | ||
9221d2e3 | 1093 | static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) |
62a8067a | 1094 | { |
04a31165 AV |
1095 | unsigned long res = 0; |
1096 | size_t size = i->count; | |
9221d2e3 AV |
1097 | size_t skip = i->iov_offset; |
1098 | unsigned k; | |
1099 | ||
1100 | for (k = 0; k < i->nr_segs; k++, skip = 0) { | |
1101 | size_t len = i->iov[k].iov_len - skip; | |
1102 | if (len) { | |
1103 | res |= (unsigned long)i->iov[k].iov_base + skip; | |
1104 | if (len > size) | |
1105 | len = size; | |
1106 | res |= len; | |
1107 | size -= len; | |
1108 | if (!size) | |
1109 | break; | |
1110 | } | |
1111 | } | |
1112 | return res; | |
1113 | } | |
04a31165 | 1114 | |
9221d2e3 AV |
1115 | static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i) |
1116 | { | |
1117 | unsigned res = 0; | |
1118 | size_t size = i->count; | |
1119 | unsigned skip = i->iov_offset; | |
1120 | unsigned k; | |
1121 | ||
1122 | for (k = 0; k < i->nr_segs; k++, skip = 0) { | |
1123 | size_t len = i->bvec[k].bv_len - skip; | |
1124 | res |= (unsigned long)i->bvec[k].bv_offset + skip; | |
1125 | if (len > size) | |
1126 | len = size; | |
1127 | res |= len; | |
1128 | size -= len; | |
1129 | if (!size) | |
1130 | break; | |
1131 | } | |
1132 | return res; | |
1133 | } | |
1134 | ||
1135 | unsigned long iov_iter_alignment(const struct iov_iter *i) | |
1136 | { | |
1137 | /* iovec and kvec have identical layouts */ | |
1138 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) | |
1139 | return iov_iter_alignment_iovec(i); | |
1140 | ||
1141 | if (iov_iter_is_bvec(i)) | |
1142 | return iov_iter_alignment_bvec(i); | |
1143 | ||
1144 | if (iov_iter_is_pipe(i)) { | |
e0ff126e | 1145 | unsigned int p_mask = i->pipe->ring_size - 1; |
9221d2e3 | 1146 | size_t size = i->count; |
e0ff126e | 1147 | |
8cefc107 | 1148 | if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask])) |
241699cd AV |
1149 | return size | i->iov_offset; |
1150 | return size; | |
1151 | } | |
9221d2e3 AV |
1152 | |
1153 | if (iov_iter_is_xarray(i)) | |
3d14ec1f | 1154 | return (i->xarray_start + i->iov_offset) | i->count; |
9221d2e3 AV |
1155 | |
1156 | return 0; | |
62a8067a AV |
1157 | } |
1158 | EXPORT_SYMBOL(iov_iter_alignment); | |
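Sketch of the intended use (hypothetical helper): because the result OR-accumulates every base address and length, a direct-I/O path can validate the whole iterator with a single mask test:

static bool dio_aligned(const struct iov_iter *iter, unsigned int blkbits)
{
	unsigned long mask = (1UL << blkbits) - 1;

	return (iov_iter_alignment(iter) & mask) == 0;
}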
1159 | ||
357f435d AV |
1160 | unsigned long iov_iter_gap_alignment(const struct iov_iter *i) |
1161 | { | |
33844e66 | 1162 | unsigned long res = 0; |
610c7a71 | 1163 | unsigned long v = 0; |
357f435d | 1164 | size_t size = i->count; |
610c7a71 | 1165 | unsigned k; |
357f435d | 1166 | |
610c7a71 | 1167 | if (WARN_ON(!iter_is_iovec(i))) |
241699cd | 1168 | return ~0U; |
241699cd | 1169 | |
610c7a71 AV |
1170 | for (k = 0; k < i->nr_segs; k++) { |
1171 | if (i->iov[k].iov_len) { | |
1172 | unsigned long base = (unsigned long)i->iov[k].iov_base; | |
1173 | if (v) // if not the first one | |
1174 | res |= base | v; // this start | previous end | |
1175 | v = base + i->iov[k].iov_len; | |
1176 | if (size <= i->iov[k].iov_len) | |
1177 | break; | |
1178 | size -= i->iov[k].iov_len; | |
1179 | } | |
1180 | } | |
33844e66 | 1181 | return res; |
357f435d AV |
1182 | } |
1183 | EXPORT_SYMBOL(iov_iter_gap_alignment); | |
1184 | ||
e76b6312 | 1185 | static inline ssize_t __pipe_get_pages(struct iov_iter *i, |
241699cd AV |
1186 | size_t maxsize, |
1187 | struct page **pages, | |
8cefc107 | 1188 | int iter_head, |
241699cd AV |
1189 | size_t *start) |
1190 | { | |
1191 | struct pipe_inode_info *pipe = i->pipe; | |
8cefc107 DH |
1192 | unsigned int p_mask = pipe->ring_size - 1; |
1193 | ssize_t n = push_pipe(i, maxsize, &iter_head, start); | |
241699cd AV |
1194 | if (!n) |
1195 | return -EFAULT; | |
1196 | ||
1197 | maxsize = n; | |
1198 | n += *start; | |
1689c73a | 1199 | while (n > 0) { |
8cefc107 DH |
1200 | get_page(*pages++ = pipe->bufs[iter_head & p_mask].page); |
1201 | iter_head++; | |
241699cd AV |
1202 | n -= PAGE_SIZE; |
1203 | } | |
1204 | ||
1205 | return maxsize; | |
1206 | } | |
1207 | ||
1208 | static ssize_t pipe_get_pages(struct iov_iter *i, | |
1209 | struct page **pages, size_t maxsize, unsigned maxpages, | |
1210 | size_t *start) | |
1211 | { | |
8cefc107 | 1212 | unsigned int iter_head, npages; |
241699cd | 1213 | size_t capacity; |
241699cd AV |
1214 | |
1215 | if (!sanity(i)) | |
1216 | return -EFAULT; | |
1217 | ||
8cefc107 DH |
1218 | data_start(i, &iter_head, start); |
1219 | /* Amount of free space: some of this one + all after this one */ | |
1220 | npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); | |
1221 | capacity = min(npages, maxpages) * PAGE_SIZE - *start; | |
241699cd | 1222 | |
8cefc107 | 1223 | return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start); |
241699cd AV |
1224 | } |
1225 | ||
7ff50620 DH |
1226 | static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, |
1227 | pgoff_t index, unsigned int nr_pages) | |
1228 | { | |
1229 | XA_STATE(xas, xa, index); | |
1230 | struct page *page; | |
1231 | unsigned int ret = 0; | |
1232 | ||
1233 | rcu_read_lock(); | |
1234 | for (page = xas_load(&xas); page; page = xas_next(&xas)) { | |
1235 | if (xas_retry(&xas, page)) | |
1236 | continue; | |
1237 | ||
1238 | /* Has the page moved or been split? */ | |
1239 | if (unlikely(page != xas_reload(&xas))) { | |
1240 | xas_reset(&xas); | |
1241 | continue; | |
1242 | } | |
1243 | ||
1244 | pages[ret] = find_subpage(page, xas.xa_index); | |
1245 | get_page(pages[ret]); | |
1246 | if (++ret == nr_pages) | |
1247 | break; | |
1248 | } | |
1249 | rcu_read_unlock(); | |
1250 | return ret; | |
1251 | } | |
1252 | ||
1253 | static ssize_t iter_xarray_get_pages(struct iov_iter *i, | |
1254 | struct page **pages, size_t maxsize, | |
1255 | unsigned maxpages, size_t *_start_offset) | |
1256 | { | |
1257 | unsigned nr, offset; | |
1258 | pgoff_t index, count; | |
1259 | size_t size = maxsize, actual; | |
1260 | loff_t pos; | |
1261 | ||
1262 | if (!size || !maxpages) | |
1263 | return 0; | |
1264 | ||
1265 | pos = i->xarray_start + i->iov_offset; | |
1266 | index = pos >> PAGE_SHIFT; | |
1267 | offset = pos & ~PAGE_MASK; | |
1268 | *_start_offset = offset; | |
1269 | ||
1270 | count = 1; | |
1271 | if (size > PAGE_SIZE - offset) { | |
1272 | size -= PAGE_SIZE - offset; | |
1273 | count += size >> PAGE_SHIFT; | |
1274 | size &= ~PAGE_MASK; | |
1275 | if (size) | |
1276 | count++; | |
1277 | } | |
1278 | ||
1279 | if (count > maxpages) | |
1280 | count = maxpages; | |
1281 | ||
1282 | nr = iter_xarray_populate_pages(pages, i->xarray, index, count); | |
1283 | if (nr == 0) | |
1284 | return 0; | |
1285 | ||
1286 | actual = PAGE_SIZE * nr; | |
1287 | actual -= offset; | |
1288 | if (nr == count && size > 0) { | |
1289 | unsigned last_offset = (nr > 1) ? 0 : offset; | |
1290 | actual -= PAGE_SIZE - (last_offset + size); | |
1291 | } | |
1292 | return actual; | |
1293 | } | |
1294 | ||
3d671ca6 AV |
1295 | /* must be done on non-empty ITER_IOVEC one */ |
1296 | static unsigned long first_iovec_segment(const struct iov_iter *i, | |
59dbd7d0 | 1297 | size_t *size, size_t *start) |
3d671ca6 AV |
1298 | { |
1299 | size_t skip; | |
1300 | long k; | |
1301 | ||
1302 | for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) { | |
1303 | unsigned long addr = (unsigned long)i->iov[k].iov_base + skip; | |
1304 | size_t len = i->iov[k].iov_len - skip; | |
1305 | ||
1306 | if (unlikely(!len)) | |
1307 | continue; | |
59dbd7d0 AV |
1308 | if (*size > len) |
1309 | *size = len; | |
dda8e5d1 | 1310 | *start = addr % PAGE_SIZE; |
3d671ca6 AV |
1311 | return addr & PAGE_MASK; |
1312 | } | |
1313 | BUG(); // if it had been empty, we wouldn't get called | |
1314 | } | |
1315 | ||
1316 | /* must be done on non-empty ITER_BVEC one */ | |
1317 | static struct page *first_bvec_segment(const struct iov_iter *i, | |
59dbd7d0 | 1318 | size_t *size, size_t *start) |
3d671ca6 AV |
1319 | { |
1320 | struct page *page; | |
1321 | size_t skip = i->iov_offset, len; | |
1322 | ||
1323 | len = i->bvec->bv_len - skip; | |
59dbd7d0 AV |
1324 | if (*size > len) |
1325 | *size = len; | |
3d671ca6 AV |
1326 | skip += i->bvec->bv_offset; |
1327 | page = i->bvec->bv_page + skip / PAGE_SIZE; | |
dda8e5d1 | 1328 | *start = skip % PAGE_SIZE; |
3d671ca6 AV |
1329 | return page; |
1330 | } | |
1331 | ||
62a8067a | 1332 | ssize_t iov_iter_get_pages(struct iov_iter *i, |
2c80929c | 1333 | struct page **pages, size_t maxsize, unsigned maxpages, |
62a8067a AV |
1334 | size_t *start) |
1335 | { | |
3d671ca6 AV |
1336 | int n, res; |
1337 | ||
e5393fae AV |
1338 | if (maxsize > i->count) |
1339 | maxsize = i->count; | |
3d671ca6 AV |
1340 | if (!maxsize) |
1341 | return 0; | |
7392ed17 AV |
1342 | if (maxsize > MAX_RW_COUNT) |
1343 | maxsize = MAX_RW_COUNT; | |
e5393fae | 1344 | |
3d671ca6 | 1345 | if (likely(iter_is_iovec(i))) { |
3337ab08 | 1346 | unsigned int gup_flags = 0; |
3d671ca6 | 1347 | unsigned long addr; |
e5393fae | 1348 | |
3337ab08 AG |
1349 | if (iov_iter_rw(i) != WRITE) |
1350 | gup_flags |= FOLL_WRITE; | |
1351 | if (i->nofault) | |
1352 | gup_flags |= FOLL_NOFAULT; | |
1353 | ||
59dbd7d0 AV |
1354 | addr = first_iovec_segment(i, &maxsize, start); |
1355 | n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); | |
dda8e5d1 AV |
1356 | if (n > maxpages) |
1357 | n = maxpages; | |
3337ab08 | 1358 | res = get_user_pages_fast(addr, n, gup_flags, pages); |
814a6674 | 1359 | if (unlikely(res <= 0)) |
e5393fae | 1360 | return res; |
59dbd7d0 | 1361 | return min_t(size_t, maxsize, res * PAGE_SIZE - *start); |
3d671ca6 AV |
1362 | } |
1363 | if (iov_iter_is_bvec(i)) { | |
1364 | struct page *page; | |
1365 | ||
59dbd7d0 AV |
1366 | page = first_bvec_segment(i, &maxsize, start); |
1367 | n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); | |
dda8e5d1 AV |
1368 | if (n > maxpages) |
1369 | n = maxpages; | |
1370 | for (int k = 0; k < n; k++) | |
3d671ca6 | 1371 | get_page(*pages++ = page++); |
59dbd7d0 | 1372 | return min_t(size_t, maxsize, n * PAGE_SIZE - *start); |
3d671ca6 AV |
1373 | } |
1374 | if (iov_iter_is_pipe(i)) | |
1375 | return pipe_get_pages(i, pages, maxsize, maxpages, start); | |
1376 | if (iov_iter_is_xarray(i)) | |
1377 | return iter_xarray_get_pages(i, pages, maxsize, maxpages, start); | |
1378 | return -EFAULT; | |
62a8067a AV |
1379 | } |
1380 | EXPORT_SYMBOL(iov_iter_get_pages); | |
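A minimal caller sketch (hypothetical helper): pin as much of the iterator as fits in @pages, then advance past what was pinned; the caller owns a reference on every returned page and must put_page() each when done:

static ssize_t pin_iter_pages(struct iov_iter *iter, struct page **pages,
			      unsigned int maxpages)
{
	size_t start;	/* offset of the data within pages[0] */
	ssize_t n = iov_iter_get_pages(iter, pages, iov_iter_count(iter),
				       maxpages, &start);

	if (n > 0)
		iov_iter_advance(iter, n);
	return n;	/* bytes covered, 0, or a negative error */
}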
1381 | ||
1b17f1f2 AV |
1382 | static struct page **get_pages_array(size_t n) |
1383 | { | |
752ade68 | 1384 | return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL); |
1b17f1f2 AV |
1385 | } |
1386 | ||
241699cd AV |
1387 | static ssize_t pipe_get_pages_alloc(struct iov_iter *i, |
1388 | struct page ***pages, size_t maxsize, | |
1389 | size_t *start) | |
1390 | { | |
1391 | struct page **p; | |
8cefc107 | 1392 | unsigned int iter_head, npages; |
d7760d63 | 1393 | ssize_t n; |
241699cd AV |
1394 | |
1395 | if (!sanity(i)) | |
1396 | return -EFAULT; | |
1397 | ||
8cefc107 DH |
1398 | data_start(i, &iter_head, start); |
1399 | /* Amount of free space: some of this one + all after this one */ | |
1400 | npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); | |
241699cd AV |
1401 | n = npages * PAGE_SIZE - *start; |
1402 | if (maxsize > n) | |
1403 | maxsize = n; | |
1404 | else | |
1405 | npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); | |
1406 | p = get_pages_array(npages); | |
1407 | if (!p) | |
1408 | return -ENOMEM; | |
8cefc107 | 1409 | n = __pipe_get_pages(i, maxsize, p, iter_head, start); |
241699cd AV |
1410 | if (n > 0) |
1411 | *pages = p; | |
1412 | else | |
1413 | kvfree(p); | |
1414 | return n; | |
1415 | } | |
1416 | ||
7ff50620 DH |
1417 | static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i, |
1418 | struct page ***pages, size_t maxsize, | |
1419 | size_t *_start_offset) | |
1420 | { | |
1421 | struct page **p; | |
1422 | unsigned nr, offset; | |
1423 | pgoff_t index, count; | |
1424 | size_t size = maxsize, actual; | |
1425 | loff_t pos; | |
1426 | ||
1427 | if (!size) | |
1428 | return 0; | |
1429 | ||
1430 | pos = i->xarray_start + i->iov_offset; | |
1431 | index = pos >> PAGE_SHIFT; | |
1432 | offset = pos & ~PAGE_MASK; | |
1433 | *_start_offset = offset; | |
1434 | ||
1435 | count = 1; | |
1436 | if (size > PAGE_SIZE - offset) { | |
1437 | size -= PAGE_SIZE - offset; | |
1438 | count += size >> PAGE_SHIFT; | |
1439 | size &= ~PAGE_MASK; | |
1440 | if (size) | |
1441 | count++; | |
1442 | } | |
1443 | ||
1444 | p = get_pages_array(count); | |
1445 | if (!p) | |
1446 | return -ENOMEM; | |
1447 | *pages = p; | |
1448 | ||
1449 | nr = iter_xarray_populate_pages(p, i->xarray, index, count); | |
1450 | if (nr == 0) | |
1451 | return 0; | |
1452 | ||
1453 | actual = PAGE_SIZE * nr; | |
1454 | actual -= offset; | |
1455 | if (nr == count && size > 0) { | |
1456 | unsigned last_offset = (nr > 1) ? 0 : offset; | |
1457 | actual -= PAGE_SIZE - (last_offset + size); | |
1458 | } | |
1459 | return actual; | |
1460 | } | |
1461 | ||
62a8067a AV |
1462 | ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, |
1463 | struct page ***pages, size_t maxsize, | |
1464 | size_t *start) | |
1465 | { | |
1b17f1f2 | 1466 | struct page **p; |
3d671ca6 | 1467 | int n, res; |
1b17f1f2 AV |
1468 | |
1469 | if (maxsize > i->count) | |
1470 | maxsize = i->count; | |
3d671ca6 AV |
1471 | if (!maxsize) |
1472 | return 0; | |
7392ed17 AV |
1473 | if (maxsize > MAX_RW_COUNT) |
1474 | maxsize = MAX_RW_COUNT; | |
1b17f1f2 | 1475 | |
3d671ca6 | 1476 | if (likely(iter_is_iovec(i))) { |
3337ab08 | 1477 | unsigned int gup_flags = 0; |
3d671ca6 | 1478 | unsigned long addr; |
1b17f1f2 | 1479 | |
3337ab08 AG |
1480 | if (iov_iter_rw(i) != WRITE) |
1481 | gup_flags |= FOLL_WRITE; | |
1482 | if (i->nofault) | |
1483 | gup_flags |= FOLL_NOFAULT; | |
1484 | ||
59dbd7d0 AV |
1485 | addr = first_iovec_segment(i, &maxsize, start); |
1486 | n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); | |
1b17f1f2 AV |
1487 | p = get_pages_array(n); |
1488 | if (!p) | |
1489 | return -ENOMEM; | |
3337ab08 | 1490 | res = get_user_pages_fast(addr, n, gup_flags, p); |
814a6674 | 1491 | if (unlikely(res <= 0)) { |
1b17f1f2 | 1492 | kvfree(p); |
814a6674 | 1493 | *pages = NULL; |
1b17f1f2 AV |
1494 | return res; |
1495 | } | |
1496 | *pages = p; | |
59dbd7d0 | 1497 | return min_t(size_t, maxsize, res * PAGE_SIZE - *start); |
3d671ca6 AV |
1498 | } |
1499 | if (iov_iter_is_bvec(i)) { | |
1500 | struct page *page; | |
1501 | ||
59dbd7d0 AV |
1502 | page = first_bvec_segment(i, &maxsize, start); |
1503 | n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); | |
3d671ca6 | 1504 | *pages = p = get_pages_array(n); |
1b17f1f2 AV |
1505 | if (!p) |
1506 | return -ENOMEM; | |
dda8e5d1 | 1507 | for (int k = 0; k < n; k++) |
3d671ca6 | 1508 | get_page(*p++ = page++); |
59dbd7d0 | 1509 | return min_t(size_t, maxsize, n * PAGE_SIZE - *start); |
3d671ca6 AV |
1510 | } |
1511 | if (iov_iter_is_pipe(i)) | |
1512 | return pipe_get_pages_alloc(i, pages, maxsize, start); | |
1513 | if (iov_iter_is_xarray(i)) | |
1514 | return iter_xarray_get_pages_alloc(i, pages, maxsize, start); | |
1515 | return -EFAULT; | |
62a8067a AV |
1516 | } |
1517 | EXPORT_SYMBOL(iov_iter_get_pages_alloc); | |
1518 | ||
a604ec7e AV |
1519 | size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, |
1520 | struct iov_iter *i) | |
1521 | { | |
a604ec7e | 1522 | __wsum sum, next; |
a604ec7e | 1523 | sum = *csum; |
9ea9ce04 | 1524 | if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { |
241699cd AV |
1525 | WARN_ON(1); |
1526 | return 0; | |
1527 | } | |
7baa5099 AV |
1528 | iterate_and_advance(i, bytes, base, len, off, ({ |
1529 | next = csum_and_copy_from_user(base, addr + off, len); | |
2495bdcc | 1530 | sum = csum_block_add(sum, next, off); |
7baa5099 | 1531 | next ? 0 : len; |
a604ec7e | 1532 | }), ({ |
7baa5099 | 1533 | sum = csum_and_memcpy(addr + off, base, len, sum, off); |
a604ec7e AV |
1534 | }) |
1535 | ) | |
1536 | *csum = sum; | |
1537 | return bytes; | |
1538 | } | |
1539 | EXPORT_SYMBOL(csum_and_copy_from_iter); | |
1540 | ||
52cbd23a | 1541 | size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, |
a604ec7e AV |
1542 | struct iov_iter *i) |
1543 | { | |
52cbd23a | 1544 | struct csum_state *csstate = _csstate; |
a604ec7e | 1545 | __wsum sum, next; |
78e1f386 | 1546 | |
78e1f386 | 1547 | if (unlikely(iov_iter_is_discard(i))) { |
241699cd AV |
1548 | WARN_ON(1); /* for now */ |
1549 | return 0; | |
1550 | } | |
6852df12 AV |
1551 | |
1552 | sum = csum_shift(csstate->csum, csstate->off); | |
1553 | if (unlikely(iov_iter_is_pipe(i))) | |
1554 | bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum); | |
1555 | else iterate_and_advance(i, bytes, base, len, off, ({ | |
7baa5099 | 1556 | next = csum_and_copy_to_user(addr + off, base, len); |
2495bdcc | 1557 | sum = csum_block_add(sum, next, off); |
7baa5099 | 1558 | next ? 0 : len; |
a604ec7e | 1559 | }), ({ |
7baa5099 | 1560 | sum = csum_and_memcpy(base, addr + off, len, sum, off); |
a604ec7e AV |
1561 | }) |
1562 | ) | |
594e450b AV |
1563 | csstate->csum = csum_shift(sum, csstate->off); |
1564 | csstate->off += bytes; | |
a604ec7e AV |
1565 | return bytes; |
1566 | } | |
1567 | EXPORT_SYMBOL(csum_and_copy_to_iter); | |
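/*
 * Hedged sketch (illustrative names): the to-iter variant threads a
 * struct csum_state, so the checksum keeps accumulating correctly
 * across several calls that copy one payload out in chunks.
 */
static size_t example_csum_out(const void *buf, size_t len, struct iov_iter *to)
{
	struct csum_state csstate = { .csum = 0, .off = 0 };

	len = csum_and_copy_to_iter(buf, len, &csstate, to);
	/* csstate.csum now covers the copied bytes; csstate.off == len */
	return len;
}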
1568 | ||
d05f4435 SG |
1569 | size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, |
1570 | struct iov_iter *i) | |
1571 | { | |
7999096f | 1572 | #ifdef CONFIG_CRYPTO_HASH |
d05f4435 SG |
1573 | struct ahash_request *hash = hashp; |
1574 | struct scatterlist sg; | |
1575 | size_t copied; | |
1576 | ||
1577 | copied = copy_to_iter(addr, bytes, i); | |
1578 | sg_init_one(&sg, addr, copied); | |
1579 | ahash_request_set_crypt(hash, &sg, NULL, copied); | |
1580 | crypto_ahash_update(hash); | |
1581 | return copied; | |
27fad74a Y |
1582 | #else |
1583 | return 0; | |
1584 | #endif | |
d05f4435 SG |
1585 | } |
1586 | EXPORT_SYMBOL(hash_and_copy_to_iter); | |
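/*
 * Hedged sketch (illustrative): callers pass a pre-initialised
 * struct ahash_request; transform allocation and result-buffer setup
 * are elided here.  Note that with CONFIG_CRYPTO_HASH=n this helper
 * copies nothing and returns 0.
 */
static size_t example_hash_out(struct ahash_request *req, const void *buf,
			       size_t len, struct iov_iter *to)
{
	return hash_and_copy_to_iter(buf, len, req, to);
}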
1587 | ||
66531c65 | 1588 | static int iov_npages(const struct iov_iter *i, int maxpages) |
62a8067a | 1589 | { |
66531c65 AV |
1590 | size_t skip = i->iov_offset, size = i->count; |
1591 | const struct iovec *p; | |
e0f2dc40 AV |
1592 | int npages = 0; |
1593 | ||
66531c65 AV |
1594 | for (p = i->iov; size; skip = 0, p++) { |
1595 | unsigned offs = offset_in_page(p->iov_base + skip); | |
1596 | size_t len = min(p->iov_len - skip, size); | |
e0f2dc40 | 1597 | |
66531c65 AV |
1598 | if (len) { |
1599 | size -= len; | |
1600 | npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); | |
1601 | if (unlikely(npages > maxpages)) | |
1602 | return maxpages; | |
1603 | } | |
1604 | } | |
1605 | return npages; | |
1606 | } | |
1607 | ||
1608 | static int bvec_npages(const struct iov_iter *i, int maxpages) | |
1609 | { | |
1610 | size_t skip = i->iov_offset, size = i->count; | |
1611 | const struct bio_vec *p; | |
1612 | int npages = 0; | |
1613 | ||
1614 | for (p = i->bvec; size; skip = 0, p++) { | |
1615 | unsigned offs = (p->bv_offset + skip) % PAGE_SIZE; | |
1616 | size_t len = min(p->bv_len - skip, size); | |
1617 | ||
1618 | size -= len; | |
1619 | npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); | |
1620 | if (unlikely(npages > maxpages)) | |
1621 | return maxpages; | |
1622 | } | |
1623 | return npages; | |
1624 | } | |
1625 | ||
1626 | int iov_iter_npages(const struct iov_iter *i, int maxpages) | |
1627 | { | |
1628 | if (unlikely(!i->count)) | |
1629 | return 0; | |
1630 | /* iovec and kvec have identical layouts */ | |
1631 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) | |
1632 | return iov_npages(i, maxpages); | |
1633 | if (iov_iter_is_bvec(i)) | |
1634 | return bvec_npages(i, maxpages); | |
1635 | if (iov_iter_is_pipe(i)) { | |
8cefc107 | 1636 | unsigned int iter_head; |
66531c65 | 1637 | int npages; |
241699cd | 1638 | size_t off; |
241699cd AV |
1639 | |
1640 | if (!sanity(i)) | |
1641 | return 0; | |
1642 | ||
8cefc107 | 1643 | data_start(i, &iter_head, &off); |
241699cd | 1644 | /* some of this one + all after this one */ |
66531c65 AV |
1645 | npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); |
1646 | return min(npages, maxpages); | |
1647 | } | |
1648 | if (iov_iter_is_xarray(i)) { | |
e4f8df86 AV |
1649 | unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; |
1650 | int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); | |
66531c65 AV |
1651 | return min(npages, maxpages); |
1652 | } | |
1653 | return 0; | |
62a8067a | 1654 | } |
f67da30c | 1655 | EXPORT_SYMBOL(iov_iter_npages); |
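/*
 * Hedged sketch (assumes <linux/bio.h>): iov_iter_npages() is
 * typically used to bound an allocation before walking the iterator,
 * e.g. sizing a bio for a direct-I/O request.
 */
static struct bio *example_bio_for_iter(struct iov_iter *iter)
{
	int npages = iov_iter_npages(iter, BIO_MAX_VECS);

	return bio_alloc(GFP_KERNEL, npages);
}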
4b8164b9 AV |
1656 | |
1657 | const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) | |
1658 | { | |
1659 | *new = *old; | |
00e23707 | 1660 | if (unlikely(iov_iter_is_pipe(new))) { |
241699cd AV |
1661 | WARN_ON(1); |
1662 | return NULL; | |
1663 | } | |
7ff50620 | 1664 | if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new))) |
9ea9ce04 | 1665 | return NULL; |
00e23707 | 1666 | if (iov_iter_is_bvec(new)) |
4b8164b9 AV |
1667 | return new->bvec = kmemdup(new->bvec, |
1668 | new->nr_segs * sizeof(struct bio_vec), | |
1669 | flags); | |
1670 | else | |
1671 | /* iovec and kvec have identical layout */ | |
1672 | return new->iov = kmemdup(new->iov, | |
1673 | new->nr_segs * sizeof(struct iovec), | |
1674 | flags); | |
1675 | } | |
1676 | EXPORT_SYMBOL(dup_iter); | |
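/*
 * Hedged sketch (illustrative names): dup_iter() serves iterators that
 * must outlive the caller's frame, e.g. for async I/O.  For iovec-,
 * kvec- and bvec-backed iterators the return value is the duplicated
 * segment array (NULL means -ENOMEM); kfree() it once @dst is done.
 */
static int example_keep_iter(struct iov_iter *dst, struct iov_iter *src)
{
	const void *segs = dup_iter(dst, src, GFP_KERNEL);

	if (!segs)
		return -ENOMEM;
	/* ... use *dst from another context, then kfree(segs) ... */
	return 0;
}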
bc917be8 | 1677 | |
bfdc5970 CH |
1678 | static int copy_compat_iovec_from_user(struct iovec *iov, |
1679 | const struct iovec __user *uvec, unsigned long nr_segs) | |
1680 | { | |
1681 | const struct compat_iovec __user *uiov = | |
1682 | (const struct compat_iovec __user *)uvec; | |
1683 | int ret = -EFAULT, i; | |
1684 | ||
a959a978 | 1685 | if (!user_access_begin(uiov, nr_segs * sizeof(*uiov))) |
bfdc5970 CH |
1686 | return -EFAULT; |
1687 | ||
1688 | for (i = 0; i < nr_segs; i++) { | |
1689 | compat_uptr_t buf; | |
1690 | compat_ssize_t len; | |
1691 | ||
1692 | unsafe_get_user(len, &uiov[i].iov_len, uaccess_end); | |
1693 | unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end); | |
1694 | ||
1695 | /* check for compat_size_t not fitting in compat_ssize_t ... */ |
1696 | if (len < 0) { | |
1697 | ret = -EINVAL; | |
1698 | goto uaccess_end; | |
1699 | } | |
1700 | iov[i].iov_base = compat_ptr(buf); | |
1701 | iov[i].iov_len = len; | |
1702 | } | |
1703 | ||
1704 | ret = 0; | |
1705 | uaccess_end: | |
1706 | user_access_end(); | |
1707 | return ret; | |
1708 | } | |
1709 | ||
1710 | static int copy_iovec_from_user(struct iovec *iov, | |
1711 | const struct iovec __user *uvec, unsigned long nr_segs) | |
fb041b59 DL |
1712 | { |
1713 | unsigned long seg; | |
fb041b59 | 1714 | |
bfdc5970 CH |
1715 | if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec))) |
1716 | return -EFAULT; | |
1717 | for (seg = 0; seg < nr_segs; seg++) { | |
1718 | if ((ssize_t)iov[seg].iov_len < 0) | |
1719 | return -EINVAL; | |
fb041b59 DL |
1720 | } |
1721 | ||
bfdc5970 CH |
1722 | return 0; |
1723 | } | |
1724 | ||
1725 | struct iovec *iovec_from_user(const struct iovec __user *uvec, | |
1726 | unsigned long nr_segs, unsigned long fast_segs, | |
1727 | struct iovec *fast_iov, bool compat) | |
1728 | { | |
1729 | struct iovec *iov = fast_iov; | |
1730 | int ret; | |
1731 | ||
fb041b59 | 1732 | /* |
bfdc5970 CH |
1733 | * SuS says "The readv() function *may* fail if the iovcnt argument was |
1734 | * less than or equal to 0, or greater than {IOV_MAX}." Linux has |
1735 | * traditionally returned zero for zero segments, so... | |
fb041b59 | 1736 | */ |
bfdc5970 CH |
1737 | if (nr_segs == 0) |
1738 | return iov; | |
1739 | if (nr_segs > UIO_MAXIOV) | |
1740 | return ERR_PTR(-EINVAL); | |
fb041b59 DL |
1741 | if (nr_segs > fast_segs) { |
1742 | iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL); | |
bfdc5970 CH |
1743 | if (!iov) |
1744 | return ERR_PTR(-ENOMEM); | |
fb041b59 | 1745 | } |
bfdc5970 CH |
1746 | |
1747 | if (compat) | |
1748 | ret = copy_compat_iovec_from_user(iov, uvec, nr_segs); | |
1749 | else | |
1750 | ret = copy_iovec_from_user(iov, uvec, nr_segs); | |
1751 | if (ret) { | |
1752 | if (iov != fast_iov) | |
1753 | kfree(iov); | |
1754 | return ERR_PTR(ret); | |
1755 | } | |
1756 | ||
1757 | return iov; | |
1758 | } | |
1759 | ||
1760 | ssize_t __import_iovec(int type, const struct iovec __user *uvec, | |
1761 | unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, | |
1762 | struct iov_iter *i, bool compat) | |
1763 | { | |
1764 | ssize_t total_len = 0; | |
1765 | unsigned long seg; | |
1766 | struct iovec *iov; | |
1767 | ||
1768 | iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat); | |
1769 | if (IS_ERR(iov)) { | |
1770 | *iovp = NULL; | |
1771 | return PTR_ERR(iov); | |
fb041b59 DL |
1772 | } |
1773 | ||
1774 | /* | |
bfdc5970 CH |
1775 | * According to the Single Unix Specification we should return EINVAL if |
1776 | * an element length is < 0 when cast to ssize_t or if the total length | |
1777 | * would overflow the ssize_t return value of the system call. | |
fb041b59 DL |
1778 | * |
1779 | * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the | |
1780 | * overflow case. | |
1781 | */ | |
fb041b59 | 1782 | for (seg = 0; seg < nr_segs; seg++) { |
fb041b59 DL |
1783 | ssize_t len = (ssize_t)iov[seg].iov_len; |
1784 | ||
bfdc5970 CH |
1785 | if (!access_ok(iov[seg].iov_base, len)) { |
1786 | if (iov != *iovp) | |
1787 | kfree(iov); | |
1788 | *iovp = NULL; | |
1789 | return -EFAULT; | |
fb041b59 | 1790 | } |
bfdc5970 CH |
1791 | |
1792 | if (len > MAX_RW_COUNT - total_len) { | |
1793 | len = MAX_RW_COUNT - total_len; | |
fb041b59 DL |
1794 | iov[seg].iov_len = len; |
1795 | } | |
bfdc5970 | 1796 | total_len += len; |
fb041b59 | 1797 | } |
bfdc5970 CH |
1798 | |
1799 | iov_iter_init(i, type, iov, nr_segs, total_len); | |
1800 | if (iov == *iovp) | |
1801 | *iovp = NULL; | |
1802 | else | |
1803 | *iovp = iov; | |
1804 | return total_len; | |
fb041b59 DL |
1805 | } |
1806 | ||
ffecee4f VN |
1807 | /** |
1808 | * import_iovec() - Copy an array of &struct iovec from userspace | |
1809 | * into the kernel, check that it is valid, and initialize a new | |
1810 | * &struct iov_iter iterator to access it. | |
1811 | * | |
1812 | * @type: One of %READ or %WRITE. | |
bfdc5970 | 1813 | * @uvec: Pointer to the userspace array. |
ffecee4f VN |
1814 | * @nr_segs: Number of elements in userspace array. |
1815 | * @fast_segs: Number of elements in *@iovp. |
bfdc5970 | 1816 | * @iovp: (input and output parameter) Pointer to pointer to (usually small |
ffecee4f VN |
1817 | * on-stack) kernel array. |
1818 | * @i: Pointer to iterator that will be initialized on success. | |
1819 | * | |
1820 | * If the array pointed to by *@iovp is large enough to hold all @nr_segs, |
1821 | * then this function places %NULL in *@iovp on return. Otherwise, a new |
1822 | * array will be allocated and the result placed in *@iovp. This means that |
1823 | * the caller may call kfree() on *@iovp regardless of whether the small |
1824 | * on-stack array was used or not (and regardless of whether this function | |
1825 | * returns an error or not). | |
1826 | * | |
87e5e6da | 1827 | * Return: Negative error code on error, bytes imported on success |
ffecee4f | 1828 | */ |
bfdc5970 | 1829 | ssize_t import_iovec(int type, const struct iovec __user *uvec, |
bc917be8 | 1830 | unsigned nr_segs, unsigned fast_segs, |
bfdc5970 | 1831 | struct iovec **iovp, struct iov_iter *i) |
bc917be8 | 1832 | { |
89cd35c5 CH |
1833 | return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i, |
1834 | in_compat_syscall()); | |
bc917be8 AV |
1835 | } |
1836 | EXPORT_SYMBOL(import_iovec); | |
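/*
 * Hedged sketch of the canonical calling pattern (the example_ name is
 * made up): a readv()-style path imports the user vector through a
 * small on-stack array, and may kfree() unconditionally afterwards
 * because *iovp is NULLed when the on-stack array was used.
 */
static ssize_t example_readv(const struct iovec __user *uvec,
			     unsigned long nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;
	/* ... pass &iter to ->read_iter() or similar ... */
	kfree(iov);
	return ret;
}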
1837 | ||
bc917be8 AV |
1838 | int import_single_range(int rw, void __user *buf, size_t len, |
1839 | struct iovec *iov, struct iov_iter *i) | |
1840 | { | |
1841 | if (len > MAX_RW_COUNT) | |
1842 | len = MAX_RW_COUNT; | |
96d4f267 | 1843 | if (unlikely(!access_ok(buf, len))) |
bc917be8 AV |
1844 | return -EFAULT; |
1845 | ||
1846 | iov->iov_base = buf; | |
1847 | iov->iov_len = len; | |
1848 | iov_iter_init(i, rw, iov, 1, len); | |
1849 | return 0; | |
1850 | } | |
e1267585 | 1851 | EXPORT_SYMBOL(import_single_range); |
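/*
 * Hedged sketch (illustrative): the single-buffer analogue used by
 * plain read()/write() paths.  @iov must live as long as the iterator
 * does, so both normally sit together on the caller's stack.
 */
static ssize_t example_read(void __user *buf, size_t len)
{
	struct iovec iov;
	struct iov_iter iter;
	int ret = import_single_range(READ, buf, len, &iov, &iter);

	if (ret)
		return ret;
	/* iter now spans the single user buffer, capped at MAX_RW_COUNT */
	return iov_iter_count(&iter);
}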
8fb0f47a JA |
1852 | |
1853 | /** | |
1854 | * iov_iter_restore() - Restore a &struct iov_iter to the same state as when | |
1855 | * iov_iter_save_state() was called. | |
1856 | * | |
1857 | * @i: &struct iov_iter to restore | |
1858 | * @state: state to restore from | |
1859 | * | |
1860 | * Used after iov_iter_save_state() to restore @i, if operations may
1861 | * have advanced it. | |
1862 | * | |
1863 | * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC | |
1864 | */ | |
1865 | void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state) | |
1866 | { | |
1867 | if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
1868 | 		!iov_iter_is_kvec(i)))
1869 | return; | |
1870 | i->iov_offset = state->iov_offset; | |
1871 | i->count = state->count; | |
1872 | /* | |
1873 | * For the *vec iters, nr_segs + iov is constant - if we increment | |
1874 | * the vec, then we also decrement the nr_segs count. Hence we don't | |
1875 | * need to track both of these; just one is enough, and we can deduce |
1876 | * the other from it. ITER_KVEC and ITER_IOVEC are the same struct |
1877 | * size, so we can just increment the iov pointer as they are unionized. |
1878 | * ITER_BVEC _may_ be the same size on some archs, but on others it is | |
1879 | * not. Be safe and handle it separately. | |
1880 | */ | |
1881 | BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec)); | |
1882 | if (iov_iter_is_bvec(i)) | |
1883 | i->bvec -= state->nr_segs - i->nr_segs; | |
1884 | else | |
1885 | i->iov -= state->nr_segs - i->nr_segs; | |
1886 | i->nr_segs = state->nr_segs; | |
1887 | } |
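/*
 * Hedged sketch of the save/restore pairing (do_something() is a
 * stand-in): callers such as io_uring snapshot the iterator before an
 * attempt that may consume it, and roll back before retrying.
 */
static ssize_t example_retry(struct iov_iter *iter)
{
	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(iter, &state);
	ret = do_something(iter);	/* may advance iter */
	if (ret == -EAGAIN)
		iov_iter_restore(iter, &state);
	return ret;
}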