// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) {          \
        size_t __maybe_unused off = 0;                          \
        len = n;                                                \
        base = __p + i->iov_offset;                             \
        len -= (STEP);                                          \
        i->iov_offset += len;                                   \
        n = len;                                                \
}

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {        \
        size_t off = 0;                                         \
        size_t skip = i->iov_offset;                            \
        do {                                                    \
                len = min(n, __p->iov_len - skip);              \
                if (likely(len)) {                              \
                        base = __p->iov_base + skip;            \
                        len -= (STEP);                          \
                        off += len;                             \
                        skip += len;                            \
                        n -= len;                               \
                        if (skip < __p->iov_len)                \
                                break;                          \
                }                                               \
                __p++;                                          \
                skip = 0;                                       \
        } while (n);                                            \
        i->iov_offset = skip;                                   \
        n = off;                                                \
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {           \
        size_t off = 0;                                         \
        unsigned skip = i->iov_offset;                          \
        while (n) {                                             \
                unsigned offset = p->bv_offset + skip;          \
                unsigned left;                                  \
                void *kaddr = kmap_local_page(p->bv_page +      \
                                        offset / PAGE_SIZE);    \
                base = kaddr + offset % PAGE_SIZE;              \
                len = min(min(n, (size_t)(p->bv_len - skip)),   \
                     (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
                left = (STEP);                                  \
                kunmap_local(kaddr);                            \
                len -= left;                                    \
                off += len;                                     \
                skip += len;                                    \
                if (skip == p->bv_len) {                        \
                        skip = 0;                               \
                        p++;                                    \
                }                                               \
                n -= len;                                       \
                if (left)                                       \
                        break;                                  \
        }                                                       \
        i->iov_offset = skip;                                   \
        n = off;                                                \
}

#define iterate_xarray(i, n, base, len, __off, STEP) {          \
        __label__ __out;                                        \
        size_t __off = 0;                                       \
        struct folio *folio;                                    \
        loff_t start = i->xarray_start + i->iov_offset;         \
        pgoff_t index = start / PAGE_SIZE;                      \
        XA_STATE(xas, i->xarray, index);                        \
                                                                \
        len = PAGE_SIZE - offset_in_page(start);                \
        rcu_read_lock();                                        \
        xas_for_each(&xas, folio, ULONG_MAX) {                  \
                unsigned left;                                  \
                size_t offset;                                  \
                if (xas_retry(&xas, folio))                     \
                        continue;                               \
                if (WARN_ON(xa_is_value(folio)))                \
                        break;                                  \
                if (WARN_ON(folio_test_hugetlb(folio)))         \
                        break;                                  \
                offset = offset_in_folio(folio, start + __off); \
                while (offset < folio_size(folio)) {            \
                        base = kmap_local_folio(folio, offset); \
                        len = min(n, len);                      \
                        left = (STEP);                          \
                        kunmap_local(base);                     \
                        len -= left;                            \
                        __off += len;                           \
                        n -= len;                               \
                        if (left || n == 0)                     \
                                goto __out;                     \
                        offset += len;                          \
                        len = PAGE_SIZE;                        \
                }                                               \
        }                                                       \
__out:                                                          \
        rcu_read_unlock();                                      \
        i->iov_offset += __off;                                 \
        n = __off;                                              \
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {     \
        if (unlikely(i->count < n))                             \
                n = i->count;                                   \
        if (likely(n)) {                                        \
                if (likely(iter_is_ubuf(i))) {                  \
                        void __user *base;                      \
                        size_t len;                             \
                        iterate_buf(i, n, base, len, off,       \
                                                i->ubuf, (I))   \
                } else if (likely(iter_is_iovec(i))) {          \
                        const struct iovec *iov = i->iov;       \
                        void __user *base;                      \
                        size_t len;                             \
                        iterate_iovec(i, n, base, len, off,     \
                                                iov, (I))       \
                        i->nr_segs -= iov - i->iov;             \
                        i->iov = iov;                           \
                } else if (iov_iter_is_bvec(i)) {               \
                        const struct bio_vec *bvec = i->bvec;   \
                        void *base;                             \
                        size_t len;                             \
                        iterate_bvec(i, n, base, len, off,      \
                                                bvec, (K))      \
                        i->nr_segs -= bvec - i->bvec;           \
                        i->bvec = bvec;                         \
                } else if (iov_iter_is_kvec(i)) {               \
                        const struct kvec *kvec = i->kvec;      \
                        void *base;                             \
                        size_t len;                             \
                        iterate_iovec(i, n, base, len, off,     \
                                                kvec, (K))      \
                        i->nr_segs -= kvec - i->kvec;           \
                        i->kvec = kvec;                         \
                } else if (iov_iter_is_xarray(i)) {             \
                        void *base;                             \
                        size_t len;                             \
                        iterate_xarray(i, n, base, len, off,    \
                                                        (K))    \
                }                                               \
                i->count -= n;                                  \
        }                                                       \
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
        __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
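
/*
 * Illustrative note (not part of the original file): the I and K
 * arguments above are statement expressions evaluated once per segment.
 * I runs for user-backed segments (base has type void __user *), K for
 * kernel-backed ones (base is void *).  I must evaluate to the number
 * of bytes it failed to copy; iterate_and_advance() wraps K as
 * ((void)(K),0), i.e. kernel-side steps are assumed to never fail,
 * while __iterate_and_advance() also honours K's return value (see
 * _copy_mc_to_iter() below).  A minimal sketch of a caller, modelled
 * on _copy_to_iter() further down in this file:
 *
 *      size_t example_copy_to_iter(const void *addr, size_t bytes,
 *                                  struct iov_iter *i)
 *      {
 *              iterate_and_advance(i, bytes, base, len, off,
 *                      copyout(base, addr + off, len),  // user memory
 *                      memcpy(base, addr + off, len)    // kernel memory
 *              )
 *              return bytes;
 *      }
 */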

static int copyout(void __user *to, const void *from, size_t n)
{
        if (should_fail_usercopy())
                return n;
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
        size_t res = n;

        if (should_fail_usercopy())
                return n;
        if (access_ok(from, n)) {
                instrument_copy_from_user_before(to, from, n);
                res = raw_copy_from_user(to, from, n);
                instrument_copy_from_user_after(to, from, n, res);
        }
        return res;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int p_head = pipe->head;
        unsigned int p_tail = pipe->tail;
        unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
        unsigned int i_head = i->head;
        unsigned int idx;

        if (i->last_offset) {
                struct pipe_buffer *p;
                if (unlikely(p_occupancy == 0))
                        goto Bad;       // pipe must be non-empty
                if (unlikely(i_head != p_head - 1))
                        goto Bad;       // must be at the last buffer...

                p = pipe_buf(pipe, i_head);
                if (unlikely(p->offset + p->len != abs(i->last_offset)))
                        goto Bad;       // ... at the end of segment
        } else {
                if (i_head != p_head)
                        goto Bad;       // must be right after the last buffer
        }
        return true;
Bad:
        printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset);
        printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
                        p_head, p_tail, pipe->ring_size);
        for (idx = 0; idx < pipe->ring_size; idx++)
                printk(KERN_ERR "[%p %p %d %d]\n",
                        pipe->bufs[idx].ops,
                        pipe->bufs[idx].page,
                        pipe->bufs[idx].offset,
                        pipe->bufs[idx].len);
        WARN_ON(1);
        return false;
}
#else
#define sanity(i) true
#endif

static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
{
        struct page *page = alloc_page(GFP_USER);
        if (page) {
                struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
                *buf = (struct pipe_buffer) {
                        .ops = &default_pipe_buf_ops,
                        .page = page,
                        .offset = 0,
                        .len = size
                };
        }
        return page;
}

static void push_page(struct pipe_inode_info *pipe, struct page *page,
                        unsigned int offset, unsigned int size)
{
        struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
        *buf = (struct pipe_buffer) {
                .ops = &page_cache_pipe_buf_ops,
                .page = page,
                .offset = offset,
                .len = size
        };
        get_page(page);
}

static inline int last_offset(const struct pipe_buffer *buf)
{
        if (buf->ops == &default_pipe_buf_ops)
                return buf->len;        // buf->offset is 0 for those
        else
                return -(buf->offset + buf->len);
}
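
/*
 * Illustrative note (not part of the original file): last_offset()
 * encodes the iterator's resume point in the sign of the value.  An
 * anonymous (default_pipe_buf_ops) buffer holding 100 bytes yields
 * +100: it starts at offset 0 and can be appended to at offset 100.
 * A page-cache buffer with .offset = 64 and .len = 100 yields
 * -(64 + 100) = -164, so copy_page_to_iter_pipe() can test
 * i->last_offset == -offset to see whether a new chunk at offset 164
 * of the same page is contiguous with (and mergeable into) the
 * previous buffer.
 */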

static struct page *append_pipe(struct iov_iter *i, size_t size,
                                unsigned int *off)
{
        struct pipe_inode_info *pipe = i->pipe;
        int offset = i->last_offset;
        struct pipe_buffer *buf;
        struct page *page;

        if (offset > 0 && offset < PAGE_SIZE) {
                // some space in the last buffer; add to it
                buf = pipe_buf(pipe, pipe->head - 1);
                size = min_t(size_t, size, PAGE_SIZE - offset);
                buf->len += size;
                i->last_offset += size;
                i->count -= size;
                *off = offset;
                return buf->page;
        }
        // OK, we need a new buffer
        *off = 0;
        size = min_t(size_t, size, PAGE_SIZE);
        if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
                return NULL;
        page = push_anon(pipe, size);
        if (!page)
                return NULL;
        i->head = pipe->head - 1;
        i->last_offset = size;
        i->count -= size;
        return page;
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        unsigned int head = pipe->head;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        if (offset && i->last_offset == -offset) { // could we merge it?
                struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
                if (buf->page == page) {
                        buf->len += bytes;
                        i->last_offset -= bytes;
                        i->count -= bytes;
                        return bytes;
                }
        }
        if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
                return 0;

        push_page(pipe, page, offset, bytes);
        i->last_offset = -(offset + bytes);
        i->head = head;
        i->count -= bytes;
        return bytes;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
        if (iter_is_ubuf(i)) {
                size_t n = min(size, iov_iter_count(i));
                n -= fault_in_readable(i->ubuf + i->iov_offset, n);
                return size - n;
        } else if (iter_is_iovec(i)) {
                size_t count = min(size, iov_iter_count(i));
                const struct iovec *p;
                size_t skip;

                size -= count;
                for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
                        size_t len = min(count, p->iov_len - skip);
                        size_t ret;

                        if (unlikely(!len))
                                continue;
                        ret = fault_in_readable(p->iov_base + skip, len);
                        count -= len - ret;
                        if (ret)
                                break;
                }
                return count + size;
        }
        return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
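
/*
 * Illustrative sketch (not part of the original file): the typical
 * caller-side pattern, loosely modelled on generic_perform_write().
 * Pre-faulting the user pages lets the actual copy run under locks
 * that must not be held across a page fault:
 *
 *      while (iov_iter_count(i)) {
 *              size_t bytes = min_t(size_t, iov_iter_count(i), chunk);
 *
 *              if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
 *                      status = -EFAULT;       // nothing could be faulted in
 *                      break;
 *              }
 *              // ... lock, copy_from_iter(), unlock, retry on short copy ...
 *      }
 *
 * chunk and status are hypothetical caller-owned variables.
 */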

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
        if (iter_is_ubuf(i)) {
                size_t n = min(size, iov_iter_count(i));
                n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
                return size - n;
        } else if (iter_is_iovec(i)) {
                size_t count = min(size, iov_iter_count(i));
                const struct iovec *p;
                size_t skip;

                size -= count;
                for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
                        size_t len = min(count, p->iov_len - skip);
                        size_t ret;

                        if (unlikely(!len))
                                continue;
                        ret = fault_in_safe_writeable(p->iov_base + skip, len);
                        count -= len - ret;
                        if (ret)
                                break;
                }
                return count + size;
        }
        return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter) {
                .iter_type = ITER_IOVEC,
                .nofault = false,
                .user_backed = true,
                .data_source = direction,
                .iov = iov,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_init);
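
/*
 * Illustrative sketch (not part of the original file): wrapping a
 * single user iovec for a read(2)-style path, much as import_iovec()
 * does after copying the vector in from userspace:
 *
 *      struct iovec iov = { .iov_base = buf, .iov_len = len };
 *      struct iov_iter iter;
 *
 *      iov_iter_init(&iter, READ, &iov, 1, len);
 *      copied = copy_to_iter(kbuf, min(len, kbuf_len), &iter);
 *
 * READ means the user buffer is the destination of the transfer
 * (data_source == false); buf, len, kbuf and kbuf_len are assumed to
 * be caller-provided.
 */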

// returns the offset in partial buffer (if any)
static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages)
{
        struct pipe_inode_info *pipe = i->pipe;
        int used = pipe->head - pipe->tail;
        int off = i->last_offset;

        *npages = max((int)pipe->max_usage - used, 0);

        if (off > 0 && off < PAGE_SIZE) { // anon and not full
                (*npages)++;
                return off;
        }
        return 0;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        unsigned int off, chunk;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        for (size_t n = bytes; n; n -= chunk) {
                struct page *page = append_pipe(i, n, &off);
                chunk = min_t(size_t, n, PAGE_SIZE - off);
                if (!page)
                        return bytes - n;
                memcpy_to_page(page, off, addr, chunk);
                addr += chunk;
        }
        return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
                              __wsum sum, size_t off)
{
        __wsum next = csum_partial_copy_nocheck(from, to, len);
        return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
                                         struct iov_iter *i, __wsum *sump)
{
        __wsum sum = *sump;
        size_t off = 0;
        unsigned int chunk, r;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        while (bytes) {
                struct page *page = append_pipe(i, bytes, &r);
                char *p;

                if (!page)
                        break;
                chunk = min_t(size_t, bytes, PAGE_SIZE - r);
                p = kmap_local_page(page);
                sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
                kunmap_local(p);
                off += chunk;
                bytes -= chunk;
        }
        *sump = sum;
        return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(i->data_source))
                return 0;
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_pipe_to_iter(addr, bytes, i);
        if (user_backed_iter(i))
                might_fault();
        iterate_and_advance(i, bytes, base, len, off,
                copyout(base, addr + off, len),
                memcpy(base, addr + off, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
        if (access_ok(to, n)) {
                instrument_copy_to_user(to, from, n);
                n = copy_mc_to_user((__force void *) to, from, n);
        }
        return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        size_t xfer = 0;
        unsigned int off, chunk;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        while (bytes) {
                struct page *page = append_pipe(i, bytes, &off);
                unsigned long rem;
                char *p;

                if (!page)
                        break;
                chunk = min_t(size_t, bytes, PAGE_SIZE - off);
                p = kmap_local_page(page);
                rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
                chunk -= rem;
                kunmap_local(p);
                xfer += chunk;
                bytes -= chunk;
                if (rem) {
                        iov_iter_revert(i, rem);
                        break;
                }
        }
        return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer).  Upon #MC, read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again.  Re-triggering machine
 *   checks is potentially fatal, so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter(), where only ITER_IOVEC attempts might
 *   return a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(i->data_source))
                return 0;
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_mc_pipe_to_iter(addr, bytes, i);
        if (user_backed_iter(i))
                might_fault();
        __iterate_and_advance(i, bytes, base, len, off,
                copyout_mc(base, addr + off, len),
                copy_mc_to_kernel(base, addr + off, len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(!i->data_source))
                return 0;

        if (user_backed_iter(i))
                might_fault();
        iterate_and_advance(i, bytes, base, len, off,
                copyin(addr + off, base, len),
                memcpy(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(!i->data_source))
                return 0;

        iterate_and_advance(i, bytes, base, len, off,
                __copy_from_user_inatomic_nocache(addr + off, base, len),
                memcpy(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache.  It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 * for all iterator types.  _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (WARN_ON_ONCE(!i->data_source))
                return 0;

        iterate_and_advance(i, bytes, base, len, off,
                __copy_from_user_flushcache(addr + off, base, len),
                memcpy_flushcache(addr + off, base, len)
        )

        return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
        struct page *head;
        size_t v = n + offset;

        /*
         * The general case needs to access the page order in order
         * to compute the page size.
         * However, we mostly deal with order-0 pages and thus can
         * avoid a possible cache line miss for requests that fit all
         * page orders.
         */
        if (n <= v && v <= PAGE_SIZE)
                return true;

        head = compound_head(page);
        v += (page - head) << PAGE_SHIFT;

        if (WARN_ON(n > v || v > page_size(head)))
                return false;
        return true;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t res = 0;
        if (!page_copy_sane(page, offset, bytes))
                return 0;
        if (WARN_ON_ONCE(i->data_source))
                return 0;
        if (unlikely(iov_iter_is_pipe(i)))
                return copy_page_to_iter_pipe(page, offset, bytes, i);
        page += offset / PAGE_SIZE; // first subpage
        offset %= PAGE_SIZE;
        while (1) {
                void *kaddr = kmap_local_page(page);
                size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
                n = _copy_to_iter(kaddr + offset, n, i);
                kunmap_local(kaddr);
                res += n;
                bytes -= n;
                if (!bytes || !n)
                        break;
                offset += n;
                if (offset == PAGE_SIZE) {
                        page++;
                        offset = 0;
                }
        }
        return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
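
/*
 * Illustrative sketch (not part of the original file): the common
 * read-path use is copying freshly-read page-cache contents out to
 * whatever the iterator describes, as in the generic file read path:
 *
 *      size_t copied = copy_page_to_iter(page, offset_in_page(pos),
 *                                        bytes, iter);
 *      if (copied < bytes)
 *              ; // short copy: a user-backed iterator took a fault
 *
 * offset/bytes may span subpages of a compound page; the loop above
 * walks them one PAGE_SIZE chunk at a time.
 */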

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t res = 0;
        if (!page_copy_sane(page, offset, bytes))
                return 0;
        page += offset / PAGE_SIZE; // first subpage
        offset %= PAGE_SIZE;
        while (1) {
                void *kaddr = kmap_local_page(page);
                size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
                n = _copy_from_iter(kaddr + offset, n, i);
                kunmap_local(kaddr);
                res += n;
                bytes -= n;
                if (!bytes || !n)
                        break;
                offset += n;
                if (offset == PAGE_SIZE) {
                        page++;
                        offset = 0;
                }
        }
        return res;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
        unsigned int chunk, off;

        if (unlikely(bytes > i->count))
                bytes = i->count;
        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        for (size_t n = bytes; n; n -= chunk) {
                struct page *page = append_pipe(i, n, &off);
                char *p;

                if (!page)
                        return bytes - n;
                chunk = min_t(size_t, n, PAGE_SIZE - off);
                p = kmap_local_page(page);
                memset(p + off, 0, chunk);
                kunmap_local(p);
        }
        return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(iov_iter_is_pipe(i)))
                return pipe_zero(bytes, i);
        iterate_and_advance(i, bytes, base, len, count,
                clear_user(base, len),
                memset(base, 0, len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
                                  struct iov_iter *i)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        if (!page_copy_sane(page, offset, bytes)) {
                kunmap_atomic(kaddr);
                return 0;
        }
        if (WARN_ON_ONCE(!i->data_source)) {
                kunmap_atomic(kaddr);
                return 0;
        }
        iterate_and_advance(i, bytes, base, len, off,
                copyin(p + off, base, len),
                memcpy(p + off, base, len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static void pipe_advance(struct iov_iter *i, size_t size)
{
        struct pipe_inode_info *pipe = i->pipe;
        int off = i->last_offset;

        if (!off && !size) {
                pipe_discard_from(pipe, i->start_head); // discard everything
                return;
        }
        i->count -= size;
        while (1) {
                struct pipe_buffer *buf = pipe_buf(pipe, i->head);
                if (off) /* make it relative to the beginning of buffer */
                        size += abs(off) - buf->offset;
                if (size <= buf->len) {
                        buf->len = size;
                        i->last_offset = last_offset(buf);
                        break;
                }
                size -= buf->len;
                i->head++;
                off = 0;
        }
        pipe_discard_from(pipe, i->head + 1); // discard everything past this one
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
        const struct bio_vec *bvec, *end;

        if (!i->count)
                return;
        i->count -= size;

        size += i->iov_offset;

        for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
                if (likely(size < bvec->bv_len))
                        break;
                size -= bvec->bv_len;
        }
        i->iov_offset = size;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
        const struct iovec *iov, *end;

        if (!i->count)
                return;
        i->count -= size;

        size += i->iov_offset; // from beginning of current segment
        for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
                if (likely(size < iov->iov_len))
                        break;
                size -= iov->iov_len;
        }
        i->iov_offset = size;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (unlikely(i->count < size))
                size = i->count;
        if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
                i->iov_offset += size;
                i->count -= size;
        } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
                /* iovec and kvec have identical layouts */
                iov_iter_iovec_advance(i, size);
        } else if (iov_iter_is_bvec(i)) {
                iov_iter_bvec_advance(i, size);
        } else if (iov_iter_is_pipe(i)) {
                pipe_advance(i, size);
        } else if (iov_iter_is_discard(i)) {
                i->count -= size;
        }
}
EXPORT_SYMBOL(iov_iter_advance);
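
/*
 * Illustrative sketch (not part of the original file): advance and
 * revert bracket a speculative transfer.  A caller that fails partway
 * can wind the iterator back by however much was consumed:
 *
 *      size_t before = iov_iter_count(i);
 *      ret = do_transfer(i);           // advances i as it copies
 *      if (ret < 0)
 *              iov_iter_revert(i, before - iov_iter_count(i));
 *
 * do_transfer() is a hypothetical stand-in for any helper that
 * advances the iterator internally (as the _copy_*_iter() functions
 * above all do).
 */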

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
        if (!unroll)
                return;
        if (WARN_ON(unroll > MAX_RW_COUNT))
                return;
        i->count += unroll;
        if (unlikely(iov_iter_is_pipe(i))) {
                struct pipe_inode_info *pipe = i->pipe;
                unsigned int head = pipe->head;

                while (head > i->start_head) {
                        struct pipe_buffer *b = pipe_buf(pipe, --head);
                        if (unroll < b->len) {
                                b->len -= unroll;
                                i->last_offset = last_offset(b);
                                i->head = head;
                                return;
                        }
                        unroll -= b->len;
                        pipe_buf_release(pipe, b);
                        pipe->head--;
                }
                i->last_offset = 0;
                i->head = head;
                return;
        }
        if (unlikely(iov_iter_is_discard(i)))
                return;
        if (unroll <= i->iov_offset) {
                i->iov_offset -= unroll;
                return;
        }
        unroll -= i->iov_offset;
        if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
                BUG(); /* We should never go beyond the start of the specified
                        * range since we might then be straying into pages that
                        * aren't pinned.
                        */
        } else if (iov_iter_is_bvec(i)) {
                const struct bio_vec *bvec = i->bvec;
                while (1) {
                        size_t n = (--bvec)->bv_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->bvec = bvec;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        } else { /* same logic for iovec and kvec */
                const struct iovec *iov = i->iov;
                while (1) {
                        size_t n = (--iov)->iov_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->iov = iov;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        }
}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs > 1) {
                if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                        return min(i->count, i->iov->iov_len - i->iov_offset);
                if (iov_iter_is_bvec(i))
                        return min(i->count, i->bvec->bv_len - i->iov_offset);
        }
        return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter){
                .iter_type = ITER_KVEC,
                .data_source = direction,
                .kvec = kvec,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        WARN_ON(direction & ~(READ | WRITE));
        *i = (struct iov_iter){
                .iter_type = ITER_BVEC,
                .data_source = direction,
                .bvec = bvec,
                .nr_segs = nr_segs,
                .iov_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_bvec);
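
/*
 * Illustrative sketch (not part of the original file): block and
 * networking code uses ITER_BVEC to describe pinned kernel pages with
 * no user-address semantics attached:
 *
 *      struct bio_vec bv = {
 *              .bv_page   = page,
 *              .bv_offset = 0,
 *              .bv_len    = PAGE_SIZE,
 *      };
 *      struct iov_iter iter;
 *
 *      iov_iter_bvec(&iter, WRITE, &bv, 1, PAGE_SIZE);
 *
 * WRITE marks the page as the data source, e.g. for feeding it to a
 * ->write_iter() instance; page is assumed to be caller-provided.
 */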

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
                        struct pipe_inode_info *pipe,
                        size_t count)
{
        BUG_ON(direction != READ);
        WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
        *i = (struct iov_iter){
                .iter_type = ITER_PIPE,
                .data_source = false,
                .pipe = pipe,
                .head = pipe->head,
                .start_head = pipe->head,
                .last_offset = 0,
                .count = count
        };
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
                     struct xarray *xarray, loff_t start, size_t count)
{
        BUG_ON(direction & ~1);
        *i = (struct iov_iter) {
                .iter_type = ITER_XARRAY,
                .data_source = direction,
                .xarray = xarray,
                .xarray_start = start,
                .count = count,
                .iov_offset = 0
        };
}
EXPORT_SYMBOL(iov_iter_xarray);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
        BUG_ON(direction != READ);
        *i = (struct iov_iter){
                .iter_type = ITER_DISCARD,
                .data_source = false,
                .count = count,
                .iov_offset = 0
        };
}
EXPORT_SYMBOL(iov_iter_discard);

static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
                                   unsigned len_mask)
{
        size_t size = i->count;
        size_t skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->iov[k].iov_len - skip;

                if (len > size)
                        len = size;
                if (len & len_mask)
                        return false;
                if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
                        return false;

                size -= len;
                if (!size)
                        break;
        }
        return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
                                  unsigned len_mask)
{
        size_t size = i->count;
        unsigned skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->bvec[k].bv_len - skip;

                if (len > size)
                        len = size;
                if (len & len_mask)
                        return false;
                if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
                        return false;

                size -= len;
                if (!size)
                        break;
        }
        return true;
}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *      are aligned to the parameters.
 *
 * @i: &struct iov_iter to restore
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths intersect with the provided masks
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
                         unsigned len_mask)
{
        if (likely(iter_is_ubuf(i))) {
                if (i->count & len_mask)
                        return false;
                if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
                        return false;
                return true;
        }

        if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                return iov_iter_aligned_iovec(i, addr_mask, len_mask);

        if (iov_iter_is_bvec(i))
                return iov_iter_aligned_bvec(i, addr_mask, len_mask);

        if (iov_iter_is_pipe(i)) {
                size_t size = i->count;

                if (size & len_mask)
                        return false;
                if (size && i->last_offset > 0) {
                        if (i->last_offset & addr_mask)
                                return false;
                }

                return true;
        }

        if (iov_iter_is_xarray(i)) {
                if (i->count & len_mask)
                        return false;
                if ((i->xarray_start + i->iov_offset) & addr_mask)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
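
/*
 * Illustrative sketch (not part of the original file): a direct-I/O
 * front end can reject a request unless every segment is aligned to
 * the device's logical block size:
 *
 *      unsigned int blksz = bdev_logical_block_size(bdev);
 *
 *      if (!iov_iter_is_aligned(iter, blksz - 1, blksz - 1))
 *              return -EINVAL;
 *
 * The masks name the bits that must be clear, so blksz - 1 requires
 * both the addresses and the lengths to be multiples of blksz (blksz
 * being a power of two).
 */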

static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;
        size_t skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->iov[k].iov_len - skip;
                if (len) {
                        res |= (unsigned long)i->iov[k].iov_base + skip;
                        if (len > size)
                                len = size;
                        res |= len;
                        size -= len;
                        if (!size)
                                break;
                }
        }
        return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
        unsigned res = 0;
        size_t size = i->count;
        unsigned skip = i->iov_offset;
        unsigned k;

        for (k = 0; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->bvec[k].bv_len - skip;
                res |= (unsigned long)i->bvec[k].bv_offset + skip;
                if (len > size)
                        len = size;
                res |= len;
                size -= len;
                if (!size)
                        break;
        }
        return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        if (likely(iter_is_ubuf(i))) {
                size_t size = i->count;
                if (size)
                        return ((unsigned long)i->ubuf + i->iov_offset) | size;
                return 0;
        }

        /* iovec and kvec have identical layouts */
        if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
                return iov_iter_alignment_iovec(i);

        if (iov_iter_is_bvec(i))
                return iov_iter_alignment_bvec(i);

        if (iov_iter_is_pipe(i)) {
                size_t size = i->count;

                if (size && i->last_offset > 0)
                        return size | i->last_offset;
                return size;
        }

        if (iov_iter_is_xarray(i))
                return (i->xarray_start + i->iov_offset) | i->count;

        return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        unsigned long v = 0;
        size_t size = i->count;
        unsigned k;

        if (iter_is_ubuf(i))
                return 0;

        if (WARN_ON(!iter_is_iovec(i)))
                return ~0U;

        for (k = 0; k < i->nr_segs; k++) {
                if (i->iov[k].iov_len) {
                        unsigned long base = (unsigned long)i->iov[k].iov_base;
                        if (v) // if not the first one
                                res |= base | v; // this start | previous end
                        v = base + i->iov[k].iov_len;
                        if (size <= i->iov[k].iov_len)
                                break;
                        size -= i->iov[k].iov_len;
                }
        }
        return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static int want_pages_array(struct page ***res, size_t size,
                            size_t start, unsigned int maxpages)
{
        unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);

        if (count > maxpages)
                count = maxpages;
        WARN_ON(!count);        // caller should've prevented that
        if (!*res) {
                *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
                if (!*res)
                        return 0;
        }
        return count;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
                   struct page ***pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        unsigned int npages, count, off, chunk;
        struct page **p;
        size_t left;

        if (!sanity(i))
                return -EFAULT;

        *start = off = pipe_npages(i, &npages);
        if (!npages)
                return -EFAULT;
        count = want_pages_array(pages, maxsize, off, min(npages, maxpages));
        if (!count)
                return -ENOMEM;
        p = *pages;
        for (npages = 0, left = maxsize; npages < count; npages++, left -= chunk) {
                struct page *page = append_pipe(i, left, &off);
                if (!page)
                        break;
                chunk = min_t(size_t, left, PAGE_SIZE - off);
                get_page(*p++ = page);
        }
        if (!npages)
                return -EFAULT;
        return maxsize - left;
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
                                          pgoff_t index, unsigned int nr_pages)
{
        XA_STATE(xas, xa, index);
        struct page *page;
        unsigned int ret = 0;

        rcu_read_lock();
        for (page = xas_load(&xas); page; page = xas_next(&xas)) {
                if (xas_retry(&xas, page))
                        continue;

                /* Has the page moved or been split? */
                if (unlikely(page != xas_reload(&xas))) {
                        xas_reset(&xas);
                        continue;
                }

                pages[ret] = find_subpage(page, xas.xa_index);
                get_page(pages[ret]);
                if (++ret == nr_pages)
                        break;
        }
        rcu_read_unlock();
        return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
                                     struct page ***pages, size_t maxsize,
                                     unsigned maxpages, size_t *_start_offset)
{
        unsigned nr, offset, count;
        pgoff_t index;
        loff_t pos;

        pos = i->xarray_start + i->iov_offset;
        index = pos >> PAGE_SHIFT;
        offset = pos & ~PAGE_MASK;
        *_start_offset = offset;

        count = want_pages_array(pages, maxsize, offset, maxpages);
        if (!count)
                return -ENOMEM;
        nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
        if (nr == 0)
                return 0;

        maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
        i->iov_offset += maxsize;
        i->count -= maxsize;
        return maxsize;
}

/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
        size_t skip;
        long k;

        if (iter_is_ubuf(i))
                return (unsigned long)i->ubuf + i->iov_offset;

        for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
                size_t len = i->iov[k].iov_len - skip;

                if (unlikely(!len))
                        continue;
                if (*size > len)
                        *size = len;
                return (unsigned long)i->iov[k].iov_base + skip;
        }
        BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
                                       size_t *size, size_t *start)
{
        struct page *page;
        size_t skip = i->iov_offset, len;

        len = i->bvec->bv_len - skip;
        if (*size > len)
                *size = len;
        skip += i->bvec->bv_offset;
        page = i->bvec->bv_page + skip / PAGE_SIZE;
        *start = skip % PAGE_SIZE;
        return page;
}

static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   unsigned int maxpages, size_t *start,
                   unsigned int gup_flags)
{
        unsigned int n;

        if (maxsize > i->count)
                maxsize = i->count;
        if (!maxsize)
                return 0;
        if (maxsize > MAX_RW_COUNT)
                maxsize = MAX_RW_COUNT;

        if (likely(user_backed_iter(i))) {
                unsigned long addr;
                int res;

                if (iov_iter_rw(i) != WRITE)
                        gup_flags |= FOLL_WRITE;
                if (i->nofault)
                        gup_flags |= FOLL_NOFAULT;

                addr = first_iovec_segment(i, &maxsize);
                *start = addr % PAGE_SIZE;
                addr &= PAGE_MASK;
                n = want_pages_array(pages, maxsize, *start, maxpages);
                if (!n)
                        return -ENOMEM;
                res = get_user_pages_fast(addr, n, gup_flags, *pages);
                if (unlikely(res <= 0))
                        return res;
                maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
                iov_iter_advance(i, maxsize);
                return maxsize;
        }
        if (iov_iter_is_bvec(i)) {
                struct page **p;
                struct page *page;

                page = first_bvec_segment(i, &maxsize, start);
                n = want_pages_array(pages, maxsize, *start, maxpages);
                if (!n)
                        return -ENOMEM;
                p = *pages;
                for (int k = 0; k < n; k++)
                        get_page(p[k] = page + k);
                maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
                i->count -= maxsize;
                i->iov_offset += maxsize;
                if (i->iov_offset == i->bvec->bv_len) {
                        i->iov_offset = 0;
                        i->bvec++;
                        i->nr_segs--;
                }
                return maxsize;
        }
        if (iov_iter_is_pipe(i))
                return pipe_get_pages(i, pages, maxsize, maxpages, start);
        if (iov_iter_is_xarray(i))
                return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
        return -EFAULT;
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start, unsigned gup_flags)
{
        if (!maxpages)
                return 0;
        BUG_ON(!pages);

        return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
                                          start, gup_flags);
}
EXPORT_SYMBOL_GPL(iov_iter_get_pages);

ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
                size_t maxsize, unsigned maxpages, size_t *start)
{
        return iov_iter_get_pages(i, pages, maxsize, maxpages, start, 0);
}
EXPORT_SYMBOL(iov_iter_get_pages2);
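
/*
 * Illustrative sketch (not part of the original file): grabbing the
 * pages behind the next chunk of an iterator, e.g. while assembling a
 * bio.  On success the iterator has been advanced past the bytes
 * represented by the returned pages:
 *
 *      struct page *pages[16];
 *      size_t off;
 *      ssize_t got = iov_iter_get_pages2(i, pages, bytes,
 *                                        ARRAY_SIZE(pages), &off);
 *      if (got <= 0)
 *              return got ? got : -EFAULT;
 *      // data starts at offset off within pages[0]; drop each page
 *      // with put_page() once the I/O completes.
 */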

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start, unsigned gup_flags)
{
        ssize_t len;

        *pages = NULL;

        len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start,
                                         gup_flags);
        if (len <= 0) {
                kvfree(*pages);
                *pages = NULL;
        }
        return len;
}
EXPORT_SYMBOL_GPL(iov_iter_get_pages_alloc);

ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
                struct page ***pages, size_t maxsize, size_t *start)
{
        return iov_iter_get_pages_alloc(i, pages, maxsize, start, 0);
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
                               struct iov_iter *i)
{
        __wsum sum, next;
        sum = *csum;
        if (WARN_ON_ONCE(!i->data_source))
                return 0;

        iterate_and_advance(i, bytes, base, len, off, ({
                next = csum_and_copy_from_user(base, addr + off, len);
                sum = csum_block_add(sum, next, off);
                next ? 0 : len;
        }), ({
                sum = csum_and_memcpy(addr + off, base, len, sum, off);
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);
1555 | ||
52cbd23a | 1556 | size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, |
a604ec7e AV |
1557 | struct iov_iter *i) |
1558 | { | |
52cbd23a | 1559 | struct csum_state *csstate = _csstate; |
a604ec7e | 1560 | __wsum sum, next; |
78e1f386 | 1561 | |
a41dad90 AV |
1562 | if (WARN_ON_ONCE(i->data_source)) |
1563 | return 0; | |
78e1f386 | 1564 | if (unlikely(iov_iter_is_discard(i))) { |
c67f1fd2 AV |
1565 | // can't use csum_and_memcpy() for that one - data is not copied |
1566 | csstate->csum = csum_block_add(csstate->csum, | |
1567 | csum_partial(addr, bytes, 0), | |
1568 | csstate->off); | |
1569 | csstate->off += bytes; | |
1570 | return bytes; | |
241699cd | 1571 | } |
6852df12 AV |
1572 | |
1573 | sum = csum_shift(csstate->csum, csstate->off); | |
1574 | if (unlikely(iov_iter_is_pipe(i))) | |
1575 | bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum); | |
1576 | else iterate_and_advance(i, bytes, base, len, off, ({ | |
7baa5099 | 1577 | next = csum_and_copy_to_user(addr + off, base, len); |
2495bdcc | 1578 | sum = csum_block_add(sum, next, off); |
7baa5099 | 1579 | next ? 0 : len; |
a604ec7e | 1580 | }), ({ |
7baa5099 | 1581 | sum = csum_and_memcpy(base, addr + off, len, sum, off); |
a604ec7e AV |
1582 | }) |
1583 | ) | |
594e450b AV |
1584 | csstate->csum = csum_shift(sum, csstate->off); |
1585 | csstate->off += bytes; | |
a604ec7e AV |
1586 | return bytes; |
1587 | } | |
1588 | EXPORT_SYMBOL(csum_and_copy_to_iter); | |
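/*
 * Illustrative caller (editor's example, modelled loosely on the
 * networking users; the wrapper name is an assumption): a running
 * checksum is threaded through struct csum_state while data is copied
 * out, so successive calls can checksum a stream piecewise.
 */
static size_t demo_csum_copy(const void *buf, size_t len, struct iov_iter *to)
{
	struct csum_state csstate = { .csum = 0, .off = 0 };

	/* copies @len bytes and folds them into csstate.csum */
	return csum_and_copy_to_iter(buf, len, &csstate, to);
}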
1589 | ||
d05f4435 SG |
1590 | size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, |
1591 | struct iov_iter *i) | |
1592 | { | |
7999096f | 1593 | #ifdef CONFIG_CRYPTO_HASH |
d05f4435 SG |
1594 | struct ahash_request *hash = hashp; |
1595 | struct scatterlist sg; | |
1596 | size_t copied; | |
1597 | ||
1598 | copied = copy_to_iter(addr, bytes, i); | |
1599 | sg_init_one(&sg, addr, copied); | |
1600 | ahash_request_set_crypt(hash, &sg, NULL, copied); | |
1601 | crypto_ahash_update(hash); | |
1602 | return copied; | |
27fad74a Y |
1603 | #else |
1604 | return 0; | |
1605 | #endif | |
d05f4435 SG |
1606 | } |
1607 | EXPORT_SYMBOL(hash_and_copy_to_iter); | |
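/*
 * Sketch of feeding a copy through an ahash transform (editor's example;
 * assumes CONFIG_CRYPTO_HASH and that "sha256" is available, and all
 * names below are assumptions rather than this file's API).
 */
static size_t demo_hash_copy(const void *buf, size_t len, struct iov_iter *to)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	size_t copied = 0;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return 0;
	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (req && !crypto_ahash_init(req))
		copied = hash_and_copy_to_iter(buf, len, req, to);
	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return copied;
}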
1608 | ||
66531c65 | 1609 | static int iov_npages(const struct iov_iter *i, int maxpages) |
62a8067a | 1610 | { |
66531c65 AV |
1611 | size_t skip = i->iov_offset, size = i->count; |
1612 | const struct iovec *p; | |
e0f2dc40 AV |
1613 | int npages = 0; |
1614 | ||
66531c65 AV |
1615 | for (p = i->iov; size; skip = 0, p++) { |
1616 | unsigned offs = offset_in_page(p->iov_base + skip); | |
1617 | size_t len = min(p->iov_len - skip, size); | |
e0f2dc40 | 1618 | |
66531c65 AV |
1619 | if (len) { |
1620 | size -= len; | |
1621 | npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); | |
1622 | if (unlikely(npages > maxpages)) | |
1623 | return maxpages; | |
1624 | } | |
1625 | } | |
1626 | return npages; | |
1627 | } | |
1628 | ||
1629 | static int bvec_npages(const struct iov_iter *i, int maxpages) | |
1630 | { | |
1631 | size_t skip = i->iov_offset, size = i->count; | |
1632 | const struct bio_vec *p; | |
1633 | int npages = 0; | |
1634 | ||
1635 | for (p = i->bvec; size; skip = 0, p++) { | |
1636 | unsigned offs = (p->bv_offset + skip) % PAGE_SIZE; | |
1637 | size_t len = min(p->bv_len - skip, size); | |
1638 | ||
1639 | size -= len; | |
1640 | npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); | |
1641 | if (unlikely(npages > maxpages)) | |
1642 | return maxpages; | |
1643 | } | |
1644 | return npages; | |
1645 | } | |
1646 | ||
1647 | int iov_iter_npages(const struct iov_iter *i, int maxpages) | |
1648 | { | |
1649 | if (unlikely(!i->count)) | |
1650 | return 0; | |
fcb14cb1 AV |
1651 | if (likely(iter_is_ubuf(i))) { |
1652 | unsigned offs = offset_in_page(i->ubuf + i->iov_offset); | |
1653 | int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE); | |
1654 | return min(npages, maxpages); | |
1655 | } | |
66531c65 AV |
1656 | /* iovec and kvec have identical layouts */ |
1657 | if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) | |
1658 | return iov_npages(i, maxpages); | |
1659 | if (iov_iter_is_bvec(i)) | |
1660 | return bvec_npages(i, maxpages); | |
1661 | if (iov_iter_is_pipe(i)) { | |
66531c65 | 1662 | int npages; |
241699cd AV |
1663 | |
1664 | if (!sanity(i)) | |
1665 | return 0; | |
1666 | ||
12d426ab | 1667 | pipe_npages(i, &npages); |
66531c65 AV |
1668 | return min(npages, maxpages); |
1669 | } | |
1670 | if (iov_iter_is_xarray(i)) { | |
e4f8df86 AV |
1671 | unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; |
1672 | int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); | |
66531c65 AV |
1673 | return min(npages, maxpages); |
1674 | } | |
1675 | return 0; | |
62a8067a | 1676 | } |
f67da30c | 1677 | EXPORT_SYMBOL(iov_iter_npages); |
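/*
 * Typical use (editor's example; the names are assumptions): bound an
 * allocation before pinning pages, since iov_iter_npages() never returns
 * more than the cap it is given.
 */
static struct page **demo_alloc_page_array(const struct iov_iter *iter,
					   int maxpages)
{
	int npages = iov_iter_npages(iter, maxpages);

	return kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
}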
4b8164b9 AV |
1678 | |
1679 | const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) | |
1680 | { | |
1681 | *new = *old; | |
00e23707 | 1682 | if (unlikely(iov_iter_is_pipe(new))) { |
241699cd AV |
1683 | WARN_ON(1); |
1684 | return NULL; | |
1685 | } | |
00e23707 | 1686 | if (iov_iter_is_bvec(new)) |
4b8164b9 AV |
1687 | return new->bvec = kmemdup(new->bvec, |
1688 | new->nr_segs * sizeof(struct bio_vec), | |
1689 | flags); | |
fcb14cb1 | 1690 | else if (iov_iter_is_kvec(new) || iter_is_iovec(new)) |
4b8164b9 AV |
1691 | /* iovec and kvec have identical layout */ |
1692 | return new->iov = kmemdup(new->iov, | |
1693 | new->nr_segs * sizeof(struct iovec), | |
1694 | flags); | |
fcb14cb1 | 1695 | return NULL; |
4b8164b9 AV |
1696 | } |
1697 | EXPORT_SYMBOL(dup_iter); | |
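/*
 * Illustrative pairing for dup_iter() (editor's example; names are
 * assumptions): the copy shares the underlying buffers but gets its own
 * segment array, returned as a cookie for the caller to kfree().  For
 * iovec/kvec/bvec iterators a NULL return means the kmemdup() failed.
 */
static void demo_dup(struct iov_iter *src)
{
	struct iov_iter clone;
	const void *segs = dup_iter(&clone, src, GFP_KERNEL);

	/* ... use @clone independently of @src ... */
	kfree(segs);		/* kfree(NULL) is a no-op */
}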
bc917be8 | 1698 | |
bfdc5970 CH |
1699 | static int copy_compat_iovec_from_user(struct iovec *iov, |
1700 | const struct iovec __user *uvec, unsigned long nr_segs) | |
1701 | { | |
1702 | const struct compat_iovec __user *uiov = | |
1703 | (const struct compat_iovec __user *)uvec; | |
1704 | int ret = -EFAULT, i; | |
1705 | ||
a959a978 | 1706 | if (!user_access_begin(uiov, nr_segs * sizeof(*uiov))) |
bfdc5970 CH |
1707 | return -EFAULT; |
1708 | ||
1709 | for (i = 0; i < nr_segs; i++) { | |
1710 | compat_uptr_t buf; | |
1711 | compat_ssize_t len; | |
1712 | ||
1713 | unsafe_get_user(len, &uiov[i].iov_len, uaccess_end); | |
1714 | unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end); | |
1715 | ||
1716 | /* check for compat_size_t not fitting in compat_ssize_t ... */ |
1717 | if (len < 0) { | |
1718 | ret = -EINVAL; | |
1719 | goto uaccess_end; | |
1720 | } | |
1721 | iov[i].iov_base = compat_ptr(buf); | |
1722 | iov[i].iov_len = len; | |
1723 | } | |
1724 | ||
1725 | ret = 0; | |
1726 | uaccess_end: | |
1727 | user_access_end(); | |
1728 | return ret; | |
1729 | } | |
1730 | ||
1731 | static int copy_iovec_from_user(struct iovec *iov, | |
1732 | const struct iovec __user *uvec, unsigned long nr_segs) | |
fb041b59 DL |
1733 | { |
1734 | unsigned long seg; | |
fb041b59 | 1735 | |
bfdc5970 CH |
1736 | if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec))) |
1737 | return -EFAULT; | |
1738 | for (seg = 0; seg < nr_segs; seg++) { | |
1739 | if ((ssize_t)iov[seg].iov_len < 0) | |
1740 | return -EINVAL; | |
fb041b59 DL |
1741 | } |
1742 | ||
bfdc5970 CH |
1743 | return 0; |
1744 | } | |
1745 | ||
1746 | struct iovec *iovec_from_user(const struct iovec __user *uvec, | |
1747 | unsigned long nr_segs, unsigned long fast_segs, | |
1748 | struct iovec *fast_iov, bool compat) | |
1749 | { | |
1750 | struct iovec *iov = fast_iov; | |
1751 | int ret; | |
1752 | ||
fb041b59 | 1753 | /* |
bfdc5970 CH |
1754 | * SuS says "The readv() function *may* fail if the iovcnt argument was |
1755 | * less than or equal to 0, or greater than {IOV_MAX}." Linux has |
1756 | * traditionally returned zero for zero segments, so... | |
fb041b59 | 1757 | */ |
bfdc5970 CH |
1758 | if (nr_segs == 0) |
1759 | return iov; | |
1760 | if (nr_segs > UIO_MAXIOV) | |
1761 | return ERR_PTR(-EINVAL); | |
fb041b59 DL |
1762 | if (nr_segs > fast_segs) { |
1763 | iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL); | |
bfdc5970 CH |
1764 | if (!iov) |
1765 | return ERR_PTR(-ENOMEM); | |
fb041b59 | 1766 | } |
bfdc5970 CH |
1767 | |
1768 | if (compat) | |
1769 | ret = copy_compat_iovec_from_user(iov, uvec, nr_segs); | |
1770 | else | |
1771 | ret = copy_iovec_from_user(iov, uvec, nr_segs); | |
1772 | if (ret) { | |
1773 | if (iov != fast_iov) | |
1774 | kfree(iov); | |
1775 | return ERR_PTR(ret); | |
1776 | } | |
1777 | ||
1778 | return iov; | |
1779 | } | |
1780 | ||
1781 | ssize_t __import_iovec(int type, const struct iovec __user *uvec, | |
1782 | unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, | |
1783 | struct iov_iter *i, bool compat) | |
1784 | { | |
1785 | ssize_t total_len = 0; | |
1786 | unsigned long seg; | |
1787 | struct iovec *iov; | |
1788 | ||
1789 | iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat); | |
1790 | if (IS_ERR(iov)) { | |
1791 | *iovp = NULL; | |
1792 | return PTR_ERR(iov); | |
fb041b59 DL |
1793 | } |
1794 | ||
1795 | /* | |
bfdc5970 CH |
1796 | * According to the Single Unix Specification we should return EINVAL if |
1797 | * an element length is < 0 when cast to ssize_t or if the total length | |
1798 | * would overflow the ssize_t return value of the system call. | |
fb041b59 DL |
1799 | * |
1800 | * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the | |
1801 | * overflow case. | |
1802 | */ | |
fb041b59 | 1803 | for (seg = 0; seg < nr_segs; seg++) { |
fb041b59 DL |
1804 | ssize_t len = (ssize_t)iov[seg].iov_len; |
1805 | ||
bfdc5970 CH |
1806 | if (!access_ok(iov[seg].iov_base, len)) { |
1807 | if (iov != *iovp) | |
1808 | kfree(iov); | |
1809 | *iovp = NULL; | |
1810 | return -EFAULT; | |
fb041b59 | 1811 | } |
bfdc5970 CH |
1812 | |
1813 | if (len > MAX_RW_COUNT - total_len) { | |
1814 | len = MAX_RW_COUNT - total_len; | |
fb041b59 DL |
1815 | iov[seg].iov_len = len; |
1816 | } | |
bfdc5970 | 1817 | total_len += len; |
fb041b59 | 1818 | } |
bfdc5970 CH |
1819 | |
1820 | iov_iter_init(i, type, iov, nr_segs, total_len); | |
1821 | if (iov == *iovp) | |
1822 | *iovp = NULL; | |
1823 | else | |
1824 | *iovp = iov; | |
1825 | return total_len; | |
fb041b59 DL |
1826 | } |
1827 | ||
ffecee4f VN |
1828 | /** |
1829 | * import_iovec() - Copy an array of &struct iovec from userspace | |
1830 | * into the kernel, check that it is valid, and initialize a new | |
1831 | * &struct iov_iter iterator to access it. | |
1832 | * | |
1833 | * @type: One of %READ or %WRITE. | |
bfdc5970 | 1834 | * @uvec: Pointer to the userspace array. |
ffecee4f VN |
1835 | * @nr_segs: Number of elements in userspace array. |
1836 | * @fast_segs: Number of elements in *@iovp. |
bfdc5970 | 1837 | * @iovp: (input and output parameter) Pointer to pointer to (usually small |
ffecee4f VN |
1838 | * on-stack) kernel array. |
1839 | * @i: Pointer to iterator that will be initialized on success. | |
1840 | * | |
1841 | * If the array pointed to by *@iovp is large enough to hold all @nr_segs, |
1842 | * then this function places %NULL in *@iovp on return. Otherwise, a new |
1843 | * array will be allocated and the result placed in *@iovp. This means that |
1844 | * the caller may call kfree() on *@iovp regardless of whether the small |
1845 | * on-stack array was used or not (and regardless of whether this function |
1846 | * returns an error or not). |
1847 | * | |
87e5e6da | 1848 | * Return: Negative error code on error, bytes imported on success |
ffecee4f | 1849 | */ |
bfdc5970 | 1850 | ssize_t import_iovec(int type, const struct iovec __user *uvec, |
bc917be8 | 1851 | unsigned nr_segs, unsigned fast_segs, |
bfdc5970 | 1852 | struct iovec **iovp, struct iov_iter *i) |
bc917be8 | 1853 | { |
89cd35c5 CH |
1854 | return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i, |
1855 | in_compat_syscall()); | |
bc917be8 AV |
1856 | } |
1857 | EXPORT_SYMBOL(import_iovec); | |
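/*
 * The canonical calling pattern (editor's example; the function name is
 * an assumption): a small on-stack array serves the common case, and
 * kfree(iov) is always safe afterwards because import_iovec() stores
 * NULL through @iovp when the stack array was used.
 */
static ssize_t demo_readv(const struct iovec __user *uvec, unsigned nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;
	/* ... consume up to @ret bytes through &iter ... */
	kfree(iov);
	return ret;
}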
1858 | ||
bc917be8 AV |
1859 | int import_single_range(int rw, void __user *buf, size_t len, |
1860 | struct iovec *iov, struct iov_iter *i) | |
1861 | { | |
1862 | if (len > MAX_RW_COUNT) | |
1863 | len = MAX_RW_COUNT; | |
96d4f267 | 1864 | if (unlikely(!access_ok(buf, len))) |
bc917be8 AV |
1865 | return -EFAULT; |
1866 | ||
1867 | iov->iov_base = buf; | |
1868 | iov->iov_len = len; | |
1869 | iov_iter_init(i, rw, iov, 1, len); | |
1870 | return 0; | |
1871 | } | |
e1267585 | 1872 | EXPORT_SYMBOL(import_single_range); |
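/*
 * Single-buffer counterpart (editor's example; names are assumptions).
 * Note that the iovec must stay alive as long as the iterator does,
 * since the iterator points into it.
 */
static ssize_t demo_read_single(void __user *buf, size_t len)
{
	struct iovec iov;
	struct iov_iter iter;
	int ret = import_single_range(READ, buf, len, &iov, &iter);

	if (ret)
		return ret;
	/* ... consume up to iov_iter_count(&iter) bytes through &iter ... */
	return iov_iter_count(&iter);
}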
8fb0f47a JA |
1873 | |
1874 | /** | |
1875 | * iov_iter_restore() - Restore a &struct iov_iter to the same state as when | |
1876 | * iov_iter_save_state() was called. | |
1877 | * | |
1878 | * @i: &struct iov_iter to restore | |
1879 | * @state: state to restore from | |
1880 | * | |
1881 | * Used after iov_iter_save_state() to restore @i, if operations may |
1882 | * have advanced it. | |
1883 | * | |
1884 | * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC, and ITER_UBUF |
1885 | */ | |
1886 | void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state) | |
1887 | { | |
1888 | if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) && |
fcb14cb1 | 1889 | !iov_iter_is_kvec(i) && !iter_is_ubuf(i))) |
8fb0f47a JA |
1890 | return; |
1891 | i->iov_offset = state->iov_offset; | |
1892 | i->count = state->count; | |
fcb14cb1 AV |
1893 | if (iter_is_ubuf(i)) |
1894 | return; | |
8fb0f47a JA |
1895 | /* |
1896 | * For the *vec iters, nr_segs + iov is constant - if we increment | |
1897 | * the vec, then we also decrement the nr_segs count. Hence we don't | |
1898 | * need to track both of these, just one is enough and we can deduct | |
1899 | * the other from that. ITER_KVEC and ITER_IOVEC are the same struct | |
1900 | * size, so we can just increment the iov pointer as they are unionzed. | |
1901 | * ITER_BVEC _may_ be the same size on some archs, but on others it is | |
1902 | * not. Be safe and handle it separately. | |
1903 | */ | |
1904 | BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec)); | |
1905 | if (iov_iter_is_bvec(i)) | |
1906 | i->bvec -= state->nr_segs - i->nr_segs; | |
1907 | else | |
1908 | i->iov -= state->nr_segs - i->nr_segs; | |
1909 | i->nr_segs = state->nr_segs; | |
1910 | } |
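/*
 * Illustrative save/restore round trip (editor's example; names are
 * assumptions): snapshot the iterator before an operation that may
 * consume it, and rewind if that operation has to be retried.
 */
static ssize_t demo_retry(struct iov_iter *iter)
{
	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(iter, &state);
	ret = -EAGAIN;			/* stand-in for a real operation */
	if (ret == -EAGAIN)
		iov_iter_restore(iter, &state);
	return ret;
}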