/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <crypto/hash.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_IOVEC = 4,
	ITER_KVEC = 8,
	ITER_BVEC = 16,
	ITER_PIPE = 32,
	ITER_DISCARD = 64,
};

struct iov_iter {
	/*
	 * Bit 0 is the read/write bit, set if we're writing.
	 * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
	 * the caller isn't expecting to drop a page reference when done.
	 */
	unsigned int type;
	size_t iov_offset;
	size_t count;
	union {
		const struct iovec *iov;
		const struct kvec *kvec;
		const struct bio_vec *bvec;
		struct pipe_inode_info *pipe;
	};
	union {
		unsigned long nr_segs;
		struct {
			int idx;
			int start_idx;
		};
	};
};

static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->type & ~(READ | WRITE);
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_pipe(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_PIPE;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->type & (READ | WRITE);
}

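/*
 * Illustrative sketch of how the type bits combine (READ is 0 and WRITE
 * is 1 in the kernel, so the low bit carries the direction and the rest
 * carries the enum iter_type value):
 *
 *	i->type = ITER_BVEC | WRITE;
 *	iov_iter_type(i);	// == ITER_BVEC (direction bit masked off)
 *	iov_iter_rw(i);		// == WRITE (iter type masked off)
 */
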
/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}

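/*
 * Sketch of the overflow the note above warns about: with a 32-bit
 * size_t, two segments of 0x80000000 bytes each sum to 0, so an
 * unvalidated iovec can make iov_length() report far less than the
 * vector really covers. Callers are expected to have bounded each
 * iov_len first (e.g. the readv/writev import paths do this when
 * copying the vector in from userspace).
 */
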
static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
	return (struct iovec) {
		.iov_base = iter->iov->iov_base + iter->iov_offset,
		.iov_len = min(iter->count,
			       iter->iov->iov_len - iter->iov_offset),
	};
}

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return false;
	else
		return _copy_from_iter_full_nocache(addr, bytes, i);
}

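/*
 * Usage sketch (hypothetical ->read_iter() method; foo_buf and foo_len
 * are assumed to exist): copy a kernel buffer to whatever the iterator
 * describes, where a short copy means a segment faulted:
 *
 *	static ssize_t foo_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		size_t copied = copy_to_iter(foo_buf, foo_len, to);
 *
 *		return copied ? copied : -EFAULT;
 *	}
 */
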
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on the stricter semantics of
 * copy_from_iter_flushcache() relative to copy_from_iter_nocache() must
 * check for IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming
 * that the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_to_iter_mcsafe _copy_to_iter
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	else
		return _copy_from_iter_flushcache(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	else
		return _copy_to_iter_mcsafe(addr, bytes, i);
}

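/*
 * Sketch of the IS_ENABLED() check the flushcache note above asks for
 * (hypothetical pmem-style caller; flush_dst_explicitly() is a
 * placeholder name, not a real helper):
 *
 *	copied = copy_from_iter_flushcache(dst, bytes, i);
 *	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
 *		flush_dst_explicitly(dst, copied);
 */
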
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
			size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

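/*
 * Constructor usage sketch: wrap a single kernel buffer as the
 * destination of a read, so it can be handed to code that consumes an
 * iov_iter (buf and len assumed):
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 */
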
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it is an upper limit for it. Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - the comparison extends both
	 * operands to u64 here, and any value that would be truncated by
	 * conversion in the assignment is by definition greater than all
	 * values of size_t, including the old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * Reexpand a previously truncated iterator; count must be no more than
 * the iterator's size before it was truncated.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}
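
/*
 * Typical pairing sketch ('limit' assumed): clamp the iterator for a
 * bounded operation, then restore the untruncated view afterwards.
 *
 *	size_t old_count = iov_iter_count(i);
 *
 *	iov_iter_truncate(i, limit);
 *	// ...operate on at most 'limit' bytes without advancing i...
 *	iov_iter_reexpand(i, old_count);
 */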
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i);

ssize_t import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);

#ifdef CONFIG_COMPAT
struct compat_iovec;
ssize_t compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i);
#endif

int import_single_range(int type, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i);

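/*
 * import_iovec() usage sketch (hypothetical syscall path; 'uvec' and
 * 'nr_segs' assumed): on success the return value is the total byte
 * count, *iov points at the possibly heap-allocated vector, and the
 * caller must free it.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ...consume &iter...
 *	kfree(iov);
 */
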
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
			    int (*f)(struct kvec *vec, void *context),
			    void *context);

#endif