Commit | Line | Data |
---|---|---|
457c8996 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
4bbd4c77 KS |
2 | #include <linux/kernel.h> |
3 | #include <linux/errno.h> | |
4 | #include <linux/err.h> | |
5 | #include <linux/spinlock.h> | |
6 | ||
4bbd4c77 | 7 | #include <linux/mm.h> |
89c1905d | 8 | #include <linux/memfd.h> |
3565fce3 | 9 | #include <linux/memremap.h> |
4bbd4c77 KS |
10 | #include <linux/pagemap.h> |
11 | #include <linux/rmap.h> | |
12 | #include <linux/swap.h> | |
13 | #include <linux/swapops.h> | |
1507f512 | 14 | #include <linux/secretmem.h> |
4bbd4c77 | 15 | |
174cd4b1 | 16 | #include <linux/sched/signal.h> |
2667f50e | 17 | #include <linux/rwsem.h> |
f30c59e9 | 18 | #include <linux/hugetlb.h> |
9a4e9f3b AK |
19 | #include <linux/migrate.h> |
20 | #include <linux/mm_inline.h> | |
89c1905d | 21 | #include <linux/pagevec.h> |
9a4e9f3b | 22 | #include <linux/sched/mm.h> |
a6e79df9 | 23 | #include <linux/shmem_fs.h> |
1027e443 | 24 | |
33a709b2 | 25 | #include <asm/mmu_context.h> |
1027e443 | 26 | #include <asm/tlbflush.h> |
2667f50e | 27 | |
4bbd4c77 | 28 | #include "internal.h" |
7d0f0f06 | 29 | #include "swap.h" |
4bbd4c77 | 30 | |
df06b37f KB |
31 | struct follow_page_context { |
32 | struct dev_pagemap *pgmap; | |
33 | unsigned int page_mask; | |
34 | }; | |
35 | ||
b6a2619c DH |
36 | static inline void sanity_check_pinned_pages(struct page **pages, |
37 | unsigned long npages) | |
38 | { | |
39 | if (!IS_ENABLED(CONFIG_DEBUG_VM)) | |
40 | return; | |
41 | ||
42 | /* | |
43 | * We only pin anonymous pages if they are exclusive. Once pinned, we | |
44 | * can no longer turn them possibly shared and PageAnonExclusive() will | |
45 | * stick around until the page is freed. | |
46 | * | |
47 | * We'd like to verify that our pinned anonymous pages are still mapped | |
48 | * exclusively. The issue with anon THP is that we don't know how | |
49 | * they are/were mapped when pinning them. However, for anon | |
50 | * THP we can assume that either the given page (PTE-mapped THP) or | |
51 | * the head page (PMD-mapped THP) should be PageAnonExclusive(). If | |
52 | * neither is the case, there is certainly something wrong. | |
53 | */ | |
54 | for (; npages; npages--, pages++) { | |
55 | struct page *page = *pages; | |
a1268be2 JH |
56 | struct folio *folio; |
57 | ||
58 | if (!page) | |
59 | continue; | |
60 | ||
61 | folio = page_folio(page); | |
b6a2619c | 62 | |
c8070b78 DH |
63 | if (is_zero_page(page) || |
64 | !folio_test_anon(folio)) | |
b6a2619c DH |
65 | continue; |
66 | if (!folio_test_large(folio) || folio_test_hugetlb(folio)) | |
792b429d | 67 | VM_WARN_ON_ONCE_FOLIO(!PageAnonExclusive(&folio->page), folio); |
b6a2619c DH |
68 | else |
69 | /* Either a PTE-mapped or a PMD-mapped THP. */ | |
792b429d DH |
70 | VM_WARN_ON_ONCE_PAGE(!PageAnonExclusive(&folio->page) && |
71 | !PageAnonExclusive(page), page); | |
b6a2619c DH |
72 | } |
73 | } | |
74 | ||
cd1adf1b | 75 | /* |
ece1ed7b | 76 | * Return the folio with ref appropriately incremented, |
cd1adf1b | 77 | * or NULL if that failed. |
a707cdd5 | 78 | */ |
ece1ed7b | 79 | static inline struct folio *try_get_folio(struct page *page, int refs) |
a707cdd5 | 80 | { |
ece1ed7b | 81 | struct folio *folio; |
a707cdd5 | 82 | |
59409373 | 83 | retry: |
ece1ed7b MWO |
84 | folio = page_folio(page); |
85 | if (WARN_ON_ONCE(folio_ref_count(folio) < 0)) | |
a707cdd5 | 86 | return NULL; |
fa2690af | 87 | if (unlikely(!folio_ref_try_add(folio, refs))) |
a707cdd5 | 88 | return NULL; |
c24d3732 JH |
89 | |
90 | /* | |
ece1ed7b MWO |
91 | * At this point we have a stable reference to the folio; but it |
92 | * could be that between calling page_folio() and the refcount | |
93 | * increment, the folio was split, in which case we'd end up | |
94 | * holding a reference on a folio that has nothing to do with the page | |
c24d3732 | 95 | * we were given anymore. |
ece1ed7b MWO |
96 | * So now that the folio is stable, recheck that the page still |
97 | * belongs to this folio. | |
c24d3732 | 98 | */ |
ece1ed7b | 99 | if (unlikely(page_folio(page) != folio)) { |
38607c62 | 100 | folio_put_refs(folio, refs); |
59409373 | 101 | goto retry; |
c24d3732 JH |
102 | } |
103 | ||
ece1ed7b | 104 | return folio; |
a707cdd5 JH |
105 | } |
106 | ||
d8ddc099 | 107 | static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) |
4509b42c JG |
108 | { |
109 | if (flags & FOLL_PIN) { | |
c8070b78 DH |
110 | if (is_zero_folio(folio)) |
111 | return; | |
d8ddc099 | 112 | node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs); |
31a31da8 | 113 | if (folio_has_pincount(folio)) |
94688e8e | 114 | atomic_sub(refs, &folio->_pincount); |
4509b42c JG |
115 | else |
116 | refs *= GUP_PIN_COUNTING_BIAS; | |
117 | } | |
118 | ||
38607c62 | 119 | folio_put_refs(folio, refs); |
4509b42c JG |
120 | } |
121 | ||
3faa52c0 | 122 | /** |
f442fa61 YS |
123 | * try_grab_folio() - increase a folio's refcount by a flag-dependent amount |
124 | * @folio: pointer to folio to be grabbed | |
125 | * @refs: the value to (effectively) add to the folio's refcount | |
126 | * @flags: gup flags: these are the FOLL_* flag values | |
3faa52c0 JH |
127 | * |
128 | * This might not do anything at all, depending on the flags argument. | |
129 | * | |
130 | * "grab" names in this file mean, "look at flags to decide whether to use | |
f442fa61 | 131 | * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount." |
3faa52c0 | 132 | * |
3faa52c0 | 133 | * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same |
f442fa61 | 134 | * time. |
3faa52c0 | 135 | * |
0f089235 LG |
136 | * Return: 0 for success, or if no action was required (if neither FOLL_PIN |
137 | * nor FOLL_GET was set, nothing is done). A negative error code for failure: |
138 | * |
f442fa61 | 139 | * -ENOMEM FOLL_GET or FOLL_PIN was set, but the folio could not |
0f089235 | 140 | * be grabbed. |
f442fa61 YS |
141 | * |
142 | * It is called when we have a stable reference for the folio, typically in | |
143 | * the GUP slow path. |
3faa52c0 | 144 | */ |
f442fa61 YS |
145 | int __must_check try_grab_folio(struct folio *folio, int refs, |
146 | unsigned int flags) | |
3faa52c0 | 147 | { |
5fec0719 | 148 | if (WARN_ON_ONCE(folio_ref_count(folio) <= 0)) |
0f089235 | 149 | return -ENOMEM; |
3faa52c0 | 150 | |
f442fa61 | 151 | if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page))) |
4003f107 | 152 | return -EREMOTEIO; |
3faa52c0 | 153 | |
c36c04c2 | 154 | if (flags & FOLL_GET) |
f442fa61 | 155 | folio_ref_add(folio, refs); |
c36c04c2 | 156 | else if (flags & FOLL_PIN) { |
c8070b78 DH |
157 | /* |
158 | * Don't take a pin on the zero page - it's not going anywhere | |
159 | * and it is used in a *lot* of places. | |
160 | */ | |
f442fa61 | 161 | if (is_zero_folio(folio)) |
c8070b78 DH |
162 | return 0; |
163 | ||
c36c04c2 | 164 | /* |
f442fa61 | 165 | * Increment the normal page refcount field at least once, |
78d9d6ce | 166 | * so that the page really is pinned. |
c36c04c2 | 167 | */ |
31a31da8 | 168 | if (folio_has_pincount(folio)) { |
f442fa61 YS |
169 | folio_ref_add(folio, refs); |
170 | atomic_add(refs, &folio->_pincount); | |
8ea2979c | 171 | } else { |
f442fa61 | 172 | folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS); |
8ea2979c | 173 | } |
c36c04c2 | 174 | |
f442fa61 | 175 | node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); |
c36c04c2 JH |
176 | } |
177 | ||
0f089235 | 178 | return 0; |
3faa52c0 JH |
179 | } |
180 | ||
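/*
 * Illustrative sketch (editorial addition, not part of the blamed gup.c): how
 * the FOLL_PIN accounting in try_grab_folio() above is observable for an
 * order-0 folio. Small folios carry no _pincount (folio_has_pincount() is
 * false), so each pin inflates the plain refcount by GUP_PIN_COUNTING_BIAS.
 * The helper name is hypothetical and the folio is assumed to be a normal,
 * non-zero-page folio on which we already hold a stable reference.
 */
static void example_observe_small_folio_pin(struct folio *folio)
{
	int before = folio_ref_count(folio);

	if (try_grab_folio(folio, 1, FOLL_PIN))
		return;

	/* One pin == one refcount step of GUP_PIN_COUNTING_BIAS (1024). */
	VM_WARN_ON_ONCE(folio_ref_count(folio) != before + GUP_PIN_COUNTING_BIAS);

	/* Drop the pin again; the bias is subtracted by gup_put_folio(). */
	gup_put_folio(folio, 1, FOLL_PIN);
}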
3faa52c0 JH |
181 | /** |
182 | * unpin_user_page() - release a dma-pinned page | |
183 | * @page: pointer to page to be released | |
184 | * | |
185 | * Pages that were pinned via pin_user_pages*() must be released via either | |
186 | * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so | |
187 | * that such pages can be separately tracked and uniquely handled. In | |
188 | * particular, interactions with RDMA and filesystems need special handling. | |
189 | */ | |
190 | void unpin_user_page(struct page *page) | |
191 | { | |
b6a2619c | 192 | sanity_check_pinned_pages(&page, 1); |
d8ddc099 | 193 | gup_put_folio(page_folio(page), 1, FOLL_PIN); |
3faa52c0 JH |
194 | } |
195 | EXPORT_SYMBOL(unpin_user_page); | |
196 | ||
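/*
 * Illustrative sketch (editorial addition, not part of the blamed gup.c): the
 * expected pairing of pin_user_pages_fast() with unpin_user_page(). The
 * function name and the fixed four-page batch are hypothetical; the point is
 * that every successfully pinned page must be released through the
 * unpin_user_page*() family, never through plain put_page().
 */
static int example_pin_then_unpin(unsigned long uaddr)
{
	struct page *pages[4];
	int pinned, i;

	pinned = pin_user_pages_fast(uaddr, ARRAY_SIZE(pages), FOLL_WRITE, pages);
	if (pinned <= 0)
		return pinned;

	/* ... hand the pages to hardware, or access them via kmap_local_page() ... */

	for (i = 0; i < pinned; i++)
		unpin_user_page(pages[i]);
	return 0;
}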
6cc04054 VK |
197 | /** |
198 | * unpin_folio() - release a dma-pinned folio | |
199 | * @folio: pointer to folio to be released | |
200 | * | |
201 | * Folios that were pinned via memfd_pin_folios() or other similar routines | |
202 | * must be released either using unpin_folio() or unpin_folios(). | |
203 | */ | |
204 | void unpin_folio(struct folio *folio) | |
205 | { | |
206 | gup_put_folio(folio, 1, FOLL_PIN); | |
207 | } | |
208 | EXPORT_SYMBOL_GPL(unpin_folio); | |
209 | ||
1101fb8f DH |
210 | /** |
211 | * folio_add_pin - Try to get an additional pin on a pinned folio | |
212 | * @folio: The folio to be pinned | |
213 | * | |
214 | * Get an additional pin on a folio we already have a pin on. Makes no change | |
215 | * if the folio is a zero_page. | |
216 | */ | |
217 | void folio_add_pin(struct folio *folio) | |
218 | { | |
219 | if (is_zero_folio(folio)) | |
220 | return; | |
221 | ||
222 | /* | |
223 | * Similar to try_grab_folio(): be sure to *also* increment the normal | |
224 | * page refcount field at least once, so that the page really is | |
225 | * pinned. | |
226 | */ | |
31a31da8 | 227 | if (folio_has_pincount(folio)) { |
1101fb8f DH |
228 | WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1); |
229 | folio_ref_inc(folio); | |
230 | atomic_inc(&folio->_pincount); | |
231 | } else { | |
232 | WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS); | |
233 | folio_ref_add(folio, GUP_PIN_COUNTING_BIAS); | |
234 | } | |
235 | } | |
236 | ||
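/*
 * Illustrative sketch (editorial addition, not part of the blamed gup.c):
 * duplicating a pin with folio_add_pin() when one pinned folio ends up
 * tracked by two bookkeeping structures, e.g. when a pinned buffer is split.
 * Each tracking structure then drops its own pin via unpin_folio(). The
 * helper name is hypothetical.
 */
static void example_duplicate_pin(struct folio *pinned)
{
	/* We already hold one pin on @pinned; take a second, independent one. */
	folio_add_pin(pinned);

	/* ... later, both owners release their pins independently ... */
	unpin_folio(pinned);
	unpin_folio(pinned);
}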
659508f9 | 237 | static inline struct folio *gup_folio_range_next(struct page *start, |
8f39f5fc | 238 | unsigned long npages, unsigned long i, unsigned int *ntails) |
458a4f78 | 239 | { |
659508f9 MWO |
240 | struct page *next = nth_page(start, i); |
241 | struct folio *folio = page_folio(next); | |
458a4f78 JM |
242 | unsigned int nr = 1; |
243 | ||
659508f9 | 244 | if (folio_test_large(folio)) |
4c654229 | 245 | nr = min_t(unsigned int, npages - i, |
659508f9 | 246 | folio_nr_pages(folio) - folio_page_idx(folio, next)); |
458a4f78 | 247 | |
458a4f78 | 248 | *ntails = nr; |
659508f9 | 249 | return folio; |
458a4f78 JM |
250 | } |
251 | ||
12521c76 | 252 | static inline struct folio *gup_folio_next(struct page **list, |
28297dbc | 253 | unsigned long npages, unsigned long i, unsigned int *ntails) |
8745d7f6 | 254 | { |
12521c76 | 255 | struct folio *folio = page_folio(list[i]); |
8745d7f6 JM |
256 | unsigned int nr; |
257 | ||
8745d7f6 | 258 | for (nr = i + 1; nr < npages; nr++) { |
12521c76 | 259 | if (page_folio(list[nr]) != folio) |
8745d7f6 JM |
260 | break; |
261 | } | |
262 | ||
8745d7f6 | 263 | *ntails = nr - i; |
12521c76 | 264 | return folio; |
8745d7f6 JM |
265 | } |
266 | ||
fc1d8e7c | 267 | /** |
f1f6a7dd | 268 | * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages |
2d15eb31 | 269 | * @pages: array of pages to be maybe marked dirty, and definitely released. |
fc1d8e7c | 270 | * @npages: number of pages in the @pages array. |
2d15eb31 | 271 | * @make_dirty: whether to mark the pages dirty |
fc1d8e7c JH |
272 | * |
273 | * "gup-pinned page" refers to a page that has had one of the pin_user_pages() |
274 | * variants called on that page. | |
275 | * | |
276 | * For each page in the @pages array, make that page (or its head page, if a | |
2d15eb31 | 277 | * compound page) dirty, if @make_dirty is true, and if the page was previously |
f1f6a7dd JH |
278 | * listed as clean. In any case, all pages are released using unpin_user_page() |
279 | * (possibly via unpin_user_pages() in the non-dirty case). |
fc1d8e7c | 280 | * |
f1f6a7dd | 281 | * Please see the unpin_user_page() documentation for details. |
fc1d8e7c | 282 | * |
2d15eb31 | 283 | * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is |
284 | * required, then the caller should a) verify that this is really correct, | |
285 | * because _lock() is usually required, and b) hand code it: | |
f1f6a7dd | 286 | * set_page_dirty_lock(), unpin_user_page(). |
fc1d8e7c JH |
287 | * |
288 | */ | |
f1f6a7dd JH |
289 | void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, |
290 | bool make_dirty) | |
fc1d8e7c | 291 | { |
12521c76 MWO |
292 | unsigned long i; |
293 | struct folio *folio; | |
294 | unsigned int nr; | |
2d15eb31 | 295 | |
296 | if (!make_dirty) { | |
f1f6a7dd | 297 | unpin_user_pages(pages, npages); |
2d15eb31 | 298 | return; |
299 | } | |
300 | ||
b6a2619c | 301 | sanity_check_pinned_pages(pages, npages); |
12521c76 MWO |
302 | for (i = 0; i < npages; i += nr) { |
303 | folio = gup_folio_next(pages, npages, i, &nr); | |
2d15eb31 | 304 | /* |
305 | * Checking PageDirty at this point may race with | |
306 | * clear_page_dirty_for_io(), but that's OK. Two key | |
307 | * cases: | |
308 | * | |
309 | * 1) This code sees the page as already dirty, so it | |
310 | * skips the call to set_page_dirty(). That could happen | |
311 | * because clear_page_dirty_for_io() called | |
a929e0d1 | 312 | * folio_mkclean(), followed by set_page_dirty(). |
2d15eb31 | 313 | * However, now the page is going to get written back, |
314 | * which meets the original intention of setting it | |
315 | * dirty, so all is well: clear_page_dirty_for_io() goes | |
316 | * on to call TestClearPageDirty(), and write the page | |
317 | * back. | |
318 | * | |
319 | * 2) This code sees the page as clean, so it calls | |
320 | * set_page_dirty(). The page stays dirty, despite being | |
321 | * written back, so it gets written back again in the | |
322 | * next writeback cycle. This is harmless. | |
323 | */ | |
12521c76 MWO |
324 | if (!folio_test_dirty(folio)) { |
325 | folio_lock(folio); | |
326 | folio_mark_dirty(folio); | |
327 | folio_unlock(folio); | |
328 | } | |
329 | gup_put_folio(folio, nr, FOLL_PIN); | |
2d15eb31 | 330 | } |
fc1d8e7c | 331 | } |
f1f6a7dd | 332 | EXPORT_SYMBOL(unpin_user_pages_dirty_lock); |
fc1d8e7c | 333 | |
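/*
 * Illustrative sketch (editorial addition, not part of the blamed gup.c): the
 * typical "device wrote into user memory" completion path. The pages were
 * pinned earlier with pin_user_pages*(..., FOLL_WRITE, ...); when the device
 * is done, the whole array is dirtied (if needed) and released in one call.
 * The function name and the @device_wrote flag are hypothetical.
 */
static void example_dma_read_complete(struct page **pages, unsigned long npages,
				      bool device_wrote)
{
	/*
	 * Marks each folio dirty under the folio lock only when @device_wrote
	 * is true, then drops the FOLL_PIN references in folio-sized batches.
	 */
	unpin_user_pages_dirty_lock(pages, npages, device_wrote);
}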
458a4f78 JM |
334 | /** |
335 | * unpin_user_page_range_dirty_lock() - release and optionally dirty | |
336 | * gup-pinned page range | |
337 | * | |
338 | * @page: the starting page of a range maybe marked dirty, and definitely released. | |
339 | * @npages: number of consecutive pages to release. | |
340 | * @make_dirty: whether to mark the pages dirty | |
341 | * | |
342 | * "gup-pinned page range" refers to a range of pages that has had one of the | |
343 | * pin_user_pages() variants called on that page. | |
344 | * | |
345 | * For the page range defined by [page .. page+npages-1], make that range (or |
346 | * its head pages, if a compound page) dirty, if @make_dirty is true, and if the | |
347 | * page range was previously listed as clean. | |
348 | * | |
349 | * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is | |
350 | * required, then the caller should a) verify that this is really correct, | |
351 | * because _lock() is usually required, and b) hand code it: | |
352 | * set_page_dirty_lock(), unpin_user_page(). | |
353 | * | |
354 | */ | |
355 | void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, | |
356 | bool make_dirty) | |
357 | { | |
659508f9 MWO |
358 | unsigned long i; |
359 | struct folio *folio; | |
360 | unsigned int nr; | |
361 | ||
362 | for (i = 0; i < npages; i += nr) { | |
363 | folio = gup_folio_range_next(page, npages, i, &nr); | |
364 | if (make_dirty && !folio_test_dirty(folio)) { | |
365 | folio_lock(folio); | |
366 | folio_mark_dirty(folio); | |
367 | folio_unlock(folio); | |
368 | } | |
369 | gup_put_folio(folio, nr, FOLL_PIN); | |
458a4f78 JM |
370 | } |
371 | } | |
372 | EXPORT_SYMBOL(unpin_user_page_range_dirty_lock); | |
373 | ||
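/*
 * Illustrative sketch (editorial addition, not part of the blamed gup.c):
 * releasing a physically contiguous pinned range, e.g. the base pages of a
 * huge page a driver pinned for device access, without materializing a
 * struct page *array. The function name is hypothetical.
 */
static void example_release_contiguous_range(struct page *first_page,
					     unsigned long npages,
					     bool device_wrote)
{
	/* Dirty (if requested) and unpin pages first_page .. first_page+npages-1. */
	unpin_user_page_range_dirty_lock(first_page, npages, device_wrote);
}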
23babe19 | 374 | static void gup_fast_unpin_user_pages(struct page **pages, unsigned long npages) |
b6a2619c DH |
375 | { |
376 | unsigned long i; | |
377 | struct folio *folio; | |
378 | unsigned int nr; | |
379 | ||
380 | /* | |
381 | * Don't perform any sanity checks because we might have raced with | |
382 | * fork() and some anonymous pages might now actually be shared -- | |
383 | * which is why we're unpinning after all. | |
384 | */ | |
385 | for (i = 0; i < npages; i += nr) { | |
386 | folio = gup_folio_next(pages, npages, i, &nr); | |
387 | gup_put_folio(folio, nr, FOLL_PIN); | |
388 | } | |
389 | } | |
390 | ||
fc1d8e7c | 391 | /** |
f1f6a7dd | 392 | * unpin_user_pages() - release an array of gup-pinned pages. |
fc1d8e7c JH |
393 | * @pages: array of pages to be released. |
394 | * @npages: number of pages in the @pages array. | |
395 | * | |
f1f6a7dd | 396 | * For each page in the @pages array, release the page using unpin_user_page(). |
fc1d8e7c | 397 | * |
f1f6a7dd | 398 | * Please see the unpin_user_page() documentation for details. |
fc1d8e7c | 399 | */ |
f1f6a7dd | 400 | void unpin_user_pages(struct page **pages, unsigned long npages) |
fc1d8e7c | 401 | { |
12521c76 MWO |
402 | unsigned long i; |
403 | struct folio *folio; | |
404 | unsigned int nr; | |
fc1d8e7c | 405 | |
146608bb JH |
406 | /* |
407 | * If this WARN_ON() fires, then the system *might* be leaking pages (by | |
408 | * leaving them pinned), but probably not. More likely, gup/pup returned | |
409 | * a hard -ERRNO error to the caller, who erroneously passed it here. | |
410 | */ | |
411 | if (WARN_ON(IS_ERR_VALUE(npages))) | |
412 | return; | |
31b912de | 413 | |
b6a2619c | 414 | sanity_check_pinned_pages(pages, npages); |
12521c76 | 415 | for (i = 0; i < npages; i += nr) { |
a1268be2 JH |
416 | if (!pages[i]) { |
417 | nr = 1; | |
418 | continue; | |
419 | } | |
12521c76 MWO |
420 | folio = gup_folio_next(pages, npages, i, &nr); |
421 | gup_put_folio(folio, nr, FOLL_PIN); | |
e7602748 | 422 | } |
fc1d8e7c | 423 | } |
f1f6a7dd | 424 | EXPORT_SYMBOL(unpin_user_pages); |
fc1d8e7c | 425 | |
d3bfbfb1 KK |
426 | /** |
427 | * unpin_user_folio() - release pages of a folio | |
428 | * @folio: pointer to folio to be released | |
429 | * @npages: number of pages of same folio | |
430 | * | |
431 | * Release @npages pin references on the folio. |
432 | */ | |
433 | void unpin_user_folio(struct folio *folio, unsigned long npages) | |
434 | { | |
435 | gup_put_folio(folio, npages, FOLL_PIN); | |
436 | } | |
437 | EXPORT_SYMBOL(unpin_user_folio); | |
438 | ||
6cc04054 VK |
439 | /** |
440 | * unpin_folios() - release an array of gup-pinned folios. | |
441 | * @folios: array of folios to be released. |
442 | * @nfolios: number of folios in the @folios array. | |
443 | * | |
444 | * For each folio in the @folios array, release the folio using gup_put_folio. | |
445 | * | |
446 | * Please see the unpin_folio() documentation for details. | |
447 | */ | |
448 | void unpin_folios(struct folio **folios, unsigned long nfolios) | |
449 | { | |
450 | unsigned long i = 0, j; | |
451 | ||
452 | /* | |
453 | * If this WARN_ON() fires, then the system *might* be leaking folios | |
454 | * (by leaving them pinned), but probably not. More likely, gup/pup | |
455 | * returned a hard -ERRNO error to the caller, who erroneously passed | |
456 | * it here. | |
457 | */ | |
458 | if (WARN_ON(IS_ERR_VALUE(nfolios))) | |
459 | return; | |
460 | ||
461 | while (i < nfolios) { | |
462 | for (j = i + 1; j < nfolios; j++) | |
463 | if (folios[i] != folios[j]) | |
464 | break; | |
465 | ||
466 | if (folios[i]) | |
467 | gup_put_folio(folios[i], j - i, FOLL_PIN); | |
468 | i = j; | |
469 | } | |
470 | } | |
471 | EXPORT_SYMBOL_GPL(unpin_folios); | |
472 | ||
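/*
 * Illustrative sketch (editorial addition, not part of the blamed gup.c): a
 * pin/unpin cycle for memfd-backed folios, assuming the memfd_pin_folios()
 * prototype from <linux/memfd.h> (memfd file, byte range, output array, array
 * size, output start offset). The helper name and the fixed-size batch are
 * hypothetical.
 */
static long example_pin_memfd_range(struct file *memfd, loff_t start, loff_t end)
{
	struct folio *folios[16];
	pgoff_t offset;
	long nr;

	nr = memfd_pin_folios(memfd, start, end, folios, ARRAY_SIZE(folios),
			      &offset);
	if (nr <= 0)
		return nr;

	/* ... use the pinned folios, e.g. build a scatterlist for a device ... */

	unpin_folios(folios, nr);
	return 0;
}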
a458b76a AA |
473 | /* |
474 | * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's | |
475 | * lifecycle. Avoid setting the bit unless necessary, or it might cause write | |
476 | * cache bouncing on large SMP machines for concurrent pinned gups. | |
477 | */ | |
478 | static inline void mm_set_has_pinned_flag(unsigned long *mm_flags) | |
479 | { | |
480 | if (!test_bit(MMF_HAS_PINNED, mm_flags)) | |
481 | set_bit(MMF_HAS_PINNED, mm_flags); | |
482 | } | |
483 | ||
050a9adc | 484 | #ifdef CONFIG_MMU |
a12083d7 | 485 | |
8268614b | 486 | #ifdef CONFIG_HAVE_GUP_FAST |
a12083d7 PX |
487 | static int record_subpages(struct page *page, unsigned long sz, |
488 | unsigned long addr, unsigned long end, | |
489 | struct page **pages) | |
490 | { | |
491 | struct page *start_page; | |
492 | int nr; | |
493 | ||
494 | start_page = nth_page(page, (addr & (sz - 1)) >> PAGE_SHIFT); | |
495 | for (nr = 0; addr != end; nr++, addr += PAGE_SIZE) | |
496 | pages[nr] = nth_page(start_page, nr); | |
497 | ||
498 | return nr; | |
499 | } | |
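/*
 * Worked example (editorial addition, not part of the blamed gup.c): for a
 * 2 MiB PMD leaf (sz == 0x200000) and a fast-GUP walk over
 * addr = 0x1200034000 .. end = 0x1200040000, the starting subpage is
 * (addr & (sz - 1)) >> PAGE_SHIFT = 0x34000 >> 12 = 52, and the loop records
 * (end - addr) >> PAGE_SHIFT = 12 consecutive subpages (52..63) into @pages.
 */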
f442fa61 YS |
500 | |
501 | /** | |
502 | * try_grab_folio_fast() - Attempt to get or pin a folio in fast path. | |
503 | * @page: pointer to page to be grabbed | |
504 | * @refs: the value to (effectively) add to the folio's refcount | |
505 | * @flags: gup flags: these are the FOLL_* flag values. | |
506 | * | |
507 | * "grab" names in this file mean, "look at flags to decide whether to use | |
508 | * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount." |
509 | * | |
510 | * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the | |
511 | * same time. (That's true throughout the get_user_pages*() and | |
512 | * pin_user_pages*() APIs.) Cases: | |
513 | * | |
514 | * FOLL_GET: folio's refcount will be incremented by @refs. | |
515 | * | |
516 | * FOLL_PIN on large folios: folio's refcount will be incremented by | |
517 | * @refs, and its pincount will be incremented by @refs. | |
518 | * | |
519 | * FOLL_PIN on single-page folios: folio's refcount will be incremented by | |
520 | * @refs * GUP_PIN_COUNTING_BIAS. | |
521 | * | |
522 | * Return: The folio containing @page (with refcount appropriately | |
523 | * incremented) for success, or NULL upon failure. If neither FOLL_GET | |
524 | * nor FOLL_PIN was set, that's considered failure, and furthermore, | |
525 | * a likely bug in the caller, so a warning is also emitted. | |
526 | * | |
527 | * It uses an add-ref-unless-zero operation to elevate the folio refcount and |
528 | * must only be called from the fast path. |
529 | */ | |
530 | static struct folio *try_grab_folio_fast(struct page *page, int refs, | |
531 | unsigned int flags) | |
532 | { | |
533 | struct folio *folio; | |
534 | ||
535 | /* Warn if this is not called from the GUP fast path */ |
536 | VM_WARN_ON_ONCE(!irqs_disabled()); | |
537 | ||
538 | if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0)) | |
539 | return NULL; | |
540 | ||
541 | if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))) | |
542 | return NULL; | |
543 | ||
544 | if (flags & FOLL_GET) | |
545 | return try_get_folio(page, refs); | |
546 | ||
547 | /* FOLL_PIN is set */ | |
548 | ||
549 | /* | |
550 | * Don't take a pin on the zero page - it's not going anywhere | |
551 | * and it is used in a *lot* of places. | |
552 | */ | |
553 | if (is_zero_page(page)) | |
554 | return page_folio(page); | |
555 | ||
556 | folio = try_get_folio(page, refs); | |
557 | if (!folio) | |
558 | return NULL; | |
559 | ||
560 | /* | |
561 | * A FOLL_LONGTERM + FOLL_PIN fast-path grab is not allowed if the folio is not |
562 | * long-term pinnable, so fail and let the caller fall back to the slow |
563 | * path. | |
564 | */ | |
565 | if (unlikely((flags & FOLL_LONGTERM) && | |
566 | !folio_is_longterm_pinnable(folio))) { | |
38607c62 | 567 | folio_put_refs(folio, refs); |
f442fa61 YS |
568 | return NULL; |
569 | } | |
570 | ||
571 | /* | |
572 | * When pinning a large folio, use an exact count to track it. | |
573 | * | |
574 | * However, be sure to *also* increment the normal folio | |
575 | * refcount field at least once, so that the folio really | |
576 | * is pinned. That's why the refcount from the earlier | |
577 | * try_get_folio() is left intact. | |
578 | */ | |
31a31da8 | 579 | if (folio_has_pincount(folio)) |
f442fa61 YS |
580 | atomic_add(refs, &folio->_pincount); |
581 | else | |
582 | folio_ref_add(folio, | |
583 | refs * (GUP_PIN_COUNTING_BIAS - 1)); | |
584 | /* | |
585 | * Adjust the pincount before re-checking the PTE for changes. | |
586 | * This is essentially a smp_mb() and is paired with a memory | |
587 | * barrier in folio_try_share_anon_rmap_*(). | |
588 | */ | |
589 | smp_mb__after_atomic(); | |
590 | ||
591 | node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); | |
592 | ||
593 | return folio; | |
594 | } | |
8268614b | 595 | #endif /* CONFIG_HAVE_GUP_FAST */ |
a12083d7 | 596 | |
052ccfbc GM |
597 | /* Common code for can_follow_write_* */ |
598 | static inline bool can_follow_write_common(struct page *page, | |
599 | struct vm_area_struct *vma, unsigned int flags) | |
600 | { | |
601 | /* Maybe FOLL_FORCE is set to override it? */ | |
602 | if (!(flags & FOLL_FORCE)) | |
603 | return false; | |
604 | ||
605 | /* But FOLL_FORCE has no effect on shared mappings */ | |
606 | if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) | |
607 | return false; | |
608 | ||
609 | /* ... or read-only private ones */ | |
610 | if (!(vma->vm_flags & VM_MAYWRITE)) | |
611 | return false; | |
612 | ||
613 | /* ... or already writable ones that just need to take a write fault */ | |
614 | if (vma->vm_flags & VM_WRITE) | |
615 | return false; | |
616 | ||
617 | /* | |
618 | * See can_change_pte_writable(): we broke COW and could map the page | |
619 | * writable if we have an exclusive anonymous page ... | |
620 | */ | |
621 | return page && PageAnon(page) && PageAnonExclusive(page); | |
622 | } | |
623 | ||
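/*
 * Illustrative sketch (editorial addition, not part of the blamed gup.c): the
 * kind of caller can_follow_write_common() exists for. A ptrace-style writer
 * uses FOLL_FORCE | FOLL_WRITE to poke a read-only private mapping of another
 * process; per the checks above this can never succeed on shared or
 * !VM_MAYWRITE mappings. The helper name is hypothetical and the
 * get_user_pages_remote() prototype is assumed from <linux/mm.h>.
 */
static long example_poke_remote_byte(struct mm_struct *mm, unsigned long addr, u8 val)
{
	struct page *page;
	int locked = 1;
	void *kaddr;
	long ret;

	if (mmap_read_lock_killable(mm))
		return -EINTR;
	ret = get_user_pages_remote(mm, addr & PAGE_MASK, 1,
				    FOLL_FORCE | FOLL_WRITE, &page, &locked);
	if (locked)
		mmap_read_unlock(mm);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	kaddr = kmap_local_page(page);
	*(u8 *)(kaddr + offset_in_page(addr)) = val;
	kunmap_local(kaddr);

	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}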
69e68b4f | 624 | static struct page *no_page_table(struct vm_area_struct *vma, |
878b0c45 | 625 | unsigned int flags, unsigned long address) |
4bbd4c77 | 626 | { |
878b0c45 PX |
627 | if (!(flags & FOLL_DUMP)) |
628 | return NULL; | |
629 | ||
69e68b4f | 630 | /* |
878b0c45 | 631 | * When core dumping, we don't want to allocate unnecessary pages or |
69e68b4f KS |
632 | * page tables. Return error instead of NULL to skip handle_mm_fault, |
633 | * then get_dump_page() will return NULL to leave a hole in the dump. | |
634 | * But we can only make this optimization where a hole would surely | |
635 | * be zero-filled if handle_mm_fault() actually did handle it. | |
636 | */ | |
878b0c45 PX |
637 | if (is_vm_hugetlb_page(vma)) { |
638 | struct hstate *h = hstate_vma(vma); | |
639 | ||
640 | if (!hugetlbfs_pagecache_present(h, vma, address)) | |
641 | return ERR_PTR(-EFAULT); | |
642 | } else if ((vma_is_anonymous(vma) || !vma->vm_ops->fault)) { | |
69e68b4f | 643 | return ERR_PTR(-EFAULT); |
878b0c45 PX |
644 | } |
645 | ||
69e68b4f KS |
646 | return NULL; |
647 | } | |
4bbd4c77 | 648 | |
1b167618 | 649 | #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES |
052ccfbc GM |
650 | /* FOLL_FORCE can write to even unwritable PUDs in COW mappings. */ |
651 | static inline bool can_follow_write_pud(pud_t pud, struct page *page, | |
652 | struct vm_area_struct *vma, | |
653 | unsigned int flags) | |
654 | { | |
655 | /* If the pud is writable, we can write to the page. */ | |
656 | if (pud_write(pud)) | |
657 | return true; | |
658 | ||
659 | return can_follow_write_common(page, vma, flags); | |
660 | } | |
661 | ||
1b167618 PX |
662 | static struct page *follow_huge_pud(struct vm_area_struct *vma, |
663 | unsigned long addr, pud_t *pudp, | |
664 | int flags, struct follow_page_context *ctx) | |
665 | { | |
666 | struct mm_struct *mm = vma->vm_mm; | |
667 | struct page *page; | |
668 | pud_t pud = *pudp; | |
669 | unsigned long pfn = pud_pfn(pud); | |
670 | int ret; | |
671 | ||
672 | assert_spin_locked(pud_lockptr(mm, pudp)); | |
673 | ||
052ccfbc | 674 | if (!pud_present(pud)) |
1b167618 PX |
675 | return NULL; |
676 | ||
052ccfbc GM |
677 | if ((flags & FOLL_WRITE) && |
678 | !can_follow_write_pud(pud, pfn_to_page(pfn), vma, flags)) | |
1b167618 PX |
679 | return NULL; |
680 | ||
681 | pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; | |
1b167618 PX |
682 | page = pfn_to_page(pfn); |
683 | ||
fd2825b0 | 684 | if (!pud_write(pud) && gup_must_unshare(vma, flags, page)) |
1b167618 PX |
685 | return ERR_PTR(-EMLINK); |
686 | ||
f442fa61 | 687 | ret = try_grab_folio(page_folio(page), 1, flags); |
1b167618 PX |
688 | if (ret) |
689 | page = ERR_PTR(ret); | |
690 | else | |
691 | ctx->page_mask = HPAGE_PUD_NR - 1; | |
692 | ||
693 | return page; | |
694 | } | |
4418c522 PX |
695 | |
696 | /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */ | |
697 | static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page, | |
698 | struct vm_area_struct *vma, | |
699 | unsigned int flags) | |
700 | { | |
701 | /* If the pmd is writable, we can write to the page. */ | |
702 | if (pmd_write(pmd)) | |
703 | return true; | |
704 | ||
052ccfbc | 705 | if (!can_follow_write_common(page, vma, flags)) |
4418c522 PX |
706 | return false; |
707 | ||
708 | /* ... and a write-fault isn't required for other reasons. */ | |
f38ee285 | 709 | if (pmd_needs_soft_dirty_wp(vma, pmd)) |
4418c522 PX |
710 | return false; |
711 | return !userfaultfd_huge_pmd_wp(vma, pmd); | |
712 | } | |
713 | ||
714 | static struct page *follow_huge_pmd(struct vm_area_struct *vma, | |
715 | unsigned long addr, pmd_t *pmd, | |
716 | unsigned int flags, | |
717 | struct follow_page_context *ctx) | |
718 | { | |
719 | struct mm_struct *mm = vma->vm_mm; | |
720 | pmd_t pmdval = *pmd; | |
721 | struct page *page; | |
722 | int ret; | |
723 | ||
724 | assert_spin_locked(pmd_lockptr(mm, pmd)); | |
725 | ||
726 | page = pmd_page(pmdval); | |
727 | if ((flags & FOLL_WRITE) && | |
728 | !can_follow_write_pmd(pmdval, page, vma, flags)) | |
729 | return NULL; | |
730 | ||
731 | /* Avoid dumping huge zero page */ | |
732 | if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval)) | |
733 | return ERR_PTR(-EFAULT); | |
734 | ||
735 | if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags)) | |
736 | return NULL; | |
737 | ||
738 | if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page)) | |
739 | return ERR_PTR(-EMLINK); | |
740 | ||
792b429d DH |
741 | VM_WARN_ON_ONCE_PAGE((flags & FOLL_PIN) && PageAnon(page) && |
742 | !PageAnonExclusive(page), page); | |
4418c522 | 743 | |
f442fa61 | 744 | ret = try_grab_folio(page_folio(page), 1, flags); |
4418c522 PX |
745 | if (ret) |
746 | return ERR_PTR(ret); | |
747 | ||
748 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | |
749 | if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH)) | |
750 | touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); | |
751 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | |
752 | ||
753 | page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; | |
754 | ctx->page_mask = HPAGE_PMD_NR - 1; | |
755 | ||
756 | return page; | |
757 | } | |
758 | ||
1b167618 PX |
759 | #else /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */ |
760 | static struct page *follow_huge_pud(struct vm_area_struct *vma, | |
761 | unsigned long addr, pud_t *pudp, | |
762 | int flags, struct follow_page_context *ctx) | |
763 | { | |
764 | return NULL; | |
765 | } | |
4418c522 PX |
766 | |
767 | static struct page *follow_huge_pmd(struct vm_area_struct *vma, | |
768 | unsigned long addr, pmd_t *pmd, | |
769 | unsigned int flags, | |
770 | struct follow_page_context *ctx) | |
771 | { | |
772 | return NULL; | |
773 | } | |
1b167618 PX |
774 | #endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */ |
775 | ||
1027e443 KS |
776 | static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, |
777 | pte_t *pte, unsigned int flags) | |
778 | { | |
1027e443 | 779 | if (flags & FOLL_TOUCH) { |
c33c7948 RR |
780 | pte_t orig_entry = ptep_get(pte); |
781 | pte_t entry = orig_entry; | |
1027e443 KS |
782 | |
783 | if (flags & FOLL_WRITE) | |
784 | entry = pte_mkdirty(entry); | |
785 | entry = pte_mkyoung(entry); | |
786 | ||
c33c7948 | 787 | if (!pte_same(orig_entry, entry)) { |
1027e443 KS |
788 | set_pte_at(vma->vm_mm, address, pte, entry); |
789 | update_mmu_cache(vma, address, pte); | |
790 | } | |
791 | } | |
792 | ||
793 | /* Proper page table entry exists, but no corresponding struct page */ | |
794 | return -EEXIST; | |
795 | } | |
796 | ||
5535be30 DH |
797 | /* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */ |
798 | static inline bool can_follow_write_pte(pte_t pte, struct page *page, | |
799 | struct vm_area_struct *vma, | |
800 | unsigned int flags) | |
19be0eaf | 801 | { |
5535be30 DH |
802 | /* If the pte is writable, we can write to the page. */ |
803 | if (pte_write(pte)) | |
804 | return true; | |
805 | ||
052ccfbc | 806 | if (!can_follow_write_common(page, vma, flags)) |
5535be30 DH |
807 | return false; |
808 | ||
809 | /* ... and a write-fault isn't required for other reasons. */ | |
f38ee285 | 810 | if (pte_needs_soft_dirty_wp(vma, pte)) |
5535be30 DH |
811 | return false; |
812 | return !userfaultfd_pte_wp(vma, pte); | |
19be0eaf LT |
813 | } |
814 | ||
69e68b4f | 815 | static struct page *follow_page_pte(struct vm_area_struct *vma, |
df06b37f KB |
816 | unsigned long address, pmd_t *pmd, unsigned int flags, |
817 | struct dev_pagemap **pgmap) | |
69e68b4f KS |
818 | { |
819 | struct mm_struct *mm = vma->vm_mm; | |
b967c648 | 820 | struct folio *folio; |
69e68b4f KS |
821 | struct page *page; |
822 | spinlock_t *ptl; | |
823 | pte_t *ptep, pte; | |
f28d4363 | 824 | int ret; |
4bbd4c77 | 825 | |
4bbd4c77 | 826 | ptep = pte_offset_map_lock(mm, pmd, address, &ptl); |
04dee9e8 | 827 | if (!ptep) |
878b0c45 | 828 | return no_page_table(vma, flags, address); |
c33c7948 | 829 | pte = ptep_get(ptep); |
f7355e99 DH |
830 | if (!pte_present(pte)) |
831 | goto no_page; | |
d74943a2 | 832 | if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags)) |
4bbd4c77 | 833 | goto no_page; |
4bbd4c77 KS |
834 | |
835 | page = vm_normal_page(vma, address, pte); | |
5535be30 DH |
836 | |
837 | /* | |
fd2825b0 | 838 | * We only care about anon pages in can_follow_write_pte(). |
5535be30 DH |
839 | */ |
840 | if ((flags & FOLL_WRITE) && | |
841 | !can_follow_write_pte(pte, page, vma, flags)) { | |
842 | page = NULL; | |
843 | goto out; | |
844 | } | |
845 | ||
fd2825b0 | 846 | if (unlikely(!page)) { |
1027e443 KS |
847 | if (flags & FOLL_DUMP) { |
848 | /* Avoid special (like zero) pages in core dumps */ | |
849 | page = ERR_PTR(-EFAULT); | |
850 | goto out; | |
851 | } | |
852 | ||
853 | if (is_zero_pfn(pte_pfn(pte))) { | |
854 | page = pte_page(pte); | |
855 | } else { | |
1027e443 KS |
856 | ret = follow_pfn_pte(vma, address, ptep, flags); |
857 | page = ERR_PTR(ret); | |
858 | goto out; | |
859 | } | |
4bbd4c77 | 860 | } |
b967c648 | 861 | folio = page_folio(page); |
4bbd4c77 | 862 | |
84209e87 | 863 | if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) { |
a7f22660 DH |
864 | page = ERR_PTR(-EMLINK); |
865 | goto out; | |
866 | } | |
b6a2619c | 867 | |
792b429d DH |
868 | VM_WARN_ON_ONCE_PAGE((flags & FOLL_PIN) && PageAnon(page) && |
869 | !PageAnonExclusive(page), page); | |
b6a2619c | 870 | |
f442fa61 | 871 | /* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */ |
b967c648 | 872 | ret = try_grab_folio(folio, 1, flags); |
0f089235 LG |
873 | if (unlikely(ret)) { |
874 | page = ERR_PTR(ret); | |
3faa52c0 | 875 | goto out; |
8fde12ca | 876 | } |
4003f107 | 877 | |
f28d4363 CI |
878 | /* |
879 | * We need to make the page accessible if and only if we are going | |
880 | * to access its content (the FOLL_PIN case). Please see | |
881 | * Documentation/core-api/pin_user_pages.rst for details. | |
882 | */ | |
883 | if (flags & FOLL_PIN) { | |
b967c648 | 884 | ret = arch_make_folio_accessible(folio); |
f28d4363 CI |
885 | if (ret) { |
886 | unpin_user_page(page); | |
887 | page = ERR_PTR(ret); | |
888 | goto out; | |
889 | } | |
890 | } | |
4bbd4c77 KS |
891 | if (flags & FOLL_TOUCH) { |
892 | if ((flags & FOLL_WRITE) && | |
f0327de7 MWO |
893 | !pte_dirty(pte) && !folio_test_dirty(folio)) |
894 | folio_mark_dirty(folio); | |
4bbd4c77 KS |
895 | /* |
896 | * pte_mkyoung() would be more correct here, but atomic care | |
897 | * is needed to avoid losing the dirty bit: it is easier to use | |
f0327de7 | 898 | * folio_mark_accessed(). |
4bbd4c77 | 899 | */ |
f0327de7 | 900 | folio_mark_accessed(folio); |
4bbd4c77 | 901 | } |
1027e443 | 902 | out: |
4bbd4c77 | 903 | pte_unmap_unlock(ptep, ptl); |
4bbd4c77 | 904 | return page; |
4bbd4c77 KS |
905 | no_page: |
906 | pte_unmap_unlock(ptep, ptl); | |
907 | if (!pte_none(pte)) | |
69e68b4f | 908 | return NULL; |
878b0c45 | 909 | return no_page_table(vma, flags, address); |
69e68b4f KS |
910 | } |
911 | ||
080dbb61 AK |
912 | static struct page *follow_pmd_mask(struct vm_area_struct *vma, |
913 | unsigned long address, pud_t *pudp, | |
df06b37f KB |
914 | unsigned int flags, |
915 | struct follow_page_context *ctx) | |
69e68b4f | 916 | { |
68827280 | 917 | pmd_t *pmd, pmdval; |
69e68b4f KS |
918 | spinlock_t *ptl; |
919 | struct page *page; | |
920 | struct mm_struct *mm = vma->vm_mm; | |
921 | ||
080dbb61 | 922 | pmd = pmd_offset(pudp, address); |
26e1a0c3 | 923 | pmdval = pmdp_get_lockless(pmd); |
68827280 | 924 | if (pmd_none(pmdval)) |
878b0c45 | 925 | return no_page_table(vma, flags, address); |
f7355e99 | 926 | if (!pmd_present(pmdval)) |
878b0c45 | 927 | return no_page_table(vma, flags, address); |
4418c522 | 928 | if (likely(!pmd_leaf(pmdval))) |
df06b37f | 929 | return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); |
6742d293 | 930 | |
d74943a2 | 931 | if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags)) |
878b0c45 | 932 | return no_page_table(vma, flags, address); |
db08f203 | 933 | |
6742d293 | 934 | ptl = pmd_lock(mm, pmd); |
4418c522 PX |
935 | pmdval = *pmd; |
936 | if (unlikely(!pmd_present(pmdval))) { | |
84c3fc4e | 937 | spin_unlock(ptl); |
878b0c45 | 938 | return no_page_table(vma, flags, address); |
84c3fc4e | 939 | } |
4418c522 | 940 | if (unlikely(!pmd_leaf(pmdval))) { |
6742d293 | 941 | spin_unlock(ptl); |
df06b37f | 942 | return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); |
6742d293 | 943 | } |
4418c522 | 944 | if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) { |
2378118b HD |
945 | spin_unlock(ptl); |
946 | split_huge_pmd(vma, pmd, address); | |
947 | /* If pmd was left empty, stuff a page table in there quickly */ | |
948 | return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) : | |
df06b37f | 949 | follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); |
69e68b4f | 950 | } |
4418c522 | 951 | page = follow_huge_pmd(vma, address, pmd, flags, ctx); |
6742d293 | 952 | spin_unlock(ptl); |
6742d293 | 953 | return page; |
4bbd4c77 KS |
954 | } |
955 | ||
080dbb61 AK |
956 | static struct page *follow_pud_mask(struct vm_area_struct *vma, |
957 | unsigned long address, p4d_t *p4dp, | |
df06b37f KB |
958 | unsigned int flags, |
959 | struct follow_page_context *ctx) | |
080dbb61 | 960 | { |
caf8cab7 | 961 | pud_t *pudp, pud; |
080dbb61 AK |
962 | spinlock_t *ptl; |
963 | struct page *page; | |
964 | struct mm_struct *mm = vma->vm_mm; | |
965 | ||
caf8cab7 PX |
966 | pudp = pud_offset(p4dp, address); |
967 | pud = READ_ONCE(*pudp); | |
1b167618 | 968 | if (!pud_present(pud)) |
878b0c45 | 969 | return no_page_table(vma, flags, address); |
1b167618 | 970 | if (pud_leaf(pud)) { |
caf8cab7 | 971 | ptl = pud_lock(mm, pudp); |
1b167618 | 972 | page = follow_huge_pud(vma, address, pudp, flags, ctx); |
080dbb61 AK |
973 | spin_unlock(ptl); |
974 | if (page) | |
975 | return page; | |
878b0c45 | 976 | return no_page_table(vma, flags, address); |
080dbb61 | 977 | } |
caf8cab7 | 978 | if (unlikely(pud_bad(pud))) |
878b0c45 | 979 | return no_page_table(vma, flags, address); |
080dbb61 | 980 | |
caf8cab7 | 981 | return follow_pmd_mask(vma, address, pudp, flags, ctx); |
080dbb61 AK |
982 | } |
983 | ||
080dbb61 AK |
984 | static struct page *follow_p4d_mask(struct vm_area_struct *vma, |
985 | unsigned long address, pgd_t *pgdp, | |
df06b37f KB |
986 | unsigned int flags, |
987 | struct follow_page_context *ctx) | |
080dbb61 | 988 | { |
e6fd5564 | 989 | p4d_t *p4dp, p4d; |
080dbb61 | 990 | |
e6fd5564 PX |
991 | p4dp = p4d_offset(pgdp, address); |
992 | p4d = READ_ONCE(*p4dp); | |
1965e933 | 993 | BUILD_BUG_ON(p4d_leaf(p4d)); |
a12083d7 | 994 | |
a12083d7 | 995 | if (!p4d_present(p4d) || p4d_bad(p4d)) |
878b0c45 | 996 | return no_page_table(vma, flags, address); |
080dbb61 | 997 | |
e6fd5564 | 998 | return follow_pud_mask(vma, address, p4dp, flags, ctx); |
080dbb61 AK |
999 | } |
1000 | ||
1001 | /** | |
1002 | * follow_page_mask - look up a page descriptor from a user-virtual address | |
1003 | * @vma: vm_area_struct mapping @address | |
1004 | * @address: virtual address to look up | |
1005 | * @flags: flags modifying lookup behaviour | |
78179556 MR |
1006 | * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a |
1007 | * pointer to output page_mask | |
080dbb61 AK |
1008 | * |
1009 | * @flags can have FOLL_ flags set, defined in <linux/mm.h> | |
1010 | * | |
78179556 MR |
1011 | * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches |
1012 | * the device's dev_pagemap metadata to avoid repeating expensive lookups. | |
1013 | * | |
a7f22660 DH |
1014 | * When getting an anonymous page and the caller has to trigger unsharing |
1015 | * of a shared anonymous page first, -EMLINK is returned. The caller should | |
1016 | * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only | |
1017 | * relevant with FOLL_PIN and !FOLL_WRITE. | |
1018 | * | |
78179556 MR |
1019 | * On output, the @ctx->page_mask is set according to the size of the page. |
1020 | * | |
1021 | * Return: the mapped (struct page *), %NULL if no mapping exists, or | |
080dbb61 AK |
1022 | * an error pointer if there is a mapping to something not represented |
1023 | * by a page descriptor (see also vm_normal_page()). | |
1024 | */ | |
a7030aea | 1025 | static struct page *follow_page_mask(struct vm_area_struct *vma, |
080dbb61 | 1026 | unsigned long address, unsigned int flags, |
df06b37f | 1027 | struct follow_page_context *ctx) |
080dbb61 AK |
1028 | { |
1029 | pgd_t *pgd; | |
080dbb61 | 1030 | struct mm_struct *mm = vma->vm_mm; |
9cb28da5 | 1031 | struct page *page; |
080dbb61 | 1032 | |
9cb28da5 | 1033 | vma_pgtable_walk_begin(vma); |
080dbb61 | 1034 | |
9cb28da5 | 1035 | ctx->page_mask = 0; |
080dbb61 AK |
1036 | pgd = pgd_offset(mm, address); |
1037 | ||
8268614b | 1038 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) |
a12083d7 PX |
1039 | page = no_page_table(vma, flags, address); |
1040 | else | |
1041 | page = follow_p4d_mask(vma, address, pgd, flags, ctx); | |
080dbb61 | 1042 | |
9cb28da5 PX |
1043 | vma_pgtable_walk_end(vma); |
1044 | ||
a12083d7 | 1045 | return page; |
df06b37f KB |
1046 | } |
1047 | ||
f2b495ca KS |
1048 | static int get_gate_page(struct mm_struct *mm, unsigned long address, |
1049 | unsigned int gup_flags, struct vm_area_struct **vma, | |
1050 | struct page **page) | |
1051 | { | |
1052 | pgd_t *pgd; | |
c2febafc | 1053 | p4d_t *p4d; |
f2b495ca KS |
1054 | pud_t *pud; |
1055 | pmd_t *pmd; | |
1056 | pte_t *pte; | |
c33c7948 | 1057 | pte_t entry; |
f2b495ca KS |
1058 | int ret = -EFAULT; |
1059 | ||
1060 | /* user gate pages are read-only */ | |
1061 | if (gup_flags & FOLL_WRITE) | |
1062 | return -EFAULT; | |
0cad6736 | 1063 | pgd = pgd_offset(mm, address); |
b5d1c39f AL |
1064 | if (pgd_none(*pgd)) |
1065 | return -EFAULT; | |
c2febafc | 1066 | p4d = p4d_offset(pgd, address); |
b5d1c39f AL |
1067 | if (p4d_none(*p4d)) |
1068 | return -EFAULT; | |
c2febafc | 1069 | pud = pud_offset(p4d, address); |
b5d1c39f AL |
1070 | if (pud_none(*pud)) |
1071 | return -EFAULT; | |
f2b495ca | 1072 | pmd = pmd_offset(pud, address); |
84c3fc4e | 1073 | if (!pmd_present(*pmd)) |
f2b495ca | 1074 | return -EFAULT; |
f2b495ca | 1075 | pte = pte_offset_map(pmd, address); |
04dee9e8 HD |
1076 | if (!pte) |
1077 | return -EFAULT; | |
c33c7948 RR |
1078 | entry = ptep_get(pte); |
1079 | if (pte_none(entry)) | |
f2b495ca KS |
1080 | goto unmap; |
1081 | *vma = get_gate_vma(mm); | |
1082 | if (!page) | |
1083 | goto out; | |
c33c7948 | 1084 | *page = vm_normal_page(*vma, address, entry); |
f2b495ca | 1085 | if (!*page) { |
c33c7948 | 1086 | if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry))) |
f2b495ca | 1087 | goto unmap; |
c33c7948 | 1088 | *page = pte_page(entry); |
f2b495ca | 1089 | } |
f442fa61 | 1090 | ret = try_grab_folio(page_folio(*page), 1, gup_flags); |
0f089235 | 1091 | if (unlikely(ret)) |
8fde12ca | 1092 | goto unmap; |
f2b495ca KS |
1093 | out: |
1094 | ret = 0; | |
1095 | unmap: | |
1096 | pte_unmap(pte); | |
1097 | return ret; | |
1098 | } | |
1099 | ||
9a95f3cf | 1100 | /* |
9a863a6a JG |
1101 | * mmap_lock must be held on entry. If @flags has FOLL_UNLOCKABLE but not |
1102 | * FOLL_NOWAIT, the mmap_lock may be released. If it is, *@locked will be set | |
1103 | * to 0 and -EBUSY returned. | |
9a95f3cf | 1104 | */ |
64019a2e | 1105 | static int faultin_page(struct vm_area_struct *vma, |
47872953 | 1106 | unsigned long address, unsigned int flags, bool unshare, |
a7f22660 | 1107 | int *locked) |
16744483 | 1108 | { |
16744483 | 1109 | unsigned int fault_flags = 0; |
2b740303 | 1110 | vm_fault_t ret; |
16744483 | 1111 | |
47872953 | 1112 | if (flags & FOLL_NOFAULT) |
55b8fe70 | 1113 | return -EFAULT; |
47872953 | 1114 | if (flags & FOLL_WRITE) |
16744483 | 1115 | fault_flags |= FAULT_FLAG_WRITE; |
47872953 | 1116 | if (flags & FOLL_REMOTE) |
1b2ee126 | 1117 | fault_flags |= FAULT_FLAG_REMOTE; |
47872953 | 1118 | if (flags & FOLL_UNLOCKABLE) { |
71335f37 | 1119 | fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
93c5c61d PX |
1120 | /* |
1121 | * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set | |
1122 | * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE. | |
1123 | * That's because some callers may not be prepared to | |
1124 | * handle early exits caused by non-fatal signals. | |
1125 | */ | |
47872953 | 1126 | if (flags & FOLL_INTERRUPTIBLE) |
93c5c61d PX |
1127 | fault_flags |= FAULT_FLAG_INTERRUPTIBLE; |
1128 | } | |
47872953 | 1129 | if (flags & FOLL_NOWAIT) |
16744483 | 1130 | fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; |
47872953 | 1131 | if (flags & FOLL_TRIED) { |
4426e945 PX |
1132 | /* |
1133 | * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED | |
1134 | * can co-exist | |
1135 | */ | |
234b239b ALC |
1136 | fault_flags |= FAULT_FLAG_TRIED; |
1137 | } | |
a7f22660 DH |
1138 | if (unshare) { |
1139 | fault_flags |= FAULT_FLAG_UNSHARE; | |
1140 | /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */ | |
792b429d | 1141 | VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_WRITE); |
a7f22660 | 1142 | } |
16744483 | 1143 | |
bce617ed | 1144 | ret = handle_mm_fault(vma, address, fault_flags, NULL); |
d9272525 PX |
1145 | |
1146 | if (ret & VM_FAULT_COMPLETED) { | |
1147 | /* | |
1148 | * With FAULT_FLAG_RETRY_NOWAIT we'll never release the | |
1149 | * mmap lock in the page fault handler. Sanity check this. | |
1150 | */ | |
1151 | WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT); | |
9a863a6a JG |
1152 | *locked = 0; |
1153 | ||
d9272525 PX |
1154 | /* |
1155 | * We should do the same as VM_FAULT_RETRY, but let's not | |
1156 | * return -EBUSY since that's not reflecting the reality of | |
1157 | * what has happened - we've just fully completed a page | |
1158 | * fault, with the mmap lock released. Use -EAGAIN to show | |
1159 | * that we want to take the mmap lock _again_. | |
1160 | */ | |
1161 | return -EAGAIN; | |
1162 | } | |
1163 | ||
16744483 | 1164 | if (ret & VM_FAULT_ERROR) { |
47872953 | 1165 | int err = vm_fault_to_errno(ret, flags); |
9a291a7c JM |
1166 | |
1167 | if (err) | |
1168 | return err; | |
16744483 KS |
1169 | BUG(); |
1170 | } | |
1171 | ||
16744483 | 1172 | if (ret & VM_FAULT_RETRY) { |
9a863a6a | 1173 | if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) |
4f6da934 | 1174 | *locked = 0; |
16744483 KS |
1175 | return -EBUSY; |
1176 | } | |
1177 | ||
16744483 KS |
1178 | return 0; |
1179 | } | |
1180 | ||
8ac26843 LS |
1181 | /* |
1182 | * Writing to file-backed mappings which require folio dirty tracking using GUP | |
1183 | * is a fundamentally broken operation, as kernel write access to GUP mappings | |
1184 | * does not adhere to the semantics expected by a file system. |
1185 | * | |
1186 | * Consider the following scenario:- | |
1187 | * | |
1188 | * 1. A folio is written to via GUP which write-faults the memory, notifying | |
1189 | * the file system and dirtying the folio. | |
1190 | * 2. Later, writeback is triggered, resulting in the folio being cleaned and | |
1191 | * the PTE being marked read-only. | |
1192 | * 3. The GUP caller writes to the folio, as it is mapped read/write via the | |
1193 | * direct mapping. | |
1194 | * 4. The GUP caller, now done with the page, unpins it and sets it dirty | |
1195 | * (though it does not have to). | |
1196 | * | |
1197 | * This results in both data being written to a folio without writenotify, and | |
1198 | * the folio being dirtied unexpectedly (if the caller decides to do so). | |
1199 | */ | |
1200 | static bool writable_file_mapping_allowed(struct vm_area_struct *vma, | |
1201 | unsigned long gup_flags) | |
1202 | { | |
1203 | /* | |
1204 | * If we aren't pinning then no problematic write can occur. A long term | |
1205 | * pin is the most egregious case so this is the case we disallow. | |
1206 | */ | |
1207 | if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) != | |
1208 | (FOLL_PIN | FOLL_LONGTERM)) | |
1209 | return true; | |
1210 | ||
1211 | /* | |
1212 | * If the VMA does not require dirty tracking then no problematic write | |
1213 | * can occur either. | |
1214 | */ | |
1215 | return !vma_needs_dirty_tracking(vma); | |
1216 | } | |
1217 | ||
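/*
 * Illustrative sketch (editorial addition, not part of the blamed gup.c):
 * what the check above means for callers. A long-term writable pin of a
 * shared, dirty-tracked file mapping (e.g. an mmap of a regular ext4 file) is
 * rejected with -EFAULT by check_vma_flags(), and callers must not treat that
 * as a transient error; anonymous memory and typically shmem remain fine.
 * The helper name is hypothetical.
 */
static int example_longterm_pin_for_rdma(unsigned long uaddr, int nr,
					 struct page **pages)
{
	/*
	 * Fails with -EFAULT if @uaddr lies in a file-backed mapping that
	 * needs dirty tracking (writenotify).
	 */
	return pin_user_pages_fast(uaddr, nr, FOLL_WRITE | FOLL_LONGTERM, pages);
}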
fa5bb209 KS |
1218 | static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) |
1219 | { | |
1220 | vm_flags_t vm_flags = vma->vm_flags; | |
1b2ee126 DH |
1221 | int write = (gup_flags & FOLL_WRITE); |
1222 | int foreign = (gup_flags & FOLL_REMOTE); | |
8ac26843 | 1223 | bool vma_anon = vma_is_anonymous(vma); |
fa5bb209 KS |
1224 | |
1225 | if (vm_flags & (VM_IO | VM_PFNMAP)) | |
1226 | return -EFAULT; | |
1227 | ||
8ac26843 | 1228 | if ((gup_flags & FOLL_ANON) && !vma_anon) |
7f7ccc2c WT |
1229 | return -EFAULT; |
1230 | ||
52650c8b JG |
1231 | if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma)) |
1232 | return -EOPNOTSUPP; | |
1233 | ||
8977752c DH |
1234 | if ((gup_flags & FOLL_SPLIT_PMD) && is_vm_hugetlb_page(vma)) |
1235 | return -EOPNOTSUPP; | |
1236 | ||
1507f512 MR |
1237 | if (vma_is_secretmem(vma)) |
1238 | return -EFAULT; | |
1239 | ||
1b2ee126 | 1240 | if (write) { |
8ac26843 LS |
1241 | if (!vma_anon && |
1242 | !writable_file_mapping_allowed(vma, gup_flags)) | |
1243 | return -EFAULT; | |
1244 | ||
6beb9958 | 1245 | if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) { |
fa5bb209 KS |
1246 | if (!(gup_flags & FOLL_FORCE)) |
1247 | return -EFAULT; | |
1248 | /* | |
1249 | * We used to let the write,force case do COW in a | |
1250 | * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could | |
1251 | * set a breakpoint in a read-only mapping of an | |
1252 | * executable, without corrupting the file (yet only | |
1253 | * when that file had been opened for writing!). | |
1254 | * Anon pages in shared mappings are surprising: now | |
1255 | * just reject it. | |
1256 | */ | |
46435364 | 1257 | if (!is_cow_mapping(vm_flags)) |
fa5bb209 | 1258 | return -EFAULT; |
fa5bb209 KS |
1259 | } |
1260 | } else if (!(vm_flags & VM_READ)) { | |
1261 | if (!(gup_flags & FOLL_FORCE)) | |
1262 | return -EFAULT; | |
1263 | /* | |
1264 | * Is there actually any vma we can reach here which does not | |
1265 | * have VM_MAYREAD set? | |
1266 | */ | |
1267 | if (!(vm_flags & VM_MAYREAD)) | |
1268 | return -EFAULT; | |
1269 | } | |
d61172b4 DH |
1270 | /* |
1271 | * gups are always data accesses, not instruction | |
1272 | * fetches, so execute=false here | |
1273 | */ | |
1274 | if (!arch_vma_access_permitted(vma, write, false, foreign)) | |
33a709b2 | 1275 | return -EFAULT; |
fa5bb209 KS |
1276 | return 0; |
1277 | } | |
1278 | ||
6cd06ab1 LT |
1279 | /* |
1280 | * This is "vma_lookup()", but with a warning if we would have | |
1281 | * historically expanded the stack in the GUP code. | |
1282 | */ | |
1283 | static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm, | |
1284 | unsigned long addr) | |
1285 | { | |
1286 | #ifdef CONFIG_STACK_GROWSUP | |
1287 | return vma_lookup(mm, addr); | |
1288 | #else | |
1289 | static volatile unsigned long next_warn; | |
1290 | struct vm_area_struct *vma; | |
1291 | unsigned long now, next; | |
1292 | ||
1293 | vma = find_vma(mm, addr); | |
1294 | if (!vma || (addr >= vma->vm_start)) | |
1295 | return vma; | |
1296 | ||
1297 | /* Only warn for half-way relevant accesses */ | |
1298 | if (!(vma->vm_flags & VM_GROWSDOWN)) | |
1299 | return NULL; | |
1300 | if (vma->vm_start - addr > 65536) | |
1301 | return NULL; | |
1302 | ||
1303 | /* Let's not warn more than once an hour... */ |
1304 | now = jiffies; next = next_warn; | |
1305 | if (next && time_before(now, next)) | |
1306 | return NULL; | |
1307 | next_warn = now + 60*60*HZ; | |
1308 | ||
1309 | /* Let people know things may have changed. */ | |
1310 | pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n", | |
1311 | current->comm, task_pid_nr(current), | |
1312 | vma->vm_start, vma->vm_end, addr); | |
1313 | dump_stack(); | |
1314 | return NULL; | |
1315 | #endif | |
1316 | } | |
1317 | ||
4bbd4c77 KS |
1318 | /** |
1319 | * __get_user_pages() - pin user pages in memory | |
4bbd4c77 KS |
1320 | * @mm: mm_struct of target mm |
1321 | * @start: starting user address | |
1322 | * @nr_pages: number of pages from start to pin | |
1323 | * @gup_flags: flags modifying pin behaviour | |
1324 | * @pages: array that receives pointers to the pages pinned. | |
1325 | * Should be at least nr_pages long. Or NULL, if caller | |
1326 | * only intends to ensure the pages are faulted in. | |
c1e8d7c6 | 1327 | * @locked: whether we're still with the mmap_lock held |
4bbd4c77 | 1328 | * |
d2dfbe47 LX |
1329 | * Returns either number of pages pinned (which may be less than the |
1330 | * number requested), or an error. Details about the return value: | |
1331 | * | |
1332 | * -- If nr_pages is 0, returns 0. | |
1333 | * -- If nr_pages is >0, but no pages were pinned, returns -errno. | |
1334 | * -- If nr_pages is >0, and some pages were pinned, returns the number of | |
1335 | * pages pinned. Again, this may be less than nr_pages. | |
2d3a36a4 | 1336 | * -- 0 return value is possible when the fault would need to be retried. |
d2dfbe47 LX |
1337 | * |
1338 | * The caller is responsible for releasing returned @pages, via put_page(). | |
1339 | * | |
c1e8d7c6 | 1340 | * Must be called with mmap_lock held. It may be released. See below. |
4bbd4c77 KS |
1341 | * |
1342 | * __get_user_pages walks a process's page tables and takes a reference to | |
1343 | * each struct page that each user address corresponds to at a given | |
1344 | * instant. That is, it takes the page that would be accessed if a user | |
1345 | * thread accesses the given user virtual address at that instant. | |
1346 | * | |
1347 | * This does not guarantee that the page exists in the user mappings when | |
1348 | * __get_user_pages returns, and there may even be a completely different | |
1349 | * page there in some cases (eg. if mmapped pagecache has been invalidated | |
c5acf1f6 | 1350 | * and subsequently re-faulted). However it does guarantee that the page |
4bbd4c77 KS |
1351 | * won't be freed completely. And mostly callers simply care that the page |
1352 | * contains data that was valid *at some point in time*. Typically, an IO | |
1353 | * or similar operation cannot guarantee anything stronger anyway because | |
1354 | * locks can't be held over the syscall boundary. | |
1355 | * | |
1356 | * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If | |
1357 | * the page is written to, set_page_dirty (or set_page_dirty_lock, as | |
1358 | * appropriate) must be called after the page is finished with, and | |
1359 | * before put_page is called. | |
1360 | * | |
9a863a6a JG |
1361 | * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may |
1362 | * be released. If this happens *@locked will be set to 0 on return. | |
9a95f3cf | 1363 | * |
9a863a6a JG |
1364 | * A caller using such a combination of @gup_flags must therefore hold the |
1365 | * mmap_lock for reading only, and recognize when it's been released. Otherwise, | |
1366 | * it must be held for either reading or writing and will not be released. | |
4bbd4c77 KS |
1367 | * |
1368 | * In most cases, get_user_pages or get_user_pages_fast should be used | |
1369 | * instead of __get_user_pages. __get_user_pages should be used only if | |
1370 | * you need some special @gup_flags. | |
1371 | */ | |
64019a2e | 1372 | static long __get_user_pages(struct mm_struct *mm, |
4bbd4c77 KS |
1373 | unsigned long start, unsigned long nr_pages, |
1374 | unsigned int gup_flags, struct page **pages, | |
b2cac248 | 1375 | int *locked) |
4bbd4c77 | 1376 | { |
df06b37f | 1377 | long ret = 0, i = 0; |
fa5bb209 | 1378 | struct vm_area_struct *vma = NULL; |
df06b37f | 1379 | struct follow_page_context ctx = { NULL }; |
4bbd4c77 KS |
1380 | |
1381 | if (!nr_pages) | |
1382 | return 0; | |
1383 | ||
428e106a | 1384 | start = untagged_addr_remote(mm, start); |
f9652594 | 1385 | |
ede27b7e BH |
1386 | VM_WARN_ON_ONCE(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); |
1387 | ||
1388 | /* FOLL_GET and FOLL_PIN are mutually exclusive. */ | |
1389 | VM_WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) == | |
1390 | (FOLL_PIN | FOLL_GET)); | |
4bbd4c77 | 1391 | |
4bbd4c77 | 1392 | do { |
fa5bb209 | 1393 | struct page *page; |
fa5bb209 KS |
1394 | unsigned int page_increm; |
1395 | ||
1396 | /* first iteration, or we crossed a vma boundary */ |
1397 | if (!vma || start >= vma->vm_end) { | |
631426ba DH |
1398 | /* |
1399 | * MADV_POPULATE_(READ|WRITE) wants to handle VMA | |
1400 | * lookups+error reporting differently. | |
1401 | */ | |
1402 | if (gup_flags & FOLL_MADV_POPULATE) { | |
1403 | vma = vma_lookup(mm, start); | |
1404 | if (!vma) { | |
1405 | ret = -ENOMEM; | |
1406 | goto out; | |
1407 | } | |
1408 | if (check_vma_flags(vma, gup_flags)) { | |
1409 | ret = -EINVAL; | |
1410 | goto out; | |
1411 | } | |
1412 | goto retry; | |
1413 | } | |
6cd06ab1 | 1414 | vma = gup_vma_lookup(mm, start); |
fa5bb209 | 1415 | if (!vma && in_gate_area(mm, start)) { |
fa5bb209 KS |
1416 | ret = get_gate_page(mm, start & PAGE_MASK, |
1417 | gup_flags, &vma, | |
ffe1e786 | 1418 | pages ? &page : NULL); |
fa5bb209 | 1419 | if (ret) |
08be37b7 | 1420 | goto out; |
df06b37f | 1421 | ctx.page_mask = 0; |
fa5bb209 KS |
1422 | goto next_page; |
1423 | } | |
4bbd4c77 | 1424 | |
52650c8b | 1425 | if (!vma) { |
df06b37f KB |
1426 | ret = -EFAULT; |
1427 | goto out; | |
1428 | } | |
52650c8b JG |
1429 | ret = check_vma_flags(vma, gup_flags); |
1430 | if (ret) | |
1431 | goto out; | |
fa5bb209 KS |
1432 | } |
1433 | retry: | |
1434 | /* | |
1435 | * If we have a pending SIGKILL, don't keep faulting pages and | |
1436 | * potentially allocating memory. | |
1437 | */ | |
fa45f116 | 1438 | if (fatal_signal_pending(current)) { |
d180870d | 1439 | ret = -EINTR; |
df06b37f KB |
1440 | goto out; |
1441 | } | |
fa5bb209 | 1442 | cond_resched(); |
df06b37f | 1443 | |
dc21e700 | 1444 | page = follow_page_mask(vma, start, gup_flags, &ctx); |
a7f22660 | 1445 | if (!page || PTR_ERR(page) == -EMLINK) { |
dc21e700 | 1446 | ret = faultin_page(vma, start, gup_flags, |
a7f22660 | 1447 | PTR_ERR(page) == -EMLINK, locked); |
fa5bb209 KS |
1448 | switch (ret) { |
1449 | case 0: | |
1450 | goto retry; | |
df06b37f | 1451 | case -EBUSY: |
d9272525 | 1452 | case -EAGAIN: |
df06b37f | 1453 | ret = 0; |
e4a9bc58 | 1454 | fallthrough; |
fa5bb209 KS |
1455 | case -EFAULT: |
1456 | case -ENOMEM: | |
1457 | case -EHWPOISON: | |
df06b37f | 1458 | goto out; |
4bbd4c77 | 1459 | } |
fa5bb209 | 1460 | BUG(); |
1027e443 KS |
1461 | } else if (PTR_ERR(page) == -EEXIST) { |
1462 | /* | |
1463 | * Proper page table entry exists, but no corresponding | |
65462462 JH |
1464 | * struct page. If the caller expects **pages to be |
1465 | * filled in, bail out now, because that can't be done | |
1466 | * for this page. | |
1027e443 | 1467 | */ |
65462462 JH |
1468 | if (pages) { |
1469 | ret = PTR_ERR(page); | |
1470 | goto out; | |
1471 | } | |
1027e443 | 1472 | } else if (IS_ERR(page)) { |
df06b37f KB |
1473 | ret = PTR_ERR(page); |
1474 | goto out; | |
1027e443 | 1475 | } |
ffe1e786 | 1476 | next_page: |
df06b37f | 1477 | page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); |
fa5bb209 KS |
1478 | if (page_increm > nr_pages) |
1479 | page_increm = nr_pages; | |
57edfcfd PX |
1480 | |
1481 | if (pages) { | |
1482 | struct page *subpage; | |
1483 | unsigned int j; | |
1484 | ||
1485 | /* | |
1486 | * This must be a large folio (and doesn't need to | |
1487 | * be the whole folio; it can be part of it), so do |
1488 | * the refcount work for all the subpages too. | |
1489 | * | |
1490 | * NOTE: here the page may not be the head page | |
1491 | * e.g. when start addr is not thp-size aligned. | |
1492 | * try_grab_folio() should have taken care of tail | |
1493 | * pages. | |
1494 | */ | |
1495 | if (page_increm > 1) { | |
f442fa61 | 1496 | struct folio *folio = page_folio(page); |
57edfcfd PX |
1497 | |
1498 | /* | |
1499 | * Since we already hold refcount on the | |
1500 | * large folio, this should never fail. | |
1501 | */ | |
f442fa61 | 1502 | if (try_grab_folio(folio, page_increm - 1, |
dc21e700 | 1503 | gup_flags)) { |
57edfcfd PX |
1504 | /* |
1505 | * Release the 1st page ref if the | |
1506 | * folio is problematic, fail hard. | |
1507 | */ | |
dc21e700 | 1508 | gup_put_folio(folio, 1, gup_flags); |
57edfcfd PX |
1509 | ret = -EFAULT; |
1510 | goto out; | |
1511 | } | |
1512 | } | |
1513 | ||
1514 | for (j = 0; j < page_increm; j++) { | |
1515 | subpage = nth_page(page, j); | |
1516 | pages[i + j] = subpage; | |
1517 | flush_anon_page(vma, subpage, start + j * PAGE_SIZE); | |
1518 | flush_dcache_page(subpage); | |
1519 | } | |
1520 | } | |
1521 | ||
fa5bb209 KS |
1522 | i += page_increm; |
1523 | start += page_increm * PAGE_SIZE; | |
1524 | nr_pages -= page_increm; | |
4bbd4c77 | 1525 | } while (nr_pages); |
df06b37f KB |
1526 | out: |
1527 | if (ctx.pgmap) | |
1528 | put_dev_pagemap(ctx.pgmap); | |
1529 | return i ? i : ret; | |
4bbd4c77 | 1530 | } |
4bbd4c77 | 1531 | |
771ab430 TK |
1532 | static bool vma_permits_fault(struct vm_area_struct *vma, |
1533 | unsigned int fault_flags) | |
d4925e00 | 1534 | { |
1b2ee126 DH |
1535 | bool write = !!(fault_flags & FAULT_FLAG_WRITE); |
1536 | bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE); | |
33a709b2 | 1537 | vm_flags_t vm_flags = write ? VM_WRITE : VM_READ; |
d4925e00 DH |
1538 | |
1539 | if (!(vm_flags & vma->vm_flags)) | |
1540 | return false; | |
1541 | ||
33a709b2 DH |
1542 | /* |
1543 | * The architecture might have a hardware protection | |
1b2ee126 | 1544 | * mechanism other than read/write that can deny access. |
d61172b4 DH |
1545 | * |
1546 | * gup always represents data access, not instruction | |
1547 | * fetches, so execute=false here: | |
33a709b2 | 1548 | */ |
d61172b4 | 1549 | if (!arch_vma_access_permitted(vma, write, false, foreign)) |
33a709b2 DH |
1550 | return false; |
1551 | ||
d4925e00 DH |
1552 | return true; |
1553 | } | |
1554 | ||
adc8cb40 | 1555 | /** |
4bbd4c77 | 1556 | * fixup_user_fault() - manually resolve a user page fault |
4bbd4c77 KS |
1557 | * @mm: mm_struct of target mm |
1558 | * @address: user address | |
1559 | * @fault_flags:flags to pass down to handle_mm_fault() | |
c1e8d7c6 | 1560 | * @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller |
548b6a1e MC |
1561 | * does not allow retry. If NULL, the caller must guarantee |
1562 | * that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY. | |
4bbd4c77 KS |
1563 | * |
1564 | * This is meant to be called in the specific scenario where for locking reasons | |
1565 | * we try to access user memory in atomic context (within a pagefault_disable() | |
1566 | * section), this returns -EFAULT, and we want to resolve the user fault before | |
1567 | * trying again. | |
1568 | * | |
1569 | * Typically this is meant to be used by the futex code. | |
1570 | * | |
1571 | * The main difference with get_user_pages() is that this function will | |
1572 | * unconditionally call handle_mm_fault() which will in turn perform all the | |
1573 | * necessary SW fixup of the dirty and young bits in the PTE, while | |
4a9e1cda | 1574 | * get_user_pages() only guarantees to update these in the struct page. |
4bbd4c77 KS |
1575 | * |
1576 | * This is important for some architectures where those bits also gate the | |
1577 | * access permission to the page because they are maintained in software. On | |
1578 | * such architectures, gup() will not be enough to make a subsequent access | |
1579 | * succeed. | |
1580 | * | |
c1e8d7c6 ML |
1581 | * This function will not return with an unlocked mmap_lock. So it does not |
1582 | * have the same semantics wrt the @mm->mmap_lock as filemap_fault() does. |
4bbd4c77 | 1583 | */ |
64019a2e | 1584 | int fixup_user_fault(struct mm_struct *mm, |
4a9e1cda DD |
1585 | unsigned long address, unsigned int fault_flags, |
1586 | bool *unlocked) | |
4bbd4c77 KS |
1587 | { |
1588 | struct vm_area_struct *vma; | |
8fed2f3c | 1589 | vm_fault_t ret; |
4a9e1cda | 1590 | |
428e106a | 1591 | address = untagged_addr_remote(mm, address); |
f9652594 | 1592 | |
4a9e1cda | 1593 | if (unlocked) |
71335f37 | 1594 | fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
4bbd4c77 | 1595 | |
4a9e1cda | 1596 | retry: |
6cd06ab1 | 1597 | vma = gup_vma_lookup(mm, address); |
8d7071af | 1598 | if (!vma) |
4bbd4c77 KS |
1599 | return -EFAULT; |
1600 | ||
d4925e00 | 1601 | if (!vma_permits_fault(vma, fault_flags)) |
4bbd4c77 KS |
1602 | return -EFAULT; |
1603 | ||
475f4dfc PX |
1604 | if ((fault_flags & FAULT_FLAG_KILLABLE) && |
1605 | fatal_signal_pending(current)) | |
1606 | return -EINTR; | |
1607 | ||
bce617ed | 1608 | ret = handle_mm_fault(vma, address, fault_flags, NULL); |
d9272525 PX |
1609 | |
1610 | if (ret & VM_FAULT_COMPLETED) { | |
1611 | /* | |
1612 | * NOTE: it's a pity that we need to retake the lock here | |
1613 | * to pair with the unlock() in the callers. Ideally we | |
1614 | * could tell the callers so they do not need to unlock. | |
1615 | */ | |
1616 | mmap_read_lock(mm); | |
1617 | *unlocked = true; | |
1618 | return 0; | |
1619 | } | |
1620 | ||
4bbd4c77 | 1621 | if (ret & VM_FAULT_ERROR) { |
9a291a7c JM |
1622 | int err = vm_fault_to_errno(ret, 0); |
1623 | ||
1624 | if (err) | |
1625 | return err; | |
4bbd4c77 KS |
1626 | BUG(); |
1627 | } | |
4a9e1cda DD |
1628 | |
1629 | if (ret & VM_FAULT_RETRY) { | |
d8ed45c5 | 1630 | mmap_read_lock(mm); |
475f4dfc PX |
1631 | *unlocked = true; |
1632 | fault_flags |= FAULT_FLAG_TRIED; | |
1633 | goto retry; | |
4a9e1cda DD |
1634 | } |
1635 | ||
4bbd4c77 KS |
1636 | return 0; |
1637 | } | |
add6a0cd | 1638 | EXPORT_SYMBOL_GPL(fixup_user_fault); |
4bbd4c77 | 1639 | |
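/*
 * A minimal usage sketch for fixup_user_fault() (not from this file; the
 * helper name is made up and only loosely modelled on how the futex code
 * resolves a write fault after an access under pagefault_disable() fails):
 *
 *	static int fault_in_user_writeable_sketch(u32 __user *uaddr)
 *	{
 *		struct mm_struct *mm = current->mm;
 *		int ret;
 *
 *		mmap_read_lock(mm);
 *		ret = fixup_user_fault(mm, (unsigned long)uaddr,
 *				       FAULT_FLAG_WRITE, NULL);
 *		mmap_read_unlock(mm);
 *
 *		return ret < 0 ? ret : 0;
 *	}
 *
 * With @unlocked == NULL the flags must not include FAULT_FLAG_ALLOW_RETRY,
 * and the mmap_lock is still held when this returns.
 */
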
93c5c61d PX |
1640 | /* |
1641 | * GUP always responds to fatal signals. When FOLL_INTERRUPTIBLE is | |
1642 | * specified, it'll also respond to generic signals. The caller of GUP | |
1643 | * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption. | |
1644 | */ | |
1645 | static bool gup_signal_pending(unsigned int flags) | |
1646 | { | |
1647 | if (fatal_signal_pending(current)) | |
1648 | return true; | |
1649 | ||
1650 | if (!(flags & FOLL_INTERRUPTIBLE)) | |
1651 | return false; | |
1652 | ||
1653 | return signal_pending(current); | |
1654 | } | |
1655 | ||
2d3a36a4 | 1656 | /* |
b2a72dff JG |
1657 | * Locking: (*locked == 1) means that the mmap_lock has already been acquired by |
1658 | * the caller. This function may drop the mmap_lock. If it does so, then it will | |
1659 | * set (*locked = 0). | |
1660 | * | |
1661 | * (*locked == 0) means that the caller expects this function to acquire and | |
1662 | * drop the mmap_lock. Therefore, the value of *locked will still be zero when | |
1663 | * the function returns, even though it may have changed temporarily during | |
1664 | * function execution. | |
1665 | * | |
1666 | * Please note that this function, unlike __get_user_pages(), will not return 0 | |
1667 | * for nr_pages > 0, unless FOLL_NOWAIT is used. | |
2d3a36a4 | 1668 | */ |
64019a2e | 1669 | static __always_inline long __get_user_pages_locked(struct mm_struct *mm, |
f0818f47 AA |
1670 | unsigned long start, |
1671 | unsigned long nr_pages, | |
f0818f47 | 1672 | struct page **pages, |
e716712f | 1673 | int *locked, |
0fd71a56 | 1674 | unsigned int flags) |
f0818f47 | 1675 | { |
f0818f47 | 1676 | long ret, pages_done; |
b2a72dff | 1677 | bool must_unlock = false; |
f0818f47 | 1678 | |
9c4b2142 LS |
1679 | if (!nr_pages) |
1680 | return 0; | |
1681 | ||
b2a72dff JG |
1682 | /* |
1683 | * The internal caller expects GUP to manage the lock internally and the | |
1684 | * lock must be released when this returns. | |
1685 | */ | |
9a863a6a | 1686 | if (!*locked) { |
b2a72dff JG |
1687 | if (mmap_read_lock_killable(mm)) |
1688 | return -EAGAIN; | |
1689 | must_unlock = true; | |
1690 | *locked = 1; | |
f0818f47 | 1691 | } |
961ba472 JG |
1692 | else |
1693 | mmap_assert_locked(mm); | |
f0818f47 | 1694 | |
a458b76a AA |
1695 | if (flags & FOLL_PIN) |
1696 | mm_set_has_pinned_flag(&mm->flags); | |
008cfe44 | 1697 | |
eddb1c22 JH |
1698 | /* |
1699 | * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior | |
1700 | * is to set FOLL_GET if the caller wants pages[] filled in (but has | |
1701 | * carelessly failed to specify FOLL_GET), so keep doing that, but only | |
1702 | * for FOLL_GET, not for the newer FOLL_PIN. | |
1703 | * | |
1704 | * FOLL_PIN always expects pages to be non-null, but no need to assert | |
1705 | * that here, as any failures will be obvious enough. | |
1706 | */ | |
1707 | if (pages && !(flags & FOLL_PIN)) | |
f0818f47 | 1708 | flags |= FOLL_GET; |
f0818f47 AA |
1709 | |
1710 | pages_done = 0; | |
f0818f47 | 1711 | for (;;) { |
64019a2e | 1712 | ret = __get_user_pages(mm, start, nr_pages, flags, pages, |
b2cac248 | 1713 | locked); |
f04740f5 | 1714 | if (!(flags & FOLL_UNLOCKABLE)) { |
f0818f47 | 1715 | /* VM_FAULT_RETRY couldn't trigger, bypass */ |
f04740f5 JG |
1716 | pages_done = ret; |
1717 | break; | |
1718 | } | |
f0818f47 | 1719 | |
d9272525 | 1720 | /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */ |
792b429d | 1721 | VM_WARN_ON_ONCE(!*locked && (ret < 0 || ret >= nr_pages)); |
f0818f47 | 1722 | |
f0818f47 AA |
1723 | if (ret > 0) { |
1724 | nr_pages -= ret; | |
1725 | pages_done += ret; | |
1726 | if (!nr_pages) | |
1727 | break; | |
1728 | } | |
1729 | if (*locked) { | |
96312e61 AA |
1730 | /* |
1731 | * VM_FAULT_RETRY didn't trigger or it was a | |
1732 | * FOLL_NOWAIT. | |
1733 | */ | |
f0818f47 AA |
1734 | if (!pages_done) |
1735 | pages_done = ret; | |
1736 | break; | |
1737 | } | |
df17277b MR |
1738 | /* |
1739 | * VM_FAULT_RETRY triggered, so seek to the faulting offset. | |
1740 | * For the prefault case (!pages) we only update counts. | |
1741 | */ | |
1742 | if (likely(pages)) | |
1743 | pages += ret; | |
f0818f47 | 1744 | start += ret << PAGE_SHIFT; |
b2a72dff JG |
1745 | |
1746 | /* The lock was temporarily dropped, so we must unlock later */ | |
1747 | must_unlock = true; | |
f0818f47 | 1748 | |
4426e945 | 1749 | retry: |
f0818f47 AA |
1750 | /* |
1751 | * Repeat on the address that fired VM_FAULT_RETRY | |
4426e945 PX |
1752 | * with both FAULT_FLAG_ALLOW_RETRY and |
1753 | * FAULT_FLAG_TRIED. Note that GUP can be interrupted | |
93c5c61d PX |
1754 | * by fatal signals or even common signals, depending on |
1755 | * the caller's request. So we need to check it before we | |
4426e945 | 1756 | * start trying again otherwise it can loop forever. |
f0818f47 | 1757 | */ |
93c5c61d | 1758 | if (gup_signal_pending(flags)) { |
ae46d2aa HD |
1759 | if (!pages_done) |
1760 | pages_done = -EINTR; | |
4426e945 | 1761 | break; |
ae46d2aa | 1762 | } |
4426e945 | 1763 | |
d8ed45c5 | 1764 | ret = mmap_read_lock_killable(mm); |
71335f37 | 1765 | if (ret) { |
71335f37 PX |
1766 | if (!pages_done) |
1767 | pages_done = ret; | |
1768 | break; | |
1769 | } | |
4426e945 | 1770 | |
c7b6a566 | 1771 | *locked = 1; |
64019a2e | 1772 | ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED, |
b2cac248 | 1773 | pages, locked); |
4426e945 PX |
1774 | if (!*locked) { |
1775 | /* Continue to retry until we succeed */ |
792b429d | 1776 | VM_WARN_ON_ONCE(ret != 0); |
4426e945 PX |
1777 | goto retry; |
1778 | } | |
f0818f47 | 1779 | if (ret != 1) { |
792b429d | 1780 | VM_WARN_ON_ONCE(ret > 1); |
f0818f47 AA |
1781 | if (!pages_done) |
1782 | pages_done = ret; | |
1783 | break; | |
1784 | } | |
1785 | nr_pages--; | |
1786 | pages_done++; | |
1787 | if (!nr_pages) | |
1788 | break; | |
df17277b MR |
1789 | if (likely(pages)) |
1790 | pages++; | |
f0818f47 AA |
1791 | start += PAGE_SIZE; |
1792 | } | |
b2a72dff | 1793 | if (must_unlock && *locked) { |
f0818f47 | 1794 | /* |
b2a72dff JG |
1795 | * We either temporarily dropped the lock, or the caller |
1796 | * requested that we both acquire and drop the lock. Either way, | |
1797 | * we must now unlock, and notify the caller of that state. | |
f0818f47 | 1798 | */ |
d8ed45c5 | 1799 | mmap_read_unlock(mm); |
f0818f47 AA |
1800 | *locked = 0; |
1801 | } | |
9c4b2142 LS |
1802 | |
1803 | /* | |
1804 | * Failing to pin anything implies something has gone wrong (except when | |
1805 | * FOLL_NOWAIT is specified). | |
1806 | */ | |
1807 | if (WARN_ON_ONCE(pages_done == 0 && !(flags & FOLL_NOWAIT))) | |
1808 | return -EFAULT; | |
1809 | ||
f0818f47 AA |
1810 | return pages_done; |
1811 | } | |
1812 | ||
d3649f68 CH |
1813 | /** |
1814 | * populate_vma_page_range() - populate a range of pages in the vma. | |
1815 | * @vma: target vma | |
1816 | * @start: start address | |
1817 | * @end: end address | |
c1e8d7c6 | 1818 | * @locked: whether the mmap_lock is still held |
d3649f68 CH |
1819 | * |
1820 | * This takes care of mlocking the pages too if VM_LOCKED is set. | |
1821 | * | |
0a36f7f8 TY |
1822 | * Return either number of pages pinned in the vma, or a negative error |
1823 | * code on error. | |
d3649f68 | 1824 | * |
c1e8d7c6 | 1825 | * vma->vm_mm->mmap_lock must be held. |
d3649f68 | 1826 | * |
4f6da934 | 1827 | * If @locked is NULL, it may be held for read or write and will |
d3649f68 CH |
1828 | * be unperturbed. |
1829 | * | |
4f6da934 PX |
1830 | * If @locked is non-NULL, it must be held for read only and may be |
1831 | * released. If it's released, *@locked will be set to 0. | |
d3649f68 CH |
1832 | */ |
1833 | long populate_vma_page_range(struct vm_area_struct *vma, | |
4f6da934 | 1834 | unsigned long start, unsigned long end, int *locked) |
d3649f68 CH |
1835 | { |
1836 | struct mm_struct *mm = vma->vm_mm; | |
1837 | unsigned long nr_pages = (end - start) / PAGE_SIZE; | |
9a863a6a | 1838 | int local_locked = 1; |
d3649f68 | 1839 | int gup_flags; |
ece369c7 | 1840 | long ret; |
d3649f68 | 1841 | |
792b429d DH |
1842 | VM_WARN_ON_ONCE(!PAGE_ALIGNED(start)); |
1843 | VM_WARN_ON_ONCE(!PAGE_ALIGNED(end)); | |
1844 | VM_WARN_ON_ONCE_VMA(start < vma->vm_start, vma); | |
1845 | VM_WARN_ON_ONCE_VMA(end > vma->vm_end, vma); | |
42fc5414 | 1846 | mmap_assert_locked(mm); |
d3649f68 | 1847 | |
b67bf49c HD |
1848 | /* |
1849 | * Rightly or wrongly, the VM_LOCKONFAULT case has never used | |
1850 | * faultin_page() to break COW, so it has no work to do here. | |
1851 | */ | |
d3649f68 | 1852 | if (vma->vm_flags & VM_LOCKONFAULT) |
b67bf49c HD |
1853 | return nr_pages; |
1854 | ||
1096bc93 LT |
1855 | /* ... similarly, we've never faulted in PROT_NONE pages */ |
1856 | if (!vma_is_accessible(vma)) | |
1857 | return -EFAULT; | |
1858 | ||
b67bf49c | 1859 | gup_flags = FOLL_TOUCH; |
d3649f68 CH |
1860 | /* |
1861 | * We want to touch writable mappings with a write fault in order | |
1862 | * to break COW, except for shared mappings because these don't COW | |
1863 | * and we would not want to dirty them for nothing. | |
1096bc93 LT |
1864 | * |
1865 | * Otherwise, do a read fault, and use FOLL_FORCE in case it's not | |
1866 | * readable (ie write-only or executable). | |
d3649f68 CH |
1867 | */ |
1868 | if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) | |
1869 | gup_flags |= FOLL_WRITE; | |
1096bc93 | 1870 | else |
d3649f68 CH |
1871 | gup_flags |= FOLL_FORCE; |
1872 | ||
f04740f5 JG |
1873 | if (locked) |
1874 | gup_flags |= FOLL_UNLOCKABLE; | |
1875 | ||
d3649f68 CH |
1876 | /* |
1877 | * We made sure addr is within a VMA, so the following will | |
1878 | * not result in a stack expansion that recurses back here. | |
1879 | */ | |
ece369c7 | 1880 | ret = __get_user_pages(mm, start, nr_pages, gup_flags, |
b2cac248 | 1881 | NULL, locked ? locked : &local_locked); |
ece369c7 HD |
1882 | lru_add_drain(); |
1883 | return ret; | |
d3649f68 CH |
1884 | } |
1885 | ||
4ca9b385 | 1886 | /* |
631426ba DH |
1887 | * faultin_page_range() - populate (prefault) page tables inside the |
1888 | * given range readable/writable | |
4ca9b385 DH |
1889 | * |
1890 | * This takes care of mlocking the pages, too, if VM_LOCKED is set. | |
1891 | * | |
631426ba | 1892 | * @mm: the mm to populate page tables in |
4ca9b385 DH |
1893 | * @start: start address |
1894 | * @end: end address | |
1895 | * @write: whether to prefault readable or writable | |
1896 | * @locked: whether the mmap_lock is still held | |
1897 | * | |
631426ba DH |
1898 | * Returns either number of processed pages in the MM, or a negative error |
1899 | * code on error (see __get_user_pages()). Note that this function reports | |
1900 | * errors related to VMAs, such as incompatible mappings, as expected by | |
1901 | * MADV_POPULATE_(READ|WRITE). | |
4ca9b385 | 1902 | * |
631426ba DH |
1903 | * The range must be page-aligned. |
1904 | * | |
1905 | * mm->mmap_lock must be held. If it's released, *@locked will be set to 0. | |
4ca9b385 | 1906 | */ |
631426ba DH |
1907 | long faultin_page_range(struct mm_struct *mm, unsigned long start, |
1908 | unsigned long end, bool write, int *locked) | |
4ca9b385 | 1909 | { |
4ca9b385 DH |
1910 | unsigned long nr_pages = (end - start) / PAGE_SIZE; |
1911 | int gup_flags; | |
ece369c7 | 1912 | long ret; |
4ca9b385 | 1913 | |
792b429d DH |
1914 | VM_WARN_ON_ONCE(!PAGE_ALIGNED(start)); |
1915 | VM_WARN_ON_ONCE(!PAGE_ALIGNED(end)); | |
4ca9b385 DH |
1916 | mmap_assert_locked(mm); |
1917 | ||
1918 | /* | |
1919 | * FOLL_TOUCH: Mark page accessed and thereby young; will also mark | |
1920 | * the page dirty with FOLL_WRITE -- which doesn't make a | |
1921 | * difference with !FOLL_FORCE, because the page is writable | |
1922 | * in the page table. | |
1923 | * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit | |
1924 | * a poisoned page. | |
4ca9b385 DH |
1925 | * !FOLL_FORCE: Require proper access permissions. |
1926 | */ | |
631426ba DH |
1927 | gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE | |
1928 | FOLL_MADV_POPULATE; | |
4ca9b385 DH |
1929 | if (write) |
1930 | gup_flags |= FOLL_WRITE; | |
1931 | ||
631426ba DH |
1932 | ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked, |
1933 | gup_flags); | |
ece369c7 HD |
1934 | lru_add_drain(); |
1935 | return ret; | |
4ca9b385 DH |
1936 | } |
1937 | ||
d3649f68 CH |
1938 | /* |
1939 | * __mm_populate - populate and/or mlock pages within a range of address space. | |
1940 | * | |
1941 | * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap | |
1942 | * flags. VMAs must be already marked with the desired vm_flags, and | |
c1e8d7c6 | 1943 | * mmap_lock must not be held. |
d3649f68 CH |
1944 | */ |
1945 | int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) | |
1946 | { | |
1947 | struct mm_struct *mm = current->mm; | |
1948 | unsigned long end, nstart, nend; | |
1949 | struct vm_area_struct *vma = NULL; | |
1950 | int locked = 0; | |
1951 | long ret = 0; | |
1952 | ||
1953 | end = start + len; | |
1954 | ||
1955 | for (nstart = start; nstart < end; nstart = nend) { | |
1956 | /* | |
1957 | * We want to fault in pages for [nstart; end) address range. | |
1958 | * Find first corresponding VMA. | |
1959 | */ | |
1960 | if (!locked) { | |
1961 | locked = 1; | |
d8ed45c5 | 1962 | mmap_read_lock(mm); |
c4d1a92d | 1963 | vma = find_vma_intersection(mm, nstart, end); |
d3649f68 | 1964 | } else if (nstart >= vma->vm_end) |
c4d1a92d LH |
1965 | vma = find_vma_intersection(mm, vma->vm_end, end); |
1966 | ||
1967 | if (!vma) | |
d3649f68 CH |
1968 | break; |
1969 | /* | |
1970 | * Set [nstart; nend) to intersection of desired address | |
1971 | * range with the first VMA. Also, skip undesirable VMA types. | |
1972 | */ | |
1973 | nend = min(end, vma->vm_end); | |
1974 | if (vma->vm_flags & (VM_IO | VM_PFNMAP)) | |
1975 | continue; | |
1976 | if (nstart < vma->vm_start) | |
1977 | nstart = vma->vm_start; | |
1978 | /* | |
1979 | * Now fault in a range of pages. populate_vma_page_range() | |
1980 | * double checks the vma flags, so that it won't mlock pages | |
1981 | * if the vma was already munlocked. | |
1982 | */ | |
1983 | ret = populate_vma_page_range(vma, nstart, nend, &locked); | |
1984 | if (ret < 0) { | |
1985 | if (ignore_errors) { | |
1986 | ret = 0; | |
1987 | continue; /* continue at next VMA */ | |
1988 | } | |
1989 | break; | |
1990 | } | |
1991 | nend = nstart + ret * PAGE_SIZE; | |
1992 | ret = 0; | |
1993 | } | |
1994 | if (locked) | |
d8ed45c5 | 1995 | mmap_read_unlock(mm); |
d3649f68 CH |
1996 | return ret; /* 0 or negative error code */ |
1997 | } | |
050a9adc | 1998 | #else /* CONFIG_MMU */ |
64019a2e | 1999 | static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, |
050a9adc | 2000 | unsigned long nr_pages, struct page **pages, |
b2cac248 | 2001 | int *locked, unsigned int foll_flags) |
050a9adc CH |
2002 | { |
2003 | struct vm_area_struct *vma; | |
b2a72dff | 2004 | bool must_unlock = false; |
bfbe7110 | 2005 | vm_flags_t vm_flags; |
24dc20c7 | 2006 | long i; |
050a9adc | 2007 | |
b2a72dff JG |
2008 | if (!nr_pages) |
2009 | return 0; | |
2010 | ||
2011 | /* | |
2012 | * The internal caller expects GUP to manage the lock internally and the | |
2013 | * lock must be released when this returns. | |
2014 | */ | |
9a863a6a | 2015 | if (!*locked) { |
b2a72dff JG |
2016 | if (mmap_read_lock_killable(mm)) |
2017 | return -EAGAIN; | |
2018 | must_unlock = true; | |
2019 | *locked = 1; | |
2020 | } | |
2021 | ||
050a9adc CH |
2022 | /* calculate required read or write permissions. |
2023 | * If FOLL_FORCE is set, we only require the "MAY" flags. | |
2024 | */ | |
2025 | vm_flags = (foll_flags & FOLL_WRITE) ? | |
2026 | (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); | |
2027 | vm_flags &= (foll_flags & FOLL_FORCE) ? | |
2028 | (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); | |
2029 | ||
2030 | for (i = 0; i < nr_pages; i++) { | |
2031 | vma = find_vma(mm, start); | |
2032 | if (!vma) | |
b2a72dff | 2033 | break; |
050a9adc CH |
2034 | |
2035 | /* protect what we can, including chardevs */ | |
2036 | if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || | |
2037 | !(vm_flags & vma->vm_flags)) | |
b2a72dff | 2038 | break; |
050a9adc CH |
2039 | |
2040 | if (pages) { | |
396a400b | 2041 | pages[i] = virt_to_page((void *)start); |
050a9adc CH |
2042 | if (pages[i]) |
2043 | get_page(pages[i]); | |
2044 | } | |
b2cac248 | 2045 | |
050a9adc CH |
2046 | start = (start + PAGE_SIZE) & PAGE_MASK; |
2047 | } | |
2048 | ||
b2a72dff JG |
2049 | if (must_unlock && *locked) { |
2050 | mmap_read_unlock(mm); | |
2051 | *locked = 0; | |
2052 | } | |
050a9adc | 2053 | |
050a9adc CH |
2054 | return i ? : -EFAULT; |
2055 | } | |
2056 | #endif /* !CONFIG_MMU */ | |
d3649f68 | 2057 | |
bb523b40 AG |
2058 | /** |
2059 | * fault_in_writeable - fault in userspace address range for writing | |
2060 | * @uaddr: start of address range | |
2061 | * @size: size of address range | |
2062 | * | |
2063 | * Returns the number of bytes not faulted in (like copy_to_user() and | |
2064 | * copy_from_user()). | |
2065 | */ | |
2066 | size_t fault_in_writeable(char __user *uaddr, size_t size) | |
2067 | { | |
a7797e74 BH |
2068 | const unsigned long start = (unsigned long)uaddr; |
2069 | const unsigned long end = start + size; | |
2070 | unsigned long cur; | |
bb523b40 AG |
2071 | |
2072 | if (unlikely(size == 0)) | |
2073 | return 0; | |
677b2a8c CL |
2074 | if (!user_write_access_begin(uaddr, size)) |
2075 | return size; | |
bb523b40 | 2076 | |
a7797e74 BH |
2077 | /* Stop once we overflow to 0. */ |
2078 | for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE)) | |
2079 | unsafe_put_user(0, (char __user *)cur, out); | |
bb523b40 | 2080 | out: |
677b2a8c | 2081 | user_write_access_end(); |
a7797e74 BH |
2082 | if (size > cur - start) |
2083 | return size - (cur - start); | |
bb523b40 AG |
2084 | return 0; |
2085 | } | |
2086 | EXPORT_SYMBOL(fault_in_writeable); | |
2087 | ||
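/*
 * A usage sketch for fault_in_writeable() (assumed caller pattern, not taken
 * from an in-tree user; the helper name is made up): the typical pairing with
 * a copy attempted under pagefault_disable(), retried after faulting the
 * destination in.
 *
 *	static size_t copy_out_with_faultin(void __user *dst, const void *src,
 *					    size_t len)
 *	{
 *		size_t left;
 *
 *		for (;;) {
 *			pagefault_disable();
 *			left = copy_to_user(dst, src, len);
 *			pagefault_enable();
 *			if (!left)
 *				return 0;
 *			if (fault_in_writeable(dst, len) == len)
 *				return left;
 *		}
 *	}
 *
 * Real callers additionally limit each copy attempt to the bytes that were
 * actually faulted in, so that partial progress is guaranteed and the loop
 * cannot spin on a range that only partially faults in.
 */
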
da32b581 CM |
2088 | /** |
2089 | * fault_in_subpage_writeable - fault in an address range for writing | |
2090 | * @uaddr: start of address range | |
2091 | * @size: size of address range | |
2092 | * | |
2093 | * Fault in a user address range for writing while checking for permissions at | |
2094 | * sub-page granularity (e.g. arm64 MTE). This function should be used when | |
2095 | * the caller cannot guarantee forward progress of a copy_to_user() loop. | |
2096 | * | |
2097 | * Returns the number of bytes not faulted in (like copy_to_user() and | |
2098 | * copy_from_user()). | |
2099 | */ | |
2100 | size_t fault_in_subpage_writeable(char __user *uaddr, size_t size) | |
2101 | { | |
2102 | size_t faulted_in; | |
2103 | ||
2104 | /* | |
2105 | * Attempt faulting in at page granularity first for page table | |
2106 | * permission checking. The arch-specific probe_subpage_writeable() | |
2107 | * functions may not check for this. | |
2108 | */ | |
2109 | faulted_in = size - fault_in_writeable(uaddr, size); | |
2110 | if (faulted_in) | |
2111 | faulted_in -= probe_subpage_writeable(uaddr, faulted_in); | |
2112 | ||
2113 | return size - faulted_in; | |
2114 | } | |
2115 | EXPORT_SYMBOL(fault_in_subpage_writeable); | |
2116 | ||
cdd591fc AG |
2117 | /* |
2118 | * fault_in_safe_writeable - fault in an address range for writing | |
2119 | * @uaddr: start of address range | |
2120 | * @size: length of address range | |
2121 | * | |
fe673d3f LT |
2122 | * Faults in an address range for writing. This is primarily useful when we |
2123 | * already know that some or all of the pages in the address range aren't in | |
2124 | * memory. | |
cdd591fc | 2125 | * |
fe673d3f | 2126 | * Unlike fault_in_writeable(), this function is non-destructive. |
cdd591fc AG |
2127 | * |
2128 | * Note that we don't pin or otherwise hold the pages referenced that we fault | |
2129 | * in. There's no guarantee that they'll stay in memory for any duration of | |
2130 | * time. | |
2131 | * | |
2132 | * Returns the number of bytes not faulted in, like copy_to_user() and | |
2133 | * copy_from_user(). | |
2134 | */ | |
2135 | size_t fault_in_safe_writeable(const char __user *uaddr, size_t size) | |
2136 | { | |
a7797e74 BH |
2137 | const unsigned long start = (unsigned long)uaddr; |
2138 | const unsigned long end = start + size; | |
2139 | unsigned long cur; | |
cdd591fc | 2140 | struct mm_struct *mm = current->mm; |
fe673d3f | 2141 | bool unlocked = false; |
cdd591fc | 2142 | |
fe673d3f LT |
2143 | if (unlikely(size == 0)) |
2144 | return 0; | |
cdd591fc | 2145 | |
fe673d3f | 2146 | mmap_read_lock(mm); |
a7797e74 BH |
2147 | /* Stop once we overflow to 0. */ |
2148 | for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE)) | |
2149 | if (fixup_user_fault(mm, cur, FAULT_FLAG_WRITE, &unlocked)) | |
cdd591fc | 2150 | break; |
fe673d3f LT |
2151 | mmap_read_unlock(mm); |
2152 | ||
a7797e74 BH |
2153 | if (size > cur - start) |
2154 | return size - (cur - start); | |
fe673d3f | 2155 | return 0; |
cdd591fc AG |
2156 | } |
2157 | EXPORT_SYMBOL(fault_in_safe_writeable); | |
2158 | ||
bb523b40 AG |
2159 | /** |
2160 | * fault_in_readable - fault in userspace address range for reading | |
2161 | * @uaddr: start of user address range | |
2162 | * @size: size of user address range | |
2163 | * | |
2164 | * Returns the number of bytes not faulted in (like copy_to_user() and | |
2165 | * copy_from_user()). | |
2166 | */ | |
2167 | size_t fault_in_readable(const char __user *uaddr, size_t size) | |
2168 | { | |
a7797e74 BH |
2169 | const unsigned long start = (unsigned long)uaddr; |
2170 | const unsigned long end = start + size; | |
2171 | unsigned long cur; | |
bb523b40 AG |
2172 | volatile char c; |
2173 | ||
2174 | if (unlikely(size == 0)) | |
2175 | return 0; | |
677b2a8c CL |
2176 | if (!user_read_access_begin(uaddr, size)) |
2177 | return size; | |
bb523b40 | 2178 | |
a7797e74 BH |
2179 | /* Stop once we overflow to 0. */ |
2180 | for (cur = start; cur && cur < end; cur = PAGE_ALIGN_DOWN(cur + PAGE_SIZE)) | |
2181 | unsafe_get_user(c, (const char __user *)cur, out); | |
bb523b40 | 2182 | out: |
677b2a8c | 2183 | user_read_access_end(); |
bb523b40 | 2184 | (void)c; |
a7797e74 BH |
2185 | if (size > cur - start) |
2186 | return size - (cur - start); | |
bb523b40 AG |
2187 | return 0; |
2188 | } | |
2189 | EXPORT_SYMBOL(fault_in_readable); | |
2190 | ||
8f942eea JH |
2191 | /** |
2192 | * get_dump_page() - pin user page in memory while writing it to core dump | |
2193 | * @addr: user address | |
d6ff4c8f | 2194 | * @locked: a pointer to an int denoting whether the mmap_lock is held |
8f942eea JH |
2195 | * |
2196 | * Returns struct page pointer of user page pinned for dump, | |
2197 | * to be freed afterwards by put_page(). | |
2198 | * | |
2199 | * Returns NULL on any kind of failure - a hole must then be inserted into | |
2200 | * the corefile, to preserve alignment with its headers; and also returns | |
2201 | * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - | |
f0953a1b | 2202 | * allowing a hole to be left in the corefile to save disk space. |
8f942eea | 2203 | * |
7f3bfab5 | 2204 | * Called without mmap_lock (takes and releases the mmap_lock by itself). |
8f942eea JH |
2205 | */ |
2206 | #ifdef CONFIG_ELF_CORE | |
d6ff4c8f | 2207 | struct page *get_dump_page(unsigned long addr, int *locked) |
8f942eea | 2208 | { |
8f942eea | 2209 | struct page *page; |
7f3bfab5 | 2210 | int ret; |
8f942eea | 2211 | |
d6ff4c8f | 2212 | ret = __get_user_pages_locked(current->mm, addr, 1, &page, locked, |
7f3bfab5 | 2213 | FOLL_FORCE | FOLL_DUMP | FOLL_GET); |
7f3bfab5 | 2214 | return (ret == 1) ? page : NULL; |
8f942eea JH |
2215 | } |
2216 | #endif /* CONFIG_ELF_CORE */ | |
2217 | ||
d1e153fe | 2218 | #ifdef CONFIG_MIGRATION |
94efde1d JH |
2219 | |
2220 | /* | |
2221 | * An array of either pages or folios ("pofs"). Although it may seem tempting to | |
2222 | * avoid this complication, by simply interpreting a list of folios as a list of | |
2223 | * pages, that approach won't work in the longer term, because eventually the | |
2224 | * layouts of struct page and struct folio will become completely different. | |
2225 | * Furthermore, this pof approach avoids excessive page_folio() calls. | |
2226 | */ | |
2227 | struct pages_or_folios { | |
2228 | union { | |
2229 | struct page **pages; | |
2230 | struct folio **folios; | |
2231 | void **entries; | |
2232 | }; | |
2233 | bool has_folios; | |
2234 | long nr_entries; | |
2235 | }; | |
2236 | ||
2237 | static struct folio *pofs_get_folio(struct pages_or_folios *pofs, long i) | |
2238 | { | |
2239 | if (pofs->has_folios) | |
2240 | return pofs->folios[i]; | |
2241 | return page_folio(pofs->pages[i]); | |
2242 | } | |
2243 | ||
2244 | static void pofs_clear_entry(struct pages_or_folios *pofs, long i) | |
2245 | { | |
2246 | pofs->entries[i] = NULL; | |
2247 | } | |
2248 | ||
2249 | static void pofs_unpin(struct pages_or_folios *pofs) | |
2250 | { | |
2251 | if (pofs->has_folios) | |
2252 | unpin_folios(pofs->folios, pofs->nr_entries); | |
2253 | else | |
2254 | unpin_user_pages(pofs->pages, pofs->nr_entries); | |
2255 | } | |
2256 | ||
a03db236 LZ |
2257 | static struct folio *pofs_next_folio(struct folio *folio, |
2258 | struct pages_or_folios *pofs, long *index_ptr) | |
2259 | { | |
2260 | long i = *index_ptr + 1; | |
2261 | ||
2262 | if (!pofs->has_folios && folio_test_large(folio)) { | |
2263 | const unsigned long start_pfn = folio_pfn(folio); | |
2264 | const unsigned long end_pfn = start_pfn + folio_nr_pages(folio); | |
2265 | ||
2266 | for (; i < pofs->nr_entries; i++) { | |
2267 | unsigned long pfn = page_to_pfn(pofs->pages[i]); | |
2268 | ||
2269 | /* Is this page part of this folio? */ | |
2270 | if (pfn < start_pfn || pfn >= end_pfn) | |
2271 | break; | |
2272 | } | |
2273 | } | |
2274 | ||
2275 | if (unlikely(i == pofs->nr_entries)) | |
2276 | return NULL; | |
2277 | *index_ptr = i; | |
2278 | ||
2279 | return pofs_get_folio(pofs, i); | |
2280 | } | |
2281 | ||
f68749ec | 2282 | /* |
53ba78de | 2283 | * Returns the number of collected folios. Return value is always >= 0. |
f68749ec | 2284 | */ |
517f496e | 2285 | static unsigned long collect_longterm_unpinnable_folios( |
94efde1d JH |
2286 | struct list_head *movable_folio_list, |
2287 | struct pages_or_folios *pofs) | |
9a4e9f3b | 2288 | { |
a03db236 | 2289 | unsigned long collected = 0; |
a03db236 | 2290 | struct folio *folio; |
a09a8a1f | 2291 | int drained = 0; |
a03db236 | 2292 | long i = 0; |
9a4e9f3b | 2293 | |
a03db236 LZ |
2294 | for (folio = pofs_get_folio(pofs, i); folio; |
2295 | folio = pofs_next_folio(folio, pofs, &i)) { | |
f9f38f78 | 2296 | |
67e139b0 AP |
2297 | if (folio_is_longterm_pinnable(folio)) |
2298 | continue; | |
b05a79d4 | 2299 | |
517f496e DH |
2300 | collected++; |
2301 | ||
67e139b0 | 2302 | if (folio_is_device_coherent(folio)) |
f9f38f78 CH |
2303 | continue; |
2304 | ||
1b7f7e58 | 2305 | if (folio_test_hugetlb(folio)) { |
4c640f12 | 2306 | folio_isolate_hugetlb(folio, movable_folio_list); |
f9f38f78 CH |
2307 | continue; |
2308 | } | |
9a4e9f3b | 2309 | |
2da6de30 | 2310 | if (drained == 0 && folio_may_be_lru_cached(folio) && |
a09a8a1f HD |
2311 | folio_ref_count(folio) != |
2312 | folio_expected_ref_count(folio) + 1) { | |
2313 | lru_add_drain(); | |
2314 | drained = 1; | |
2315 | } | |
2da6de30 | 2316 | if (drained == 1 && folio_may_be_lru_cached(folio) && |
a09a8a1f HD |
2317 | folio_ref_count(folio) != |
2318 | folio_expected_ref_count(folio) + 1) { | |
f9f38f78 | 2319 | lru_add_drain_all(); |
a09a8a1f | 2320 | drained = 2; |
f9f38f78 CH |
2321 | } |
2322 | ||
be2d5756 | 2323 | if (!folio_isolate_lru(folio)) |
f9f38f78 | 2324 | continue; |
67e139b0 | 2325 | |
53ba78de | 2326 | list_add_tail(&folio->lru, movable_folio_list); |
1b7f7e58 MWO |
2327 | node_stat_mod_folio(folio, |
2328 | NR_ISOLATED_ANON + folio_is_file_lru(folio), | |
2329 | folio_nr_pages(folio)); | |
9a4e9f3b | 2330 | } |
517f496e DH |
2331 | |
2332 | return collected; | |
67e139b0 AP |
2333 | } |
2334 | ||
2335 | /* | |
53ba78de VK |
2336 | * Unpins all folios, and migrates device coherent folios and the folios on |
2337 | * movable_folio_list. Returns -EAGAIN if all folios were successfully |
2338 | * migrated, or -errno on failure (or partial success). |
67e139b0 | 2339 | */ |
94efde1d JH |
2340 | static int |
2341 | migrate_longterm_unpinnable_folios(struct list_head *movable_folio_list, | |
2342 | struct pages_or_folios *pofs) | |
67e139b0 AP |
2343 | { |
2344 | int ret; | |
2345 | unsigned long i; | |
6e7f34eb | 2346 | |
94efde1d JH |
2347 | for (i = 0; i < pofs->nr_entries; i++) { |
2348 | struct folio *folio = pofs_get_folio(pofs, i); | |
67e139b0 AP |
2349 | |
2350 | if (folio_is_device_coherent(folio)) { | |
2351 | /* | |
53ba78de VK |
2352 | * Migration will fail if the folio is pinned, so |
2353 | * convert the pin on the source folio to a normal | |
2354 | * reference. | |
67e139b0 | 2355 | */ |
94efde1d | 2356 | pofs_clear_entry(pofs, i); |
67e139b0 AP |
2357 | folio_get(folio); |
2358 | gup_put_folio(folio, 1, FOLL_PIN); | |
2359 | ||
5c8525a3 | 2360 | if (migrate_device_coherent_folio(folio)) { |
67e139b0 AP |
2361 | ret = -EBUSY; |
2362 | goto err; | |
2363 | } | |
2364 | ||
b05a79d4 | 2365 | continue; |
67e139b0 | 2366 | } |
b05a79d4 | 2367 | |
67e139b0 | 2368 | /* |
53ba78de | 2369 | * We can't migrate folios with unexpected references, so drop |
67e139b0 | 2370 | * the reference obtained by __get_user_pages_locked(). |
53ba78de | 2371 | * Migrating folios have been added to movable_folio_list after |
67e139b0 | 2372 | * calling folio_isolate_lru() which takes a reference so the |
53ba78de | 2373 | * folio won't be freed if it's migrating. |
67e139b0 | 2374 | */ |
94efde1d JH |
2375 | unpin_folio(folio); |
2376 | pofs_clear_entry(pofs, i); | |
f68749ec | 2377 | } |
f9f38f78 | 2378 | |
53ba78de | 2379 | if (!list_empty(movable_folio_list)) { |
f9f38f78 CH |
2380 | struct migration_target_control mtc = { |
2381 | .nid = NUMA_NO_NODE, | |
2382 | .gfp_mask = GFP_USER | __GFP_NOWARN, | |
e42dfe4e | 2383 | .reason = MR_LONGTERM_PIN, |
f9f38f78 CH |
2384 | }; |
2385 | ||
53ba78de | 2386 | if (migrate_pages(movable_folio_list, alloc_migration_target, |
67e139b0 AP |
2387 | NULL, (unsigned long)&mtc, MIGRATE_SYNC, |
2388 | MR_LONGTERM_PIN, NULL)) { | |
f9f38f78 | 2389 | ret = -ENOMEM; |
67e139b0 AP |
2390 | goto err; |
2391 | } | |
9a4e9f3b AK |
2392 | } |
2393 | ||
53ba78de | 2394 | putback_movable_pages(movable_folio_list); |
67e139b0 AP |
2395 | |
2396 | return -EAGAIN; | |
2397 | ||
2398 | err: | |
94efde1d | 2399 | pofs_unpin(pofs); |
53ba78de | 2400 | putback_movable_pages(movable_folio_list); |
24a95998 | 2401 | |
67e139b0 AP |
2402 | return ret; |
2403 | } | |
2404 | ||
94efde1d JH |
2405 | static long |
2406 | check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs) | |
2407 | { | |
2408 | LIST_HEAD(movable_folio_list); | |
517f496e | 2409 | unsigned long collected; |
94efde1d | 2410 | |
517f496e DH |
2411 | collected = collect_longterm_unpinnable_folios(&movable_folio_list, |
2412 | pofs); | |
2413 | if (!collected) | |
94efde1d JH |
2414 | return 0; |
2415 | ||
2416 | return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs); | |
2417 | } | |
2418 | ||
67e139b0 | 2419 | /* |
aa6f8b25 | 2420 | * Check whether all folios are *allowed* to be pinned indefinitely (long term). |
53ba78de VK |
2421 | * Rather confusingly, all folios in the range are required to be pinned via |
2422 | * FOLL_PIN, before calling this routine. | |
67e139b0 | 2423 | * |
aa6f8b25 | 2424 | * Return values: |
67e139b0 | 2425 | * |
aa6f8b25 | 2426 | * 0: if everything is OK and all folios in the range are allowed to be pinned, |
53ba78de | 2427 | * then this routine leaves all folios pinned and returns zero for success. |
aa6f8b25 JH |
2428 | * |
2429 | * -EAGAIN: if any folios in the range are not allowed to be pinned, then this | |
2430 | * routine will migrate those folios away, unpin all the folios in the range. If | |
2431 | * migration of the entire set of folios succeeds, then -EAGAIN is returned. The | |
2432 | * caller should re-pin the entire range with FOLL_PIN and then call this | |
2433 | * routine again. | |
2434 | * | |
2435 | * -ENOMEM, or any other -errno: if an error *other* than -EAGAIN occurs, this | |
2436 | * indicates a migration failure. The caller should give up, and propagate the | |
2437 | * error back up the call stack. The caller does not need to unpin any folios in | |
2438 | * that case, because this routine will do the unpinning. | |
67e139b0 | 2439 | */ |
53ba78de VK |
2440 | static long check_and_migrate_movable_folios(unsigned long nr_folios, |
2441 | struct folio **folios) | |
67e139b0 | 2442 | { |
94efde1d JH |
2443 | struct pages_or_folios pofs = { |
2444 | .folios = folios, | |
2445 | .has_folios = true, | |
2446 | .nr_entries = nr_folios, | |
2447 | }; | |
67e139b0 | 2448 | |
94efde1d | 2449 | return check_and_migrate_movable_pages_or_folios(&pofs); |
53ba78de VK |
2450 | } |
2451 | ||
2452 | /* | |
aa6f8b25 JH |
2453 | * Return values and behavior are the same as those for |
2454 | * check_and_migrate_movable_folios(). | |
53ba78de VK |
2455 | */ |
2456 | static long check_and_migrate_movable_pages(unsigned long nr_pages, | |
2457 | struct page **pages) | |
2458 | { | |
94efde1d JH |
2459 | struct pages_or_folios pofs = { |
2460 | .pages = pages, | |
2461 | .has_folios = false, | |
2462 | .nr_entries = nr_pages, | |
2463 | }; | |
53ba78de | 2464 | |
94efde1d | 2465 | return check_and_migrate_movable_pages_or_folios(&pofs); |
9a4e9f3b AK |
2466 | } |
2467 | #else | |
f68749ec | 2468 | static long check_and_migrate_movable_pages(unsigned long nr_pages, |
f6d299ec | 2469 | struct page **pages) |
9a4e9f3b | 2470 | { |
24a95998 | 2471 | return 0; |
9a4e9f3b | 2472 | } |
53ba78de VK |
2473 | |
2474 | static long check_and_migrate_movable_folios(unsigned long nr_folios, | |
2475 | struct folio **folios) | |
2476 | { | |
2477 | return 0; | |
2478 | } | |
d1e153fe | 2479 | #endif /* CONFIG_MIGRATION */ |
9a4e9f3b | 2480 | |
2bb6d283 | 2481 | /* |
932f4a63 IW |
2482 | * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which |
2483 | * allows us to process the FOLL_LONGTERM flag. | |
2bb6d283 | 2484 | */ |
64019a2e | 2485 | static long __gup_longterm_locked(struct mm_struct *mm, |
932f4a63 IW |
2486 | unsigned long start, |
2487 | unsigned long nr_pages, | |
2488 | struct page **pages, | |
53b2d09b | 2489 | int *locked, |
932f4a63 | 2490 | unsigned int gup_flags) |
2bb6d283 | 2491 | { |
f68749ec | 2492 | unsigned int flags; |
24a95998 | 2493 | long rc, nr_pinned_pages; |
2bb6d283 | 2494 | |
f68749ec | 2495 | if (!(gup_flags & FOLL_LONGTERM)) |
b2cac248 | 2496 | return __get_user_pages_locked(mm, start, nr_pages, pages, |
53b2d09b | 2497 | locked, gup_flags); |
67e139b0 | 2498 | |
f68749ec PT |
2499 | flags = memalloc_pin_save(); |
2500 | do { | |
24a95998 | 2501 | nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages, |
b2cac248 | 2502 | pages, locked, |
24a95998 AP |
2503 | gup_flags); |
2504 | if (nr_pinned_pages <= 0) { | |
2505 | rc = nr_pinned_pages; | |
f68749ec | 2506 | break; |
24a95998 | 2507 | } |
d64e2dbc JG |
2508 | |
2509 | /* FOLL_LONGTERM implies FOLL_PIN */ | |
f6d299ec | 2510 | rc = check_and_migrate_movable_pages(nr_pinned_pages, pages); |
24a95998 | 2511 | } while (rc == -EAGAIN); |
f68749ec | 2512 | memalloc_pin_restore(flags); |
24a95998 | 2513 | return rc ? rc : nr_pinned_pages; |
2bb6d283 | 2514 | } |
932f4a63 | 2515 | |
d64e2dbc JG |
2516 | /* |
2517 | * Check that the given flags are valid for the exported gup/pup interface, and | |
2518 | * update them with the required flags that the caller must have set. | |
2519 | */ | |
b2cac248 LS |
2520 | static bool is_valid_gup_args(struct page **pages, int *locked, |
2521 | unsigned int *gup_flags_p, unsigned int to_set) | |
447f3e45 | 2522 | { |
d64e2dbc JG |
2523 | unsigned int gup_flags = *gup_flags_p; |
2524 | ||
447f3e45 | 2525 | /* |
d64e2dbc JG |
2526 | * These flags are not allowed to be specified externally to the gup |
2527 | * interfaces: | |
0f20bba1 | 2528 | * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only |
7290840d | 2529 | * - FOLL_REMOTE is internal only, set in (get|pin)_user_pages_remote() |
f04740f5 | 2530 | * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL |
447f3e45 | 2531 | */ |
0f20bba1 | 2532 | if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS)) |
d64e2dbc JG |
2533 | return false; |
2534 | ||
2535 | gup_flags |= to_set; | |
f04740f5 JG |
2536 | if (locked) { |
2537 | /* At the external interface locked must be set */ | |
2538 | if (WARN_ON_ONCE(*locked != 1)) | |
2539 | return false; | |
2540 | ||
2541 | gup_flags |= FOLL_UNLOCKABLE; | |
2542 | } | |
d64e2dbc JG |
2543 | |
2544 | /* FOLL_GET and FOLL_PIN are mutually exclusive. */ | |
2545 | if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) == | |
2546 | (FOLL_PIN | FOLL_GET))) | |
2547 | return false; | |
2548 | ||
2549 | /* LONGTERM can only be specified when pinning */ | |
2550 | if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM))) | |
2551 | return false; | |
2552 | ||
2553 | /* Pages input must be given if using GET/PIN */ | |
2554 | if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages)) | |
447f3e45 | 2555 | return false; |
d64e2dbc | 2556 | |
d64e2dbc JG |
2557 | /* We want to allow the pgmap to be hot-unplugged at all times */ |
2558 | if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) && | |
2559 | (gup_flags & FOLL_PCI_P2PDMA))) | |
2560 | return false; | |
2561 | ||
d64e2dbc | 2562 | *gup_flags_p = gup_flags; |
447f3e45 BS |
2563 | return true; |
2564 | } | |
2565 | ||
22bf29b6 | 2566 | #ifdef CONFIG_MMU |
adc8cb40 | 2567 | /** |
c4237f8b | 2568 | * get_user_pages_remote() - pin user pages in memory |
c4237f8b JH |
2569 | * @mm: mm_struct of target mm |
2570 | * @start: starting user address | |
2571 | * @nr_pages: number of pages from start to pin | |
2572 | * @gup_flags: flags modifying lookup behaviour | |
2573 | * @pages: array that receives pointers to the pages pinned. | |
2574 | * Should be at least nr_pages long. Or NULL, if caller | |
2575 | * only intends to ensure the pages are faulted in. | |
c4237f8b JH |
2576 | * @locked: pointer to lock flag indicating whether lock is held and |
2577 | * subsequently whether VM_FAULT_RETRY functionality can be | |
2578 | * utilised. Lock must initially be held. | |
2579 | * | |
2580 | * Returns either number of pages pinned (which may be less than the | |
2581 | * number requested), or an error. Details about the return value: | |
2582 | * | |
2583 | * -- If nr_pages is 0, returns 0. | |
2584 | * -- If nr_pages is >0, but no pages were pinned, returns -errno. | |
2585 | * -- If nr_pages is >0, and some pages were pinned, returns the number of | |
2586 | * pages pinned. Again, this may be less than nr_pages. | |
2587 | * | |
2588 | * The caller is responsible for releasing returned @pages, via put_page(). | |
2589 | * | |
c1e8d7c6 | 2590 | * Must be called with mmap_lock held for read or write. |
c4237f8b | 2591 | * |
adc8cb40 SJ |
2592 | * get_user_pages_remote walks a process's page tables and takes a reference |
2593 | * to each struct page that each user address corresponds to at a given | |
c4237f8b JH |
2594 | * instant. That is, it takes the page that would be accessed if a user |
2595 | * thread accesses the given user virtual address at that instant. | |
2596 | * | |
2597 | * This does not guarantee that the page exists in the user mappings when | |
adc8cb40 | 2598 | * get_user_pages_remote returns, and there may even be a completely different |
c4237f8b | 2599 | * page there in some cases (eg. if mmapped pagecache has been invalidated |
5da1a868 | 2600 | * and subsequently re-faulted). However it does guarantee that the page |
c4237f8b JH |
2601 | * won't be freed completely. And mostly callers simply care that the page |
2602 | * contains data that was valid *at some point in time*. Typically, an IO | |
2603 | * or similar operation cannot guarantee anything stronger anyway because | |
2604 | * locks can't be held over the syscall boundary. | |
2605 | * | |
2606 | * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page | |
2607 | * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must | |
2608 | * be called after the page is finished with, and before put_page is called. | |
2609 | * | |
adc8cb40 SJ |
2610 | * get_user_pages_remote is typically used for fewer-copy IO operations, |
2611 | * to get a handle on the memory by some means other than accesses | |
2612 | * via the user virtual addresses. The pages may be submitted for | |
2613 | * DMA to devices or accessed via their kernel linear mapping (via the | |
2614 | * kmap APIs). Care should be taken to use the correct cache flushing APIs. | |
c4237f8b JH |
2615 | * |
2616 | * See also get_user_pages_fast, for performance critical applications. | |
2617 | * | |
adc8cb40 | 2618 | * get_user_pages_remote should be phased out in favor of |
c4237f8b | 2619 | * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing |
adc8cb40 | 2620 | * should use get_user_pages_remote because it cannot pass |
c4237f8b JH |
2621 | * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. |
2622 | */ | |
64019a2e | 2623 | long get_user_pages_remote(struct mm_struct *mm, |
c4237f8b JH |
2624 | unsigned long start, unsigned long nr_pages, |
2625 | unsigned int gup_flags, struct page **pages, | |
ca5e8632 | 2626 | int *locked) |
c4237f8b | 2627 | { |
9a863a6a JG |
2628 | int local_locked = 1; |
2629 | ||
b2cac248 | 2630 | if (!is_valid_gup_args(pages, locked, &gup_flags, |
d64e2dbc | 2631 | FOLL_TOUCH | FOLL_REMOTE)) |
eddb1c22 JH |
2632 | return -EINVAL; |
2633 | ||
b2cac248 | 2634 | return __get_user_pages_locked(mm, start, nr_pages, pages, |
9a863a6a | 2635 | locked ? locked : &local_locked, |
d64e2dbc | 2636 | gup_flags); |
c4237f8b JH |
2637 | } |
2638 | EXPORT_SYMBOL(get_user_pages_remote); | |
2639 | ||
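/*
 * A locking sketch for the @locked parameter of get_user_pages_remote()
 * (illustrative only; @mm, @start, @nr_pages and @pages are assumed to be
 * set up by the caller, e.g. with @mm obtained via get_task_mm()):
 *
 *	int locked = 1;
 *	long npinned;
 *
 *	mmap_read_lock(mm);
 *	npinned = get_user_pages_remote(mm, start, nr_pages, FOLL_WRITE,
 *					pages, &locked);
 *	if (locked)
 *		mmap_read_unlock(mm);
 *
 * If *locked is 0 on return, GUP already dropped the mmap_lock and the caller
 * must not unlock it again. The pages carry a FOLL_GET reference and must
 * eventually be released with put_page().
 */
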
eddb1c22 | 2640 | #else /* CONFIG_MMU */ |
64019a2e | 2641 | long get_user_pages_remote(struct mm_struct *mm, |
eddb1c22 JH |
2642 | unsigned long start, unsigned long nr_pages, |
2643 | unsigned int gup_flags, struct page **pages, | |
ca5e8632 | 2644 | int *locked) |
eddb1c22 JH |
2645 | { |
2646 | return 0; | |
2647 | } | |
2648 | #endif /* !CONFIG_MMU */ | |
2649 | ||
adc8cb40 SJ |
2650 | /** |
2651 | * get_user_pages() - pin user pages in memory | |
2652 | * @start: starting user address | |
2653 | * @nr_pages: number of pages from start to pin | |
2654 | * @gup_flags: flags modifying lookup behaviour | |
2655 | * @pages: array that receives pointers to the pages pinned. | |
2656 | * Should be at least nr_pages long. Or NULL, if caller | |
2657 | * only intends to ensure the pages are faulted in. | |
adc8cb40 | 2658 | * |
64019a2e PX |
2659 | * This is the same as get_user_pages_remote(), just with a less-flexible |
2660 | * calling convention where we assume that the mm being operated on belongs to | |
2661 | * the current task, and doesn't allow passing of a locked parameter. We also | |
2662 | * obviously don't pass FOLL_REMOTE in here. | |
932f4a63 IW |
2663 | */ |
2664 | long get_user_pages(unsigned long start, unsigned long nr_pages, | |
54d02069 | 2665 | unsigned int gup_flags, struct page **pages) |
932f4a63 | 2666 | { |
9a863a6a JG |
2667 | int locked = 1; |
2668 | ||
b2cac248 | 2669 | if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH)) |
eddb1c22 JH |
2670 | return -EINVAL; |
2671 | ||
afa3c33e | 2672 | return __get_user_pages_locked(current->mm, start, nr_pages, pages, |
b2cac248 | 2673 | &locked, gup_flags); |
932f4a63 IW |
2674 | } |
2675 | EXPORT_SYMBOL(get_user_pages); | |
2bb6d283 | 2676 | |
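/*
 * A single-page sketch of the reference and dirty handling described for
 * __get_user_pages() and get_user_pages_remote() above (variable names are
 * illustrative, not from an in-tree caller; the memset() stands in for
 * whatever modification the caller actually performs):
 *
 *	struct page *page;
 *	long ret;
 *
 *	mmap_read_lock(current->mm);
 *	ret = get_user_pages(addr, 1, FOLL_WRITE, &page);
 *	mmap_read_unlock(current->mm);
 *	if (ret == 1) {
 *		void *kaddr = kmap_local_page(page);
 *
 *		memset(kaddr, 0, PAGE_SIZE);
 *		kunmap_local(kaddr);
 *		set_page_dirty_lock(page);
 *		put_page(page);
 *	}
 */
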
acc3c8d1 | 2677 | /* |
d3649f68 | 2678 | * get_user_pages_unlocked() is suitable to replace the form: |
acc3c8d1 | 2679 | * |
3e4e28c5 | 2680 | * mmap_read_lock(mm); |
64019a2e | 2681 | * get_user_pages(mm, ..., pages, NULL); |
3e4e28c5 | 2682 | * mmap_read_unlock(mm); |
d3649f68 CH |
2683 | * |
2684 | * with: | |
2685 | * | |
64019a2e | 2686 | * get_user_pages_unlocked(mm, ..., pages); |
d3649f68 CH |
2687 | * |
2688 | * It is functionally equivalent to get_user_pages_fast so | |
2689 | * get_user_pages_fast should be used instead if specific gup_flags | |
2690 | * (e.g. FOLL_FORCE) are not required. | |
acc3c8d1 | 2691 | */ |
d3649f68 CH |
2692 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, |
2693 | struct page **pages, unsigned int gup_flags) | |
acc3c8d1 | 2694 | { |
b2a72dff | 2695 | int locked = 0; |
acc3c8d1 | 2696 | |
b2cac248 | 2697 | if (!is_valid_gup_args(pages, NULL, &gup_flags, |
f04740f5 | 2698 | FOLL_TOUCH | FOLL_UNLOCKABLE)) |
d64e2dbc JG |
2699 | return -EINVAL; |
2700 | ||
afa3c33e | 2701 | return __get_user_pages_locked(current->mm, start, nr_pages, pages, |
b2cac248 | 2702 | &locked, gup_flags); |
4bbd4c77 | 2703 | } |
d3649f68 | 2704 | EXPORT_SYMBOL(get_user_pages_unlocked); |
2667f50e SC |
2705 | |
2706 | /* | |
23babe19 | 2707 | * GUP-fast |
2667f50e SC |
2708 | * |
2709 | * get_user_pages_fast attempts to pin user pages by walking the page | |
2710 | * tables directly and avoids taking locks. Thus the walker needs to be | |
2711 | * protected from page table pages being freed from under it, and should | |
2712 | * block any THP splits. | |
2713 | * | |
2714 | * One way to achieve this is to have the walker disable interrupts, and | |
2715 | * rely on IPIs from the TLB flushing code blocking before the page table | |
2716 | * pages are freed. This is unsuitable for architectures that do not need | |
2717 | * to broadcast an IPI when invalidating TLBs. | |
2718 | * | |
2719 | * Another way to achieve this is to batch up the pages containing page tables |
2720 | * belonging to more than one mm_user, then rcu_sched a callback to free those | |
23babe19 | 2721 | * pages. Disabling interrupts will allow the gup_fast() walker to both block |
2667f50e SC |
2722 | * the rcu_sched callback, and an IPI that we broadcast for splitting THPs |
2723 | * (which is a relatively rare event). The code below adopts this strategy. | |
2724 | * | |
2725 | * Before activating this code, please be aware that the following assumptions | |
2726 | * are currently made: | |
2727 | * | |
ff2e6d72 | 2728 | * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to |
e585513b | 2729 | * free pages containing page tables, or TLB flushing requires IPI broadcast. |
2667f50e | 2730 | * |
2667f50e SC |
2731 | * *) ptes can be read atomically by the architecture. |
2732 | * | |
c2661f5f | 2733 | * *) valid user addresses are below TASK_SIZE_MAX
2667f50e SC |
2734 | * |
2735 | * The last two assumptions can be relaxed by the addition of helper functions. | |
2736 | * | |
2737 | * This code is based heavily on the PowerPC implementation by Nick Piggin. | |
2738 | */ | |
25176ad0 | 2739 | #ifdef CONFIG_HAVE_GUP_FAST |
a6e79df9 | 2740 | /* |
f002882c DH |
2741 | * Used in the GUP-fast path to determine whether GUP is permitted to work on |
2742 | * a specific folio. | |
a6e79df9 LS |
2743 | * |
2744 | * This call assumes the caller has pinned the folio, that the lowest page table | |
2745 | * level still points to this folio, and that interrupts have been disabled. | |
2746 | * | |
f002882c DH |
2747 | * GUP-fast must reject all secretmem folios. |
2748 | * | |
a6e79df9 LS |
2749 | * Writing to pinned file-backed dirty tracked folios is inherently problematic |
2750 | * (see comment describing the writable_file_mapping_allowed() function). We | |
2751 | * therefore try to avoid the most egregious case of a long-term mapping doing | |
2752 | * so. | |
2753 | * | |
2754 | * This function cannot be as thorough as that one, because the VMA is not |
2755 | * available in the fast path; instead we whitelist known-good cases and, if |
2756 | * in doubt, fall back to the slow path. |
2757 | */ | |
f002882c | 2758 | static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags) |
a6e79df9 | 2759 | { |
f002882c | 2760 | bool reject_file_backed = false; |
a6e79df9 | 2761 | struct address_space *mapping; |
f002882c | 2762 | bool check_secretmem = false; |
a6e79df9 LS |
2763 | unsigned long mapping_flags; |
2764 | ||
2765 | /* | |
2766 | * If we aren't pinning then no problematic write can occur. A long term | |
2767 | * pin is the most egregious case so this is the one we disallow. | |
2768 | */ | |
f002882c | 2769 | if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) == |
a6e79df9 | 2770 | (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) |
f002882c DH |
2771 | reject_file_backed = true; |
2772 | ||
2773 | /* We hold a folio reference, so we can safely access folio fields. */ | |
a6e79df9 | 2774 | |
f002882c DH |
2775 | /* secretmem folios are always order-0 folios. */ |
2776 | if (IS_ENABLED(CONFIG_SECRETMEM) && !folio_test_large(folio)) | |
2777 | check_secretmem = true; | |
2778 | ||
2779 | if (!reject_file_backed && !check_secretmem) | |
2780 | return true; | |
a6e79df9 LS |
2781 | |
2782 | if (WARN_ON_ONCE(folio_test_slab(folio))) | |
2783 | return false; | |
2784 | ||
f002882c | 2785 | /* hugetlb neither requires dirty-tracking nor can be secretmem. */ |
a6e79df9 LS |
2786 | if (folio_test_hugetlb(folio)) |
2787 | return true; | |
2788 | ||
2789 | /* | |
2790 | * GUP-fast disables IRQs. When IRQs are disabled, RCU grace periods |
2791 | * cannot proceed, which means no actions performed under RCU can | |
2792 | * proceed either. | |
2793 | * | |
2794 | * inodes and thus their mappings are freed under RCU, which means the | |
2795 | * mapping cannot be freed beneath us and thus we can safely dereference | |
2796 | * it. | |
2797 | */ | |
2798 | lockdep_assert_irqs_disabled(); | |
2799 | ||
2800 | /* | |
2801 | * However, there may be operations which _alter_ the mapping, so ensure | |
2802 | * we read it once and only once. | |
2803 | */ | |
2804 | mapping = READ_ONCE(folio->mapping); | |
2805 | ||
2806 | /* | |
2807 | * The mapping may have been truncated, in any case we cannot determine | |
2808 | * if this mapping is safe - fall back to slow path to determine how to | |
2809 | * proceed. | |
2810 | */ | |
2811 | if (!mapping) | |
2812 | return false; | |
2813 | ||
2814 | /* Anonymous folios pose no problem. */ | |
df25569d | 2815 | mapping_flags = (unsigned long)mapping & FOLIO_MAPPING_FLAGS; |
a6e79df9 | 2816 | if (mapping_flags) |
df25569d | 2817 | return mapping_flags & FOLIO_MAPPING_ANON; |
a6e79df9 LS |
2818 | |
2819 | /* | |
2820 | * At this point, we know the mapping is non-null and points to an | |
f002882c | 2821 | * address_space object. |
a6e79df9 | 2822 | */ |
f002882c DH |
2823 | if (check_secretmem && secretmem_mapping(mapping)) |
2824 | return false; | |
2825 | /* The only remaining allowed file system is shmem. */ | |
2826 | return !reject_file_backed || shmem_mapping(mapping); | |
a6e79df9 LS |
2827 | } |
2828 | ||
23babe19 DH |
2829 | static void __maybe_unused gup_fast_undo_dev_pagemap(int *nr, int nr_start, |
2830 | unsigned int flags, struct page **pages) | |
b59f65fa KS |
2831 | { |
2832 | while ((*nr) - nr_start) { | |
9cbe4954 | 2833 | struct folio *folio = page_folio(pages[--(*nr)]); |
b59f65fa | 2834 | |
9cbe4954 MWO |
2835 | folio_clear_referenced(folio); |
2836 | gup_put_folio(folio, 1, flags); | |
b59f65fa KS |
2837 | } |
2838 | } | |
2839 | ||
3010a5ea | 2840 | #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL |
70cbc3cc | 2841 | /* |
23babe19 | 2842 | * GUP-fast relies on pte change detection to avoid concurrent pgtable |
70cbc3cc YS |
2843 | * operations. |
2844 | * | |
23babe19 | 2845 | * To pin the page, GUP-fast needs to do below in order: |
70cbc3cc YS |
2846 | * (1) pin the page (by prefetching pte), then (2) check pte not changed. |
2847 | * | |
2848 | * For the rest of pgtable operations where pgtable updates can be racy | |
23babe19 | 2849 | * with GUP-fast, we need to do (1) clear pte, then (2) check whether page |
70cbc3cc YS |
2850 | * is pinned. |
2851 | * | |
2852 | * Above will work for all pte-level operations, including THP split. | |
2853 | * | |
23babe19 | 2854 | * For THP collapse, it's a bit more complicated because GUP-fast may be |
70cbc3cc YS |
2855 | * walking a pgtable page that is being freed (pte is still valid but pmd |
2856 | * can be cleared already). To avoid race in such condition, we need to | |
2857 | * also check pmd here to make sure pmd doesn't change (corresponds to | |
2858 | * pmdp_collapse_flush() in the THP collapse code path). | |
2859 | */ | |
23babe19 DH |
2860 | static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, |
2861 | unsigned long end, unsigned int flags, struct page **pages, | |
2862 | int *nr) | |
2667f50e | 2863 | { |
b59f65fa | 2864 | struct dev_pagemap *pgmap = NULL; |
fd2825b0 | 2865 | int ret = 0; |
2667f50e | 2866 | pte_t *ptep, *ptem; |
2667f50e SC |
2867 | |
2868 | ptem = ptep = pte_offset_map(&pmd, addr); | |
04dee9e8 HD |
2869 | if (!ptep) |
2870 | return 0; | |
2667f50e | 2871 | do { |
2a4a06da | 2872 | pte_t pte = ptep_get_lockless(ptep); |
b0496fe4 MWO |
2873 | struct page *page; |
2874 | struct folio *folio; | |
2667f50e | 2875 | |
d74943a2 DH |
2876 | /* |
2877 | * Always fallback to ordinary GUP on PROT_NONE-mapped pages: | |
2878 | * pte_access_permitted() better should reject these pages | |
2879 | * either way: otherwise, GUP-fast might succeed in | |
2880 | * cases where ordinary GUP would fail due to VMA access | |
2881 | * permissions. | |
2882 | */ | |
2883 | if (pte_protnone(pte)) | |
e7884f8e KS |
2884 | goto pte_unmap; |
2885 | ||
b798bec4 | 2886 | if (!pte_access_permitted(pte, flags & FOLL_WRITE)) |
e7884f8e KS |
2887 | goto pte_unmap; |
2888 | ||
fd2825b0 | 2889 | if (pte_special(pte)) |
2667f50e SC |
2890 | goto pte_unmap; |
2891 | ||
792b429d DH |
2892 | /* If it's not marked as special it must have a valid memmap. */ |
2893 | VM_WARN_ON_ONCE(!pfn_valid(pte_pfn(pte))); | |
2667f50e SC |
2894 | page = pte_page(pte); |
2895 | ||
f442fa61 | 2896 | folio = try_grab_folio_fast(page, 1, flags); |
b0496fe4 | 2897 | if (!folio) |
2667f50e SC |
2898 | goto pte_unmap; |
2899 | ||
70cbc3cc | 2900 | if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || |
c33c7948 | 2901 | unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { |
b0496fe4 | 2902 | gup_put_folio(folio, 1, flags); |
2667f50e SC |
2903 | goto pte_unmap; |
2904 | } | |
2905 | ||
f002882c | 2906 | if (!gup_fast_folio_allowed(folio, flags)) { |
b0496fe4 | 2907 | gup_put_folio(folio, 1, flags); |
2667f50e SC |
2908 | goto pte_unmap; |
2909 | } | |
2910 | ||
84209e87 | 2911 | if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) { |
a7f22660 DH |
2912 | gup_put_folio(folio, 1, flags); |
2913 | goto pte_unmap; | |
2914 | } | |
2915 | ||
f28d4363 CI |
2916 | /* |
2917 | * We need to make the page accessible if and only if we are | |
2918 | * going to access its content (the FOLL_PIN case). Please | |
2919 | * see Documentation/core-api/pin_user_pages.rst for | |
2920 | * details. | |
2921 | */ | |
2922 | if (flags & FOLL_PIN) { | |
b967c648 | 2923 | ret = arch_make_folio_accessible(folio); |
f28d4363 | 2924 | if (ret) { |
b0496fe4 | 2925 | gup_put_folio(folio, 1, flags); |
f28d4363 CI |
2926 | goto pte_unmap; |
2927 | } | |
2928 | } | |
b0496fe4 | 2929 | folio_set_referenced(folio); |
2667f50e SC |
2930 | pages[*nr] = page; |
2931 | (*nr)++; | |
2667f50e SC |
2932 | } while (ptep++, addr += PAGE_SIZE, addr != end); |
2933 | ||
2934 | ret = 1; | |
2935 | ||
2936 | pte_unmap: | |
832d7aa0 CH |
2937 | if (pgmap) |
2938 | put_dev_pagemap(pgmap); | |
2667f50e SC |
2939 | pte_unmap(ptem); |
2940 | return ret; | |
2941 | } | |
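/*
 * Ordering sketch (illustrative restatement of the comment above
 * gup_fast_pte_range(), not additional rules):
 *
 *	GUP-fast				racing pgtable update
 *	--------				---------------------
 *	(1) pin the page			(1) clear the pte (and/or pmd,
 *						    e.g. pmdp_collapse_flush())
 *	(2) recheck pte and pmd;		(2) check whether the page is
 *	    if either changed, drop the pin	    pinned and act accordingly
 */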
2942 | #else | |
2943 | ||
2944 | /* | |
2945 | * If we can't determine whether or not a pte is special, then fail immediately | |
2946 | * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not | |
2947 | * to be special. | |
2948 | * | |
2949 | * For a futex to be placed on a THP tail page, get_futex_key requires a | |
dadbb612 | 2950 | * get_user_pages_fast_only implementation that can pin pages. Thus it's still |
23babe19 | 2951 | * useful to have gup_fast_pmd_leaf even if we can't operate on ptes. |
2667f50e | 2952 | */ |
23babe19 DH |
2953 | static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, |
2954 | unsigned long end, unsigned int flags, struct page **pages, | |
2955 | int *nr) | |
2667f50e SC |
2956 | { |
2957 | return 0; | |
2958 | } | |
3010a5ea | 2959 | #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ |
2667f50e | 2960 | |
23babe19 DH |
2961 | static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr, |
2962 | unsigned long end, unsigned int flags, struct page **pages, | |
2963 | int *nr) | |
2667f50e | 2964 | { |
667ed1f7 MWO |
2965 | struct page *page; |
2966 | struct folio *folio; | |
2667f50e SC |
2967 | int refs; |
2968 | ||
b798bec4 | 2969 | if (!pmd_access_permitted(orig, flags & FOLL_WRITE)) |
2667f50e SC |
2970 | return 0; |
2971 | ||
ae3c99e6 PX |
2972 | if (pmd_special(orig)) |
2973 | return 0; | |
2974 | ||
f3c94c62 PX |
2975 | page = pmd_page(orig); |
2976 | refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr); | |
2667f50e | 2977 | |
f442fa61 | 2978 | folio = try_grab_folio_fast(page, refs, flags); |
667ed1f7 | 2979 | if (!folio) |
2667f50e | 2980 | return 0; |
2667f50e SC |
2981 | |
2982 | if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { | |
667ed1f7 | 2983 | gup_put_folio(folio, refs, flags); |
2667f50e SC |
2984 | return 0; |
2985 | } | |
2986 | ||
f002882c | 2987 | if (!gup_fast_folio_allowed(folio, flags)) { |
a6e79df9 LS |
2988 | gup_put_folio(folio, refs, flags); |
2989 | return 0; | |
2990 | } | |
84209e87 | 2991 | if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { |
a7f22660 DH |
2992 | gup_put_folio(folio, refs, flags); |
2993 | return 0; | |
2994 | } | |
2995 | ||
a43e9820 | 2996 | *nr += refs; |
667ed1f7 | 2997 | folio_set_referenced(folio); |
2667f50e SC |
2998 | return 1; |
2999 | } | |
3000 | ||
23babe19 DH |
3001 | static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr, |
3002 | unsigned long end, unsigned int flags, struct page **pages, | |
3003 | int *nr) | |
2667f50e | 3004 | { |
83afb52e MWO |
3005 | struct page *page; |
3006 | struct folio *folio; | |
2667f50e SC |
3007 | int refs; |
3008 | ||
b798bec4 | 3009 | if (!pud_access_permitted(orig, flags & FOLL_WRITE)) |
2667f50e SC |
3010 | return 0; |
3011 | ||
ae3c99e6 PX |
3012 | if (pud_special(orig)) |
3013 | return 0; | |
3014 | ||
f3c94c62 PX |
3015 | page = pud_page(orig); |
3016 | refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr); | |
2667f50e | 3017 | |
f442fa61 | 3018 | folio = try_grab_folio_fast(page, refs, flags); |
83afb52e | 3019 | if (!folio) |
2667f50e | 3020 | return 0; |
2667f50e SC |
3021 | |
3022 | if (unlikely(pud_val(orig) != pud_val(*pudp))) { | |
83afb52e | 3023 | gup_put_folio(folio, refs, flags); |
2667f50e SC |
3024 | return 0; |
3025 | } | |
3026 | ||
f002882c | 3027 | if (!gup_fast_folio_allowed(folio, flags)) { |
a6e79df9 LS |
3028 | gup_put_folio(folio, refs, flags); |
3029 | return 0; | |
3030 | } | |
3031 | ||
84209e87 | 3032 | if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { |
a7f22660 DH |
3033 | gup_put_folio(folio, refs, flags); |
3034 | return 0; | |
3035 | } | |
3036 | ||
a43e9820 | 3037 | *nr += refs; |
83afb52e | 3038 | folio_set_referenced(folio); |
2667f50e SC |
3039 | return 1; |
3040 | } | |
3041 | ||
23babe19 DH |
3042 | static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, |
3043 | unsigned long end, unsigned int flags, struct page **pages, | |
3044 | int *nr) | |
2667f50e SC |
3045 | { |
3046 | unsigned long next; | |
3047 | pmd_t *pmdp; | |
3048 | ||
d3f7b1bb | 3049 | pmdp = pmd_offset_lockless(pudp, pud, addr); |
2667f50e | 3050 | do { |
1180e732 | 3051 | pmd_t pmd = pmdp_get_lockless(pmdp); |
2667f50e SC |
3052 | |
3053 | next = pmd_addr_end(addr, end); | |
84c3fc4e | 3054 | if (!pmd_present(pmd)) |
2667f50e SC |
3055 | return 0; |
3056 | ||
7db86dc3 | 3057 | if (unlikely(pmd_leaf(pmd))) { |
23babe19 | 3058 | /* See gup_fast_pte_range() */ |
d74943a2 | 3059 | if (pmd_protnone(pmd)) |
2667f50e SC |
3060 | return 0; |
3061 | ||
23babe19 | 3062 | if (!gup_fast_pmd_leaf(pmd, pmdp, addr, next, flags, |
2667f50e SC |
3063 | pages, nr)) |
3064 | return 0; | |
3065 | ||
23babe19 DH |
3066 | } else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags, |
3067 | pages, nr)) | |
2923117b | 3068 | return 0; |
2667f50e SC |
3069 | } while (pmdp++, addr = next, addr != end); |
3070 | ||
3071 | return 1; | |
3072 | } | |
3073 | ||
23babe19 DH |
3074 | static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, |
3075 | unsigned long end, unsigned int flags, struct page **pages, | |
3076 | int *nr) | |
2667f50e SC |
3077 | { |
3078 | unsigned long next; | |
3079 | pud_t *pudp; | |
3080 | ||
d3f7b1bb | 3081 | pudp = pud_offset_lockless(p4dp, p4d, addr); |
2667f50e | 3082 | do { |
e37c6982 | 3083 | pud_t pud = READ_ONCE(*pudp); |
2667f50e SC |
3084 | |
3085 | next = pud_addr_end(addr, end); | |
15494520 | 3086 | if (unlikely(!pud_present(pud))) |
2667f50e | 3087 | return 0; |
7db86dc3 | 3088 | if (unlikely(pud_leaf(pud))) { |
23babe19 DH |
3089 | if (!gup_fast_pud_leaf(pud, pudp, addr, next, flags, |
3090 | pages, nr)) | |
f30c59e9 | 3091 | return 0; |
23babe19 DH |
3092 | } else if (!gup_fast_pmd_range(pudp, pud, addr, next, flags, |
3093 | pages, nr)) | |
2667f50e SC |
3094 | return 0; |
3095 | } while (pudp++, addr = next, addr != end); | |
3096 | ||
3097 | return 1; | |
3098 | } | |
3099 | ||
23babe19 DH |
3100 | static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, |
3101 | unsigned long end, unsigned int flags, struct page **pages, | |
3102 | int *nr) | |
c2febafc KS |
3103 | { |
3104 | unsigned long next; | |
3105 | p4d_t *p4dp; | |
3106 | ||
d3f7b1bb | 3107 | p4dp = p4d_offset_lockless(pgdp, pgd, addr); |
c2febafc KS |
3108 | do { |
3109 | p4d_t p4d = READ_ONCE(*p4dp); | |
3110 | ||
3111 | next = p4d_addr_end(addr, end); | |
089f9214 | 3112 | if (!p4d_present(p4d)) |
c2febafc | 3113 | return 0; |
1965e933 | 3114 | BUILD_BUG_ON(p4d_leaf(p4d)); |
8268614b CL |
3115 | if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags, |
3116 | pages, nr)) | |
c2febafc KS |
3117 | return 0; |
3118 | } while (p4dp++, addr = next, addr != end); | |
3119 | ||
3120 | return 1; | |
3121 | } | |
3122 | ||
23babe19 | 3123 | static void gup_fast_pgd_range(unsigned long addr, unsigned long end, |
b798bec4 | 3124 | unsigned int flags, struct page **pages, int *nr) |
5b65c467 KS |
3125 | { |
3126 | unsigned long next; | |
3127 | pgd_t *pgdp; | |
3128 | ||
3129 | pgdp = pgd_offset(current->mm, addr); | |
3130 | do { | |
3131 | pgd_t pgd = READ_ONCE(*pgdp); | |
3132 | ||
3133 | next = pgd_addr_end(addr, end); | |
3134 | if (pgd_none(pgd)) | |
3135 | return; | |
339122ab BH |
3136 | BUILD_BUG_ON(pgd_leaf(pgd)); |
3137 | if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags, | |
3138 | pages, nr)) | |
5b65c467 KS |
3139 | return; |
3140 | } while (pgdp++, addr = next, addr != end); | |
3141 | } | |
050a9adc | 3142 | #else |
23babe19 | 3143 | static inline void gup_fast_pgd_range(unsigned long addr, unsigned long end, |
050a9adc CH |
3144 | unsigned int flags, struct page **pages, int *nr) |
3145 | { | |
3146 | } | |
25176ad0 | 3147 | #endif /* CONFIG_HAVE_GUP_FAST */ |
5b65c467 KS |
3148 | |
3149 | #ifndef gup_fast_permitted | |
3150 | /* | |
dadbb612 | 3151 | * Check if it's allowed to use get_user_pages_fast_only() for the range, or |
5b65c467 KS |
3152 | * we need to fall back to the slow version: |
3153 | */ | |
26f4c328 | 3154 | static bool gup_fast_permitted(unsigned long start, unsigned long end) |
5b65c467 | 3155 | { |
26f4c328 | 3156 | return true; |
5b65c467 KS |
3157 | } |
3158 | #endif | |
3159 | ||
23babe19 DH |
3160 | static unsigned long gup_fast(unsigned long start, unsigned long end, |
3161 | unsigned int gup_flags, struct page **pages) | |
c28b1fc7 JG |
3162 | { |
3163 | unsigned long flags; | |
3164 | int nr_pinned = 0; | |
57efa1fe | 3165 | unsigned seq; |
c28b1fc7 | 3166 | |
25176ad0 | 3167 | if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) || |
c28b1fc7 JG |
3168 | !gup_fast_permitted(start, end)) |
3169 | return 0; | |
3170 | ||
57efa1fe | 3171 | if (gup_flags & FOLL_PIN) { |
75285852 | 3172 | if (!raw_seqcount_try_begin(¤t->mm->write_protect_seq, seq)) |
57efa1fe JG |
3173 | return 0; |
3174 | } | |
3175 | ||
c28b1fc7 JG |
3176 | /* |
3177 | * Disable interrupts. The nested form is used in order to allow full, |
3178 | * general-purpose use of this routine. |
3179 | * | |
3180 | * With interrupts disabled, we block page table pages from being freed | |
3181 | * from under us. See struct mmu_table_batch comments in | |
3182 | * include/asm-generic/tlb.h for more details. | |
3183 | * | |
3184 | * We do not adopt an rcu_read_lock() here as we also want to block IPIs | |
52084f25 | 3185 | * that come from callers of tlb_remove_table_sync_one(). |
c28b1fc7 JG |
3186 | */ |
3187 | local_irq_save(flags); | |
23babe19 | 3188 | gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned); |
c28b1fc7 | 3189 | local_irq_restore(flags); |
57efa1fe JG |
3190 | |
3191 | /* | |
3192 | * When pinning pages for DMA there could be a concurrent write protect | |
23babe19 | 3193 | * from fork() via copy_page_range(); in this case, always fail GUP-fast. |
57efa1fe JG |
3194 | */ |
3195 | if (gup_flags & FOLL_PIN) { | |
3196 | if (read_seqcount_retry(¤t->mm->write_protect_seq, seq)) { | |
23babe19 | 3197 | gup_fast_unpin_user_pages(pages, nr_pinned); |
57efa1fe | 3198 | return 0; |
b6a2619c DH |
3199 | } else { |
3200 | sanity_check_pinned_pages(pages, nr_pinned); | |
57efa1fe JG |
3201 | } |
3202 | } | |
c28b1fc7 JG |
3203 | return nr_pinned; |
3204 | } | |
3205 | ||
23babe19 DH |
3206 | static int gup_fast_fallback(unsigned long start, unsigned long nr_pages, |
3207 | unsigned int gup_flags, struct page **pages) | |
2667f50e | 3208 | { |
c28b1fc7 JG |
3209 | unsigned long len, end; |
3210 | unsigned long nr_pinned; | |
b2a72dff | 3211 | int locked = 0; |
c28b1fc7 | 3212 | int ret; |
2667f50e | 3213 | |
f4000fdf | 3214 | if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM | |
376a34ef | 3215 | FOLL_FORCE | FOLL_PIN | FOLL_GET | |
4003f107 | 3216 | FOLL_FAST_ONLY | FOLL_NOFAULT | |
d74943a2 | 3217 | FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT))) |
817be129 CH |
3218 | return -EINVAL; |
3219 | ||
a458b76a AA |
3220 | if (gup_flags & FOLL_PIN) |
3221 | mm_set_has_pinned_flag(¤t->mm->flags); | |
008cfe44 | 3222 | |
f81cd178 | 3223 | if (!(gup_flags & FOLL_FAST_ONLY)) |
da1c55f1 | 3224 | might_lock_read(¤t->mm->mmap_lock); |
f81cd178 | 3225 | |
f455c854 | 3226 | start = untagged_addr(start) & PAGE_MASK; |
c28b1fc7 JG |
3227 | len = nr_pages << PAGE_SHIFT; |
3228 | if (check_add_overflow(start, len, &end)) | |
9883c7f8 | 3229 | return -EOVERFLOW; |
6014bc27 LT |
3230 | if (end > TASK_SIZE_MAX) |
3231 | return -EFAULT; | |
73e10a61 | 3232 | |
23babe19 | 3233 | nr_pinned = gup_fast(start, end, gup_flags, pages); |
c28b1fc7 JG |
3234 | if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY) |
3235 | return nr_pinned; | |
2667f50e | 3236 | |
c28b1fc7 JG |
3237 | /* Slow path: try to get the remaining pages with get_user_pages */ |
3238 | start += nr_pinned << PAGE_SHIFT; | |
3239 | pages += nr_pinned; | |
b2a72dff | 3240 | ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned, |
b2cac248 | 3241 | pages, &locked, |
f04740f5 | 3242 | gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE); |
c28b1fc7 JG |
3243 | if (ret < 0) { |
3244 | /* | |
3245 | * The caller has to unpin the pages we already pinned, so |
3246 | * returning -errno is not an option. |
3247 | */ | |
3248 | if (nr_pinned) | |
3249 | return nr_pinned; | |
3250 | return ret; | |
2667f50e | 3251 | } |
c28b1fc7 | 3252 | return ret + nr_pinned; |
2667f50e | 3253 | } |
c28b1fc7 | 3254 | |
dadbb612 SJ |
3255 | /** |
3256 | * get_user_pages_fast_only() - pin user pages in memory | |
3257 | * @start: starting user address | |
3258 | * @nr_pages: number of pages from start to pin | |
3259 | * @gup_flags: flags modifying pin behaviour | |
3260 | * @pages: array that receives pointers to the pages pinned. | |
3261 | * Should be at least nr_pages long. | |
3262 | * | |
9e1f0580 JH |
3263 | * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to |
3264 | * the regular GUP. | |
9e1f0580 JH |
3265 | * |
3266 | * If the architecture does not support this function, simply return with no | |
3267 | * pages pinned. | |
3268 | * | |
3269 | * Careful, careful! COW breaking can go either way, so a non-write | |
3270 | * access can get ambiguous page results. If you call this function without | |
3271 | * 'write' set, you'd better be sure that you're ok with that ambiguity. | |
3272 | */ | |
dadbb612 SJ |
3273 | int get_user_pages_fast_only(unsigned long start, int nr_pages, |
3274 | unsigned int gup_flags, struct page **pages) | |
9e1f0580 | 3275 | { |
9e1f0580 JH |
3276 | /* |
3277 | * Internally (within mm/gup.c), gup fast variants must set FOLL_GET, | |
3278 | * because gup fast is always a "pin with a +1 page refcount" request. | |
376a34ef JH |
3279 | * |
3280 | * FOLL_FAST_ONLY is required in order to match the API description of | |
3281 | * this routine: no fall back to regular ("slow") GUP. | |
9e1f0580 | 3282 | */ |
b2cac248 | 3283 | if (!is_valid_gup_args(pages, NULL, &gup_flags, |
d64e2dbc JG |
3284 | FOLL_GET | FOLL_FAST_ONLY)) |
3285 | return -EINVAL; | |
9e1f0580 | 3286 | |
23babe19 | 3287 | return gup_fast_fallback(start, nr_pages, gup_flags, pages); |
9e1f0580 | 3288 | } |
dadbb612 | 3289 | EXPORT_SYMBOL_GPL(get_user_pages_fast_only); |
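/*
 * Usage sketch (illustrative only, hypothetical caller): an opportunistic
 * pin from a context that must not sleep or take mmap_lock. On failure the
 * caller is expected to retry through a sleepable GUP variant:
 *
 *	if (get_user_pages_fast_only(uaddr, 1, FOLL_WRITE, &page) != 1)
 *		return -EAGAIN;
 *	...
 *	put_page(page);
 */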
9e1f0580 | 3290 | |
eddb1c22 JH |
3291 | /** |
3292 | * get_user_pages_fast() - pin user pages in memory | |
3faa52c0 JH |
3293 | * @start: starting user address |
3294 | * @nr_pages: number of pages from start to pin | |
3295 | * @gup_flags: flags modifying pin behaviour | |
3296 | * @pages: array that receives pointers to the pages pinned. | |
3297 | * Should be at least nr_pages long. | |
eddb1c22 | 3298 | * |
c1e8d7c6 | 3299 | * Attempt to pin user pages in memory without taking mm->mmap_lock. |
eddb1c22 JH |
3300 | * If not successful, it will fall back to taking the lock and |
3301 | * calling get_user_pages(). | |
3302 | * | |
3303 | * Returns number of pages pinned. This may be fewer than the number requested. | |
3304 | * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns | |
3305 | * -errno. | |
3306 | */ | |
3307 | int get_user_pages_fast(unsigned long start, int nr_pages, | |
3308 | unsigned int gup_flags, struct page **pages) | |
3309 | { | |
94202f12 JH |
3310 | /* |
3311 | * The caller may or may not have explicitly set FOLL_GET; either way is | |
3312 | * OK. However, internally (within mm/gup.c), gup fast variants must set | |
3313 | * FOLL_GET, because gup fast is always a "pin with a +1 page refcount" | |
3314 | * request. | |
3315 | */ | |
b2cac248 | 3316 | if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET)) |
d64e2dbc | 3317 | return -EINVAL; |
23babe19 | 3318 | return gup_fast_fallback(start, nr_pages, gup_flags, pages); |
eddb1c22 | 3319 | } |
050a9adc | 3320 | EXPORT_SYMBOL_GPL(get_user_pages_fast); |
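/*
 * Usage sketch (illustrative only, hypothetical names): unlike
 * get_user_pages(), no mmap_lock is taken at the call site, and the
 * function may sleep while falling back to the slow path:
 *
 *	got = get_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);
 *	if (got < 0)
 *		return got;
 *	...
 *	while (got--)
 *		put_page(pages[got]);
 */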
eddb1c22 JH |
3321 | |
3322 | /** | |
3323 | * pin_user_pages_fast() - pin user pages in memory without taking locks | |
3324 | * | |
3faa52c0 JH |
3325 | * @start: starting user address |
3326 | * @nr_pages: number of pages from start to pin | |
3327 | * @gup_flags: flags modifying pin behaviour | |
3328 | * @pages: array that receives pointers to the pages pinned. | |
3329 | * Should be at least nr_pages long. | |
3330 | * | |
3331 | * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See | |
3332 | * get_user_pages_fast() for documentation on the function arguments, because | |
3333 | * the arguments here are identical. | |
3334 | * | |
3335 | * FOLL_PIN means that the pages must be released via unpin_user_page(). Please | |
72ef5e52 | 3336 | * see Documentation/core-api/pin_user_pages.rst for further details. |
c8070b78 DH |
3337 | * |
3338 | * Note that if a zero_page is amongst the returned pages, it will not have | |
3339 | * pins in it and unpin_user_page() will not remove pins from it. | |
eddb1c22 JH |
3340 | */ |
3341 | int pin_user_pages_fast(unsigned long start, int nr_pages, | |
3342 | unsigned int gup_flags, struct page **pages) | |
3343 | { | |
b2cac248 | 3344 | if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) |
3faa52c0 | 3345 | return -EINVAL; |
23babe19 | 3346 | return gup_fast_fallback(start, nr_pages, gup_flags, pages); |
eddb1c22 JH |
3347 | } |
3348 | EXPORT_SYMBOL_GPL(pin_user_pages_fast); | |
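/*
 * Usage sketch (illustrative only, hypothetical names): a typical
 * long-term DMA pin. Pages pinned with FOLL_PIN must be released with
 * unpin_user_page()/unpin_user_pages(), never with put_page():
 *
 *	got = pin_user_pages_fast(uaddr, nr, FOLL_WRITE | FOLL_LONGTERM, pages);
 *	if (got < 0)
 *		return got;
 *	...
 *	unpin_user_pages(pages, got);
 */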
3349 | ||
3350 | /** | |
64019a2e | 3351 | * pin_user_pages_remote() - pin pages of a remote process |
eddb1c22 | 3352 | * |
3faa52c0 JH |
3353 | * @mm: mm_struct of target mm |
3354 | * @start: starting user address | |
3355 | * @nr_pages: number of pages from start to pin | |
3356 | * @gup_flags: flags modifying lookup behaviour | |
3357 | * @pages: array that receives pointers to the pages pinned. | |
0768c8de | 3358 | * Should be at least nr_pages long. |
3faa52c0 JH |
3359 | * @locked: pointer to lock flag indicating whether lock is held and |
3360 | * subsequently whether VM_FAULT_RETRY functionality can be | |
3361 | * utilised. Lock must initially be held. | |
3362 | * | |
3363 | * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See | |
3364 | * get_user_pages_remote() for documentation on the function arguments, because | |
3365 | * the arguments here are identical. | |
3366 | * | |
3367 | * FOLL_PIN means that the pages must be released via unpin_user_page(). Please | |
72ef5e52 | 3368 | * see Documentation/core-api/pin_user_pages.rst for details. |
c8070b78 DH |
3369 | * |
3370 | * Note that if a zero_page is amongst the returned pages, it will not have | |
3371 | * pins in it and unpin_user_page*() will not remove pins from it. | |
eddb1c22 | 3372 | */ |
64019a2e | 3373 | long pin_user_pages_remote(struct mm_struct *mm, |
eddb1c22 JH |
3374 | unsigned long start, unsigned long nr_pages, |
3375 | unsigned int gup_flags, struct page **pages, | |
0b295316 | 3376 | int *locked) |
eddb1c22 | 3377 | { |
9a863a6a JG |
3378 | int local_locked = 1; |
3379 | ||
b2cac248 | 3380 | if (!is_valid_gup_args(pages, locked, &gup_flags, |
d64e2dbc JG |
3381 | FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE)) |
3382 | return 0; | |
b2cac248 | 3383 | return __gup_longterm_locked(mm, start, nr_pages, pages, |
9a863a6a | 3384 | locked ? locked : &local_locked, |
d64e2dbc | 3385 | gup_flags); |
eddb1c22 JH |
3386 | } |
3387 | EXPORT_SYMBOL(pin_user_pages_remote); | |
3388 | ||
3389 | /** | |
3390 | * pin_user_pages() - pin user pages in memory for use by other devices | |
3391 | * | |
3faa52c0 JH |
3392 | * @start: starting user address |
3393 | * @nr_pages: number of pages from start to pin | |
3394 | * @gup_flags: flags modifying lookup behaviour | |
3395 | * @pages: array that receives pointers to the pages pinned. | |
0768c8de | 3396 | * Should be at least nr_pages long. |
3faa52c0 JH |
3397 | * |
3398 | * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and | |
3399 | * FOLL_PIN is set. | |
3400 | * | |
3401 | * FOLL_PIN means that the pages must be released via unpin_user_page(). Please | |
72ef5e52 | 3402 | * see Documentation/core-api/pin_user_pages.rst for details. |
c8070b78 DH |
3403 | * |
3404 | * Note that if a zero_page is amongst the returned pages, it will not have | |
3405 | * pins in it and unpin_user_page*() will not remove pins from it. | |
eddb1c22 JH |
3406 | */ |
3407 | long pin_user_pages(unsigned long start, unsigned long nr_pages, | |
4c630f30 | 3408 | unsigned int gup_flags, struct page **pages) |
eddb1c22 | 3409 | { |
9a863a6a JG |
3410 | int locked = 1; |
3411 | ||
b2cac248 | 3412 | if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) |
d64e2dbc | 3413 | return 0; |
64019a2e | 3414 | return __gup_longterm_locked(current->mm, start, nr_pages, |
b2cac248 | 3415 | pages, &locked, gup_flags); |
eddb1c22 JH |
3416 | } |
3417 | EXPORT_SYMBOL(pin_user_pages); | |
91429023 JH |
3418 | |
3419 | /* | |
3420 | * pin_user_pages_unlocked() is the FOLL_PIN variant of | |
3421 | * get_user_pages_unlocked(). Behavior is the same, except that this one sets | |
3422 | * FOLL_PIN and rejects FOLL_GET. | |
c8070b78 DH |
3423 | * |
3424 | * Note that if a zero_page is amongst the returned pages, it will not have | |
3425 | * pins in it and unpin_user_page*() will not remove pins from it. | |
91429023 JH |
3426 | */ |
3427 | long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, | |
3428 | struct page **pages, unsigned int gup_flags) | |
3429 | { | |
b2a72dff | 3430 | int locked = 0; |
91429023 | 3431 | |
b2cac248 | 3432 | if (!is_valid_gup_args(pages, NULL, &gup_flags, |
f04740f5 | 3433 | FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE)) |
d64e2dbc | 3434 | return 0; |
0768c8de | 3435 | |
b2cac248 | 3436 | return __gup_longterm_locked(current->mm, start, nr_pages, pages, |
b2a72dff | 3437 | &locked, gup_flags); |
91429023 JH |
3438 | } |
3439 | EXPORT_SYMBOL(pin_user_pages_unlocked); | |
89c1905d VK |
3440 | |
3441 | /** | |
3442 | * memfd_pin_folios() - pin folios associated with a memfd | |
3443 | * @memfd: the memfd whose folios are to be pinned | |
3444 | * @start: the first memfd offset | |
3445 | * @end: the last memfd offset (inclusive) | |
3446 | * @folios: array that receives pointers to the folios pinned | |
3447 | * @max_folios: maximum number of entries in @folios | |
3448 | * @offset: the offset into the first folio | |
3449 | * | |
3450 | * Attempt to pin folios associated with a memfd in the contiguous range | |
3451 | * [start, end]. Given that a memfd is either backed by shmem or hugetlb, | |
3452 | * the folios are either found in the page cache or allocated if |
3453 | * necessary. Once the folios are located, they are all pinned via |
3454 | * FOLL_PIN and @offset is populated with the offset into the first folio. |
3455 | * Eventually, these pinned folios must be released using either |
3456 | * unpin_folios() or unpin_folio(). |
3457 | * | |
3458 | * Note that the folios may remain pinned for an indefinite amount of |
3459 | * time; in most cases the duration is controlled by userspace. This |
3460 | * behavior is effectively the same as using FOLL_LONGTERM with other |
3461 | * GUP APIs. |
3462 | * | |
3463 | * Returns number of folios pinned, which could be less than @max_folios | |
3464 | * as it depends on the folio sizes that cover the range [start, end]. | |
3465 | * If no folios were pinned, it returns -errno. | |
3466 | */ | |
3467 | long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end, | |
3468 | struct folio **folios, unsigned int max_folios, | |
3469 | pgoff_t *offset) | |
3470 | { | |
3471 | unsigned int flags, nr_folios, nr_found; | |
3472 | unsigned int i, pgshift = PAGE_SHIFT; | |
30f62b92 | 3473 | pgoff_t start_idx, end_idx; |
89c1905d VK |
3474 | struct folio *folio = NULL; |
3475 | struct folio_batch fbatch; | |
dc677b5f | 3476 | struct hstate *h; |
89c1905d VK |
3477 | long ret = -EINVAL; |
3478 | ||
3479 | if (start < 0 || start > end || !max_folios) | |
3480 | return -EINVAL; | |
3481 | ||
3482 | if (!memfd) | |
3483 | return -EINVAL; | |
3484 | ||
3485 | if (!shmem_file(memfd) && !is_file_hugepages(memfd)) | |
3486 | return -EINVAL; | |
3487 | ||
3488 | if (end >= i_size_read(file_inode(memfd))) | |
3489 | return -EINVAL; | |
3490 | ||
3491 | if (is_file_hugepages(memfd)) { | |
3492 | h = hstate_file(memfd); | |
3493 | pgshift = huge_page_shift(h); | |
3494 | } | |
3495 | ||
3496 | flags = memalloc_pin_save(); | |
3497 | do { | |
3498 | nr_folios = 0; | |
3499 | start_idx = start >> pgshift; | |
3500 | end_idx = end >> pgshift; | |
3501 | if (is_file_hugepages(memfd)) { | |
3502 | start_idx <<= huge_page_order(h); | |
3503 | end_idx <<= huge_page_order(h); | |
3504 | } | |
3505 | ||
3506 | folio_batch_init(&fbatch); | |
3507 | while (start_idx <= end_idx && nr_folios < max_folios) { | |
3508 | /* | |
3509 | * In most cases, we should be able to find the folios | |
3510 | * in the page cache. If we cannot find them for some | |
3511 | * reason, we try to allocate them and add them to the | |
3512 | * page cache. | |
3513 | */ | |
3514 | nr_found = filemap_get_folios_contig(memfd->f_mapping, | |
3515 | &start_idx, | |
3516 | end_idx, | |
3517 | &fbatch); | |
3518 | if (folio) { | |
3519 | folio_put(folio); | |
3520 | folio = NULL; | |
3521 | } | |
3522 | ||
89c1905d | 3523 | for (i = 0; i < nr_found; i++) { |
fe488d34 | 3524 | folio = fbatch.folios[i]; |
89c1905d VK |
3525 | |
3526 | if (try_grab_folio(folio, 1, FOLL_PIN)) { | |
3527 | folio_batch_release(&fbatch); | |
3528 | ret = -EINVAL; | |
3529 | goto err; | |
3530 | } | |
3531 | ||
3532 | if (nr_folios == 0) | |
3533 | *offset = offset_in_folio(folio, start); | |
3534 | ||
3535 | folios[nr_folios] = folio; | |
89c1905d VK |
3536 | if (++nr_folios == max_folios) |
3537 | break; | |
3538 | } | |
3539 | ||
3540 | folio = NULL; | |
3541 | folio_batch_release(&fbatch); | |
3542 | if (!nr_found) { | |
3543 | folio = memfd_alloc_folio(memfd, start_idx); | |
3544 | if (IS_ERR(folio)) { | |
3545 | ret = PTR_ERR(folio); | |
3546 | if (ret != -EEXIST) | |
3547 | goto err; | |
ce645b9f | 3548 | folio = NULL; |
89c1905d VK |
3549 | } |
3550 | } | |
3551 | } | |
3552 | ||
3553 | ret = check_and_migrate_movable_folios(nr_folios, folios); | |
3554 | } while (ret == -EAGAIN); | |
3555 | ||
3556 | memalloc_pin_restore(flags); | |
3557 | return ret ? ret : nr_folios; | |
3558 | err: | |
3559 | memalloc_pin_restore(flags); | |
3560 | unpin_folios(folios, nr_folios); | |
3561 | ||
3562 | return ret; | |
3563 | } | |
3564 | EXPORT_SYMBOL_GPL(memfd_pin_folios); | |
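/*
 * Usage sketch (illustrative only, hypothetical names, error handling
 * trimmed): pin the folios backing the first @len bytes of a memfd and
 * release them later with unpin_folios(). @offset reports where byte 0
 * of the range falls within folios[0]:
 *
 *	nr = memfd_pin_folios(memfd, 0, len - 1, folios, max_folios, &offset);
 *	if (nr < 0)
 *		return nr;
 *	...
 *	unpin_folios(folios, nr);
 */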
a2ad1b81 SS |
3565 | |
3566 | /** | |
3567 | * folio_add_pins() - add pins to an already-pinned folio | |
3568 | * @folio: the folio to add more pins to | |
3569 | * @pins: number of pins to add | |
3570 | * | |
3571 | * Try to add more pins to an already-pinned folio. The semantics | |
3572 | * of the pin (e.g., FOLL_WRITE) follow any existing pin and cannot | |
3573 | * be changed. | |
3574 | * | |
3575 | * This function is helpful when having obtained a pin on a large folio | |
3576 | * using memfd_pin_folios(), but wanting to logically unpin parts | |
3577 | * (e.g., individual pages) of the folio later, for example, using | |
3578 | * unpin_user_page_range_dirty_lock(). | |
3579 | * | |
3580 | * This is not the right interface to initially pin a folio. | |
3581 | */ | |
3582 | int folio_add_pins(struct folio *folio, unsigned int pins) | |
3583 | { | |
3584 | VM_WARN_ON_ONCE(!folio_maybe_dma_pinned(folio)); | |
3585 | ||
3586 | return try_grab_folio(folio, pins, FOLL_PIN); | |
3587 | } | |
3588 | EXPORT_SYMBOL_GPL(folio_add_pins); |
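/*
 * Usage sketch (illustrative only, hypothetical pattern): convert the
 * single pin taken on a large folio by memfd_pin_folios() into one pin
 * per page, so that pages can later be released individually with
 * unpin_user_page():
 *
 *	if (folio_add_pins(folio, folio_nr_pages(folio) - 1))
 *		goto err;
 *	...
 *	for (i = 0; i < folio_nr_pages(folio); i++)
 *		unpin_user_page(folio_page(folio, i));
 */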