Line | Data |
---|---|
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * Device Memory Migration functionality. | |
4 | * | |
5 | * Originally written by Jérôme Glisse. | |
6 | */ | |
7 | #include <linux/export.h> | |
8 | #include <linux/memremap.h> | |
9 | #include <linux/migrate.h> | |
10 | #include <linux/mm.h>
11 | #include <linux/mm_inline.h> |
12 | #include <linux/mmu_notifier.h> | |
13 | #include <linux/oom.h> | |
14 | #include <linux/pagewalk.h> | |
15 | #include <linux/rmap.h> | |
16 | #include <linux/swapops.h> | |
17 | #include <asm/tlbflush.h> | |
18 | #include "internal.h" | |
19 | ||
20 | static int migrate_vma_collect_skip(unsigned long start, | |
21 | unsigned long end, | |
22 | struct mm_walk *walk) | |
23 | { | |
24 | struct migrate_vma *migrate = walk->private; | |
25 | unsigned long addr; | |
26 | ||
27 | for (addr = start; addr < end; addr += PAGE_SIZE) { | |
28 | migrate->dst[migrate->npages] = 0; | |
29 | migrate->src[migrate->npages++] = 0; | |
30 | } | |
31 | ||
32 | return 0; | |
33 | } | |
34 | ||
35 | static int migrate_vma_collect_hole(unsigned long start, | |
36 | unsigned long end, | |
37 | __always_unused int depth, | |
38 | struct mm_walk *walk) | |
39 | { | |
40 | struct migrate_vma *migrate = walk->private; | |
41 | unsigned long addr; | |
42 | ||
43 | /* Only allow populating anonymous memory. */ | |
44 | if (!vma_is_anonymous(walk->vma)) | |
45 | return migrate_vma_collect_skip(start, end, walk); | |
46 | ||
47 | for (addr = start; addr < end; addr += PAGE_SIZE) { | |
48 | migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; | |
49 | migrate->dst[migrate->npages] = 0; | |
50 | migrate->npages++; | |
51 | migrate->cpages++; | |
52 | } | |
53 | ||
54 | return 0; | |
55 | } | |
56 | ||
57 | static int migrate_vma_collect_pmd(pmd_t *pmdp, | |
58 | unsigned long start, | |
59 | unsigned long end, | |
60 | struct mm_walk *walk) | |
61 | { | |
62 | struct migrate_vma *migrate = walk->private; | |
63 | struct vm_area_struct *vma = walk->vma; | |
64 | struct mm_struct *mm = vma->vm_mm; | |
65 | unsigned long addr = start, unmapped = 0; | |
66 | spinlock_t *ptl; | |
67 | pte_t *ptep; | |
68 | ||
69 | again: | |
70 | if (pmd_none(*pmdp)) | |
71 | return migrate_vma_collect_hole(start, end, -1, walk); | |
72 | ||
73 | if (pmd_trans_huge(*pmdp)) { | |
74 | struct page *page; | |
75 | ||
76 | ptl = pmd_lock(mm, pmdp); | |
77 | if (unlikely(!pmd_trans_huge(*pmdp))) { | |
78 | spin_unlock(ptl); | |
79 | goto again; | |
80 | } | |
81 | ||
82 | page = pmd_page(*pmdp); | |
83 | if (is_huge_zero_page(page)) { | |
84 | spin_unlock(ptl); | |
85 | split_huge_pmd(vma, pmdp, addr); | |
86 | if (pmd_trans_unstable(pmdp)) | |
87 | return migrate_vma_collect_skip(start, end, | |
88 | walk); | |
89 | } else { | |
90 | int ret; | |
91 | ||
92 | get_page(page); | |
93 | spin_unlock(ptl); | |
94 | if (unlikely(!trylock_page(page))) | |
95 | return migrate_vma_collect_skip(start, end, | |
96 | walk); | |
97 | ret = split_huge_page(page); | |
98 | unlock_page(page); | |
99 | put_page(page); | |
100 | if (ret) | |
101 | return migrate_vma_collect_skip(start, end, | |
102 | walk); | |
103 | if (pmd_none(*pmdp)) | |
104 | return migrate_vma_collect_hole(start, end, -1, | |
105 | walk); | |
106 | } | |
107 | } | |
108 | ||
109 | if (unlikely(pmd_bad(*pmdp))) | |
110 | return migrate_vma_collect_skip(start, end, walk); | |
111 | ||
112 | ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); | |
113 | arch_enter_lazy_mmu_mode(); | |
114 | ||
115 | for (; addr < end; addr += PAGE_SIZE, ptep++) { | |
116 | unsigned long mpfn = 0, pfn; | |
117 | struct page *page; | |
118 | swp_entry_t entry; | |
119 | pte_t pte; | |
120 | ||
121 | pte = *ptep; | |
122 | ||
123 | if (pte_none(pte)) { | |
124 | if (vma_is_anonymous(vma)) { | |
125 | mpfn = MIGRATE_PFN_MIGRATE; | |
126 | migrate->cpages++; | |
127 | } | |
128 | goto next; | |
129 | } | |
130 | ||
131 | if (!pte_present(pte)) { | |
132 | /* | |
133 | * Only care about the special page table entries of unaddressable
134 | * device private pages. Other special swap entries are not
135 | * migratable, and we ignore regular swapped pages.
136 | */ | |
137 | entry = pte_to_swp_entry(pte); | |
138 | if (!is_device_private_entry(entry)) | |
139 | goto next; | |
140 | ||
141 | page = pfn_swap_entry_to_page(entry); | |
142 | if (!(migrate->flags & | |
143 | MIGRATE_VMA_SELECT_DEVICE_PRIVATE) || | |
144 | page->pgmap->owner != migrate->pgmap_owner) | |
145 | goto next; | |
146 | ||
147 | mpfn = migrate_pfn(page_to_pfn(page)) | | |
148 | MIGRATE_PFN_MIGRATE; | |
149 | if (is_writable_device_private_entry(entry)) | |
150 | mpfn |= MIGRATE_PFN_WRITE; | |
151 | } else { | |
152 | pfn = pte_pfn(pte);
153 | if (is_zero_pfn(pfn) && |
154 | (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) { | |
155 | mpfn = MIGRATE_PFN_MIGRATE; |
156 | migrate->cpages++; | |
157 | goto next; | |
158 | } | |
159 | page = vm_normal_page(migrate->vma, addr, pte); | |
160 | if (page && !is_zone_device_page(page) && |
161 | !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) | |
162 | goto next; | |
163 | else if (page && is_device_coherent_page(page) && | |
164 | (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) || | |
165 | page->pgmap->owner != migrate->pgmap_owner)) | |
166 | goto next; | |
167 | mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE; |
168 | mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0; | |
169 | } | |
170 | ||
171 | /* FIXME support THP */ | |
172 | if (!page || !page->mapping || PageTransCompound(page)) { | |
173 | mpfn = 0; | |
174 | goto next; | |
175 | } | |
176 | ||
177 | /* | |
178 | * By getting a reference on the page we pin it and that blocks | |
179 | * any kind of migration. Side effect is that it "freezes" the | |
180 | * pte. | |
181 | * | |
182 | * We drop this reference after isolating the page from the lru | |
183 | * for non-device pages (device pages are not on the lru and thus
184 | * can't be dropped from it). | |
185 | */ | |
186 | get_page(page); | |
187 | ||
188 | /* | |
189 | * We rely on trylock_page() to avoid deadlock between |
190 | * concurrent migrations where each is waiting on the other's
191 | * page lock. If we can't immediately lock the page we fail this | |
192 | * migration as it is only best effort anyway. | |
193 | * | |
194 | * If we can lock the page it's safe to set up a migration entry | |
195 | * now. In the common case where the page is mapped once in a | |
196 | * single process setting up the migration entry now is an | |
197 | * optimisation to avoid walking the rmap later with | |
198 | * try_to_migrate(). | |
199 | */ |
200 | if (trylock_page(page)) { | |
201 | bool anon_exclusive;
202 | pte_t swp_pte; |
203 | ||
204 | flush_cache_page(vma, addr, pte_pfn(*ptep));
205 | anon_exclusive = PageAnon(page) && PageAnonExclusive(page); |
206 | if (anon_exclusive) { | |
207 | pte = ptep_clear_flush(vma, addr, ptep);
208 | |
209 | if (page_try_share_anon_rmap(page)) { | |
210 | set_pte_at(mm, addr, ptep, pte); | |
211 | unlock_page(page); | |
212 | put_page(page); | |
213 | mpfn = 0; | |
214 | goto next; | |
215 | } | |
216 | } else { | |
217 | pte = ptep_get_and_clear(mm, addr, ptep);
218 | } |
219 | ||
220 | migrate->cpages++;
221 |
222 | /* Set the dirty flag on the folio now the pte is gone. */ |
223 | if (pte_dirty(pte)) | |
224 | folio_mark_dirty(page_folio(page)); | |
225 | ||
226 | /* Setup special migration page table entry */ |
227 | if (mpfn & MIGRATE_PFN_WRITE) | |
228 | entry = make_writable_migration_entry( | |
229 | page_to_pfn(page)); | |
230 | else if (anon_exclusive) |
231 | entry = make_readable_exclusive_migration_entry( | |
232 | page_to_pfn(page)); | |
233 | else |
234 | entry = make_readable_migration_entry( | |
235 | page_to_pfn(page)); | |
236 | swp_pte = swp_entry_to_pte(entry); | |
237 | if (pte_present(pte)) { | |
238 | if (pte_soft_dirty(pte)) | |
239 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | |
240 | if (pte_uffd_wp(pte)) | |
241 | swp_pte = pte_swp_mkuffd_wp(swp_pte); | |
242 | } else { | |
243 | if (pte_swp_soft_dirty(pte)) | |
244 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | |
245 | if (pte_swp_uffd_wp(pte)) | |
246 | swp_pte = pte_swp_mkuffd_wp(swp_pte); | |
247 | } | |
248 | set_pte_at(mm, addr, ptep, swp_pte); | |
249 | ||
250 | /* | |
251 | * This is like regular unmap: we remove the rmap and | |
252 | * drop page refcount. Page won't be freed, as we took | |
253 | * a reference just above. | |
254 | */ | |
255 | page_remove_rmap(page, vma, false); | |
256 | put_page(page); | |
257 | ||
258 | if (pte_present(pte)) | |
259 | unmapped++; | |
260 | } else { | |
261 | put_page(page); | |
262 | mpfn = 0; | |
263 | } | |
264 | ||
265 | next: | |
266 | migrate->dst[migrate->npages] = 0; | |
267 | migrate->src[migrate->npages++] = mpfn; | |
268 | } | |
269 | |
270 | /* Only flush the TLB if we actually modified any entries */ | |
271 | if (unmapped) | |
272 | flush_tlb_range(walk->vma, start, end); | |
273 | ||
274 | arch_leave_lazy_mmu_mode(); |
275 | pte_unmap_unlock(ptep - 1, ptl); | |
276 | ||
277 | return 0; |
278 | } | |
279 | ||
280 | static const struct mm_walk_ops migrate_vma_walk_ops = { | |
281 | .pmd_entry = migrate_vma_collect_pmd, | |
282 | .pte_hole = migrate_vma_collect_hole, | |
283 | }; | |
284 | ||
285 | /* | |
286 | * migrate_vma_collect() - collect pages over a range of virtual addresses | |
287 | * @migrate: migrate struct containing all migration information | |
288 | * | |
289 | * This will walk the CPU page table. For each virtual address backed by a | |
290 | * valid page, it updates the src array and takes a reference on the page, in | |
291 | * order to pin the page until we lock it and unmap it. | |
292 | */ | |
293 | static void migrate_vma_collect(struct migrate_vma *migrate) | |
294 | { | |
295 | struct mmu_notifier_range range; | |
296 | ||
297 | /* | |
298 | * Note that the pgmap_owner is passed to the mmu notifier callback so | |
299 | * that the registered device driver can skip invalidating device | |
300 | * private page mappings that won't be migrated. | |
301 | */ | |
302 | mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0, | |
303 | migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end, | |
304 | migrate->pgmap_owner); | |
305 | mmu_notifier_invalidate_range_start(&range); | |
306 | ||
307 | walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end, | |
308 | &migrate_vma_walk_ops, migrate); | |
309 | ||
310 | mmu_notifier_invalidate_range_end(&range); | |
311 | migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT); | |
312 | } | |
313 | ||
314 | /* | |
315 | * migrate_vma_check_page() - check if page is pinned or not | |
316 | * @page: struct page to check | |
317 | * | |
318 | * Pinned pages cannot be migrated. This is the same test as in | |
319 | * folio_migrate_mapping(), except that here we allow migration of a | |
320 | * ZONE_DEVICE page. | |
321 | */ | |
322 | static bool migrate_vma_check_page(struct page *page) | |
323 | { | |
324 | /* | |
325 | * One extra ref because caller holds an extra reference, either from | |
326 | * isolate_lru_page() for a regular page, or migrate_vma_collect() for | |
327 | * a device page. | |
328 | */ | |
329 | int extra = 1; | |
330 | ||
331 | /* | |
332 | * FIXME support THP (transparent huge page), it is bit more complex to | |
333 | * check them than regular pages, because they can be mapped with a pmd | |
334 | * or with a pte (split pte mapping). | |
335 | */ | |
336 | if (PageCompound(page)) | |
337 | return false; | |
338 | ||
339 | /* Pages from ZONE_DEVICE have one extra reference */
340 | if (is_zone_device_page(page)) | |
341 | extra++; | |
342 | ||
343 | /* For file-backed pages */
344 | if (page_mapping(page)) | |
345 | extra += 1 + page_has_private(page); | |
346 | ||
347 | if ((page_count(page) - extra) > page_mapcount(page)) | |
348 | return false; | |
349 | ||
350 | return true; | |
351 | } | |
352 | ||
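As a worked illustration of the check above, under the common-case assumption that the page is anonymous, has already been fully unmapped by the collect/unmap steps, and has no other users: page_mapcount() is 0 and page_count() is 1, the single reference being the one taken during collection or LRU isolation, so with extra == 1 the test (1 - 1) > 0 is false and the page is treated as migratable. If another context also holds a pin on the page (for example via get_user_pages()), page_count() is at least 2, (2 - 1) > 0 is true, and the page is reported as pinned and skipped.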
353 | /* | |
354 | * migrate_vma_unmap() - replace page mapping with special migration pte entry | |
355 | * @migrate: migrate struct containing all migration information | |
356 | * | |
357 | * Isolate pages from the LRU and replace their mappings (CPU page table ptes)
358 | * with special migration pte entries, then check whether the pages have been
359 | * pinned. Pinned pages are restored because we cannot migrate them.
360 | * | |
361 | * This is the last step before we call the device driver callback to allocate | |
362 | * destination memory and copy contents of original page over to new page. | |
363 | */ | |
364 | static void migrate_vma_unmap(struct migrate_vma *migrate) | |
365 | { | |
366 | const unsigned long npages = migrate->npages; | |
367 | unsigned long i, restore = 0; | |
368 | bool allow_drain = true; | |
369 | ||
370 | lru_add_drain(); | |
371 | ||
372 | for (i = 0; i < npages; i++) { | |
373 | struct page *page = migrate_pfn_to_page(migrate->src[i]); | |
374 | struct folio *folio;
375 | |
376 | if (!page) | |
377 | continue; | |
378 | ||
379 | /* ZONE_DEVICE pages are not on LRU */ | |
380 | if (!is_zone_device_page(page)) { | |
381 | if (!PageLRU(page) && allow_drain) { | |
382 | /* Drain CPU's pagevec */ | |
383 | lru_add_drain_all(); | |
384 | allow_drain = false; | |
385 | } | |
386 | ||
387 | if (isolate_lru_page(page)) { | |
388 | migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; | |
389 | migrate->cpages--; | |
390 | restore++; | |
391 | continue; | |
392 | } | |
393 | ||
394 | /* Drop the reference we took in collect */ | |
395 | put_page(page); | |
396 | } | |
397 | ||
398 | folio = page_folio(page); |
399 | if (folio_mapped(folio)) | |
400 | try_to_migrate(folio, 0); | |
401 | |
402 | if (page_mapped(page) || !migrate_vma_check_page(page)) { | |
403 | if (!is_zone_device_page(page)) { | |
404 | get_page(page); | |
405 | putback_lru_page(page); | |
406 | } | |
407 | ||
408 | migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; | |
409 | migrate->cpages--; | |
410 | restore++; | |
411 | continue; | |
412 | } | |
413 | } | |
414 | ||
415 | for (i = 0; i < npages && restore; i++) { | |
416 | struct page *page = migrate_pfn_to_page(migrate->src[i]); | |
417 | struct folio *folio;
418 | |
419 | if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) | |
420 | continue; | |
421 | ||
422 | folio = page_folio(page); |
423 | remove_migration_ptes(folio, folio, false); | |
424 | |
425 | migrate->src[i] = 0; | |
426 | folio_unlock(folio); |
427 | folio_put(folio); | |
428 | restore--; |
429 | } | |
430 | } | |
431 | ||
432 | /** | |
433 | * migrate_vma_setup() - prepare to migrate a range of memory | |
434 | * @args: contains the vma, start, and pfns arrays for the migration | |
435 | * | |
436 | * Returns: negative errno on failures, 0 when 0 or more pages were migrated | |
437 | * without an error. | |
438 | * | |
439 | * Prepare to migrate a range of virtual addresses by collecting all
440 | * the pages backing each virtual address in the range, saving them inside the | |
441 | * src array. Then lock those pages and unmap them. Once the pages are locked | |
442 | * and unmapped, check whether each page is pinned or not. Pages that aren't | |
443 | * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the | |
444 | * corresponding src array entry. It then restores any pages that are pinned, by
445 | * remapping and unlocking those pages. | |
446 | * | |
447 | * The caller should then allocate destination memory and copy source memory to | |
448 | * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE | |
449 | * flag set). Once these are allocated and copied, the caller must update each | |
450 | * corresponding entry in the dst array with the pfn value of the destination | |
451 | * page and with MIGRATE_PFN_VALID. Destination pages must be locked via | |
452 | * lock_page(). | |
453 | * | |
454 | * Note that the caller does not have to migrate all the pages that are marked | |
455 | * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from | |
456 | * device memory to system memory. If the caller cannot migrate a device page | |
457 | * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe | |
458 | * consequences for the userspace process, so it must be avoided if at all | |
459 | * possible. | |
460 | * | |
461 | * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we | |
462 | * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus | |
463 | * allowing the caller to allocate device memory for those unbacked virtual | |
464 | * addresses. For this the caller simply has to allocate device memory and | |
465 | * properly set the destination entry like for regular migration. Note that | |
466 | * this can still fail, and thus inside the device driver you must check if the | |
467 | * migration was successful for those entries after calling migrate_vma_pages(), | |
468 | * just like for regular migration. | |
469 | * | |
470 | * After that, the caller must call migrate_vma_pages() to go over each entry in
471 | * the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags
472 | * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID
473 | * flag set, migrate_vma_pages() migrates the struct page information from the
474 | * source struct page to the destination struct page. If it fails to migrate
475 | * the struct page information, it clears the MIGRATE_PFN_MIGRATE flag in the
476 | * src array. (An illustrative driver-side sketch follows migrate_vma_setup() below.)
477 | * | |
478 | * At this point all successfully migrated pages have an entry in the src | |
479 | * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst | |
480 | * array entry with MIGRATE_PFN_VALID flag set. | |
481 | * | |
482 | * Once migrate_vma_pages() returns the caller may inspect which pages were | |
483 | * successfully migrated, and which were not. Successfully migrated pages will | |
484 | * have the MIGRATE_PFN_MIGRATE flag set for their src array entry. | |
485 | * | |
486 | * It is safe to update device page table after migrate_vma_pages() because | |
487 | * both destination and source page are still locked, and the mmap_lock is held | |
488 | * in read mode (hence no one can unmap the range being migrated). | |
489 | * | |
490 | * Once the caller is done cleaning up things and updating its page table (if it | |
491 | * chose to do so, this is not an obligation) it finally calls | |
492 | * migrate_vma_finalize() to update the CPU page table to point to new pages | |
493 | * for successfully migrated pages or otherwise restore the CPU page table to | |
494 | * point to the original source pages. | |
495 | */ | |
496 | int migrate_vma_setup(struct migrate_vma *args) | |
497 | { | |
498 | long nr_pages = (args->end - args->start) >> PAGE_SHIFT; | |
499 | ||
500 | args->start &= PAGE_MASK; | |
501 | args->end &= PAGE_MASK; | |
502 | if (!args->vma || is_vm_hugetlb_page(args->vma) || | |
503 | (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma)) | |
504 | return -EINVAL; | |
505 | if (nr_pages <= 0) | |
506 | return -EINVAL; | |
507 | if (args->start < args->vma->vm_start || | |
508 | args->start >= args->vma->vm_end) | |
509 | return -EINVAL; | |
510 | if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end) | |
511 | return -EINVAL; | |
512 | if (!args->src || !args->dst) | |
513 | return -EINVAL; | |
514 | ||
515 | memset(args->src, 0, sizeof(*args->src) * nr_pages); | |
516 | args->cpages = 0; | |
517 | args->npages = 0; | |
518 | ||
519 | migrate_vma_collect(args); | |
520 | ||
521 | if (args->cpages) | |
522 | migrate_vma_unmap(args); | |
523 | ||
524 | /* | |
525 | * At this point pages are locked and unmapped, and thus they have | |
526 | * stable content and can safely be copied to destination memory that | |
527 | * is allocated by the drivers. | |
528 | */ | |
529 | return 0; | |
530 | ||
531 | } | |
532 | EXPORT_SYMBOL(migrate_vma_setup); | |
533 | ||
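To make the driver-facing flow documented above concrete, here is a minimal illustrative sketch. It is not part of this file: my_drv_alloc_device_page() and my_drv_copy_to_device() are hypothetical helpers standing in for a real driver's device-memory allocator and copy routine, and the caller is assumed to hold the mmap_lock in read mode. It migrates a single page of anonymous system memory into device-private memory:

```c
/*
 * Illustrative sketch only: a hypothetical driver migrating one page of
 * anonymous system memory into its device-private memory.
 * my_drv_alloc_device_page() and my_drv_copy_to_device() are made-up
 * helpers; everything else is the API implemented in this file.
 */
extern struct page *my_drv_alloc_device_page(void);
extern void my_drv_copy_to_device(struct page *dpage, struct page *spage);

static int my_drv_migrate_one_page(struct vm_area_struct *vma,
				   unsigned long addr, void *pgmap_owner)
{
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= addr,
		.end		= addr + PAGE_SIZE,
		.src		= &src_pfn,
		.dst		= &dst_pfn,
		.pgmap_owner	= pgmap_owner,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	struct page *spage, *dpage;
	int ret;

	/* Collect, lock and unmap the source page; mmap_lock is held for read. */
	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
		return -EBUSY;	/* pinned or otherwise not migratable */

	/*
	 * Allocate and lock the destination page, then copy the source
	 * contents. A real driver would also clear the device page when
	 * spage is NULL (i.e. the source was an empty anonymous pte).
	 */
	spage = migrate_pfn_to_page(src_pfn);
	dpage = my_drv_alloc_device_page();
	if (dpage) {
		lock_page(dpage);
		dst_pfn = migrate_pfn(page_to_pfn(dpage));
		if (spage)
			my_drv_copy_to_device(dpage, spage);
	}

	/* Migrate struct page metadata, then update or restore the CPU ptes. */
	migrate_vma_pages(&args);
	ret = (src_pfn & MIGRATE_PFN_MIGRATE) ? 0 : -EBUSY;
	migrate_vma_finalize(&args);
	return ret;
}
```

For a real in-tree caller of the same sequence, see migrate_device_coherent_page() at the end of this file, which drives migrate_vma_unmap(), migrate_vma_pages() and migrate_vma_finalize() directly, without a VMA, to move a device-coherent page back to normal memory.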
534 | /* | |
535 | * This code closely matches the code in: | |
536 | * __handle_mm_fault() | |
537 | * handle_pte_fault() | |
538 | * do_anonymous_page() | |
539 | * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE | |
540 | * private or coherent page.
541 | */ |
542 | static void migrate_vma_insert_page(struct migrate_vma *migrate, | |
543 | unsigned long addr, | |
544 | struct page *page, | |
545 | unsigned long *src) | |
546 | { | |
547 | struct vm_area_struct *vma = migrate->vma; | |
548 | struct mm_struct *mm = vma->vm_mm; | |
549 | bool flush = false; | |
550 | spinlock_t *ptl; | |
551 | pte_t entry; | |
552 | pgd_t *pgdp; | |
553 | p4d_t *p4dp; | |
554 | pud_t *pudp; | |
555 | pmd_t *pmdp; | |
556 | pte_t *ptep; | |
557 | ||
558 | /* Only allow populating anonymous memory */ | |
559 | if (!vma_is_anonymous(vma)) | |
560 | goto abort; | |
561 | ||
562 | pgdp = pgd_offset(mm, addr); | |
563 | p4dp = p4d_alloc(mm, pgdp, addr); | |
564 | if (!p4dp) | |
565 | goto abort; | |
566 | pudp = pud_alloc(mm, p4dp, addr); | |
567 | if (!pudp) | |
568 | goto abort; | |
569 | pmdp = pmd_alloc(mm, pudp, addr); | |
570 | if (!pmdp) | |
571 | goto abort; | |
572 | ||
573 | if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp)) | |
574 | goto abort; | |
575 | ||
576 | /* | |
577 | * Use pte_alloc() instead of pte_alloc_map(). We can't run | |
578 | * pte_offset_map() on pmds where a huge pmd might be created | |
579 | * from a different thread. | |
580 | * | |
581 | * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when | |
582 | * parallel threads are excluded by other means. | |
583 | * | |
584 | * Here we only have mmap_read_lock(mm). | |
585 | */ | |
586 | if (pte_alloc(mm, pmdp)) | |
587 | goto abort; | |
588 | ||
589 | /* See the comment in pte_alloc_one_map() */ | |
590 | if (unlikely(pmd_trans_unstable(pmdp))) | |
591 | goto abort; | |
592 | ||
593 | if (unlikely(anon_vma_prepare(vma))) | |
594 | goto abort; | |
595 | if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) | |
596 | goto abort; | |
597 | ||
598 | /* | |
599 | * The memory barrier inside __SetPageUptodate makes sure that | |
600 | * preceding stores to the page contents become visible before | |
601 | * the set_pte_at() write. | |
602 | */ | |
603 | __SetPageUptodate(page); | |
604 | ||
605 | if (is_device_private_page(page)) { | |
606 | swp_entry_t swp_entry; | |
607 | ||
608 | if (vma->vm_flags & VM_WRITE) | |
609 | swp_entry = make_writable_device_private_entry( | |
610 | page_to_pfn(page)); | |
611 | else | |
612 | swp_entry = make_readable_device_private_entry( | |
613 | page_to_pfn(page)); | |
614 | entry = swp_entry_to_pte(swp_entry); | |
615 | } else { | |
616 | if (is_zone_device_page(page) && |
617 | !is_device_coherent_page(page)) { | |
618 | pr_warn_once("Unsupported ZONE_DEVICE page type.\n"); |
619 | goto abort; | |
620 | } | |
621 | entry = mk_pte(page, vma->vm_page_prot); | |
622 | if (vma->vm_flags & VM_WRITE) | |
623 | entry = pte_mkwrite(pte_mkdirty(entry)); | |
624 | } | |
625 | ||
626 | ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); | |
627 | ||
628 | if (check_stable_address_space(mm)) | |
629 | goto unlock_abort; | |
630 | ||
631 | if (pte_present(*ptep)) { | |
632 | unsigned long pfn = pte_pfn(*ptep); | |
633 | ||
634 | if (!is_zero_pfn(pfn)) | |
635 | goto unlock_abort; | |
636 | flush = true; | |
637 | } else if (!pte_none(*ptep)) | |
638 | goto unlock_abort; | |
639 | ||
640 | /* | |
641 | * Check for userfaultfd but do not deliver the fault. Instead, | |
642 | * just back off. | |
643 | */ | |
644 | if (userfaultfd_missing(vma)) | |
645 | goto unlock_abort; | |
646 | ||
647 | inc_mm_counter(mm, MM_ANONPAGES); | |
648 | page_add_new_anon_rmap(page, vma, addr);
649 | if (!is_zone_device_page(page)) |
650 | lru_cache_add_inactive_or_unevictable(page, vma); | |
651 | get_page(page); | |
652 | ||
653 | if (flush) { | |
654 | flush_cache_page(vma, addr, pte_pfn(*ptep)); | |
655 | ptep_clear_flush_notify(vma, addr, ptep); | |
656 | set_pte_at_notify(mm, addr, ptep, entry); | |
657 | update_mmu_cache(vma, addr, ptep); | |
658 | } else { | |
659 | /* No need to invalidate - it was non-present before */ | |
660 | set_pte_at(mm, addr, ptep, entry); | |
661 | update_mmu_cache(vma, addr, ptep); | |
662 | } | |
663 | ||
664 | pte_unmap_unlock(ptep, ptl); | |
665 | *src = MIGRATE_PFN_MIGRATE; | |
666 | return; | |
667 | ||
668 | unlock_abort: | |
669 | pte_unmap_unlock(ptep, ptl); | |
670 | abort: | |
671 | *src &= ~MIGRATE_PFN_MIGRATE; | |
672 | } | |
673 | ||
674 | /** | |
675 | * migrate_vma_pages() - migrate meta-data from src page to dst page | |
676 | * @migrate: migrate struct containing all migration information | |
677 | * | |
678 | * This migrates struct page meta-data from source struct page to destination | |
679 | * struct page. This effectively finishes the migration from source page to the | |
680 | * destination page. | |
681 | */ | |
682 | void migrate_vma_pages(struct migrate_vma *migrate) | |
683 | { | |
684 | const unsigned long npages = migrate->npages; | |
685 | const unsigned long start = migrate->start; | |
686 | struct mmu_notifier_range range; | |
687 | unsigned long addr, i; | |
688 | bool notified = false; | |
689 | ||
690 | for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) { | |
691 | struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); | |
692 | struct page *page = migrate_pfn_to_page(migrate->src[i]); | |
693 | struct address_space *mapping; | |
694 | int r; | |
695 | ||
696 | if (!newpage) { | |
697 | migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; | |
698 | continue; | |
699 | } | |
700 | ||
701 | if (!page) { | |
702 | /* |
703 | * The only time there is no vma is when called from | |
704 | * migrate_device_coherent_page(). However this isn't | |
705 | * called if the page could not be unmapped. | |
706 | */ | |
707 | VM_BUG_ON(!migrate->vma); | |
708 | if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) |
709 | continue; | |
710 | if (!notified) { | |
711 | notified = true; | |
712 | ||
713 | mmu_notifier_range_init_owner(&range, | |
714 | MMU_NOTIFY_MIGRATE, 0, migrate->vma, | |
715 | migrate->vma->vm_mm, addr, migrate->end, | |
716 | migrate->pgmap_owner); | |
717 | mmu_notifier_invalidate_range_start(&range); | |
718 | } | |
719 | migrate_vma_insert_page(migrate, addr, newpage, | |
720 | &migrate->src[i]); | |
721 | continue; | |
722 | } | |
723 | ||
724 | mapping = page_mapping(page); | |
725 | ||
726 | if (is_device_private_page(newpage) || |
727 | is_device_coherent_page(newpage)) { | |
728 | /*
729 | * For now only support anonymous memory migrating to |
730 | * device private or coherent memory. | |
731 | */ |
732 | if (mapping) { | |
733 | migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; | |
734 | continue; | |
735 | } | |
736 | } else if (is_zone_device_page(newpage)) { | |
737 | /* | |
738 | * Other types of ZONE_DEVICE page are not supported. | |
739 | */ | |
740 | migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; | |
741 | continue; | |
742 | } | |
743 | ||
744 | r = migrate_folio(mapping, page_folio(newpage), |
745 | page_folio(page), MIGRATE_SYNC_NO_COPY); | |
746 | if (r != MIGRATEPAGE_SUCCESS) |
747 | migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; | |
748 | } | |
749 | ||
750 | /* | |
751 | * No need to double call mmu_notifier->invalidate_range() callback as | |
752 | * the above ptep_clear_flush_notify() inside migrate_vma_insert_page() | |
753 | * already called it.
754 | */ | |
755 | if (notified) | |
756 | mmu_notifier_invalidate_range_only_end(&range); | |
757 | } | |
758 | EXPORT_SYMBOL(migrate_vma_pages); | |
759 | ||
760 | /** | |
761 | * migrate_vma_finalize() - restore CPU page table entry | |
762 | * @migrate: migrate struct containing all migration information | |
763 | * | |
764 | * This replaces the special migration pte entry with either a mapping to the | |
765 | * new page if migration was successful for that page, or to the original page | |
766 | * otherwise. | |
767 | * | |
768 | * This also unlocks the pages and puts them back on the lru, or drops the extra | |
769 | * refcount, for device pages. | |
770 | */ | |
771 | void migrate_vma_finalize(struct migrate_vma *migrate) | |
772 | { | |
773 | const unsigned long npages = migrate->npages; | |
774 | unsigned long i; | |
775 | ||
776 | for (i = 0; i < npages; i++) { | |
777 | struct folio *dst, *src;
778 | struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); |
779 | struct page *page = migrate_pfn_to_page(migrate->src[i]); | |
780 | ||
781 | if (!page) { | |
782 | if (newpage) { | |
783 | unlock_page(newpage); | |
784 | put_page(newpage); | |
785 | } | |
786 | continue; | |
787 | } | |
788 | ||
789 | if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) { | |
790 | if (newpage) { | |
791 | unlock_page(newpage); | |
792 | put_page(newpage); | |
793 | } | |
794 | newpage = page; | |
795 | } | |
796 | ||
797 | src = page_folio(page); |
798 | dst = page_folio(newpage); | |
799 | remove_migration_ptes(src, dst, false); | |
800 | folio_unlock(src); | |
801 | |
802 | if (is_zone_device_page(page)) | |
803 | put_page(page); | |
804 | else | |
805 | putback_lru_page(page); | |
806 | ||
807 | if (newpage != page) { | |
808 | unlock_page(newpage); | |
809 | if (is_zone_device_page(newpage)) | |
810 | put_page(newpage); | |
811 | else | |
812 | putback_lru_page(newpage); | |
813 | } | |
814 | } | |
815 | } | |
816 | EXPORT_SYMBOL(migrate_vma_finalize); | |
817 | |
818 | /* | |
819 | * Migrate a device coherent page back to normal memory. The caller should hold
820 | * a reference on the page; its contents will be copied to the new page if
821 | * migration is successful, or the reference is dropped on failure.
822 | */ | |
823 | int migrate_device_coherent_page(struct page *page) | |
824 | { | |
825 | unsigned long src_pfn, dst_pfn = 0; | |
826 | struct migrate_vma args; | |
827 | struct page *dpage; | |
828 | ||
829 | WARN_ON_ONCE(PageCompound(page)); | |
830 | ||
831 | lock_page(page); | |
832 | src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE; | |
833 | args.src = &src_pfn; | |
834 | args.dst = &dst_pfn; | |
835 | args.cpages = 1; | |
836 | args.npages = 1; | |
837 | args.vma = NULL; | |
838 | ||
839 | /* | |
840 | * We don't have a VMA and don't need to walk the page tables to find | |
841 | * the source page. So call migrate_vma_unmap() directly to unmap the | |
842 | * page as migrate_vma_setup() will fail if args.vma == NULL. | |
843 | */ | |
844 | migrate_vma_unmap(&args); | |
845 | if (!(src_pfn & MIGRATE_PFN_MIGRATE)) | |
846 | return -EBUSY; | |
847 | ||
848 | dpage = alloc_page(GFP_USER | __GFP_NOWARN); | |
849 | if (dpage) { | |
850 | lock_page(dpage); | |
851 | dst_pfn = migrate_pfn(page_to_pfn(dpage)); | |
852 | } | |
853 | ||
854 | migrate_vma_pages(&args); | |
855 | if (src_pfn & MIGRATE_PFN_MIGRATE) | |
856 | copy_highpage(dpage, page); | |
857 | migrate_vma_finalize(&args); | |
858 | ||
859 | if (src_pfn & MIGRATE_PFN_MIGRATE) | |
860 | return 0; | |
861 | return -EBUSY; | |
862 | } |