// SPDX-License-Identifier: GPL-2.0
/*
 * Device Memory Migration functionality.
 *
 * Originally written by Jérôme Glisse.
 */
#include <linux/export.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swapops.h>
#include <asm/tlbflush.h>
#include "internal.h"

static int migrate_vma_collect_skip(unsigned long start,
				    unsigned long end,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = 0;
	}

	return 0;
}

static int migrate_vma_collect_hole(unsigned long start,
				    unsigned long end,
				    __always_unused int depth,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	/* Only allow populating anonymous memory. */
	if (!vma_is_anonymous(walk->vma))
		return migrate_vma_collect_skip(start, end, walk);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
		migrate->dst[migrate->npages] = 0;
		migrate->npages++;
		migrate->cpages++;
	}

	return 0;
}
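
/*
 * Note, as an aside for readers of the code above: a src entry of
 * MIGRATE_PFN_MIGRATE with no pfn bits set is how an unbacked (pte_none)
 * anonymous address is reported to the caller. There is no source page to
 * copy from, but the driver may still allocate a device page for the address
 * and have migrate_vma_pages() insert it via migrate_vma_insert_page().
 */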

static int migrate_vma_collect_pmd(pmd_t *pmdp,
				   unsigned long start,
				   unsigned long end,
				   struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start, unmapped = 0;
	spinlock_t *ptl;
	pte_t *ptep;

again:
	if (pmd_none(*pmdp))
		return migrate_vma_collect_hole(start, end, -1, walk);

	if (pmd_trans_huge(*pmdp)) {
		struct page *page;

		ptl = pmd_lock(mm, pmdp);
		if (unlikely(!pmd_trans_huge(*pmdp))) {
			spin_unlock(ptl);
			goto again;
		}

		page = pmd_page(*pmdp);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmdp, addr);
			if (pmd_trans_unstable(pmdp))
				return migrate_vma_collect_skip(start, end,
								walk);
		} else {
			int ret;

			get_page(page);
			spin_unlock(ptl);
			if (unlikely(!trylock_page(page)))
				return migrate_vma_collect_skip(start, end,
								walk);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (ret)
				return migrate_vma_collect_skip(start, end,
								walk);
			if (pmd_none(*pmdp))
				return migrate_vma_collect_hole(start, end, -1,
								walk);
		}
	}

	if (unlikely(pmd_bad(*pmdp)))
		return migrate_vma_collect_skip(start, end, walk);

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	arch_enter_lazy_mmu_mode();

	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		unsigned long mpfn = 0, pfn;
		struct page *page;
		swp_entry_t entry;
		pte_t pte;

		pte = *ptep;

		if (pte_none(pte)) {
			if (vma_is_anonymous(vma)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
			}
			goto next;
		}

		if (!pte_present(pte)) {
			/*
			 * We only care about unaddressable device page
			 * special page table entries. Other special swap
			 * entries are not migratable, and we ignore regular
			 * swapped pages.
			 */
			entry = pte_to_swp_entry(pte);
			if (!is_device_private_entry(entry))
				goto next;

			page = pfn_swap_entry_to_page(entry);
			if (!(migrate->flags &
			      MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
			    page->pgmap->owner != migrate->pgmap_owner)
				goto next;

			mpfn = migrate_pfn(page_to_pfn(page)) |
					MIGRATE_PFN_MIGRATE;
			if (is_writable_device_private_entry(entry))
				mpfn |= MIGRATE_PFN_WRITE;
		} else {
			pfn = pte_pfn(pte);
			if (is_zero_pfn(pfn) &&
			    (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
				goto next;
			}
			page = vm_normal_page(migrate->vma, addr, pte);
			if (page && !is_zone_device_page(page) &&
			    !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
				goto next;
			else if (page && is_device_coherent_page(page) &&
			    (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
			     page->pgmap->owner != migrate->pgmap_owner))
				goto next;
			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
		}

		/* FIXME support THP */
		if (!page || !page->mapping || PageTransCompound(page)) {
			mpfn = 0;
			goto next;
		}

		/*
		 * By getting a reference on the page we pin it and that
		 * blocks any kind of migration. A side effect is that it
		 * "freezes" the pte.
		 *
		 * We drop this reference after isolating the page from the
		 * lru for non device pages (device pages are not on the lru
		 * and thus can't be dropped from it).
		 */
		get_page(page);

		/*
		 * We rely on trylock_page() to avoid deadlock between
		 * concurrent migrations where each is waiting on the other's
		 * page lock. If we can't immediately lock the page we fail
		 * this migration as it is only best effort anyway.
		 *
		 * If we can lock the page it's safe to set up a migration
		 * entry now. In the common case where the page is mapped once
		 * in a single process setting up the migration entry now is
		 * an optimisation to avoid walking the rmap later with
		 * try_to_migrate().
		 */
		if (trylock_page(page)) {
			bool anon_exclusive;
			pte_t swp_pte;

			flush_cache_page(vma, addr, pte_pfn(*ptep));
			anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
			if (anon_exclusive) {
				pte = ptep_clear_flush(vma, addr, ptep);

				if (page_try_share_anon_rmap(page)) {
					set_pte_at(mm, addr, ptep, pte);
					unlock_page(page);
					put_page(page);
					mpfn = 0;
					goto next;
				}
			} else {
				pte = ptep_get_and_clear(mm, addr, ptep);
			}

			migrate->cpages++;

			/* Set the dirty flag on the folio now the pte is gone. */
			if (pte_dirty(pte))
				folio_mark_dirty(page_folio(page));

			/* Setup special migration page table entry */
			if (mpfn & MIGRATE_PFN_WRITE)
				entry = make_writable_migration_entry(
							page_to_pfn(page));
			else if (anon_exclusive)
				entry = make_readable_exclusive_migration_entry(
							page_to_pfn(page));
			else
				entry = make_readable_migration_entry(
							page_to_pfn(page));
			if (pte_present(pte)) {
				if (pte_young(pte))
					entry = make_migration_entry_young(entry);
				if (pte_dirty(pte))
					entry = make_migration_entry_dirty(entry);
			}
			swp_pte = swp_entry_to_pte(entry);
			if (pte_present(pte)) {
				if (pte_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			} else {
				if (pte_swp_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_swp_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			}
			set_pte_at(mm, addr, ptep, swp_pte);

			/*
			 * This is like regular unmap: we remove the rmap and
			 * drop the page refcount. The page won't be freed, as
			 * we took a reference just above.
			 */
			page_remove_rmap(page, vma, false);
			put_page(page);

			if (pte_present(pte))
				unmapped++;
		} else {
			put_page(page);
			mpfn = 0;
		}

next:
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = mpfn;
	}

	/* Only flush the TLB if we actually modified any entries */
	if (unmapped)
		flush_tlb_range(walk->vma, start, end);

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(ptep - 1, ptl);

	return 0;
}

static const struct mm_walk_ops migrate_vma_walk_ops = {
	.pmd_entry		= migrate_vma_collect_pmd,
	.pte_hole		= migrate_vma_collect_hole,
};

/*
 * migrate_vma_collect() - collect pages over a range of virtual addresses
 * @migrate: migrate struct containing all migration information
 *
 * This will walk the CPU page table. For each virtual address backed by a
 * valid page, it updates the src array and takes a reference on the page, in
 * order to pin the page until we lock it and unmap it.
 */
static void migrate_vma_collect(struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;

	/*
	 * Note that the pgmap_owner is passed to the mmu notifier callback so
	 * that the registered device driver can skip invalidating device
	 * private page mappings that won't be migrated.
	 */
	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
		migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
		migrate->pgmap_owner);
	mmu_notifier_invalidate_range_start(&range);

	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
			&migrate_vma_walk_ops, migrate);

	mmu_notifier_invalidate_range_end(&range);
	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
}
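
/*
 * For illustration, a driver's interval notifier callback can use the owner
 * passed above to skip invalidations triggered by its own migrations. This
 * is a minimal sketch modeled on such callbacks (e.g. lib/test_hmm.c); the
 * "my_" names are hypothetical and not part of this file:
 *
 *	static bool my_interval_invalidate(struct mmu_interval_notifier *mni,
 *					   const struct mmu_notifier_range *range,
 *					   unsigned long cur_seq)
 *	{
 *		// The invalidation of pages we own is already handled as
 *		// part of the migration process, so nothing to do here.
 *		if (range->event == MMU_NOTIFY_MIGRATE &&
 *		    range->owner == my_pgmap_owner)
 *			return true;
 *		// ... otherwise update the device page table as usual ...
 *	}
 */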

/*
 * migrate_vma_check_page() - check if page is pinned or not
 * @page: struct page to check
 *
 * Pinned pages cannot be migrated. This is the same test as in
 * folio_migrate_mapping(), except that here we allow migration of a
 * ZONE_DEVICE page.
 */
static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
{
	/*
	 * One extra ref because the caller holds an extra reference, either
	 * from isolate_lru_page() for a regular page, or
	 * migrate_vma_collect() for a device page.
	 */
	int extra = 1 + (page == fault_page);

	/*
	 * FIXME support THP (transparent huge page), it is a bit more complex
	 * to check them than regular pages, because they can be mapped with a
	 * pmd or with a pte (split pte mapping).
	 */
	if (PageCompound(page))
		return false;

	/* Pages from ZONE_DEVICE have one extra reference */
	if (is_zone_device_page(page))
		extra++;

	/* For file-backed pages */
	if (page_mapping(page))
		extra += 1 + page_has_private(page);

	if ((page_count(page) - extra) > page_mapcount(page))
		return false;

	return true;
}
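
/*
 * To make the arithmetic above concrete: for an anonymous system page that
 * migrate_vma_unmap() has fully unmapped and isolated from the LRU, the only
 * remaining reference should be the caller's, so page_count() == 1,
 * extra == 1 and page_mapcount() == 0, and the page is migratable. A
 * concurrent get_user_pages() reference would raise page_count() to 2,
 * making (2 - 1) > 0 true, so the page is reported as pinned.
 */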

/*
 * migrate_vma_unmap() - replace page mapping with special migration pte entry
 * @migrate: migrate struct containing all migration information
 *
 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
 * special migration pte entry and check if it has been pinned. Pinned pages
 * are restored because we cannot migrate them.
 *
 * This is the last step before we call the device driver callback to allocate
 * destination memory and copy the contents of the original page over to the
 * new page.
 */
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i, restore = 0;
	bool allow_drain = true;

	lru_add_drain();

	for (i = 0; i < npages; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct folio *folio;

		if (!page)
			continue;

		/* ZONE_DEVICE pages are not on LRU */
		if (!is_zone_device_page(page)) {
			if (!PageLRU(page) && allow_drain) {
				/* Drain CPU's pagevec */
				lru_add_drain_all();
				allow_drain = false;
			}

			if (isolate_lru_page(page)) {
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				migrate->cpages--;
				restore++;
				continue;
			}

			/* Drop the reference we took in collect */
			put_page(page);
		}

		folio = page_folio(page);
		if (folio_mapped(folio))
			try_to_migrate(folio, 0);

		if (page_mapped(page) ||
		    !migrate_vma_check_page(page, migrate->fault_page)) {
			if (!is_zone_device_page(page)) {
				get_page(page);
				putback_lru_page(page);
			}

			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			migrate->cpages--;
			restore++;
			continue;
		}
	}

	for (i = 0; i < npages && restore; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct folio *folio;

		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		folio = page_folio(page);
		remove_migration_ptes(folio, folio, false);

		migrate->src[i] = 0;
		folio_unlock(folio);
		folio_put(folio);
		restore--;
	}
}

/**
 * migrate_vma_setup() - prepare to migrate a range of memory
 * @args: contains the vma, start, and pfns arrays for the migration
 *
 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
 * without an error.
 *
 * Prepare to migrate a virtual address range of memory by collecting all the
 * pages backing each virtual address in the range, saving them inside the src
 * array. Then lock those pages and unmap them. Once the pages are locked and
 * unmapped, check whether each page is pinned or not. Pages that aren't
 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
 * corresponding src array entry. Pages that are pinned are then restored by
 * remapping and unlocking them.
 *
 * The caller should then allocate destination memory and copy source memory
 * to it for all those entries (ie with MIGRATE_PFN_VALID and
 * MIGRATE_PFN_MIGRATE flag set). Once these are allocated and copied, the
 * caller must update each corresponding entry in the dst array with the pfn
 * value of the destination page and with MIGRATE_PFN_VALID. Destination pages
 * must be locked via lock_page().
 *
 * Note that the caller does not have to migrate all the pages that are marked
 * with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a
 * migration from device memory to system memory. If the caller cannot migrate
 * a device page back to system memory, then it must return VM_FAULT_SIGBUS,
 * which has severe consequences for the userspace process, so it must be
 * avoided if at all possible.
 *
 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
 * true) we do set the MIGRATE_PFN_MIGRATE flag in the corresponding src array
 * entry, thus allowing the caller to allocate device memory for those
 * unbacked virtual addresses. For this the caller simply has to allocate
 * device memory and properly set the destination entry like for regular
 * migration. Note that this can still fail, and thus inside the device driver
 * you must check if the migration was successful for those entries after
 * calling migrate_vma_pages(), just like for regular migration.
 *
 * After that, the caller must call migrate_vma_pages() to go over each entry
 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flags set. If the corresponding entry in the dst array has the
 * MIGRATE_PFN_VALID flag set, then migrate_vma_pages() migrates the struct
 * page information from the source struct page to the destination struct
 * page. If it fails to migrate the struct page information, then it clears
 * the MIGRATE_PFN_MIGRATE flag in the src array entry.
 *
 * At this point all successfully migrated pages have an entry in the src
 * array with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags set and the
 * dst array entry with the MIGRATE_PFN_VALID flag set.
 *
 * Once migrate_vma_pages() returns the caller may inspect which pages were
 * successfully migrated, and which were not. Successfully migrated pages will
 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table after migrate_vma_pages()
 * because both destination and source pages are still locked, and the
 * mmap_lock is held in read mode (hence no one can unmap the range being
 * migrated).
 *
 * Once the caller is done cleaning up things and updating its page table (if
 * it chose to do so, this is not an obligation) it finally calls
 * migrate_vma_finalize() to update the CPU page table to point to new pages
 * for successfully migrated pages or otherwise restore the CPU page table to
 * point to the original source pages.
 */
int migrate_vma_setup(struct migrate_vma *args)
{
	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;

	args->start &= PAGE_MASK;
	args->end &= PAGE_MASK;
	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
		return -EINVAL;
	if (nr_pages <= 0)
		return -EINVAL;
	if (args->start < args->vma->vm_start ||
	    args->start >= args->vma->vm_end)
		return -EINVAL;
	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
		return -EINVAL;
	if (!args->src || !args->dst)
		return -EINVAL;
	if (args->fault_page && !is_device_private_page(args->fault_page))
		return -EINVAL;

	memset(args->src, 0, sizeof(*args->src) * nr_pages);
	args->cpages = 0;
	args->npages = 0;

	migrate_vma_collect(args);

	if (args->cpages)
		migrate_vma_unmap(args);

	/*
	 * At this point pages are locked and unmapped, and thus they have
	 * stable content and can safely be copied to destination memory that
	 * is allocated by the drivers.
	 */
	return 0;
}
EXPORT_SYMBOL(migrate_vma_setup);
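
/*
 * Putting the documented flow together, a driver migrating one anonymous
 * system page into device memory follows the setup/pages/finalize sequence
 * roughly as sketched below. dev_alloc_page() and dev_copy_page() are
 * hypothetical stand-ins for driver-specific allocation and DMA copy, and
 * error handling is elided; this is an illustration, not code from this file:
 *
 *	unsigned long src_pfn = 0, dst_pfn = 0;
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= addr,
 *		.end		= addr + PAGE_SIZE,
 *		.src		= &src_pfn,
 *		.dst		= &dst_pfn,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *		.pgmap_owner	= my_pgmap_owner,
 *	};
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *	if (src_pfn & MIGRATE_PFN_MIGRATE) {
 *		struct page *dpage = dev_alloc_page();
 *
 *		lock_page(dpage);
 *		dev_copy_page(dpage, migrate_pfn_to_page(src_pfn));
 *		dst_pfn = migrate_pfn(page_to_pfn(dpage));
 *	}
 *	migrate_vma_pages(&args);
 *	// Entries still flagged MIGRATE_PFN_MIGRATE were migrated; update
 *	// the device page table here while both pages are still locked.
 *	migrate_vma_finalize(&args);
 */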

/*
 * This code closely matches the code in:
 *   __handle_mm_fault()
 *     handle_pte_fault()
 *       do_anonymous_page()
 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
 * private or coherent page.
 */
static void migrate_vma_insert_page(struct migrate_vma *migrate,
				    unsigned long addr,
				    struct page *page,
				    unsigned long *src)
{
	struct vm_area_struct *vma = migrate->vma;
	struct mm_struct *mm = vma->vm_mm;
	bool flush = false;
	spinlock_t *ptl;
	pte_t entry;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/* Only allow populating anonymous memory */
	if (!vma_is_anonymous(vma))
		goto abort;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (!p4dp)
		goto abort;
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		goto abort;
	pmdp = pmd_alloc(mm, pudp, addr);
	if (!pmdp)
		goto abort;

	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
		goto abort;

	/*
	 * Use pte_alloc() instead of pte_alloc_map(). We can't run
	 * pte_offset_map() on pmds where a huge pmd might be created
	 * from a different thread.
	 *
	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
	 * parallel threads are excluded by other means.
	 *
	 * Here we only have mmap_read_lock(mm).
	 */
	if (pte_alloc(mm, pmdp))
		goto abort;

	/* See the comment in pte_alloc_one_map() */
	if (unlikely(pmd_trans_unstable(pmdp)))
		goto abort;

	if (unlikely(anon_vma_prepare(vma)))
		goto abort;
	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
		goto abort;

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	if (is_device_private_page(page)) {
		swp_entry_t swp_entry;

		if (vma->vm_flags & VM_WRITE)
			swp_entry = make_writable_device_private_entry(
						page_to_pfn(page));
		else
			swp_entry = make_readable_device_private_entry(
						page_to_pfn(page));
		entry = swp_entry_to_pte(swp_entry);
	} else {
		if (is_zone_device_page(page) &&
		    !is_device_coherent_page(page)) {
			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
			goto abort;
		}
		entry = mk_pte(page, vma->vm_page_prot);
		if (vma->vm_flags & VM_WRITE)
			entry = pte_mkwrite(pte_mkdirty(entry));
	}

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);

	if (check_stable_address_space(mm))
		goto unlock_abort;

	if (pte_present(*ptep)) {
		unsigned long pfn = pte_pfn(*ptep);

		if (!is_zero_pfn(pfn))
			goto unlock_abort;
		flush = true;
	} else if (!pte_none(*ptep))
		goto unlock_abort;

	/*
	 * Check for userfaultfd but do not deliver the fault. Instead,
	 * just back off.
	 */
	if (userfaultfd_missing(vma))
		goto unlock_abort;

	inc_mm_counter(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, addr);
	if (!is_zone_device_page(page))
		lru_cache_add_inactive_or_unevictable(page, vma);
	get_page(page);

	if (flush) {
		flush_cache_page(vma, addr, pte_pfn(*ptep));
		ptep_clear_flush_notify(vma, addr, ptep);
		set_pte_at_notify(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	} else {
		/* No need to invalidate - it was non-present before */
		set_pte_at(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	}

	pte_unmap_unlock(ptep, ptl);
	*src = MIGRATE_PFN_MIGRATE;
	return;

unlock_abort:
	pte_unmap_unlock(ptep, ptl);
abort:
	*src &= ~MIGRATE_PFN_MIGRATE;
}

/**
 * migrate_vma_pages() - migrate meta-data from src page to dst page
 * @migrate: migrate struct containing all migration information
 *
 * This migrates struct page meta-data from the source struct page to the
 * destination struct page. This effectively finishes the migration from the
 * source page to the destination page.
 */
void migrate_vma_pages(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	const unsigned long start = migrate->start;
	struct mmu_notifier_range range;
	unsigned long addr, i;
	bool notified = false;

	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct address_space *mapping;
		int r;

		if (!newpage) {
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (!page) {
			/*
			 * The only time there is no vma is when called from
			 * migrate_device_coherent_page(). However this isn't
			 * called if the page could not be unmapped.
			 */
			VM_BUG_ON(!migrate->vma);
			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
				continue;
			if (!notified) {
				notified = true;

				mmu_notifier_range_init_owner(&range,
					MMU_NOTIFY_MIGRATE, 0, migrate->vma,
					migrate->vma->vm_mm, addr, migrate->end,
					migrate->pgmap_owner);
				mmu_notifier_invalidate_range_start(&range);
			}
			migrate_vma_insert_page(migrate, addr, newpage,
						&migrate->src[i]);
			continue;
		}

		mapping = page_mapping(page);

		if (is_device_private_page(newpage) ||
		    is_device_coherent_page(newpage)) {
			/*
			 * For now only support anonymous memory migrating to
			 * device private or coherent memory.
			 */
			if (mapping) {
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				continue;
			}
		} else if (is_zone_device_page(newpage)) {
			/*
			 * Other types of ZONE_DEVICE page are not supported.
			 */
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (migrate->fault_page == page)
			r = migrate_folio_extra(mapping, page_folio(newpage),
						page_folio(page),
						MIGRATE_SYNC_NO_COPY, 1);
		else
			r = migrate_folio(mapping, page_folio(newpage),
					  page_folio(page), MIGRATE_SYNC_NO_COPY);
		if (r != MIGRATEPAGE_SUCCESS)
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
	}

	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
	 * did already call it.
	 */
	if (notified)
		mmu_notifier_invalidate_range_only_end(&range);
}
EXPORT_SYMBOL(migrate_vma_pages);

/**
 * migrate_vma_finalize() - restore CPU page table entry
 * @migrate: migrate struct containing all migration information
 *
 * This replaces the special migration pte entry with either a mapping to the
 * new page if migration was successful for that page, or to the original page
 * otherwise.
 *
 * This also unlocks the pages and puts them back on the lru, or drops the
 * extra refcount for device pages.
 */
void migrate_vma_finalize(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct folio *dst, *src;
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		if (!page) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			continue;
		}

		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			newpage = page;
		}

		src = page_folio(page);
		dst = page_folio(newpage);
		remove_migration_ptes(src, dst, false);
		folio_unlock(src);

		if (is_zone_device_page(page))
			put_page(page);
		else
			putback_lru_page(page);

		if (newpage != page) {
			unlock_page(newpage);
			if (is_zone_device_page(newpage))
				put_page(newpage);
			else
				putback_lru_page(newpage);
		}
	}
}
EXPORT_SYMBOL(migrate_vma_finalize);

/*
 * Migrate a device coherent page back to normal memory. The caller should
 * have a reference on the page, which will be copied to the new page if
 * migration is successful, or dropped on failure.
 */
int migrate_device_coherent_page(struct page *page)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma args;
	struct page *dpage;

	WARN_ON_ONCE(PageCompound(page));

	lock_page(page);
	src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
	args.src = &src_pfn;
	args.dst = &dst_pfn;
	args.cpages = 1;
	args.npages = 1;
	args.vma = NULL;
	args.fault_page = NULL;

	/*
	 * We don't have a VMA and don't need to walk the page tables to find
	 * the source page. So call migrate_vma_unmap() directly to unmap the
	 * page as migrate_vma_setup() will fail if args.vma == NULL.
	 */
	migrate_vma_unmap(&args);
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
		return -EBUSY;

	dpage = alloc_page(GFP_USER | __GFP_NOWARN);
	if (dpage) {
		lock_page(dpage);
		dst_pfn = migrate_pfn(page_to_pfn(dpage));
	}

	migrate_vma_pages(&args);
	if (src_pfn & MIGRATE_PFN_MIGRATE)
		copy_highpage(dpage, page);
	migrate_vma_finalize(&args);

	if (src_pfn & MIGRATE_PFN_MIGRATE)
		return 0;
	return -EBUSY;
}