mm/hmm: check the device private page owner in hmm_range_fault()
mm/hmm.c
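The page walk below only reports device private entries whose pgmap->owner matches
range->dev_private_owner; entries owned by other devices are never faulted in and are
reported as HMM_PFN_NONE (or as an error if a fault was requested). A caller opting
into this check might fill its range roughly as follows (illustrative sketch only;
interval_sub, drv_flags, drv_values, drv_pfn_shift and drvdata are placeholder names,
not identifiers from this file):

	struct hmm_range range = {
		.notifier          = &interval_sub,
		.start             = start,
		.end               = end,
		.pfns              = pfns,
		.flags             = drv_flags,
		.values            = drv_values,
		.pfn_shift         = drv_pfn_shift,
		.dev_private_owner = drvdata,	/* must match pgmap->owner of the device's pages */
	};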
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

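/*
 * Per-walk private data shared by the pagewalk callbacks below:
 * @range:	the hmm_range being snapshotted or faulted
 * @pgmap:	cached dev_pagemap reference, dropped once the walk no longer
 *		needs it
 * @last:	last virtual address handled so far, used to resume the walk
 * @flags:	HMM_FAULT_* flags passed to hmm_range_fault()
 */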
struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	unsigned int		flags;
};

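/*
 * Fill the pfn array for @addr..@end with one of the caller-provided
 * range->values[] entries (e.g. HMM_PFN_NONE or HMM_PFN_ERROR).
 */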
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
		struct hmm_range *range, enum hmm_pfn_value_e value)
{
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[value];

	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 bool fault, bool write_fault,
			 struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!fault && !write_fault);
	hmm_vma_walk->last = addr;

	if (!vma)
		goto out_error;

	if (write_fault) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE, i++)
		if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR)
			goto out_error;

	return -EBUSY;

out_error:
	pfns[i] = range->values[HMM_PFN_ERROR];
	return -EFAULT;
}

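/*
 * hmm_pte_need_fault() - decide whether one pfn request requires a (write)
 * fault, combining the per-pfn request bits with the range-wide
 * default_flags/pfn_flags_mask. When HMM_FAULT_SNAPSHOT is set no fault is
 * ever requested.
 *
 * For example (an illustrative sketch, not code from this file), a caller
 * that wants to pre-fault an entire range writable without pre-filling
 * range->pfns[] could set:
 *
 *	range->default_flags = range->flags[HMM_PFN_VALID] |
 *			       range->flags[HMM_PFN_WRITE];
 *	range->pfn_flags_mask = 0;
 */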
static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
		return;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used
	 * two ways: either the HMM user coalesces multiple page faults into
	 * one request and sets flags per pfn for those faults, or the HMM
	 * user wants to pre-fault a range with specific flags. For the
	 * latter it would be a waste to have the user pre-fill the pfn
	 * array with a default flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;

	/* If the CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}

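/*
 * Like hmm_pte_need_fault(), but for an array of @npages requests; stops as
 * soon as a write fault is known to be needed since nothing stronger can be
 * requested.
 */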
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if (*write_fault)
			return;
	}
}

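/*
 * pte_hole callback: nothing in the page tables covers @addr..@end. Fault
 * the hole in if any request asks for it, otherwise report it as
 * HMM_PFN_NONE.
 */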
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	if (fault || write_fault)
		return hmm_vma_fault(addr, end, fault, write_fault, walk);
	hmm_vma_walk->last = addr;
	return hmm_pfns_fill(addr, end, range, HMM_PFN_NONE);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (fault || write_fault)
		return hmm_vma_fault(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

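/*
 * Only device private entries whose pgmap owner matches the caller-provided
 * range->dev_private_owner are recognized here; hmm_vma_handle_pte() reports
 * those by pfn even though they are not present, while device private pages
 * belonging to other devices are left alone.
 */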
static inline bool hmm_is_device_private_entry(struct hmm_range *range,
		swp_entry_t entry)
{
	return is_device_private_entry(entry) &&
		device_private_entry_to_page(entry)->pgmap->owner ==
		range->dev_private_owner;
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

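/*
 * Snapshot or fault a single pte. Returns 0 on success, -EBUSY when the
 * caller should restart the walk (after a page fault or after waiting on a
 * migration entry), or a fault error. On any non-zero return the pte has
 * already been unmapped.
 */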
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Never fault in device private pages, but just report
		 * the PFN even if not present.
		 */
		if (hmm_is_device_private_entry(range, entry)) {
			*pfn = hmm_device_entry_from_pfn(range,
					    swp_offset(entry));
			*pfn |= range->flags[HMM_PFN_VALID];
			if (is_write_device_private_entry(entry))
				*pfn |= range->flags[HMM_PFN_WRITE];
			return 0;
		}

		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
				   &write_fault);
		if (!fault && !write_fault)
			return 0;

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags, &fault,
			   &write_fault);
	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap)) {
			pte_unmap(ptep);
			return -EBUSY;
		}
	}

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
				   &write_fault);
		if (fault || write_fault) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*pfn = range->values[HMM_PFN_SPECIAL];
		return 0;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, fault, write_fault, walk);
}

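/*
 * pmd_entry callback: dispatch on the pmd state (none, migration entry, not
 * present, device or transparent huge page, bad, or a regular pte table) and
 * fill the matching part of range->pfns[].
 */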
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = &range->pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	bool fault, write_fault;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
	}

	if (!pmd_present(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
				     &write_fault);
		if (fault || write_fault)
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here; even if some other thread
		 * is splitting the huge pmd we will get that event through the
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value, check again that it is a
		 * transparent huge or device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. none, migration,
	 * huge or transparent huge. At this point it is either a valid pmd
	 * entry pointing to a pte directory or a bad pmd that will not
	 * recover.
	 */
	if (pmd_bad(pmd)) {
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault,
				     &write_fault);
		if (fault || write_fault)
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage the get_dev_pagemap() optimization
		 * which will not re-take a reference on a pgmap if we already
		 * have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
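/*
 * pud_entry handling: on configurations that can have pud-sized device
 * mappings, snapshot or fault a huge devmap pud without splitting it; any
 * other pud is handed back to the walker with ACTION_SUBTREE so the lower
 * levels are visited.
 */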
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, fault, write_fault,
					     walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap)) {
				ret = -EBUSY;
				goto out_unlock;
			}
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
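/*
 * hugetlb_entry callback: snapshot or fault one huge page; every pfn entry
 * covering it is derived from a single hugetlb pte under the huge_pte lock.
 */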
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, fault, write_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

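/*
 * test_walk callback: returns 1 to skip vmas that cannot be mirrored
 * (VM_IO/VM_PFNMAP/VM_MIXEDMAP mappings or vmas without read access), after
 * either failing a requested fault with -EFAULT or filling the range with
 * HMM_PFN_ERROR; returns 0 to walk the vma normally.
 */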
static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	/*
	 * Skip vma ranges that don't have struct page backing them or map I/O
	 * devices directly.
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 */
	if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) ||
	    !(vma->vm_flags & VM_READ)) {
		bool fault, write_fault;

		/*
		 * Check to see if a fault is requested for any page in the
		 * range.
		 */
		hmm_range_need_fault(hmm_vma_walk, range->pfns +
					((start - range->start) >> PAGE_SHIFT),
					(end - start) >> PAGE_SHIFT,
					0, &fault, &write_fault);
		if (fault || write_fault)
			return -EFAULT;

		hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
		hmm_vma_walk->last = end;

		/* Skip this vma and continue processing the next vma. */
		return 1;
	}

	return 0;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: range being faulted
 * @flags: HMM_FAULT_* flags
 *
 * Return: the number of valid pages in range->pfns[] (from range start
 * address), which may be zero. On error one of the following status codes
 * can be returned:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A fault could not be resolved (i.e., either there is no valid
 *		vma covering part of the range or it is illegal to access that
 *		range).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, unsigned int flags)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
		.flags = flags,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	lockdep_assert_held(&mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
	} while (ret == -EBUSY);

	if (ret)
		return ret;
	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
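/*
 * A typical caller pairs hmm_range_fault() with an mmu_interval_notifier and
 * retries until the snapshot is still valid under its own lock. Illustrative
 * sketch only: interval_sub and driver_lock are assumptions of this example,
 * not part of this file.
 *
 *	long ret;
 *
 *	again:
 *		range->notifier_seq = mmu_interval_read_begin(&interval_sub);
 *		down_read(&mm->mmap_sem);
 *		ret = hmm_range_fault(range, 0);
 *		up_read(&mm->mmap_sem);
 *		if (ret == -EBUSY)
 *			goto again;
 *		if (ret < 0)
 *			return ret;
 *
 *		mutex_lock(&driver_lock);
 *		if (mmu_interval_read_retry(&interval_sub, range->notifier_seq)) {
 *			mutex_unlock(&driver_lock);
 *			goto again;
 *		}
 *		// range->pfns[] is now safe to use until the next invalidation
 *		mutex_unlock(&driver_lock);
 */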