// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

/*
 * hmm_device_entry_from_pfn() - create a valid device entry value from a pfn
 * @range: range used to encode the HMM pfn value
 * @pfn: pfn value for which to create the device entry
 * Return: valid device entry for the pfn
 */
static uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
					  unsigned long pfn)
{
	return (pfn << range->pfn_shift) | range->flags[HMM_PFN_VALID];
}

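/*
 * Illustrative sketch (not part of the original file): a caller that consumes
 * the filled range->pfns[] array would typically reverse this encoding,
 * roughly as below. make_device_pte() and device_pte are hypothetical
 * driver-side names; hmm_device_entry_to_page() is the helper from
 * include/linux/hmm.h in this kernel version.
 *
 *	uint64_t entry = range->pfns[i];
 *
 *	if (entry & range->flags[HMM_PFN_VALID]) {
 *		struct page *page = hmm_device_entry_to_page(range, entry);
 *		bool writable = entry & range->flags[HMM_PFN_WRITE];
 *
 *		device_pte = make_device_pte(page_to_pfn(page), writable);
 *	}
 */
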
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, enum hmm_pfn_value_e value)
{
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[value];

	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       uint64_t pfns, uint64_t cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * We consider not only the individual per-page request, but also the
	 * default flags requested for the whole range. The API can be used in
	 * two ways: the first one, where the HMM user coalesces multiple page
	 * faults into one request and sets flags per pfn for those faults;
	 * the second one, where the HMM user wants to pre-fault a range with
	 * specific flags. For the latter it would be a waste to have the user
	 * pre-fill the pfn array with a default flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return 0;

	/* Need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE]))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If the CPU page table is not valid then we need to fault */
	if (!(cpu_flags & range->flags[HMM_PFN_VALID]))
		return HMM_NEED_FAULT;
	return 0;
}

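/*
 * Illustrative sketch (not part of the original file) of the two usage modes
 * described in the comment above; the field values shown are hypothetical
 * driver-side choices, consistent with how the masking line combines them.
 *
 * Pre-fault a whole range for write without touching individual pfns:
 *
 *	range->default_flags = range->flags[HMM_PFN_VALID] |
 *			       range->flags[HMM_PFN_WRITE];
 *	range->pfn_flags_mask = 0;
 *
 * Coalesce per-page faults, where each pfn entry carries its own request:
 *
 *	range->default_flags = 0;
 *	range->pfn_flags_mask = -1ULL;
 *	range->pfns[i] = range->flags[HMM_PFN_VALID];	 // read fault page i
 *	range->pfns[j] = range->flags[HMM_PFN_VALID] |
 *			 range->flags[HMM_PFN_WRITE];	 // write fault page j
 */
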
static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const uint64_t *pfns, unsigned long npages,
		     uint64_t cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      range->flags[HMM_PFN_VALID]))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |=
			hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	required_fault = hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	hmm_vma_walk->last = addr;
	return hmm_pfns_fill(addr, end, range, HMM_PFN_NONE);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, uint64_t *pfns, pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		       unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool hmm_is_device_private_entry(struct hmm_range *range,
					       swp_entry_t entry)
{
	return is_device_private_entry(entry) &&
		device_private_entry_to_page(entry)->pgmap->owner ==
		range->dev_private_owner;
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	if (pte_none(pte)) {
		required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0);
		if (required_fault)
			goto fault;
		*pfn = range->values[HMM_PFN_NONE];
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Never fault in device private pages, but just report
		 * the PFN even if not present.
		 */
		if (hmm_is_device_private_entry(range, entry)) {
			*pfn = hmm_device_entry_from_pfn(range,
					device_private_entry_to_pfn(entry));
			*pfn |= range->flags[HMM_PFN_VALID];
			if (is_write_device_private_entry(entry))
				*pfn |= range->flags[HMM_PFN_WRITE];
			return 0;
		}

		required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0);
		if (!required_fault) {
			*pfn = range->values[HMM_PFN_NONE];
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report an error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*pfn = range->values[HMM_PFN_SPECIAL];
		return 0;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = &range->pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here; even if some other thread
		 * is splitting the huge pmd we will get that event through the
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		uint64_t *pfns, cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		hmm_vma_walk->last = end;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t orig_pfn, cpu_flags;
	unsigned int required_fault;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	orig_pfn = range->pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags);
	if (required_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, required_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->pfns +
					((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	hmm_vma_walk->last = end;

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: argument structure
 *
 * Return: the number of valid pages in range->pfns[] (from range start
 * address), which may be zero. On error one of the following status codes
 * can be returned:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. without causing faults).
 */
long hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	lockdep_assert_held(&mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
	} while (ret == -EBUSY);

	if (ret)
		return ret;
	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
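
/*
 * Illustrative usage sketch (not part of the original file), loosely following
 * the pattern described in Documentation/vm/hmm.rst for this kernel version.
 * driver_lock()/driver_unlock() and the surrounding driver state are
 * hypothetical; hmm_range_fault(), the mmu_interval_notifier helpers and
 * mmap_sem are the real kernel interfaces used here.
 *
 *	long ret;
 *
 *	range.notifier = &interval_sub;
 *again:
 *	range.notifier_seq = mmu_interval_read_begin(&interval_sub);
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range);
 *	up_read(&mm->mmap_sem);
 *	if (ret < 0) {
 *		if (ret == -EBUSY)
 *			goto again;
 *		return ret;
 *	}
 *
 *	driver_lock();
 *	if (mmu_interval_read_retry(&interval_sub, range.notifier_seq)) {
 *		driver_unlock();
 *		goto again;
 *	}
 *	// range.pfns[] is now stable; program the device page table from it.
 *	driver_unlock();
 */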