mm/hmm: a few more C style and comment clean ups
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/**
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Return: an HMM object, either by referencing the existing
 * (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then it gets a reference on it and returns it. Otherwise
 * it allocates an HMM struct, initializes it, associates it with the mm and
 * returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm;

	lockdep_assert_held_write(&mm->mmap_sem);

	/* Abuse the page_table_lock to also protect mm->hmm. */
	spin_lock(&mm->page_table_lock);
	hmm = mm->hmm;
	if (mm->hmm && kref_get_unless_zero(&mm->hmm->kref))
		goto out_unlock;
	spin_unlock(&mm->page_table_lock);

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->ranges_lock);
	kref_init(&hmm->kref);
	hmm->notifiers = 0;
	hmm->mm = mm;

	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
		kfree(hmm);
		return NULL;
	}

	mmgrab(hmm->mm);

	/*
	 * We hold the exclusive mmap_sem here so we know that mm->hmm is
	 * still NULL or 0 kref, and is safe to update.
	 */
	spin_lock(&mm->page_table_lock);
	mm->hmm = hmm;

out_unlock:
	spin_unlock(&mm->page_table_lock);
	return hmm;
}

static void hmm_free_rcu(struct rcu_head *rcu)
{
	struct hmm *hmm = container_of(rcu, struct hmm, rcu);

	mmdrop(hmm->mm);
	kfree(hmm);
}

static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);

	spin_lock(&hmm->mm->page_table_lock);
	if (hmm->mm->hmm == hmm)
		hmm->mm->hmm = NULL;
	spin_unlock(&hmm->mm->page_table_lock);

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, hmm->mm);
	mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
}

static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;

	/* Bail out if hmm is in the process of being freed */
	if (!kref_get_unless_zero(&hmm->kref))
		return;

	/*
	 * Since hmm_range_register() holds the mmget() lock, hmm_release() is
	 * prevented as long as a range exists.
	 */
	WARN_ON(!list_empty_careful(&hmm->ranges));

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		/*
		 * Note: The driver is not allowed to trigger
		 * hmm_mirror_unregister() from this thread.
		 */
		if (mirror->ops->release)
			mirror->ops->release(mirror);
	}
	up_read(&hmm->mirrors_sem);

	hmm_put(hmm);
}

static void notifiers_decrement(struct hmm *hmm)
{
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;
	struct hmm_range *range;
	unsigned long flags;
	int ret = 0;

	if (!kref_get_unless_zero(&hmm->kref))
		return 0;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (nrange->end < range->start || nrange->start >= range->end)
			continue;

		range->valid = false;
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	if (mmu_notifier_range_blockable(nrange))
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int rc;

		rc = mirror->ops->sync_cpu_device_pagetables(mirror, nrange);
		if (rc) {
			if (WARN_ON(mmu_notifier_range_blockable(nrange) ||
				    rc != -EAGAIN))
				continue;
			ret = -EAGAIN;
			break;
		}
	}
	up_read(&hmm->mirrors_sem);

out:
	if (ret)
		notifiers_decrement(hmm);
	hmm_put(hmm);
	return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

	if (!kref_get_unless_zero(&hmm->kref))
		return;

	notifiers_decrement(hmm);
	hmm_put(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	lockdep_assert_held_write(&mm->mmap_sem);

	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_get_or_create(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;

	down_write(&hmm->mirrors_sem);
	list_del(&mirror->list);
	up_write(&hmm->mirrors_sem);
	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

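/*
 * Illustrative sketch (not part of this file): a driver would typically
 * embed struct hmm_mirror in its own per-mm structure, supply an
 * hmm_mirror_ops, and register while holding mmap_sem for write. The
 * names dummy_mirror_ops, dummy_mirror_mm and dummy_invalidate_device
 * below are hypothetical.
 *
 *	static int dummy_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *			const struct mmu_notifier_range *nrange)
 *	{
 *		// Must not block when !mmu_notifier_range_blockable(nrange),
 *		// see hmm_invalidate_range_start() above.
 *		if (!mmu_notifier_range_blockable(nrange))
 *			return -EAGAIN;
 *		dummy_invalidate_device(mirror, nrange->start, nrange->end);
 *		return 0;
 *	}
 *
 *	static const struct hmm_mirror_ops dummy_mirror_ops = {
 *		.sync_cpu_device_pagetables = dummy_sync_cpu_device_pagetables,
 *	};
 *
 *	static int dummy_mirror_mm(struct hmm_mirror *mirror,
 *				   struct mm_struct *mm)
 *	{
 *		int ret;
 *
 *		mirror->ops = &dummy_mirror_ops;
 *		down_write(&mm->mmap_sem);
 *		ret = hmm_mirror_register(mirror, mm);
 *		up_write(&mm->mmap_sem);
 *		return ret;
 *	}
 */
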
struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY) {
		/* Note, handle_mm_fault() did up_read(&mm->mmap_sem) */
		return -EAGAIN;
	}
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: write fault?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i, page_size;

	hmm_vma_walk->last = addr;
	page_size = hmm_range_page_size(range);
	i = (addr - range->start) >> range->page_shift;

	for (; addr < end; addr += page_size, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (!hmm_vma_walk->fault)
		return;

	/*
	 * We consider not only the individual per page request, but also the
	 * default flags requested for the range. The API can be used two
	 * ways. The first one is where the HMM user coalesces multiple page
	 * faults into one request and sets flags per pfn for those faults.
	 * The second one is where the HMM user wants to pre-fault a range
	 * with specific flags. For the latter it is a waste to have the user
	 * pre-fill the pfn arrays with a default flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}

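/*
 * Illustrative sketch (not part of this file): the two ways a caller can
 * drive the fault logic above. Either pre-fill range->pfns[] with per-pfn
 * request flags and keep them via pfn_flags_mask, or pre-fault a whole
 * range with one set of default flags:
 *
 *	// Pre-fault every page in the range with write permission,
 *	// without having to pre-fill range->pfns[]:
 *	range->default_flags = range->flags[HMM_PFN_VALID] |
 *			       range->flags[HMM_PFN_WRITE];
 *	range->pfn_flags_mask = 0;
 *
 *	// Or honor the per-pfn request flags already stored in
 *	// range->pfns[] and add no defaults:
 *	range->default_flags = 0;
 *	range->pfn_flags_mask = -1UL;
 */
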
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
#else
	/* If THP is not enabled then we should never reach this code! */
	return -EINVAL;
#endif
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry: ignore migration, use
		 * device memory and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
							 swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(walk->mm, pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage the get_dev_pagemap() optimization
		 * which will not re-take a reference on a pgmap if we already
		 * have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

static int hmm_vma_walk_pud(pud_t *pudp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}

static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long addr = start, i, pfn, mask, size, pfn_inc;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	size = huge_page_size(h);
	mask = size - 1;
	if (range->page_shift != PAGE_SHIFT) {
		/* Make sure we are looking at a full page. */
		if (start & mask)
			return -EINVAL;
		if (end < (start + size))
			return -EINVAL;
		pfn_inc = size >> PAGE_SHIFT;
	} else {
		pfn_inc = 1;
		size = PAGE_SIZE;
	}

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> range->page_shift;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
	for (; addr < end; addr += size, i++, pfn += pfn_inc)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
#else /* CONFIG_HUGETLB_PAGE */
	return -EINVAL;
#endif
}

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mirror: the mirror that tracks the mm for the range of virtual addresses
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Return: 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table, see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range,
		       struct hmm_mirror *mirror,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift)
{
	unsigned long mask = ((1UL << page_shift) - 1UL);
	struct hmm *hmm = mirror->hmm;
	unsigned long flags;

	range->valid = false;
	range->hmm = NULL;

	if ((start & mask) || (end & mask))
		return -EINVAL;
	if (start >= end)
		return -EINVAL;

	range->page_shift = page_shift;
	range->start = start;
	range->end = end;

	/* Prevent hmm_release() from running while the range is valid */
	if (!mmget_not_zero(hmm->mm))
		return -EFAULT;

	/* Initialize range to track CPU page table updates. */
	spin_lock_irqsave(&hmm->ranges_lock, flags);

	range->hmm = hmm;
	kref_get(&hmm->kref);
	list_add(&range->list, &hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!hmm->notifiers)
		range->valid = true;
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);

/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	struct hmm *hmm = range->hmm;
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	list_del_init(&range->list);
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	/* Drop reference taken by hmm_range_register() */
	mmput(hmm->mm);
	hmm_put(hmm);

	/*
	 * The range is now invalid and the ref on the hmm is dropped, so
	 * poison the pointer. Leave other fields in place, for the caller's
	 * use.
	 */
	range->valid = false;
	memset(&range->hmm, POISON_INUSE, sizeof(range->hmm));
}
EXPORT_SYMBOL(hmm_range_unregister);

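/*
 * Illustrative sketch (not part of this file) of the register/unregister
 * lifecycle, assuming the driver already holds an hmm_mirror and that it
 * uses hmm_range_wait_until_valid() and HMM_RANGE_DEFAULT_TIMEOUT from
 * include/linux/hmm.h to wait out concurrent invalidations. The flag and
 * value tables named dummy_* are hypothetical:
 *
 *	range->pfns = pfns;
 *	range->flags = dummy_hmm_pfn_flags;	// hypothetical flag table
 *	range->values = dummy_hmm_pfn_values;	// hypothetical value table
 *	ret = hmm_range_register(range, mirror, start, end, PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);
 *	// ... snapshot or fault, then consume range->pfns[] ...
 *	hmm_range_unregister(range);
 */
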
/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * @range: range
 * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          permission (for instance asking for write and range is read only),
 *          -EBUSY if you need to retry, -EFAULT invalid (ie either no valid
 *          vma or it is illegal to access that range), number of valid pages
 *          in range->pfns[] (from range start address).
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See include/linux/hmm.h for an
 * example of how to use it.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;

	lockdep_assert_held(&hmm->mm->mmap_sem);
	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EBUSY;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = false;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		walk_page_range(start, end, &mm_walk);
		start = end;
	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);

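/*
 * Illustrative sketch (not part of this file), adapted from the example
 * in include/linux/hmm.h: retry the snapshot while invalidations race
 * with us, and re-check range->valid under the driver's own update lock
 * before consuming the result. driver_lock_and_check() is hypothetical:
 *
 * again:
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_snapshot(range);
 *	if (ret < 0) {
 *		up_read(&mm->mmap_sem);
 *		if (ret == -EBUSY) {
 *			hmm_range_wait_until_valid(range,
 *						   HMM_RANGE_DEFAULT_TIMEOUT);
 *			goto again;
 *		}
 *		return ret;
 *	}
 *	if (!driver_lock_and_check(range)) {	// range->valid went false
 *		up_read(&mm->mmap_sem);
 *		goto again;
 *	}
 *	// ... mirror range->pfns[] into the device page table ...
 */
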
/*
 * hmm_range_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *          mmap_sem)
 * Return: number of valid pages in range->pfns[] (from range start
 *          address). This may be zero. If the return value is negative,
 *          then one of the following values may be returned:
 *
 *           -EINVAL: invalid arguments or mm or virtual address is in an
 *                    invalid vma (for instance device file vma).
 *           -ENOMEM: Out of memory.
 *           -EPERM:  Invalid permission (for instance asking for write and
 *                    range is read only).
 *           -EAGAIN: If you need to retry and mmap_sem was dropped. This can
 *                    only happen if block argument is false.
 *           -EBUSY:  If the range is being invalidated and you should wait
 *                    for invalidation to finish.
 *           -EFAULT: Invalid (ie either no valid vma or it is illegal to
 *                    access that range), number of valid pages in
 *                    range->pfns[] (from range start address).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	lockdep_assert_held(&hmm->mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EBUSY;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = true;
		hmm_vma_walk.block = block;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				       hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);

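/*
 * Illustrative sketch (not part of this file): faulting a range follows
 * the same retry pattern as hmm_range_snapshot(), with faulting enabled
 * through this entry point. default_flags here requests read access for
 * every page in the range (see hmm_pte_need_fault() above):
 *
 *	range->default_flags = range->flags[HMM_PFN_VALID];
 *	range->pfn_flags_mask = 0;
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(range, true);
 *	up_read(&mm->mmap_sem);
 *	if (ret == -EBUSY) {
 *		// range was invalidated concurrently: wait until it is
 *		// valid again and retry
 *	}
 */
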
/**
 * hmm_range_dma_map() - hmm_range_fault() and dma map page all in one.
 * @range: range being faulted
 * @device: device to dma map page against
 * @daddrs: dma address of mapped pages
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *          mmap_sem)
 * Return: number of pages mapped on success, -EAGAIN if mmap_sem has been
 *          dropped and you need to try again, some other error value otherwise
 *
 * Note same usage pattern as hmm_range_fault().
 */
long hmm_range_dma_map(struct hmm_range *range,
		       struct device *device,
		       dma_addr_t *daddrs,
		       bool block)
{
	unsigned long i, npages, mapped;
	long ret;

	ret = hmm_range_fault(range, block);
	if (ret <= 0)
		return ret ? ret : -EBUSY;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0, mapped = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		/*
		 * FIXME need to update DMA API to provide invalid DMA address
		 * value instead of a function to test dma address value. This
		 * would remove a lot of dumb code duplicated across many
		 * architectures.
		 *
		 * For now setting it to 0 here is good enough as the pfns[]
		 * value is what is used to check what is valid and what isn't.
		 */
		daddrs[i] = 0;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* Check if range is being invalidated */
		if (!range->valid) {
			ret = -EBUSY;
			goto unmap;
		}

		/* If it is read and write then map bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(device, daddrs[i])) {
			ret = -EFAULT;
			goto unmap;
		}

		mapped++;
	}

	return mapped;

unmap:
	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (dma_mapping_error(device, daddrs[i]))
			continue;

		/* If it is read and write then it was mapped bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		mapped--;
	}

	return ret;
}
EXPORT_SYMBOL(hmm_range_dma_map);

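/*
 * Illustrative sketch (not part of this file): pairing the dma map and
 * unmap helpers. daddrs must hold one dma_addr_t per page in the range;
 * vma may be NULL for hmm_range_dma_unmap() since it is unused there:
 *
 *	npages = (range->end - range->start) >> PAGE_SHIFT;
 *	daddrs = kcalloc(npages, sizeof(*daddrs), GFP_KERNEL);
 *	down_read(&mm->mmap_sem);
 *	mapped = hmm_range_dma_map(range, device, daddrs, true);
 *	up_read(&mm->mmap_sem);
 *	if (mapped > 0) {
 *		// ... device reads/writes through daddrs[] ...
 *		hmm_range_dma_unmap(range, NULL, device, daddrs, true);
 *	}
 *	kfree(daddrs);
 */
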
/**
 * hmm_range_dma_unmap() - unmap a range that was mapped with hmm_range_dma_map()
 * @range: range being unmapped
 * @vma: the vma against which the range was mapped (optional)
 * @device: device against which dma map was done
 * @daddrs: dma address of mapped pages
 * @dirty: dirty page if it had the write flag set
 * Return: number of pages unmapped on success, -EINVAL otherwise
 *
 * Note that caller MUST abide by mmu notifier or use HMM mirror and abide
 * by the sync_cpu_device_pagetables() callback so that it is safe here to
 * call set_page_dirty(). Caller must also take appropriate locks to prevent
 * a concurrent mmu notifier or sync_cpu_device_pagetables() from making
 * progress.
 */
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct vm_area_struct *vma,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty)
{
	unsigned long i, npages;
	long cpages = 0;

	/* Sanity check. */
	if (range->end <= range->start)
		return -EINVAL;
	if (!daddrs)
		return -EINVAL;
	if (!range->pfns)
		return -EINVAL;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* If it is read and write then it was mapped bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
			dir = DMA_BIDIRECTIONAL;

			/*
			 * See comments in function description on why it is
			 * safe here to call set_page_dirty()
			 */
			if (dirty)
				set_page_dirty(page);
		}

		/* Unmap and clear pfns/dma address */
		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		range->pfns[i] = range->values[HMM_PFN_NONE];
		/* FIXME see comments in hmm_range_dma_map() */
		daddrs[i] = 0;
		cpages++;
	}

	return cpages;
}
EXPORT_SYMBOL(hmm_range_dma_unmap);