/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>


#ifdef CONFIG_HMM
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @lock: lock protecting ranges list
 * @sequence: we track updates to the CPU page table with a sequence number
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 */
struct hmm {
        struct mm_struct        *mm;
        spinlock_t              lock;
        atomic_t                sequence;
        struct list_head        ranges;
        struct list_head        mirrors;
        struct mmu_notifier     mmu_notifier;
        struct rw_semaphore     mirrors_sem;
};

/*
 * hmm_register - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 *
 * This is not intended to be used directly by device drivers. It allocates an
 * HMM struct if mm does not have one, and initializes it.
 */
static struct hmm *hmm_register(struct mm_struct *mm)
{
        struct hmm *hmm = READ_ONCE(mm->hmm);
        bool cleanup = false;

        /*
         * The hmm struct can only be freed once the mm_struct goes away,
         * hence it is safe to simply return the existing struct if there
         * already is one.
         */
        if (hmm)
                return hmm;

        hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
        if (!hmm)
                return NULL;
        INIT_LIST_HEAD(&hmm->mirrors);
        init_rwsem(&hmm->mirrors_sem);
        atomic_set(&hmm->sequence, 0);
        hmm->mmu_notifier.ops = NULL;
        INIT_LIST_HEAD(&hmm->ranges);
        spin_lock_init(&hmm->lock);
        hmm->mm = mm;

        /*
         * We should only get here if we hold the mmap_sem in write mode,
         * i.e. on registration of the first mirror through
         * hmm_mirror_register().
         */
        hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
        if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
                kfree(hmm);
                return NULL;
        }

        spin_lock(&mm->page_table_lock);
        if (!mm->hmm)
                mm->hmm = hmm;
        else
                cleanup = true;
        spin_unlock(&mm->page_table_lock);

        if (cleanup) {
                mmu_notifier_unregister(&hmm->mmu_notifier, mm);
                kfree(hmm);
        }

        return mm->hmm;
}

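/*
 * hmm_mm_destroy() - free the struct hmm attached to an mm (HMM internal)
 *
 * @mm: the mm_struct being torn down
 *
 * Called from the mm teardown path, not by device drivers: it simply frees
 * the struct hmm allocated by hmm_register(), if any.
 */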
void hmm_mm_destroy(struct mm_struct *mm)
{
        kfree(mm->hmm);
}
#endif /* CONFIG_HMM */

#if IS_ENABLED(CONFIG_HMM_MIRROR)
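/*
 * hmm_invalidate_range() - propagate a CPU page table update to mirrors
 *
 * Marks every snapshot range overlapping [start, end) as invalid and clears
 * the affected pfn entries, then calls each registered mirror's
 * sync_cpu_device_pagetables() callback so the device page table can be
 * updated accordingly.
 */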
static void hmm_invalidate_range(struct hmm *hmm,
                                 enum hmm_update_type action,
                                 unsigned long start,
                                 unsigned long end)
{
        struct hmm_mirror *mirror;
        struct hmm_range *range;

        spin_lock(&hmm->lock);
        list_for_each_entry(range, &hmm->ranges, list) {
                unsigned long addr, idx, npages;

                if (end < range->start || start >= range->end)
                        continue;

                range->valid = false;
                addr = max(start, range->start);
                idx = (addr - range->start) >> PAGE_SHIFT;
                npages = (min(range->end, end) - addr) >> PAGE_SHIFT;
                memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
        }
        spin_unlock(&hmm->lock);

        down_read(&hmm->mirrors_sem);
        list_for_each_entry(mirror, &hmm->mirrors, list)
                mirror->ops->sync_cpu_device_pagetables(mirror, action,
                                                        start, end);
        up_read(&hmm->mirrors_sem);
}

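/*
 * mmu_notifier callbacks: invalidate_range_start() bumps the sequence
 * counter to record that a CPU page table update is in flight, and
 * invalidate_range_end() performs the actual invalidation of the affected
 * snapshot ranges and device mirrors.
 */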
static void hmm_invalidate_range_start(struct mmu_notifier *mn,
                                       struct mm_struct *mm,
                                       unsigned long start,
                                       unsigned long end)
{
        struct hmm *hmm = mm->hmm;

        VM_BUG_ON(!hmm);

        atomic_inc(&hmm->sequence);
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
                                     struct mm_struct *mm,
                                     unsigned long start,
                                     unsigned long end)
{
        struct hmm *hmm = mm->hmm;

        VM_BUG_ON(!hmm);

        hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
        .invalidate_range_start = hmm_invalidate_range_start,
        .invalidate_range_end   = hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
        /* Sanity check */
        if (!mm || !mirror || !mirror->ops)
                return -EINVAL;

        mirror->hmm = hmm_register(mm);
        if (!mirror->hmm)
                return -ENOMEM;

        down_write(&mirror->hmm->mirrors_sem);
        list_add(&mirror->list, &mirror->hmm->mirrors);
        up_write(&mirror->hmm->mirrors_sem);

        return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
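
/*
 * Minimal usage sketch (the driver_* names below are hypothetical driver-side
 * helpers, not part of HMM):
 *
 *      static const struct hmm_mirror_ops driver_mirror_ops = {
 *              .sync_cpu_device_pagetables = driver_sync_cpu_device_pagetables,
 *      };
 *
 *      mirror->ops = &driver_mirror_ops;
 *      down_write(&mm->mmap_sem);
 *      ret = hmm_mirror_register(mirror, mm);
 *      up_write(&mm->mmap_sem);
 */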

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
        struct hmm *hmm = mirror->hmm;

        down_write(&hmm->mirrors_sem);
        list_del(&mirror->list);
        up_write(&hmm->mirrors_sem);
}
EXPORT_SYMBOL(hmm_mirror_unregister);
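/*
 * struct hmm_vma_walk - state shared by the page table walk callbacks
 *
 * @range: range whose pfns array is being filled
 * @last: last address handled, used to restart the walk after -EAGAIN
 * @fault: true when walking on behalf of hmm_vma_fault() (fault missing pages)
 * @block: allow the fault handler to block (do not drop mmap_sem)
 * @write: fault pages for write access
 */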
struct hmm_vma_walk {
        struct hmm_range        *range;
        unsigned long           last;
        bool                    fault;
        bool                    block;
        bool                    write;
};

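/*
 * hmm_vma_do_fault() - fault in a single address on behalf of the device
 *
 * Calls handle_mm_fault() with flags derived from the walk state and returns
 * -EAGAIN when the fault was handled and the caller should re-walk the range,
 * -EBUSY when the mmap_sem was dropped, or -EFAULT on error (the pfn entry is
 * then set to HMM_PFN_ERROR).
 */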
static int hmm_vma_do_fault(struct mm_walk *walk,
                            unsigned long addr,
                            hmm_pfn_t *pfn)
{
        unsigned int flags = FAULT_FLAG_REMOTE;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct vm_area_struct *vma = walk->vma;
        int r;

        flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
        flags |= hmm_vma_walk->write ? FAULT_FLAG_WRITE : 0;
        r = handle_mm_fault(vma, addr, flags);
        if (r & VM_FAULT_RETRY)
                return -EBUSY;
        if (r & VM_FAULT_ERROR) {
                *pfn = HMM_PFN_ERROR;
                return -EFAULT;
        }

        return -EAGAIN;
}

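/* Mark every pfn entry in [addr, end) as backed by a special mapping. */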
static void hmm_pfns_special(hmm_pfn_t *pfns,
                             unsigned long addr,
                             unsigned long end)
{
        for (; addr < end; addr += PAGE_SIZE, pfns++)
                *pfns = HMM_PFN_SPECIAL;
}

static int hmm_pfns_bad(unsigned long addr,
                        unsigned long end,
                        struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        hmm_pfn_t *pfns = range->pfns;
        unsigned long i;

        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, i++)
                pfns[i] = HMM_PFN_ERROR;

        return 0;
}

static void hmm_pfns_clear(hmm_pfn_t *pfns,
                           unsigned long addr,
                           unsigned long end)
{
        for (; addr < end; addr += PAGE_SIZE, pfns++)
                *pfns = 0;
}

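/*
 * hmm_vma_walk_hole() - handle a range with no page table (a hole)
 *
 * Marks the pfns as HMM_PFN_EMPTY and, when faulting, asks hmm_vma_do_fault()
 * to populate each page; returns -EAGAIN so the caller re-walks the range
 * once the faults have been serviced.
 */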
static int hmm_vma_walk_hole(unsigned long addr,
                             unsigned long end,
                             struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        hmm_pfn_t *pfns = range->pfns;
        unsigned long i;

        hmm_vma_walk->last = addr;
        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, i++) {
                pfns[i] = HMM_PFN_EMPTY;
                if (hmm_vma_walk->fault) {
                        int ret;

                        ret = hmm_vma_do_fault(walk, addr, &pfns[i]);
                        if (ret != -EAGAIN)
                                return ret;
                }
        }

        return hmm_vma_walk->fault ? -EAGAIN : 0;
}

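/*
 * hmm_vma_walk_clear() - clear the pfns for a range and optionally fault it
 *
 * Used when the current page table entries cannot be mirrored as-is (e.g. a
 * write fault on a read-only entry): the snapshot entries are zeroed and,
 * when faulting, every page in the range is faulted in.
 */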
static int hmm_vma_walk_clear(unsigned long addr,
                              unsigned long end,
                              struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        hmm_pfn_t *pfns = range->pfns;
        unsigned long i;

        hmm_vma_walk->last = addr;
        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, i++) {
                pfns[i] = 0;
                if (hmm_vma_walk->fault) {
                        int ret;

                        ret = hmm_vma_do_fault(walk, addr, &pfns[i]);
                        if (ret != -EAGAIN)
                                return ret;
                }
        }

        return hmm_vma_walk->fault ? -EAGAIN : 0;
}

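/*
 * hmm_vma_walk_pmd() - fill the pfns array for one pmd worth of addresses
 *
 * Handles holes, transparent huge / device pmds and regular ptes, translating
 * each CPU page table entry into an hmm_pfn_t. When the walk is a fault and
 * an entry is missing or lacks the required permission, the range is handed
 * to hmm_vma_walk_clear() to fault it in.
 */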
static int hmm_vma_walk_pmd(pmd_t *pmdp,
                            unsigned long start,
                            unsigned long end,
                            struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        hmm_pfn_t *pfns = range->pfns;
        unsigned long addr = start, i;
        bool write_fault;
        hmm_pfn_t flag;
        pte_t *ptep;

        i = (addr - range->start) >> PAGE_SHIFT;
        flag = vma->vm_flags & VM_READ ? HMM_PFN_READ : 0;
        write_fault = hmm_vma_walk->fault && hmm_vma_walk->write;

again:
        if (pmd_none(*pmdp))
                return hmm_vma_walk_hole(start, end, walk);

        if (pmd_huge(*pmdp) && vma->vm_flags & VM_HUGETLB)
                return hmm_pfns_bad(start, end, walk);

        if (pmd_devmap(*pmdp) || pmd_trans_huge(*pmdp)) {
                unsigned long pfn;
                pmd_t pmd;

                /*
                 * No need to take the pmd lock here: even if some other
                 * thread is splitting the huge pmd we will get that event
                 * through the mmu_notifier callback.
                 *
                 * So just read the pmd value, check again that it is a
                 * transparent huge or device mapping, and compute the
                 * corresponding pfn values.
                 */
                pmd = pmd_read_atomic(pmdp);
                barrier();
                if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
                        goto again;
                if (pmd_protnone(pmd))
                        return hmm_vma_walk_clear(start, end, walk);

                if (write_fault && !pmd_write(pmd))
                        return hmm_vma_walk_clear(start, end, walk);

                pfn = pmd_pfn(pmd) + pte_index(addr);
                flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
                for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
                        pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag;
                return 0;
        }

        if (pmd_bad(*pmdp))
                return hmm_pfns_bad(start, end, walk);

        ptep = pte_offset_map(pmdp, addr);
        for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
                pte_t pte = *ptep;

                pfns[i] = 0;

                if (pte_none(pte)) {
                        pfns[i] = HMM_PFN_EMPTY;
                        if (hmm_vma_walk->fault)
                                goto fault;
                        continue;
                }

                if (!pte_present(pte)) {
                        swp_entry_t entry = pte_to_swp_entry(pte);

                        if (!non_swap_entry(entry)) {
                                if (hmm_vma_walk->fault)
                                        goto fault;
                                continue;
                        }

                        /*
                         * This is a special swap entry: ignore migration and
                         * report anything else as an error.
                         */
                        if (is_migration_entry(entry)) {
                                if (hmm_vma_walk->fault) {
                                        pte_unmap(ptep);
                                        hmm_vma_walk->last = addr;
                                        migration_entry_wait(vma->vm_mm,
                                                             pmdp, addr);
                                        return -EAGAIN;
                                }
                                continue;
                        } else {
                                /* Report error for everything else */
                                pfns[i] = HMM_PFN_ERROR;
                        }
                        continue;
                }

                if (write_fault && !pte_write(pte))
                        goto fault;

                pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte)) | flag;
                pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0;
                continue;

fault:
                pte_unmap(ptep);
                /* Fault all pages in range */
                return hmm_vma_walk_clear(start, end, walk);
        }
        pte_unmap(ptep - 1);

        return 0;
}

/*
 * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
 * @vma: virtual memory area containing the virtual address range
 * @range: used to track snapshot validity
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @pfns: array of hmm_pfn_t, provided by the caller, filled in by the function
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, 0 success
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See hmm_vma_range_done() for further
 * information.
 *
 * The range struct is initialized here. It tracks the CPU page table, but only
 * if the function returns success (0), in which case the caller must then call
 * hmm_vma_range_done() to stop CPU page table update tracking on this range.
 *
 * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
 * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !
 */
int hmm_vma_get_pfns(struct vm_area_struct *vma,
                     struct hmm_range *range,
                     unsigned long start,
                     unsigned long end,
                     hmm_pfn_t *pfns)
{
        struct hmm_vma_walk hmm_vma_walk;
        struct mm_walk mm_walk;
        struct hmm *hmm;

        /* FIXME support hugetlb fs */
        if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
                hmm_pfns_special(pfns, start, end);
                return -EINVAL;
        }

        /* Sanity check, this really should not happen ! */
        if (start < vma->vm_start || start >= vma->vm_end)
                return -EINVAL;
        if (end < vma->vm_start || end > vma->vm_end)
                return -EINVAL;

        hmm = hmm_register(vma->vm_mm);
        if (!hmm)
                return -ENOMEM;
        /* Caller must have registered a mirror, via hmm_mirror_register() ! */
        if (!hmm->mmu_notifier.ops)
                return -EINVAL;

        /* Initialize range to track CPU page table update */
        range->start = start;
        range->pfns = pfns;
        range->end = end;
        spin_lock(&hmm->lock);
        range->valid = true;
        list_add_rcu(&range->list, &hmm->ranges);
        spin_unlock(&hmm->lock);

        hmm_vma_walk.fault = false;
        hmm_vma_walk.range = range;
        mm_walk.private = &hmm_vma_walk;

        mm_walk.vma = vma;
        mm_walk.mm = vma->vm_mm;
        mm_walk.pte_entry = NULL;
        mm_walk.test_walk = NULL;
        mm_walk.hugetlb_entry = NULL;
        mm_walk.pmd_entry = hmm_vma_walk_pmd;
        mm_walk.pte_hole = hmm_vma_walk_hole;

        walk_page_range(start, end, &mm_walk);
        return 0;
}
EXPORT_SYMBOL(hmm_vma_get_pfns);

/*
 * hmm_vma_range_done() - stop tracking changes to CPU page table over a range
 * @vma: virtual memory area containing the virtual address range
 * @range: range being tracked
 * Returns: false if range data has been invalidated, true otherwise
 *
 * The range struct is used to track updates to the CPU page table after a call
 * to either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is
 * done using the data, or wants to lock updates to the data it got from those
 * functions, it must call hmm_vma_range_done(), which will then stop tracking
 * CPU page table updates.
 *
 * Note that the device driver must still implement general CPU page table
 * update tracking either by using hmm_mirror (see hmm_mirror_register()) or by
 * using the mmu_notifier API directly.
 *
 * CPU page table update tracking done through hmm_range is only temporary and
 * to be used while trying to duplicate CPU page table contents for a range of
 * virtual addresses.
 *
 * There are two ways to use this:
 * again:
 *      hmm_vma_get_pfns(vma, range, start, end, pfns); or hmm_vma_fault(...);
 *      trans = device_build_page_table_update_transaction(pfns);
 *      device_page_table_lock();
 *      if (!hmm_vma_range_done(vma, range)) {
 *              device_page_table_unlock();
 *              goto again;
 *      }
 *      device_commit_transaction(trans);
 *      device_page_table_unlock();
 *
 * Or:
 *      hmm_vma_get_pfns(vma, range, start, end, pfns); or hmm_vma_fault(...);
 *      device_page_table_lock();
 *      hmm_vma_range_done(vma, range);
 *      device_update_page_table(pfns);
 *      device_page_table_unlock();
 */
bool hmm_vma_range_done(struct vm_area_struct *vma, struct hmm_range *range)
{
        unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
        struct hmm *hmm;

        if (range->end <= range->start) {
                BUG();
                return false;
        }

        hmm = hmm_register(vma->vm_mm);
        if (!hmm) {
                memset(range->pfns, 0, sizeof(*range->pfns) * npages);
                return false;
        }

        spin_lock(&hmm->lock);
        list_del_rcu(&range->list);
        spin_unlock(&hmm->lock);

        return range->valid;
}
EXPORT_SYMBOL(hmm_vma_range_done);

/*
 * hmm_vma_fault() - try to fault some address in a virtual address range
 * @vma: virtual memory area containing the virtual address range
 * @range: used to track pfns array content validity
 * @start: fault range virtual start address (inclusive)
 * @end: fault range virtual end address (exclusive)
 * @pfns: array of hmm_pfn_t, only entries with the fault flag set are faulted
 * @write: is it a write fault
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Returns: 0 on success, error otherwise (-EAGAIN means mmap_sem has been dropped)
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs.
 *
 * On error, for one virtual address in the range, the function will set the
 * hmm_pfn_t error flag for the corresponding pfn entry.
 *
 * Expected use pattern:
 * retry:
 *      down_read(&mm->mmap_sem);
 *      // Find vma and address device wants to fault, initialize hmm_pfn_t
 *      // array accordingly
 *      ret = hmm_vma_fault(vma, range, start, end, pfns, write, block);
 *      switch (ret) {
 *      case -EAGAIN:
 *              hmm_vma_range_done(vma, range);
 *              // You might want to rate limit or yield to play nicely, you
 *              // may also commit any valid pfn in the array assuming that
 *              // you are getting true from hmm_vma_range_done()
 *              goto retry;
 *      case 0:
 *              break;
 *      default:
 *              // Handle error !
 *              up_read(&mm->mmap_sem);
 *              return;
 *      }
 *      // Take device driver lock that serializes device page table updates
 *      driver_lock_device_page_table_update();
 *      hmm_vma_range_done(vma, range);
 *      // Commit pfns we got from hmm_vma_fault()
 *      driver_unlock_device_page_table_update();
 *      up_read(&mm->mmap_sem);
 *
 * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURNS SUCCESS (0)
 * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION !
 *
 * YOU HAVE BEEN WARNED !
 */
int hmm_vma_fault(struct vm_area_struct *vma,
                  struct hmm_range *range,
                  unsigned long start,
                  unsigned long end,
                  hmm_pfn_t *pfns,
                  bool write,
                  bool block)
{
        struct hmm_vma_walk hmm_vma_walk;
        struct mm_walk mm_walk;
        struct hmm *hmm;
        int ret;

        /* Sanity check, this really should not happen ! */
        if (start < vma->vm_start || start >= vma->vm_end)
                return -EINVAL;
        if (end < vma->vm_start || end > vma->vm_end)
                return -EINVAL;

        hmm = hmm_register(vma->vm_mm);
        if (!hmm) {
                hmm_pfns_clear(pfns, start, end);
                return -ENOMEM;
        }
        /* Caller must have registered a mirror using hmm_mirror_register() */
        if (!hmm->mmu_notifier.ops)
                return -EINVAL;

        /* Initialize range to track CPU page table update */
        range->start = start;
        range->pfns = pfns;
        range->end = end;
        spin_lock(&hmm->lock);
        range->valid = true;
        list_add_rcu(&range->list, &hmm->ranges);
        spin_unlock(&hmm->lock);

        /* FIXME support hugetlb fs */
        if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
                hmm_pfns_special(pfns, start, end);
                return 0;
        }

        hmm_vma_walk.fault = true;
        hmm_vma_walk.write = write;
        hmm_vma_walk.block = block;
        hmm_vma_walk.range = range;
        mm_walk.private = &hmm_vma_walk;
        hmm_vma_walk.last = range->start;

        mm_walk.vma = vma;
        mm_walk.mm = vma->vm_mm;
        mm_walk.pte_entry = NULL;
        mm_walk.test_walk = NULL;
        mm_walk.hugetlb_entry = NULL;
        mm_walk.pmd_entry = hmm_vma_walk_pmd;
        mm_walk.pte_hole = hmm_vma_walk_hole;

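        /*
         * A return of -EAGAIN means the walk faulted a page (or waited on a
         * migration entry) and must be restarted from the last address that
         * was handled.
         */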
        do {
                ret = walk_page_range(start, end, &mm_walk);
                start = hmm_vma_walk.last;
        } while (ret == -EAGAIN);

        if (ret) {
                unsigned long i;

                i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
                hmm_pfns_clear(&pfns[i], hmm_vma_walk.last, end);
                hmm_vma_range_done(vma, range);
        }
        return ret;
}
EXPORT_SYMBOL(hmm_vma_fault);
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */