/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE	(1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);

	if (hmm && kref_get_unless_zero(&hmm->kref))
		return hmm;

	return NULL;
}
/**
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: returns an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then it gets a reference on it and returns it. Otherwise
 * it allocates an HMM struct, initializes it, associates it with the mm and
 * returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	bool cleanup = false;

	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	mutex_init(&hmm->lock);
	kref_init(&hmm->kref);
	hmm->notifiers = 0;
	hmm->dead = false;
	hmm->mm = mm;

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup)
		goto error;

	/*
	 * We should only get here if we hold the mmap_sem in write mode, i.e.
	 * on registration of the first mirror through hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
		goto error_mm;

	return hmm;

error_mm:
	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);
error:
	kfree(hmm);
	return NULL;
}
static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);
	struct mm_struct *mm = hmm->mm;

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	kfree(hmm);
}

static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}
void hmm_mm_destroy(struct mm_struct *mm)
{
	struct hmm *hmm;

	spin_lock(&mm->page_table_lock);
	hmm = mm_get_hmm(mm);
	mm->hmm = NULL;
	if (hmm) {
		hmm->mm = NULL;
		hmm->dead = true;
		spin_unlock(&mm->page_table_lock);
		hmm_put(hmm);
		return;
	}

	spin_unlock(&mm->page_table_lock);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	/* Report this HMM as dying. */
	hmm->dead = true;

	/* Wake up everyone waiting on any range. */
	mutex_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		range->valid = false;
	}
	wake_up_all(&hmm->wq);
	mutex_unlock(&hmm->lock);

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so the callback can wait on any
			 * pending work that might itself trigger an
			 * mmu_notifier callback and thus would deadlock with
			 * us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}
static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);
	struct hmm_mirror *mirror;
	struct hmm_update update;
	struct hmm_range *range;
	int ret = 0;

	VM_BUG_ON(!hmm);

	update.start = nrange->start;
	update.end = nrange->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = nrange->blockable;

	if (nrange->blockable)
		mutex_lock(&hmm->lock);
	else if (!mutex_trylock(&hmm->lock)) {
		ret = -EAGAIN;
		goto out;
	}
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (update.end < range->start || update.start >= range->end)
			continue;

		range->valid = false;
	}
	mutex_unlock(&hmm->lock);

	if (nrange->blockable)
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int ret;

		ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
		if (!update.blockable && ret == -EAGAIN) {
			up_read(&hmm->mirrors_sem);
			ret = -EAGAIN;
			goto out;
		}
	}
	up_read(&hmm->mirrors_sem);

out:
	hmm_put(hmm);
	return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);

	VM_BUG_ON(!hmm);

	mutex_lock(&hmm->lock);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	mutex_unlock(&hmm->lock);

	hmm_put(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};
/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_get_or_create(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
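/*
 * Example (editorial illustration, not part of the original file): a minimal
 * sketch of how a driver might register a mirror. The my_mirror type and the
 * callback bodies are assumptions made for the sake of the example; only
 * hmm_mirror_register() and the hmm_mirror_ops callbacks shown here come from
 * this API.
 *
 *	struct my_mirror {
 *		struct hmm_mirror mirror;
 *	};
 *
 *	static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *			const struct hmm_update *update)
 *	{
 *		// Invalidate the device page table for the virtual address
 *		// range [update->start, update->end). May only fail with
 *		// -EAGAIN when update->blockable is false.
 *		return 0;
 *	}
 *
 *	static void my_release(struct hmm_mirror *mirror)
 *	{
 *		// The mm is going away; stop using it from the device.
 *	}
 *
 *	static const struct hmm_mirror_ops my_mirror_ops = {
 *		.sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *		.release = my_release,
 *	};
 *
 *	static int my_register(struct my_mirror *m, struct mm_struct *mm)
 *	{
 *		int ret;
 *
 *		m->mirror.ops = &my_mirror_ops;
 *		down_write(&mm->mmap_sem);
 *		ret = hmm_mirror_register(&m->mirror, mm);
 *		up_write(&mm->mmap_sem);
 *		return ret;
 *	}
 */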

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = READ_ONCE(mirror->hmm);

	if (hmm == NULL)
		return;

	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	/* To protect us against double unregister ... */
	mirror->hmm = NULL;
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EAGAIN;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}
static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}
/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i, page_size;

	hmm_vma_walk->last = addr;
	page_size = hmm_range_page_size(range);
	i = (addr - range->start) >> range->page_shift;

	for (; addr < end; addr += page_size, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}
static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (!hmm_vma_walk->fault)
		return;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the range. The API can be used in two
	 * fashions. In the first one, the HMM user coalesces multiple page
	 * faults into one request and sets flags per pfn for those faults.
	 * In the second one, the HMM user wants to pre-fault a range with
	 * specific flags. For the latter it is a waste to have the user
	 * pre-fill the pfn array with a default flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If the CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;
	return 0;
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry: ignore migration, use the
		 * device entry and report anything else as an error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	*pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here; even if some other
		 * thread is splitting the huge pmd we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long addr = start, i, pfn, mask, size, pfn_inc;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	size = 1UL << huge_page_shift(h);
	mask = size - 1;
	if (range->page_shift != PAGE_SHIFT) {
		/* Make sure we are looking at a full page. */
		if (start & mask)
			return -EINVAL;
		if (end < (start + size))
			return -EINVAL;
		pfn_inc = size >> PAGE_SHIFT;
	} else {
		pfn_inc = 1;
		size = PAGE_SIZE;
	}

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> range->page_shift;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
	for (; addr < end; addr += size, i++, pfn += pfn_inc)
		range->pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
#else /* CONFIG_HUGETLB_PAGE */
	return -EINVAL;
#endif
}
static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

static void hmm_pfns_special(struct hmm_range *range)
{
	unsigned long addr = range->start, i = 0;

	for (; addr < range->end; addr += PAGE_SIZE, i++)
		range->pfns[i] = range->values[HMM_PFN_SPECIAL];
}
/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mm: the mm struct for the range of virtual addresses
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table; see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range,
		       struct mm_struct *mm,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift)
{
	unsigned long mask = ((1UL << page_shift) - 1UL);

	range->valid = false;
	range->hmm = NULL;

	if ((start & mask) || (end & mask))
		return -EINVAL;
	if (start >= end)
		return -EINVAL;

	range->page_shift = page_shift;
	range->start = start;
	range->end = end;

	range->hmm = hmm_get_or_create(mm);
	if (!range->hmm)
		return -EFAULT;

	/* Check if hmm_mm_destroy() was called. */
	if (range->hmm->mm == NULL || range->hmm->dead) {
		hmm_put(range->hmm);
		return -EFAULT;
	}

	/* Initialize range to track CPU page table updates. */
	mutex_lock(&range->hmm->lock);

	list_add_rcu(&range->list, &range->hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!range->hmm->notifiers)
		range->valid = true;
	mutex_unlock(&range->hmm->lock);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);
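/*
 * Example (editorial illustration, not part of the original file): a minimal
 * sketch of registering a range with hmm_range_register() and pairing it with
 * hmm_range_wait_until_valid()/hmm_range_unregister() from hmm.h. It assumes
 * the caller has already filled in range->flags, range->values,
 * range->default_flags and range->pfn_flags_mask; the pfns buffer handling
 * and the timeout are assumptions made for the example.
 *
 *	int my_track_range(struct hmm_range *range, struct mm_struct *mm,
 *			   unsigned long start, unsigned long end)
 *	{
 *		int ret;
 *
 *		range->pfns = kcalloc((end - start) >> PAGE_SHIFT,
 *				      sizeof(*range->pfns), GFP_KERNEL);
 *		if (!range->pfns)
 *			return -ENOMEM;
 *
 *		ret = hmm_range_register(range, mm, start, end, PAGE_SHIFT);
 *		if (ret) {
 *			kfree(range->pfns);
 *			return ret;
 *		}
 *
 *		// Wait for any concurrent CPU page table invalidation to
 *		// finish before trusting snapshot or fault results.
 *		if (!hmm_range_wait_until_valid(range,
 *				HMM_RANGE_DEFAULT_TIMEOUT)) {
 *			hmm_range_unregister(range);
 *			kfree(range->pfns);
 *			return -EBUSY;
 *		}
 *		return 0;
 *	}
 */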

/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	/* Sanity check: this really should not happen. */
	if (range->hmm == NULL || range->end <= range->start)
		return;

	mutex_lock(&range->hmm->lock);
	list_del_rcu(&range->list);
	mutex_unlock(&range->hmm->lock);

	/* Drop reference taken by hmm_range_register() */
	range->valid = false;
	hmm_put(range->hmm);
	range->hmm = NULL;
}
EXPORT_SYMBOL(hmm_range_unregister);
/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * @range: range
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          permission (for instance asking for write and range is read only),
 *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
 *          vma or it is illegal to access that range), number of valid pages
 *          in range->pfns[] (from range start address).
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See include/linux/hmm.h for an
 * example of how to use it.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EAGAIN;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		/* FIXME support dax */
		if (vma_is_dax(vma)) {
			hmm_pfns_special(range);
			return -EINVAL;
		}

		if (is_vm_hugetlb_page(vma)) {
			struct hstate *h = hstate_vma(vma);

			if (huge_page_shift(h) != range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = false;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		walk_page_range(start, end, &mm_walk);
		start = end;
	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);
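/*
 * Example (editorial illustration, not part of the original file): the usual
 * retry loop around hmm_range_snapshot(), assuming the range was registered
 * as in the sketch above and that the caller takes mmap_sem in read mode.
 *
 *	long my_snapshot(struct hmm_range *range, struct mm_struct *mm)
 *	{
 *		long ret;
 *
 *		down_read(&mm->mmap_sem);
 *	again:
 *		ret = hmm_range_snapshot(range);
 *		if (ret == -EAGAIN) {
 *			// The range was invalidated concurrently; wait for it
 *			// to become valid again and retry.
 *			if (hmm_range_wait_until_valid(range,
 *					HMM_RANGE_DEFAULT_TIMEOUT))
 *				goto again;
 *		}
 *		up_read(&mm->mmap_sem);
 *		return ret;
 *	}
 */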

/*
 * hmm_range_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Returns: number of valid pages in range->pfns[] (from range start
 *          address). This may be zero. If the return value is negative,
 *          then one of the following values may be returned:
 *
 *           -EINVAL: invalid arguments or mm or virtual address is in an
 *                    invalid vma (for instance device file vma).
 *           -ENOMEM: Out of memory.
 *           -EPERM:  Invalid permission (for instance asking for write and
 *                    range is read only).
 *           -EAGAIN: If you need to retry and mmap_sem was dropped. This can
 *                    only happen if the block argument is false.
 *           -EBUSY:  If the range is being invalidated and you should wait
 *                    for invalidation to finish.
 *           -EFAULT: Invalid (ie either no valid vma or it is illegal to
 *                    access that range), number of valid pages in
 *                    range->pfns[] (from range start address).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid) {
			up_read(&hmm->mm->mmap_sem);
			return -EAGAIN;
		}

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		/* FIXME support dax */
		if (vma_is_dax(vma)) {
			hmm_pfns_special(range);
			return -EINVAL;
		}

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = true;
		hmm_vma_walk.block = block;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				       hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
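/*
 * Example (editorial illustration, not part of the original file): faulting a
 * registered range and mirroring the result into a device page table. The
 * my_device_populate() helper is hypothetical; the error handling follows the
 * return values of hmm_range_fault() in this file.
 *
 *	long my_fault(struct hmm_range *range, struct mm_struct *mm)
 *	{
 *		long ret;
 *
 *		down_read(&mm->mmap_sem);
 *	again:
 *		if (!hmm_range_wait_until_valid(range,
 *				HMM_RANGE_DEFAULT_TIMEOUT)) {
 *			up_read(&mm->mmap_sem);
 *			return -EBUSY;
 *		}
 *		ret = hmm_range_fault(range, true);
 *		if (ret == -EAGAIN) {
 *			// hmm_range_fault() dropped mmap_sem before returning.
 *			down_read(&mm->mmap_sem);
 *			goto again;
 *		}
 *		if (ret == -EBUSY)
 *			goto again;
 *		if (ret >= 0) {
 *			// Under the driver's own lock, check range->valid one
 *			// last time before committing range->pfns[] to the
 *			// device page table.
 *			my_device_populate(range);
 *		}
 *		up_read(&mm->mmap_sem);
 *		return ret;
 *	}
 */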
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);

static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	wait_for_completion(&devmem->completion);
	percpu_ref_exit(ref);
}

static void hmm_devmem_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}

static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
				   unsigned long addr,
				   const struct page *page,
				   unsigned int flags,
				   pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	page->mapping = NULL;

	devmem->ops->free(devmem, page);
}
/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization
 * and is then responsible for memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	void *result;
	int ret;

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about ioport_resource resource ?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource)
			return ERR_PTR(-ENOMEM);
		break;
	}
	if (!devmem->resource)
		return ERR_PTR(-ERANGE);

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add);
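/*
 * Example (editorial illustration, not part of the original file): a minimal
 * sketch of wiring up device private memory from a driver probe routine. The
 * my_devmem_fault()/my_devmem_free() callbacks and the memory size are
 * assumptions made for the sake of the example.
 *
 *	static const struct hmm_devmem_ops my_devmem_ops = {
 *		.free = my_devmem_free,
 *		.fault = my_devmem_fault,
 *	};
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		struct hmm_devmem *devmem;
 *
 *		devmem = hmm_devmem_add(&my_devmem_ops, dev, SZ_1G);
 *		if (IS_ERR(devmem))
 *			return PTR_ERR(devmem);
 *
 *		// Device pages span devmem->pfn_first .. devmem->pfn_last - 1;
 *		// the driver hands them out when migrating memory to the
 *		// device and gets my_devmem_fault() when the CPU touches one.
 *		return 0;
 *	}
 */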

struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	void *result;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);

/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper
 * and it is not needed in order to use any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}

struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);
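/*
 * Example (editorial illustration, not part of the original file): a driver
 * that wants one fake device covering several pieces of device memory can
 * pair hmm_device_new() with hmm_device_put(); my_driver_data is a
 * hypothetical private structure.
 *
 *	struct hmm_device *hdev;
 *
 *	hdev = hmm_device_new(my_driver_data);
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	// ... use &hdev->device as the owner for hmm_devmem_add() calls ...
 *	hmm_device_put(hdev);
 */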

void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);

static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */