/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)


/*
 * Device private memory see HMM (Documentation/vm/hmm.txt) or hmm.h
 */
DEFINE_STATIC_KEY_FALSE(device_private_key);
EXPORT_SYMBOL(device_private_key);

#ifdef CONFIG_HMM
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @lock: lock protecting ranges list
 * @sequence: we track updates to the CPU page table with a sequence number
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 */
struct hmm {
	struct mm_struct	*mm;
	spinlock_t		lock;
	atomic_t		sequence;
	struct list_head	ranges;
	struct list_head	mirrors;
	struct mmu_notifier	mmu_notifier;
	struct rw_semaphore	mirrors_sem;
};

/*
 * hmm_register - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 *
 * This is not intended to be used directly by device drivers. It allocates an
 * HMM struct if mm does not have one, and initializes it.
 */
static struct hmm *hmm_register(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);
	bool cleanup = false;

	/*
	 * The hmm struct can only be freed once the mm_struct goes away,
	 * hence we should always have pre-allocated a new hmm struct
	 * above.
	 */
	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	atomic_set(&hmm->sequence, 0);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->lock);
	hmm->mm = mm;

	/*
	 * We should only get here if we hold the mmap_sem in write mode, i.e.
	 * on registration of the first mirror through hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
		kfree(hmm);
		return NULL;
	}

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup) {
		mmu_notifier_unregister(&hmm->mmu_notifier, mm);
		kfree(hmm);
	}

	return mm->hmm;
}

void hmm_mm_destroy(struct mm_struct *mm)
{
	kfree(mm->hmm);
}
#endif /* CONFIG_HMM */

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static void hmm_invalidate_range(struct hmm *hmm,
				 enum hmm_update_type action,
				 unsigned long start,
				 unsigned long end)
{
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	spin_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		unsigned long addr, idx, npages;

		if (end < range->start || start >= range->end)
			continue;

		range->valid = false;
		addr = max(start, range->start);
		idx = (addr - range->start) >> PAGE_SHIFT;
		npages = (min(range->end, end) - addr) >> PAGE_SHIFT;
		memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
	}
	spin_unlock(&hmm->lock);

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list)
		mirror->ops->sync_cpu_device_pagetables(mirror, action,
							start, end);
	up_read(&hmm->mirrors_sem);
}

static void hmm_invalidate_range_start(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long start,
				       unsigned long end)
{
	struct hmm *hmm = mm->hmm;

	VM_BUG_ON(!hmm);

	atomic_inc(&hmm->sequence);
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end)
{
	struct hmm *hmm = mm->hmm;

	VM_BUG_ON(!hmm);

	hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_register(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;

	down_write(&hmm->mirrors_sem);
	list_del(&mirror->list);
	up_write(&hmm->mirrors_sem);
}
EXPORT_SYMBOL(hmm_mirror_unregister);
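/*
 * Example (illustrative sketch, not part of the HMM API): how a hypothetical
 * driver might mirror an address space. The names my_device, my_mirror_ops and
 * my_invalidate_device_range() are assumptions made for illustration; the real
 * pieces are struct hmm_mirror_ops, hmm_mirror_register() and
 * hmm_mirror_unregister().
 *
 *   static void my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *                                             enum hmm_update_type update,
 *                                             unsigned long start,
 *                                             unsigned long end)
 *   {
 *       // Tear down or invalidate the device page table for [start, end)
 *       // before returning, so the device never uses stale CPU mappings.
 *       my_invalidate_device_range(container_of(mirror, struct my_device,
 *                                               mirror), start, end);
 *   }
 *
 *   static const struct hmm_mirror_ops my_mirror_ops = {
 *       .sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *   };
 *
 *   // With current->mm->mmap_sem held in write mode:
 *   ret = hmm_mirror_register(&my_device->mirror, current->mm);
 *   ...
 *   // And on teardown:
 *   hmm_mirror_unregister(&my_device->mirror);
 */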

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
	bool			fault;
	bool			block;
	bool			write;
};

static int hmm_vma_do_fault(struct mm_walk *walk,
			    unsigned long addr,
			    hmm_pfn_t *pfn)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	int r;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= hmm_vma_walk->write ? FAULT_FLAG_WRITE : 0;
	r = handle_mm_fault(vma, addr, flags);
	if (r & VM_FAULT_RETRY)
		return -EBUSY;
	if (r & VM_FAULT_ERROR) {
		*pfn = HMM_PFN_ERROR;
		return -EFAULT;
	}

	return -EAGAIN;
}

static void hmm_pfns_special(hmm_pfn_t *pfns,
			     unsigned long addr,
			     unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = HMM_PFN_SPECIAL;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	/* walk->private points to the hmm_vma_walk, not the range itself */
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	hmm_pfn_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = HMM_PFN_ERROR;

	return 0;
}

static void hmm_pfns_clear(hmm_pfn_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = 0;
}

static int hmm_vma_walk_hole(unsigned long addr,
			     unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	hmm_pfn_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = HMM_PFN_EMPTY;
		if (hmm_vma_walk->fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, &pfns[i]);
			if (ret != -EAGAIN)
				return ret;
		}
	}

	return hmm_vma_walk->fault ? -EAGAIN : 0;
}

static int hmm_vma_walk_clear(unsigned long addr,
			      unsigned long end,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	hmm_pfn_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = 0;
		if (hmm_vma_walk->fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, &pfns[i]);
			if (ret != -EAGAIN)
				return ret;
		}
	}

	return hmm_vma_walk->fault ? -EAGAIN : 0;
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	hmm_pfn_t *pfns = range->pfns;
	unsigned long addr = start, i;
	bool write_fault;
	hmm_pfn_t flag;
	pte_t *ptep;

	i = (addr - range->start) >> PAGE_SHIFT;
	flag = vma->vm_flags & VM_READ ? HMM_PFN_READ : 0;
	write_fault = hmm_vma_walk->fault & hmm_vma_walk->write;

again:
	if (pmd_none(*pmdp))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(*pmdp) && (vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(*pmdp) || pmd_trans_huge(*pmdp)) {
		unsigned long pfn;
		pmd_t pmd;

		/*
		 * No need to take the pmd lock here: even if some other
		 * thread is splitting the huge pmd we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value, check again that it is a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;
		if (pmd_protnone(pmd))
			return hmm_vma_walk_clear(start, end, walk);

		if (write_fault && !pmd_write(pmd))
			return hmm_vma_walk_clear(start, end, walk);

		pfn = pmd_pfn(pmd) + pte_index(addr);
		flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
		for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
			pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag;
		return 0;
	}

	if (pmd_bad(*pmdp))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		pte_t pte = *ptep;

		pfns[i] = 0;

		if (pte_none(pte)) {
			pfns[i] = HMM_PFN_EMPTY;
			if (hmm_vma_walk->fault)
				goto fault;
			continue;
		}

		if (!pte_present(pte)) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (!non_swap_entry(entry)) {
				if (hmm_vma_walk->fault)
					goto fault;
				continue;
			}

			/*
			 * This is a special swap entry, ignore migration, use
			 * device and report anything else as error.
			 */
			if (is_device_private_entry(entry)) {
				pfns[i] = hmm_pfn_t_from_pfn(swp_offset(entry));
				if (is_write_device_private_entry(entry)) {
					pfns[i] |= HMM_PFN_WRITE;
				} else if (write_fault)
					goto fault;
				pfns[i] |= HMM_PFN_DEVICE_UNADDRESSABLE;
				pfns[i] |= flag;
			} else if (is_migration_entry(entry)) {
				if (hmm_vma_walk->fault) {
					pte_unmap(ptep);
					hmm_vma_walk->last = addr;
					migration_entry_wait(vma->vm_mm,
							     pmdp, addr);
					return -EAGAIN;
				}
				continue;
			} else {
				/* Report error for everything else */
				pfns[i] = HMM_PFN_ERROR;
			}
			continue;
		}

		if (write_fault && !pte_write(pte))
			goto fault;

		pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte)) | flag;
		pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0;
		continue;

fault:
		pte_unmap(ptep);
		/* Fault all pages in range */
		return hmm_vma_walk_clear(start, end, walk);
	}
	pte_unmap(ptep - 1);

	return 0;
}

/*
 * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
 * @vma: virtual memory area containing the virtual address range
 * @range: used to track snapshot validity
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @pfns: array of hmm_pfn_t, provided by the caller and filled in by this function
 * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, 0 success
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See hmm_vma_range_done() for further
 * information.
 *
 * The range struct is initialized here. It tracks the CPU page table, but only
 * if the function returns success (0), in which case the caller must then call
 * hmm_vma_range_done() to stop CPU page table update tracking on this range.
 *
 * NOT CALLING hmm_vma_range_done() IF FUNCTION RETURNS 0 WILL LEAD TO SERIOUS
 * MEMORY CORRUPTION ! YOU HAVE BEEN WARNED !
 */
int hmm_vma_get_pfns(struct vm_area_struct *vma,
		     struct hmm_range *range,
		     unsigned long start,
		     unsigned long end,
		     hmm_pfn_t *pfns)
{
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
		hmm_pfns_special(pfns, start, end);
		return -EINVAL;
	}

	/* Sanity check, this really should not happen ! */
	if (start < vma->vm_start || start >= vma->vm_end)
		return -EINVAL;
	if (end < vma->vm_start || end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm)
		return -ENOMEM;
	/* Caller must have registered a mirror, via hmm_mirror_register() ! */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* Initialize range to track CPU page table update */
	range->start = start;
	range->pfns = pfns;
	range->end = end;
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	hmm_vma_walk.fault = false;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	walk_page_range(start, end, &mm_walk);
	return 0;
}
EXPORT_SYMBOL(hmm_vma_get_pfns);
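/*
 * Example (illustrative sketch): one way a driver might consume the snapshot
 * filled in by hmm_vma_get_pfns(). hmm_pfn_t_to_page() and the HMM_PFN_*
 * flags are provided by include/linux/hmm.h; my_device_map_page() is a
 * hypothetical driver helper. The entries should only be committed to the
 * device page table once hmm_vma_range_done() (below) has confirmed that the
 * range was not invalidated while the snapshot was being taken.
 *
 *   npages = (end - start) >> PAGE_SHIFT;
 *   for (i = 0; i < npages; i++) {
 *       struct page *page = hmm_pfn_t_to_page(pfns[i]);
 *
 *       if (!page || (pfns[i] & HMM_PFN_ERROR))
 *           continue;
 *       my_device_map_page(my_device, start + (i << PAGE_SHIFT), page,
 *                          pfns[i] & HMM_PFN_WRITE);
 *   }
 */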

/*
 * hmm_vma_range_done() - stop tracking change to CPU page table over a range
 * @vma: virtual memory area containing the virtual address range
 * @range: range being tracked
 * Returns: false if range data has been invalidated, true otherwise
 *
 * The range struct is used to track updates to the CPU page table after a call
 * to either hmm_vma_get_pfns() or hmm_vma_fault(). Once the device driver is
 * done using the data, or wants to lock updates to the data it got from those
 * functions, it must call hmm_vma_range_done(), which will then stop tracking
 * CPU page table updates.
 *
 * Note that the device driver must still implement general CPU page table
 * update tracking either by using hmm_mirror (see hmm_mirror_register()) or by
 * using the mmu_notifier API directly.
 *
 * CPU page table update tracking done through hmm_range is only temporary and
 * to be used while trying to duplicate CPU page table contents for a range of
 * virtual addresses.
 *
 * There are two ways to use this :
 * again:
 *   hmm_vma_get_pfns(vma, range, start, end, pfns); or hmm_vma_fault(...);
 *   trans = device_build_page_table_update_transaction(pfns);
 *   device_page_table_lock();
 *   if (!hmm_vma_range_done(vma, range)) {
 *     device_page_table_unlock();
 *     goto again;
 *   }
 *   device_commit_transaction(trans);
 *   device_page_table_unlock();
 *
 * Or:
 *   hmm_vma_get_pfns(vma, range, start, end, pfns); or hmm_vma_fault(...);
 *   device_page_table_lock();
 *   hmm_vma_range_done(vma, range);
 *   device_update_page_table(pfns);
 *   device_page_table_unlock();
 */
bool hmm_vma_range_done(struct vm_area_struct *vma, struct hmm_range *range)
{
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
	struct hmm *hmm;

	if (range->end <= range->start) {
		BUG();
		return false;
	}

	hmm = hmm_register(vma->vm_mm);
	if (!hmm) {
		memset(range->pfns, 0, sizeof(*range->pfns) * npages);
		return false;
	}

	spin_lock(&hmm->lock);
	list_del_rcu(&range->list);
	spin_unlock(&hmm->lock);

	return range->valid;
}
EXPORT_SYMBOL(hmm_vma_range_done);

/*
 * hmm_vma_fault() - try to fault some address in a virtual address range
 * @vma: virtual memory area containing the virtual address range
 * @range: used to track pfns array content validity
 * @start: fault range virtual start address (inclusive)
 * @end: fault range virtual end address (exclusive)
 * @pfns: array of hmm_pfn_t, only entries with the fault flag set will be faulted
 * @write: is it a write fault
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Returns: 0 success, error otherwise (-EAGAIN means mmap_sem has been dropped)
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs.
 *
 * On error, for one virtual address in the range, the function will set the
 * hmm_pfn_t error flag for the corresponding pfn entry.
 *
 * Expected use pattern:
 * retry:
 *   down_read(&mm->mmap_sem);
 *   // Find vma and address device wants to fault, initialize hmm_pfn_t
 *   // array accordingly
 *   ret = hmm_vma_fault(vma, range, start, end, pfns, write, block);
 *   switch (ret) {
 *   case -EAGAIN:
 *     hmm_vma_range_done(vma, range);
 *     // You might want to rate limit or yield to play nicely, you may
 *     // also commit any valid pfn in the array assuming that you are
 *     // getting true from hmm_vma_range_done()
 *     goto retry;
 *   case 0:
 *     break;
 *   default:
 *     // Handle error !
 *     up_read(&mm->mmap_sem);
 *     return;
 *   }
 *   // Take device driver lock that serializes device page table update
 *   driver_lock_device_page_table_update();
 *   hmm_vma_range_done(vma, range);
 *   // Commit pfns we got from hmm_vma_fault()
 *   driver_unlock_device_page_table_update();
 *   up_read(&mm->mmap_sem);
 *
 * YOU MUST CALL hmm_vma_range_done() AFTER THIS FUNCTION RETURNS SUCCESS (0)
 * BEFORE FREEING THE range struct OR YOU WILL HAVE SERIOUS MEMORY CORRUPTION !
 *
 * YOU HAVE BEEN WARNED !
 */
int hmm_vma_fault(struct vm_area_struct *vma,
		  struct hmm_range *range,
		  unsigned long start,
		  unsigned long end,
		  hmm_pfn_t *pfns,
		  bool write,
		  bool block)
{
	struct hmm_vma_walk hmm_vma_walk;
	struct mm_walk mm_walk;
	struct hmm *hmm;
	int ret;

	/* Sanity check, this really should not happen ! */
	if (start < vma->vm_start || start >= vma->vm_end)
		return -EINVAL;
	if (end < vma->vm_start || end > vma->vm_end)
		return -EINVAL;

	hmm = hmm_register(vma->vm_mm);
	if (!hmm) {
		hmm_pfns_clear(pfns, start, end);
		return -ENOMEM;
	}
	/* Caller must have registered a mirror using hmm_mirror_register() */
	if (!hmm->mmu_notifier.ops)
		return -EINVAL;

	/* Initialize range to track CPU page table update */
	range->start = start;
	range->pfns = pfns;
	range->end = end;
	spin_lock(&hmm->lock);
	range->valid = true;
	list_add_rcu(&range->list, &hmm->ranges);
	spin_unlock(&hmm->lock);

	/* FIXME support hugetlb fs */
	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
		hmm_pfns_special(pfns, start, end);
		return 0;
	}

	hmm_vma_walk.fault = true;
	hmm_vma_walk.write = write;
	hmm_vma_walk.block = block;
	hmm_vma_walk.range = range;
	mm_walk.private = &hmm_vma_walk;
	hmm_vma_walk.last = range->start;

	mm_walk.vma = vma;
	mm_walk.mm = vma->vm_mm;
	mm_walk.pte_entry = NULL;
	mm_walk.test_walk = NULL;
	mm_walk.hugetlb_entry = NULL;
	mm_walk.pmd_entry = hmm_vma_walk_pmd;
	mm_walk.pte_hole = hmm_vma_walk_hole;

	do {
		ret = walk_page_range(start, end, &mm_walk);
		start = hmm_vma_walk.last;
	} while (ret == -EAGAIN);

	if (ret) {
		unsigned long i;

		i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
		hmm_pfns_clear(&pfns[i], hmm_vma_walk.last, end);
		hmm_vma_range_done(vma, range);
	}
	return ret;
}
EXPORT_SYMBOL(hmm_vma_fault);
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
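/*
 * Example (illustrative sketch): a driver would typically call the helper
 * above from its hmm_devmem_ops->fault() callback to obtain a locked system
 * page into which the contents of an unaddressable device page can be
 * migrated back. my_copy_from_device() is a hypothetical driver routine;
 * only hmm_vma_alloc_locked_page() is real here.
 *
 *   struct page *dst = hmm_vma_alloc_locked_page(vma, addr);
 *
 *   if (!dst)
 *       goto out_error;   // report the failure back through the fault path
 *   my_copy_from_device(devmem, page, dst);
 */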


static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	percpu_ref_exit(ref);
	devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
}

static void hmm_devmem_ref_kill(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	percpu_ref_kill(ref);
	wait_for_completion(&devmem->completion);
	devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
}

static int hmm_devmem_fault(struct vm_area_struct *vma,
			    unsigned long addr,
			    const struct page *page,
			    unsigned int flags,
			    pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	devmem->ops->free(devmem, page);
}

static DEFINE_MUTEX(hmm_devmem_lock);
static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);

static void hmm_devmem_radix_release(struct resource *resource)
{
	resource_size_t key, align_start, align_size, align_end;

	align_start = resource->start & ~(PA_SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(resource), PA_SECTION_SIZE);
	align_end = align_start + align_size - 1;

	mutex_lock(&hmm_devmem_lock);
	for (key = resource->start;
	     key <= resource->end;
	     key += PA_SECTION_SIZE)
		radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&hmm_devmem_lock);
}

static void hmm_devmem_release(struct device *dev, void *data)
{
	struct hmm_devmem *devmem = data;
	struct resource *resource = devmem->resource;
	unsigned long start_pfn, npages;
	struct zone *zone;
	struct page *page;

	if (percpu_ref_tryget_live(&devmem->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(&devmem->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
	npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;

	page = pfn_to_page(start_pfn);
	zone = page_zone(page);

	mem_hotplug_begin();
	__remove_pages(zone, start_pfn, npages);
	mem_hotplug_done();

	hmm_devmem_radix_release(resource);
}

static struct hmm_devmem *hmm_devmem_find(resource_size_t phys)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	return radix_tree_lookup(&hmm_devmem_radix, phys >> PA_SECTION_SHIFT);
}

static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
{
	resource_size_t key, align_start, align_size, align_end;
	struct device *device = devmem->device;
	int ret, nid, is_ram;
	unsigned long pfn;

	align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
	align_size = ALIGN(devmem->resource->start +
			   resource_size(devmem->resource),
			   PA_SECTION_SIZE) - align_start;

	is_ram = region_intersects(align_start, align_size,
				   IORESOURCE_SYSTEM_RAM,
				   IORES_DESC_NONE);
	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
			  __func__, devmem->resource);
		return -ENXIO;
	}
	if (is_ram == REGION_INTERSECTS)
		return -ENXIO;

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.res = devmem->resource;
	devmem->pagemap.page_fault = hmm_devmem_fault;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.dev = devmem->device;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;

	mutex_lock(&hmm_devmem_lock);
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) {
		struct hmm_devmem *dup;

		rcu_read_lock();
		dup = hmm_devmem_find(key);
		rcu_read_unlock();
		if (dup) {
			dev_err(device, "%s: collides with mapping for %s\n",
				__func__, dev_name(dup->device));
			mutex_unlock(&hmm_devmem_lock);
			ret = -EBUSY;
			goto error;
		}
		ret = radix_tree_insert(&hmm_devmem_radix,
					key >> PA_SECTION_SHIFT,
					devmem);
		if (ret) {
			dev_err(device, "%s: failed: %d\n", __func__, ret);
			mutex_unlock(&hmm_devmem_lock);
			goto error_radix;
		}
	}
	mutex_unlock(&hmm_devmem_lock);

	nid = dev_to_node(device);
	if (nid < 0)
		nid = numa_mem_id();

	mem_hotplug_begin();
	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory. Moreover,
	 * the device memory is inaccessible, thus we do not want to create a
	 * linear mapping for the memory like arch_add_memory() would do.
	 */
	ret = add_pages(nid, align_start >> PAGE_SHIFT,
			align_size >> PAGE_SHIFT, false);
	if (ret) {
		mem_hotplug_done();
		goto error_add_memory;
	}
	move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
			       align_start >> PAGE_SHIFT,
			       align_size >> PAGE_SHIFT);
	mem_hotplug_done();

	for (pfn = devmem->pfn_first; pfn < devmem->pfn_last; pfn++) {
		struct page *page = pfn_to_page(pfn);

		page->pgmap = &devmem->pagemap;
	}
	return 0;

error_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
error_radix:
	hmm_devmem_radix_release(devmem->resource);
error:
	return ret;
}

static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
{
	struct hmm_devmem *devmem = data;

	return devmem->resource == match_data;
}

static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
{
	devres_release(devmem->device, &hmm_devmem_release,
		       &hmm_devmem_match, devmem->resource);
}

/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization and
 * is then responsible for the memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	int ret;

	static_branch_enable(&device_private_key);

	devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
				   GFP_KERNEL, dev_to_node(device));
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		goto error_percpu_ref;

	ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		goto error_devm_add_action;

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about ioport_resource resource ?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource) {
			ret = -ENOMEM;
			goto error_no_resource;
		}
		break;
	}
	if (!devmem->resource) {
		ret = -ERANGE;
		goto error_no_resource;
	}

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);

	ret = hmm_devmem_pages_create(devmem);
	if (ret)
		goto error_pages;

	devres_add(device, devmem);

	ret = devm_add_action(device, hmm_devmem_ref_kill, &devmem->ref);
	if (ret) {
		hmm_devmem_remove(devmem);
		return ERR_PTR(ret);
	}

	return devmem;

error_pages:
	devm_release_mem_region(device, devmem->resource->start,
				resource_size(devmem->resource));
error_no_resource:
error_devm_add_action:
	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
error_percpu_ref:
	devres_free(devmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(hmm_devmem_add);
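/*
 * Example (illustrative sketch): how a driver probe path might use
 * hmm_devmem_add(). my_devmem_fault(), my_devmem_free() and
 * MY_DEVICE_MEMORY_SIZE are hypothetical driver names; .fault and .free are
 * the callbacks declared by struct hmm_devmem_ops in include/linux/hmm.h.
 *
 *   static const struct hmm_devmem_ops my_devmem_ops = {
 *       .free  = my_devmem_free,
 *       .fault = my_devmem_fault,
 *   };
 *
 *   devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev,
 *                           MY_DEVICE_MEMORY_SIZE);
 *   if (IS_ERR(devmem))
 *       return PTR_ERR(devmem);
 *
 *   // Device pages span devmem->pfn_first .. devmem->pfn_last - 1; the
 *   // driver now owns their allocation and the migration of data in and out.
 */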

/*
 * hmm_devmem_remove() - remove device memory (kill and free ZONE_DEVICE)
 *
 * @devmem: hmm_devmem struct used to track and manage the ZONE_DEVICE memory
 *
 * This will hot-unplug memory that was hotplugged by hmm_devmem_add() on
 * behalf of the device driver. It will free struct page and remove the
 * resource that reserved the physical address range for this device memory.
 */
void hmm_devmem_remove(struct hmm_devmem *devmem)
{
	resource_size_t start, size;
	struct device *device;

	if (!devmem)
		return;

	device = devmem->device;
	start = devmem->resource->start;
	size = resource_size(devmem->resource);

	hmm_devmem_ref_kill(&devmem->ref);
	hmm_devmem_ref_exit(&devmem->ref);
	hmm_devmem_pages_remove(devmem);

	devm_release_mem_region(device, start, size);
}
EXPORT_SYMBOL(hmm_devmem_remove);
#endif /* IS_ENABLED(CONFIG_DEVICE_PRIVATE) */