// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};

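/*
 * Fault requirements computed by hmm_pte_need_fault()/hmm_range_need_fault()
 * and handed to hmm_vma_fault().
 */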
enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

/*
 * hmm_device_entry_from_pfn() - create a valid device entry value from pfn
 * @range: range used to encode the HMM pfn value
 * @pfn: pfn value for which to create the device entry
 * Return: valid device entry for the pfn
 */
static uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
					  unsigned long pfn)
{
	return (pfn << range->pfn_shift) | range->flags[HMM_PFN_VALID];
}

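/*
 * hmm_pfns_fill() - fill a span of range->pfns[] with the same value
 * @addr: first virtual address covered (inclusive)
 * @end: last virtual address covered (exclusive)
 * @range: range used to locate the pfn array slice
 * @value: index into range->values[] (e.g. HMM_PFN_NONE or HMM_PFN_ERROR)
 */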
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, enum hmm_pfn_value_e value)
{
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[value];

	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (!vma)
		goto out_error;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE, i++)
		if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR)
			goto out_error;

	return -EBUSY;

out_error:
	pfns[i] = range->values[HMM_PFN_ERROR];
	return -EFAULT;
}

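/*
 * Decide which HMM_NEED_* faults (if any) are required so that the request
 * encoded in @pfns can be satisfied, given the permissions currently present
 * in the CPU page table (@cpu_flags).
 */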
static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       uint64_t pfns, uint64_t cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * Consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used
	 * in two ways: the HMM user either coalesces multiple page faults
	 * into one request and sets flags per pfn for those faults, or
	 * pre-faults a whole range with specific flags. For the latter it
	 * would be wasteful to make the user pre-fill the pfn array with a
	 * default flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return 0;

	/* Need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE]))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If CPU page table is not valid then we need to fault */
	if (!(cpu_flags & range->flags[HMM_PFN_VALID]))
		return HMM_NEED_FAULT;
	return 0;
}

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const uint64_t *pfns, unsigned long npages,
		     uint64_t cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      range->flags[HMM_PFN_VALID]))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |=
			hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

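/*
 * pte_hole callback: there is no page table covering [addr, end). Fault the
 * range in if the caller asked for it, otherwise report it as HMM_PFN_NONE.
 */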
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	required_fault = hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	hmm_vma_walk->last = addr;
	return hmm_pfns_fill(addr, end, range, HMM_PFN_NONE);
}

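/*
 * Translate the protection bits of a pmd into HMM pfn flags: valid, plus
 * write when the CPU mapping is writable; inaccessible (protnone) maps to 0.
 */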
static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

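/*
 * Fill the pfn array for a range backed by a transparent huge (or devmap)
 * pmd: one device entry per PAGE_SIZE page, all sharing the pmd's flags.
 */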
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, uint64_t *pfns, pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

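/*
 * Device private swap entries are only reported to the driver that owns the
 * backing pagemap (range->dev_private_owner); for every other caller they
 * are handled like any other non-present pte.
 */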
static inline bool hmm_is_device_private_entry(struct hmm_range *range,
		swp_entry_t entry)
{
	return is_device_private_entry(entry) &&
		device_private_entry_to_page(entry)->pgmap->owner ==
		range->dev_private_owner;
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

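/*
 * Convert a single pte into an HMM pfn entry, faulting or waiting for
 * migration as required. Every path that returns a non-zero value has
 * already done the pte_unmap().
 */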
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	if (pte_none(pte)) {
		required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0);
		if (required_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Never fault in device private pages, but just report
		 * the PFN even if not present.
		 */
		if (hmm_is_device_private_entry(range, entry)) {
			*pfn = hmm_device_entry_from_pfn(range,
					    swp_offset(entry));
			*pfn |= range->flags[HMM_PFN_VALID];
			if (is_write_device_private_entry(entry))
				*pfn |= range->flags[HMM_PFN_WRITE];
			return 0;
		}

		required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0);
		if (!required_fault)
			return 0;

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*pfn = range->values[HMM_PFN_SPECIAL];
		return 0;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

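/*
 * pmd_entry callback: dispatch on the pmd state (none, migration entry, huge
 * or a regular pte table) and fill the matching slice of range->pfns[].
 */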
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = &range->pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value again, check that it is still a
		 * transparent huge or device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. none, migration,
	 * huge or transparent huge. At this point it is either a valid pmd
	 * entry pointing to a pte directory or a bad pmd that will not
	 * recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

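/*
 * pud_entry callback: handle huge device pagemap puds in place; for anything
 * else ask the page walker to descend into the lower levels.
 */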
#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		uint64_t *pfns, cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		hmm_vma_walk->last = end;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

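/*
 * hugetlb_entry callback: one huge pte covers the whole [start, end) chunk,
 * so compute the flags once and replicate the pfn for each PAGE_SIZE slot.
 */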
#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t orig_pfn, cpu_flags;
	unsigned int required_fault;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags);
	if (required_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, required_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->pfns +
					 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	hmm_vma_walk->last = end;

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

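/* Page walk callbacks wired into walk_page_range() by hmm_range_fault(). */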
static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: argument structure
 *
 * Return: the number of valid pages in range->pfns[] (from range start
 * address), which may be zero. On error one of the following status codes
 * can be returned:
 *
 * -EINVAL: Invalid arguments or mm or virtual address is in an invalid vma
 *          (e.g., device file vma).
 * -ENOMEM: Out of memory.
 * -EPERM:  Invalid permission (e.g., asking for write and range is read
 *          only).
 * -EBUSY:  The range has been invalidated and the caller needs to wait for
 *          the invalidation to finish.
 * -EFAULT: A page was requested to be valid and could not be made valid,
 *          i.e. it has no backing VMA or it is illegal to access it.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. causing faults).
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	lockdep_assert_held(&mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
	} while (ret == -EBUSY);

	if (ret)
		return ret;
	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
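
/*
 * Illustrative only, not part of this file: a typical caller follows the
 * pattern described in Documentation/vm/hmm.rst, pairing hmm_range_fault()
 * with an mmu_interval_notifier sequence check so the pfns are only consumed
 * while they are still known to be valid. "interval_sub" and "driver_lock"
 * below are placeholder names for the driver's notifier and update lock.
 *
 *	range.notifier = &interval_sub;
 * again:
 *	range.notifier_seq = mmu_interval_read_begin(&interval_sub);
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range);
 *	up_read(&mm->mmap_sem);
 *	if (ret <= 0) {
 *		if (ret == -EBUSY)
 *			goto again;
 *		return ret;
 *	}
 *
 *	mutex_lock(&driver_lock);
 *	if (mmu_interval_read_retry(&interval_sub, range.notifier_seq)) {
 *		mutex_unlock(&driver_lock);
 *		goto again;
 *	}
 *	(program the device page tables from range.pfns[] here)
 *	mutex_unlock(&driver_lock);
 */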