// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
{
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;

        for (;;) {
                err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                        break;
                if (addr >= end - PAGE_SIZE)
                        break;
                addr += PAGE_SIZE;
                pte++;
        }
        return err;
}

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pte_t *pte;
        int err = 0;
        spinlock_t *ptl;

        if (walk->no_vma) {
                pte = pte_offset_map(pmd, addr);
                err = walk_pte_range_inner(pte, addr, end, walk);
                pte_unmap(pte);
        } else {
                pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
                err = walk_pte_range_inner(pte, addr, end, walk);
                pte_unmap_unlock(pte, ptl);
        }

        return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pmd_t *pmd;
        unsigned long next;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;

        pmd = pmd_offset(pud, addr);
        do {
again:
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) {
                        if (ops->pte_hole)
                                err = ops->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }

                walk->action = ACTION_SUBTREE;

                /*
                 * This implies that each ->pmd_entry() handler
                 * needs to know about pmd_trans_huge() pmds
                 */
                if (ops->pmd_entry)
                        err = ops->pmd_entry(pmd, addr, next, walk);
                if (err)
                        break;

                if (walk->action == ACTION_AGAIN)
                        goto again;

                /*
                 * Check this here so we only break down trans_huge
                 * pages when we _need_ to
                 */
                if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
                    walk->action == ACTION_CONTINUE ||
                    !(ops->pte_entry))
                        continue;

                if (walk->vma) {
                        split_huge_pmd(walk->vma, pmd, addr);
                        if (pmd_trans_unstable(pmd))
                                goto again;
                }

                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}

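/*
 * Example (a hedged sketch, not part of the original file): a ->pmd_entry()
 * handler that processes a transparent huge pmd itself and sets
 * ACTION_CONTINUE so the walker neither splits it nor descends to the pte
 * level; other pmds keep the default ACTION_SUBTREE set above.
 * thp_pmd_entry() is a hypothetical name, and a real handler would take the
 * pmd lock before acting on the entry.
 */
static int thp_pmd_entry(pmd_t *pmd, unsigned long addr,
                         unsigned long next, struct mm_walk *walk)
{
        if (pmd_trans_huge(*pmd)) {
                /* ... handle the whole huge page here ... */
                walk->action = ACTION_CONTINUE; /* skip the pte level */
        }
        return 0;
}
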
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pud_t *pud;
        unsigned long next;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;

        pud = pud_offset(p4d, addr);
        do {
again:
                next = pud_addr_end(addr, end);
                if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) {
                        if (ops->pte_hole)
                                err = ops->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }

                walk->action = ACTION_SUBTREE;

                if (ops->pud_entry)
                        err = ops->pud_entry(pud, addr, next, walk);
                if (err)
                        break;

                if (walk->action == ACTION_AGAIN)
                        goto again;

                if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
                    walk->action == ACTION_CONTINUE ||
                    !(ops->pmd_entry || ops->pte_entry))
                        continue;

                if (walk->vma)
                        split_huge_pud(walk->vma, pud, addr);
                if (pud_none(*pud))
                        goto again;

                err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}

static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        p4d_t *p4d;
        unsigned long next;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(p4d)) {
                        if (ops->pte_hole)
                                err = ops->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (ops->p4d_entry) {
                        err = ops->p4d_entry(p4d, addr, next, walk);
                        if (err)
                                break;
                }
                if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
                        err = walk_pud_range(p4d, addr, next, walk);
                if (err)
                        break;
        } while (p4d++, addr = next, addr != end);

        return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pgd_t *pgd;
        unsigned long next;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;

        pgd = pgd_offset(walk->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd)) {
                        if (ops->pte_hole)
                                err = ops->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (ops->pgd_entry) {
                        err = ops->pgd_entry(pgd, addr, next, walk);
                        if (err)
                                break;
                }
                if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
                    ops->pte_entry)
                        err = walk_p4d_range(pgd, addr, next, walk);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);

        return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
                                       unsigned long end)
{
        unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
        return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct hstate *h = hstate_vma(vma);
        unsigned long next;
        unsigned long hmask = huge_page_mask(h);
        unsigned long sz = huge_page_size(h);
        pte_t *pte;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;

        do {
                next = hugetlb_entry_end(h, addr, end);
                pte = huge_pte_offset(walk->mm, addr & hmask, sz);

                if (pte)
                        err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
                else if (ops->pte_hole)
                        err = ops->pte_hole(addr, next, walk);

                if (err)
                        break;
        } while (addr = next, addr != end);

        return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative value means
 * an error, in which case we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
                        struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        const struct mm_walk_ops *ops = walk->ops;

        if (ops->test_walk)
                return ops->test_walk(start, end, walk);

        /*
         * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
         * range, so we don't walk over it as we do for normal vmas. However,
         * some callers are interested in handling hole ranges and don't want
         * to just ignore any single address range. Such users certainly
         * define their ->pte_hole() callbacks, so let's delegate them to
         * handle vma(VM_PFNMAP).
         */
        if (vma->vm_flags & VM_PFNMAP) {
                int err = 1;
                if (ops->pte_hole)
                        err = ops->pte_hole(start, end, walk);
                return err ? err : 1;
        }
        return 0;
}

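/*
 * Example (a hedged sketch, not part of the original file): a ->test_walk()
 * callback that restricts a walk to anonymous vmas. Returning 1 skips the
 * vma and 0 walks it, per the semantics described above.
 * anon_only_test_walk() is a hypothetical name.
 */
static int anon_only_test_walk(unsigned long start, unsigned long end,
                               struct mm_walk *walk)
{
        if (walk->vma->vm_file)
                return 1;       /* file-backed: skip this vma */
        return 0;               /* anonymous: walk it */
}
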
static int __walk_page_range(unsigned long start, unsigned long end,
                struct mm_walk *walk)
{
        int err = 0;
        struct vm_area_struct *vma = walk->vma;
        const struct mm_walk_ops *ops = walk->ops;

        if (vma && ops->pre_vma) {
                err = ops->pre_vma(start, end, walk);
                if (err)
                        return err;
        }

        if (vma && is_vm_hugetlb_page(vma)) {
                if (ops->hugetlb_entry)
                        err = walk_hugetlb_range(start, end, walk);
        } else
                err = walk_pgd_range(start, end, walk);

        if (vma && ops->post_vma)
                ops->post_vma(walk);

        return err;
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 * @mm:         mm_struct representing the target process of page table walk
 * @start:      start address of the virtual address range
 * @end:        end address of the virtual address range
 * @ops:        operations to call during the walk
 * @private:    private data for callbacks' usage
 *
 * Recursively walk the page table tree of the process represented by @mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined as below:
 *
 *  - 0  : the current entry was handled successfully; continue the walk if
 *         the end address has not been reached yet.
 *  - >0 : the current entry was handled successfully; return to the caller
 *         with this caller-specific value.
 *  - <0 : handling the current entry failed; return to the caller with this
 *         error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
 * purpose.
 *
 * If operations need to be staged before and committed after a vma is walked,
 * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
 * since it is intended to handle commit-type operations, can't return any
 * errors.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for the access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold
 *   @mm->mmap_sem, because these functions traverse the vma list and/or
 *   access vma fields.
 */
int walk_page_range(struct mm_struct *mm, unsigned long start,
                unsigned long end, const struct mm_walk_ops *ops,
                void *private)
{
        int err = 0;
        unsigned long next;
        struct vm_area_struct *vma;
        struct mm_walk walk = {
                .ops            = ops,
                .mm             = mm,
                .private        = private,
        };

        if (start >= end)
                return -EINVAL;

        if (!walk.mm)
                return -EINVAL;

        lockdep_assert_held(&walk.mm->mmap_sem);

        vma = find_vma(walk.mm, start);
        do {
                if (!vma) { /* after the last vma */
                        walk.vma = NULL;
                        next = end;
                } else if (start < vma->vm_start) { /* outside vma */
                        walk.vma = NULL;
                        next = min(end, vma->vm_start);
                } else { /* inside vma */
                        walk.vma = vma;
                        next = min(end, vma->vm_end);
                        vma = vma->vm_next;

                        err = walk_page_test(start, next, &walk);
                        if (err > 0) {
                                /*
                                 * positive return values are purely for
                                 * controlling the pagewalk, so should never
                                 * be passed to the callers.
                                 */
                                err = 0;
                                continue;
                        }
                        if (err < 0)
                                break;
                }
                if (walk.vma || walk.ops->pte_hole)
                        err = __walk_page_range(start, next, &walk);
                if (err)
                        break;
        } while (start = next, start < end);
        return err;
}
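
/*
 * Example usage (a minimal sketch, not part of the original file): count how
 * many ptes in a range map a present page. count_pte_entry() and
 * count_present_ptes() are hypothetical names; only walk_page_range() and
 * struct mm_walk_ops above are the real API.
 */
static int count_pte_entry(pte_t *pte, unsigned long addr,
                           unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;   /* passed via @private */

        if (pte_present(*pte))
                (*count)++;
        return 0;                               /* 0 == keep walking */
}

static const struct mm_walk_ops count_walk_ops = {
        .pte_entry = count_pte_entry,
};

static unsigned long count_present_ptes(struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
{
        unsigned long count = 0;

        /* walk_page_range() asserts mmap_sem is held, so take it here */
        down_read(&mm->mmap_sem);
        walk_page_range(mm, start, end, &count_walk_ops, &count);
        up_read(&mm->mmap_sem);
        return count;
}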

/*
 * Similar to walk_page_range() but can walk any page tables even if they are
 * not backed by VMAs. Because 'unusual' entries may be walked, this function
 * will also not lock the PTEs for the pte_entry() callback. This is useful
 * for walking the kernel page tables or page tables for firmware.
 */
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
                          unsigned long end, const struct mm_walk_ops *ops,
                          void *private)
{
        struct mm_walk walk = {
                .ops            = ops,
                .mm             = mm,
                .private        = private,
                .no_vma         = true
        };

        if (start >= end || !walk.mm)
                return -EINVAL;

        lockdep_assert_held(&walk.mm->mmap_sem);

        return __walk_page_range(start, end, &walk);
}
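
/*
 * Example usage (a hedged sketch, not part of the original file): report
 * every present pte in a range of the kernel page tables via init_mm.
 * dump_kernel_pte() and report_kernel_ptes() are hypothetical names.
 */
static int dump_kernel_pte(pte_t *pte, unsigned long addr,
                           unsigned long next, struct mm_walk *walk)
{
        if (pte_present(*pte))
                pr_info("kernel pte mapped at %lx\n", addr);
        return 0;
}

static const struct mm_walk_ops dump_kernel_ops = {
        .pte_entry = dump_kernel_pte,
};

static void report_kernel_ptes(unsigned long start, unsigned long end)
{
        /* no VMAs here; init_mm's mmap_sem is taken only for the assert */
        down_read(&init_mm.mmap_sem);
        walk_page_range_novma(&init_mm, start, end, &dump_kernel_ops, NULL);
        up_read(&init_mm.mmap_sem);
}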

int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
                void *private)
{
        struct mm_walk walk = {
                .ops            = ops,
                .mm             = vma->vm_mm,
                .vma            = vma,
                .private        = private,
        };
        int err;

        if (!walk.mm)
                return -EINVAL;

        lockdep_assert_held(&walk.mm->mmap_sem);

        err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
        if (err > 0)
                return 0;
        if (err < 0)
                return err;
        return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}

/**
 * walk_page_mapping - walk all memory areas mapped into a struct address_space.
 * @mapping:    Pointer to the struct address_space
 * @first_index: First page offset in the address_space
 * @nr:         Number of incremental page offsets to cover
 * @ops:        operations to call during the walk
 * @private:    private data for callbacks' usage
 *
 * This function walks all memory areas mapped into a struct address_space.
 * The walk is limited to only the given page-size index range, but if
 * the index boundaries cross a huge page-table entry, that entry will be
 * included.
 *
 * Also see walk_page_range() for additional information.
 *
 * Locking:
 *   This function can't require that the struct mm_struct::mmap_sem is held,
 *   since @mapping may be mapped by multiple processes. Instead
 *   @mapping->i_mmap_rwsem must be held. This might have implications in the
 *   callbacks, and it's up to the caller to ensure that the
 *   struct mm_struct::mmap_sem is not needed.
 *
 *   This also means that a caller can't rely on the struct
 *   vm_area_struct::vm_flags to be constant across a call,
 *   except for immutable flags. Callers requiring this shouldn't use
 *   this function.
 *
 * Return: 0 on success, negative error code on failure, positive number on
 * caller defined premature termination.
 */
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
                      pgoff_t nr, const struct mm_walk_ops *ops,
                      void *private)
{
        struct mm_walk walk = {
                .ops            = ops,
                .private        = private,
        };
        struct vm_area_struct *vma;
        pgoff_t vba, vea, cba, cea;
        unsigned long start_addr, end_addr;
        int err = 0;

        lockdep_assert_held(&mapping->i_mmap_rwsem);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
                                  first_index + nr - 1) {
                /* Clip to the vma */
                vba = vma->vm_pgoff;
                vea = vba + vma_pages(vma);
                cba = first_index;
                cba = max(cba, vba);
                cea = first_index + nr;
                cea = min(cea, vea);

                start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
                end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
                if (start_addr >= end_addr)
                        continue;

                walk.vma = vma;
                walk.mm = vma->vm_mm;

                err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
                if (err > 0) {
                        err = 0;
                        break;
                } else if (err < 0)
                        break;

                err = __walk_page_range(start_addr, end_addr, &walk);
                if (err)
                        break;
        }

        return err;
}
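
/*
 * Example usage (a hedged sketch, not part of the original file): count every
 * present pte mapping the first @nr pages of a file, across all processes
 * that map it. file_pte_entry() and count_file_mappings() are hypothetical
 * names; i_mmap_rwsem is taken as the locking comment above requires.
 */
static int file_pte_entry(pte_t *pte, unsigned long addr,
                          unsigned long next, struct mm_walk *walk)
{
        unsigned long *nmapped = walk->private;

        if (pte_present(*pte))
                (*nmapped)++;
        return 0;
}

static const struct mm_walk_ops file_walk_ops = {
        .pte_entry = file_pte_entry,
};

static unsigned long count_file_mappings(struct address_space *mapping,
                                         pgoff_t nr)
{
        unsigned long nmapped = 0;

        i_mmap_lock_read(mapping);
        walk_page_mapping(mapping, 0, nr, &file_walk_ops, &nmapped);
        i_mmap_unlock_read(mapping);
        return nmapped;
}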