// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <asm-generic/mman-common.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "prmtv-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->id' should be the pointer to the relevant 'struct pid', with its
 * reference count held.  The caller must put the returned task, unless it is
 * NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
	return get_pid_task((struct pid *)t->id, PIDTYPE_PID);
}

/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = r->ar.end - r->ar.start;
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end; start + sz_piece <= orig_end;
			start += sz_piece) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}

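/*
 * Example (an illustrative sketch, assuming DAMON_MIN_REGION is 4096):
 * splitting a region [0, 40960) into nr_pieces == 3 gives
 * sz_piece = ALIGN_DOWN(40960 / 3, 4096) = 12288.  The first region is
 * trimmed to [0, 12288), the loop adds [12288, 24576) and [24576, 36864),
 * and the last piece is extended to the original end, yielding
 * [24576, 40960).
 */
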
static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}

/*
 * Find the three regions separated by the two biggest unmapped regions
 *
 * vma		the head vma of the target address space
 * regions	an array of three address ranges in which the results are saved
 *
 * This function receives an address space and finds three regions in it which
 * are separated by the two biggest unmapped regions in the space.  Please
 * refer to the comments of '__damon_va_init_regions()' below to see why this
 * is necessary.
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct vm_area_struct *vma,
				    struct damon_addr_range regions[3])
{
	struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
	struct vm_area_struct *last_vma = NULL;
	unsigned long start = 0;
	struct rb_root rbroot;

	/* Find the two biggest gaps so that first_gap > second_gap > others */
	for (; vma; vma = vma->vm_next) {
		if (!last_vma) {
			start = vma->vm_start;
			goto next;
		}

		/*
		 * If no gap in the subtree of this vma can beat second_gap,
		 * skip to the rightmost vma of the subtree.
		 */
		if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
			rbroot.rb_node = &vma->vm_rb;
			vma = rb_entry(rb_last(&rbroot),
					struct vm_area_struct, vm_rb);
			goto next;
		}

		gap.start = last_vma->vm_end;
		gap.end = vma->vm_start;
		if (sz_range(&gap) > sz_range(&second_gap)) {
			swap(gap, second_gap);
			if (sz_range(&second_gap) > sz_range(&first_gap))
				swap(second_gap, first_gap);
		}
next:
		last_vma = vma;
	}

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap(first_gap, second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);

	return 0;
}

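/*
 * Example (an illustrative sketch, with all addresses DAMON_MIN_REGION
 * aligned): for three vmas [0x1000, 0x2000), [0x9000, 0xa000) and
 * [0xf000, 0x10000), the two biggest gaps are [0x2000, 0x9000) and
 * [0xa000, 0xf000), so the resulting regions are [0x1000, 0x2000),
 * [0x9000, 0xa000) and [0xf000, 0x10000).
 */
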
/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm->mmap, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only a small number of portions of the entire address space are
 * actually mapped to memory and accessed, monitoring the unmapped regions is
 * wasteful.  On the other hand, because we can tolerate small noises,
 * tracking every mapping is not strictly required; it could even incur high
 * overhead if the mappings change frequently or the number of mappings is
 * high.  The adaptive regions adjustment mechanism further helps to deal
 * with the noise by simply identifying unmapped areas as regions that
 * receive no access.  Moreover, applying the real mappings, which would have
 * many unmapped areas inside, would make the adaptive mechanism quite
 * complex.  Nevertheless, excessively large unmapped areas inside the
 * monitoring target should be removed so that they don't waste the adaptive
 * mechanism's time.
 *
 * For these reasons, we convert the complex mappings to three distinct
 * regions that together cover every mapped area of the address space.  The
 * two gaps between the three regions are the two biggest unmapped areas in
 * the given address space.  In detail, this function first identifies the
 * start and the end of the mappings and the two biggest unmapped areas of
 * the address space.  Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * Because the usual memory map of a process is as below, the gap between the
 * heap and the uppermost mmap()-ed region, and the gap between the lowermost
 * mmap()-ed region and the stack, are usually the two biggest unmapped
 * regions.  Because these gaps are exceptionally large in usual address
 * spaces, excluding these two biggest unmapped regions is sufficient as a
 * trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				    struct damon_target *t)
{
	struct damon_target *ti;
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i, tidx = 0;

	if (damon_va_three_regions(t, regions)) {
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			tidx++;
		}
		pr_debug("Failed to get three regions of %dth target\n", tidx);
		return;
	}

	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}

/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}

/*
 * Functions for the dynamic monitoring target regions update
 */

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

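/* For example, [10, 20) intersects [15, 30), but not [20, 30). */
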
/*
 * Update damon regions for the three big regions of the given target
 *
 * t		the given target
 * bregions	the three big regions of the target
 */
static void damon_va_apply_three_regions(struct damon_target *t,
		struct damon_addr_range bregions[3])
{
	struct damon_region *r, *next;
	unsigned int i;

	/* Remove regions which are not in the three big regions now */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < 3; i++) {
			if (damon_intersect(r, &bregions[i]))
				break;
		}
		if (i == 3)
			damon_destroy_region(r, t);
	}

	/* Adjust intersecting regions to fit in the three big regions */
	for (i = 0; i < 3; i++) {
		struct damon_region *first = NULL, *last;
		struct damon_region *newr;
		struct damon_addr_range *br;

		br = &bregions[i];
		/* Get the first and last regions which intersect with br */
		damon_for_each_region(r, t) {
			if (damon_intersect(r, br)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= br->end)
				break;
		}
		if (!first) {
			/* no damon_region intersects with this big region */
			newr = damon_new_region(
					ALIGN_DOWN(br->start,
						DAMON_MIN_REGION),
					ALIGN(br->end, DAMON_MIN_REGION));
			if (!newr)
				continue;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			first->ar.start = ALIGN_DOWN(br->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(br->end, DAMON_MIN_REGION);
		}
	}
}

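/*
 * Example (an illustrative sketch): if the regions were [10, 20) and
 * [30, 40) while the new big region is [5, 35), both regions are kept,
 * with the first expanded to start at 5 and the last cut to end at 35
 * (modulo DAMON_MIN_REGION alignment).  Regions intersecting no big
 * region would have been destroyed.
 */
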
/*
 * Update regions for current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_va_apply_three_regions(t, three_regions);
	}
}

static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		/* recheck under the lock, as the pmd could have changed */
		if (pmd_huge(*pmd)) {
			damon_pmdp_mkold(pmd, walk->mm, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return 0;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	damon_ptep_mkold(pte, walk->mm, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long addr)
{
	bool referenced = false;
	pte_t entry = huge_ptep_get(pte);
	struct page *page = pte_page(entry);

	if (!page)
		return;

	get_page(page);

	if (pte_young(entry)) {
		referenced = true;
		entry = pte_mkold(entry);
		huge_ptep_set_access_flags(vma, addr, pte, entry,
					   vma->vm_flags & VM_WRITE);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr,
				     addr + huge_page_size(hstate_vma(vma))))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		set_page_young(page);

	set_page_idle(page);
	put_page(page);
}

static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(walk->vma);
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
	.hugetlb_entry = damon_mkold_hugetlb_entry,
};

static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}

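/*
 * An illustrative note: walking the one byte range [addr, addr + 1)
 * visits only the page table entry mapping 'addr', so each call ages
 * exactly one sampling address.
 */
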
/*
 * Functions for the access checking of the regions
 */

static void __damon_va_prepare_access_check(struct damon_ctx *ctx,
			struct mm_struct *mm, struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}

static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			__damon_va_prepare_access_check(ctx, mm, r);
		mmput(mm);
	}
}

struct damon_young_walk_private {
	unsigned long *page_sz;
	bool young;
};

static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;
	struct page *page;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		/* recheck under the lock, as the pmd could have changed */
		if (!pmd_huge(*pmd)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		page = damon_get_page(pmd_pfn(*pmd));
		if (!page)
			goto huge_out;
		if (pmd_young(*pmd) || !page_is_idle(page) ||
					mmu_notifier_test_young(walk->mm,
						addr)) {
			*priv->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
			priv->young = true;
		}
		put_page(page);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return -EINVAL;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	page = damon_get_page(pte_pfn(*pte));
	if (!page)
		goto out;
	if (pte_young(*pte) || !page_is_idle(page) ||
			mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = PAGE_SIZE;
		priv->young = true;
	}
	put_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct damon_young_walk_private *priv = walk->private;
	struct hstate *h = hstate_vma(walk->vma);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	page = pte_page(entry);
	if (!page)
		goto out;

	get_page(page);

	if (pte_young(entry) || !page_is_idle(page) ||
	    mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = huge_page_size(h);
		priv->young = true;
	}

	put_page(page);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
	.hugetlb_entry = damon_young_hugetlb_entry,
};

static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *page_sz)
{
	struct damon_young_walk_private arg = {
		.page_sz = page_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}

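/*
 * Example (an illustrative sketch): damon_va_young(mm, r->sampling_addr,
 * &page_sz) reports whether the page containing the sampling address was
 * accessed since the last damon_va_mkold() and, when it was, stores the
 * size of that page (PAGE_SIZE, HPAGE_PMD_SIZE, or the hugetlb page size)
 * in page_sz.
 */
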
/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
static void __damon_va_check_access(struct damon_ctx *ctx,
			       struct mm_struct *mm, struct damon_region *r)
{
	static struct mm_struct *last_mm;
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_mm = mm;
	last_addr = r->sampling_addr;
}

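/*
 * For example (an illustrative sketch), if the previous check found a
 * young 2 MiB THP and this region's sampling address falls in the same
 * 2 MiB page, the cached result is reused and nr_accesses is incremented
 * without another page table walk.
 */
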
static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t) {
			__damon_va_check_access(ctx, mm, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
		mmput(mm);
	}

	return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

bool damon_va_target_valid(void *target)
{
	struct damon_target *t = target;
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}

#ifndef CONFIG_ADVISE_SYSCALLS
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	struct mm_struct *mm;
	unsigned long start = PAGE_ALIGN(r->ar.start);
	unsigned long len = PAGE_ALIGN(r->ar.end - r->ar.start);
	unsigned long applied;

	mm = damon_get_mm(target);
	if (!mm)
		return 0;

	applied = do_madvise(mm, start, len, behavior) ? 0 : len;
	mmput(mm);

	return applied;
}
#endif	/* CONFIG_ADVISE_SYSCALLS */

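/*
 * For example (an illustrative sketch), applying MADV_PAGEOUT to a region
 * [0x1000, 0x5000) calls do_madvise() for the 0x4000 bytes and returns
 * 0x4000 if it succeeded, or 0 otherwise.
 */
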
static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		return 0;
	}

	return damos_madvise(t, r, madv_action);
}

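/*
 * An illustrative note: DAMOS_PAGEOUT is translated to MADV_PAGEOUT and
 * handed to damos_madvise(), while DAMOS_STAT only collects statistics
 * and therefore applies nothing, reporting zero applied bytes.
 */
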
static int damon_va_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

void damon_va_set_primitives(struct damon_ctx *ctx)
{
	ctx->primitive.init = damon_va_init;
	ctx->primitive.update = damon_va_update;
	ctx->primitive.prepare_access_checks = damon_va_prepare_access_checks;
	ctx->primitive.check_accesses = damon_va_check_accesses;
	ctx->primitive.reset_aggregated = NULL;
	ctx->primitive.target_valid = damon_va_target_valid;
	ctx->primitive.cleanup = NULL;
	ctx->primitive.apply_scheme = damon_va_apply_scheme;
	ctx->primitive.get_scheme_score = damon_va_scheme_score;
}
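
/*
 * Example usage (an illustrative sketch; the actual wiring is done by the
 * DAMON core and its user space interfaces):
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *
 *	if (ctx)
 *		damon_va_set_primitives(ctx);
 */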

#include "vaddr-test.h"