Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
[linux-block.git] / mm / damon / paddr.c
CommitLineData
a28397be
SP
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * DAMON Primitives for The Physical Address Space
4 *
5 * Author: SeongJae Park <sj@kernel.org>
6 */
7
8#define pr_fmt(fmt) "damon-pa: " fmt
9
10#include <linux/mmu_notifier.h>
11#include <linux/page_idle.h>
12#include <linux/pagemap.h>
13#include <linux/rmap.h>
57223ac2 14#include <linux/swap.h>
a28397be 15
57223ac2 16#include "../internal.h"
f7d911c3 17#include "ops-common.h"
a28397be 18
2f031c6f 19static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
a28397be
SP
20 unsigned long addr, void *arg)
21{
2f031c6f 22 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
a28397be
SP
23
24 while (page_vma_mapped_walk(&pvmw)) {
25 addr = pvmw.address;
26 if (pvmw.pte)
c11d34fa 27 damon_ptep_mkold(pvmw.pte, vma, addr);
a28397be 28 else
c11d34fa 29 damon_pmdp_mkold(pvmw.pmd, vma, addr);
a28397be
SP
30 }
31 return true;
32}
33
/*
 * Mark all page table entries mapping the folio of @paddr as old, so that a
 * later damon_pa_young() call can tell whether the folio was accessed in
 * between.
 *
 * Unmapped folios only get their idle flag set; mapped folios are handled
 * through an rmap walk over all of their mappings.  Non-anon (or KSM)
 * folios must be locked for the rmap walk; the folio lock is only tried
 * (not waited for), so a contended folio is silently skipped.
 */
static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	/* Not a valid, DAMON-trackable folio */
	if (!folio)
		return;

	/* No page table mappings to clear; just mark the folio idle */
	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		goto out;
	}

	/* File-backed and KSM folios need the folio lock for rmap_walk() */
	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	/* Drop the reference taken by damon_get_folio() */
	folio_put(folio);
}
63
8ef4d5ca 64static void __damon_pa_prepare_access_check(struct damon_region *r)
a28397be
SP
65{
66 r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
67
68 damon_pa_mkold(r->sampling_addr);
69}
70
cdeed009 71static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
a28397be
SP
72{
73 struct damon_target *t;
74 struct damon_region *r;
75
76 damon_for_each_target(t, ctx) {
77 damon_for_each_region(r, t)
8ef4d5ca 78 __damon_pa_prepare_access_check(r);
a28397be
SP
79 }
80}
81
/*
 * rmap_walk() callback that checks whether one mapping of @folio was
 * accessed.
 *
 * @arg points to a bool; it is set to true if any pte/pmd that maps
 * @folio in @vma has its young bit set, the folio's idle flag is clear,
 * or a secondary MMU reports the address as young.  Returns false to
 * stop the rmap walk as soon as an access is found, true to continue.
 */
static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	bool *accessed = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	*accessed = false;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			*accessed = pte_young(ptep_get(pvmw.pte)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
#else
			/* pmd mapping without THP support should not happen */
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (*accessed) {
			/* release pvmw's ptl/locks before breaking out */
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return *accessed == false;
}
113
af40e35a 114static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
a28397be 115{
07bb1fba 116 struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
b0c0e744 117 bool accessed = false;
a28397be 118 struct rmap_walk_control rwc = {
b0c0e744 119 .arg = &accessed,
a28397be 120 .rmap_one = __damon_pa_young,
2f031c6f 121 .anon_lock = folio_lock_anon_vma_read,
a28397be
SP
122 };
123 bool need_lock;
124
07bb1fba 125 if (!folio)
a28397be
SP
126 return false;
127
c8423186
MWO
128 if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
129 if (folio_test_idle(folio))
b0c0e744 130 accessed = false;
a28397be 131 else
b0c0e744 132 accessed = true;
a28397be
SP
133 goto out;
134 }
135
c8423186 136 need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
70307b0e
KW
137 if (need_lock && !folio_trylock(folio))
138 goto out;
a28397be 139
2f031c6f 140 rmap_walk(folio, &rwc);
a28397be
SP
141
142 if (need_lock)
c8423186 143 folio_unlock(folio);
a28397be
SP
144
145out:
397b0c3a 146 *folio_sz = folio_size(folio);
751688b8 147 folio_put(folio);
b0c0e744 148 return accessed;
a28397be
SP
149}
150
ace30fb2
SP
151static void __damon_pa_check_access(struct damon_region *r,
152 struct damon_attrs *attrs)
a28397be
SP
153{
154 static unsigned long last_addr;
af40e35a 155 static unsigned long last_folio_sz = PAGE_SIZE;
a28397be
SP
156 static bool last_accessed;
157
158 /* If the region is in the last checked page, reuse the result */
af40e35a
SP
159 if (ALIGN_DOWN(last_addr, last_folio_sz) ==
160 ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
ace30fb2 161 damon_update_region_access_rate(r, last_accessed, attrs);
a28397be
SP
162 return;
163 }
164
af40e35a 165 last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
ace30fb2 166 damon_update_region_access_rate(r, last_accessed, attrs);
a28397be
SP
167
168 last_addr = r->sampling_addr;
169}
170
cdeed009 171static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
a28397be
SP
172{
173 struct damon_target *t;
174 struct damon_region *r;
175 unsigned int max_nr_accesses = 0;
176
177 damon_for_each_target(t, ctx) {
178 damon_for_each_region(r, t) {
ace30fb2 179 __damon_pa_check_access(r, &ctx->attrs);
a28397be
SP
180 max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
181 }
182 }
183
184 return max_nr_accesses;
185}
186
18250e78 187static bool __damos_pa_filter_out(struct damos_filter *filter,
07bb1fba 188 struct folio *folio)
18250e78
SP
189{
190 bool matched = false;
191 struct mem_cgroup *memcg;
192
193 switch (filter->type) {
194 case DAMOS_FILTER_TYPE_ANON:
07bb1fba 195 matched = folio_test_anon(folio);
18250e78
SP
196 break;
197 case DAMOS_FILTER_TYPE_MEMCG:
198 rcu_read_lock();
07bb1fba 199 memcg = folio_memcg_check(folio);
18250e78
SP
200 if (!memcg)
201 matched = false;
202 else
203 matched = filter->memcg_id == mem_cgroup_id(memcg);
204 rcu_read_unlock();
205 break;
206 default:
207 break;
208 }
209
210 return matched == filter->matching;
211}
212
213/*
214 * damos_pa_filter_out - Return true if the page should be filtered out.
215 */
07bb1fba 216static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
18250e78
SP
217{
218 struct damos_filter *filter;
219
220 damos_for_each_filter(filter, scheme) {
07bb1fba 221 if (__damos_pa_filter_out(filter, folio))
18250e78
SP
222 return true;
223 }
224 return false;
225}
226
/*
 * Page out the folios of region @r as the DAMOS_PAGEOUT action of
 * scheme @s.
 *
 * Folios rejected by the scheme's filters are skipped.  Eligible folios
 * get their referenced/young markers cleared (so reclaim will not treat
 * them as recently used), are isolated from the LRU, and batch-reclaimed
 * at the end.  Unevictable folios are put back to the LRU instead.
 *
 * Returns the total bytes of the reclaimed pages.
 */
static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
{
	unsigned long addr, applied;
	LIST_HEAD(folio_list);

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct folio *folio = damon_get_folio(PHYS_PFN(addr));

		if (!folio)
			continue;

		if (damos_pa_filter_out(s, folio))
			goto put_folio;

		/* Drop access markers so reclaim does not skip the folio */
		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (!folio_isolate_lru(folio))
			goto put_folio;
		if (folio_test_unevictable(folio))
			folio_putback_lru(folio);
		else
			list_add(&folio->lru, &folio_list);
put_folio:
		/* Drop the reference taken by damon_get_folio() */
		folio_put(folio);
	}
	applied = reclaim_pages(&folio_list);
	cond_resched();
	return applied * PAGE_SIZE;
}
256
8193321a 257static inline unsigned long damon_pa_mark_accessed_or_deactivate(
18250e78 258 struct damon_region *r, struct damos *s, bool mark_accessed)
8cdcc532
SP
259{
260 unsigned long addr, applied = 0;
261
262 for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
07bb1fba 263 struct folio *folio = damon_get_folio(PHYS_PFN(addr));
8cdcc532 264
07bb1fba 265 if (!folio)
8cdcc532 266 continue;
18250e78 267
b6993be2
KW
268 if (damos_pa_filter_out(s, folio))
269 goto put_folio;
18250e78 270
8193321a 271 if (mark_accessed)
f70da5ee 272 folio_mark_accessed(folio);
8193321a 273 else
5a9e3474 274 folio_deactivate(folio);
f70da5ee 275 applied += folio_nr_pages(folio);
b6993be2 276put_folio:
dd52a61d 277 folio_put(folio);
8cdcc532
SP
278 }
279 return applied * PAGE_SIZE;
280}
281
18250e78
SP
282static unsigned long damon_pa_mark_accessed(struct damon_region *r,
283 struct damos *s)
99cdc2cd 284{
18250e78 285 return damon_pa_mark_accessed_or_deactivate(r, s, true);
8193321a 286}
99cdc2cd 287
18250e78
SP
288static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
289 struct damos *s)
8193321a 290{
18250e78 291 return damon_pa_mark_accessed_or_deactivate(r, s, false);
99cdc2cd
SP
292}
293
0e93e8bf
SP
294static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
295 struct damon_target *t, struct damon_region *r,
296 struct damos *scheme)
297{
298 switch (scheme->action) {
299 case DAMOS_PAGEOUT:
18250e78 300 return damon_pa_pageout(r, scheme);
8cdcc532 301 case DAMOS_LRU_PRIO:
18250e78 302 return damon_pa_mark_accessed(r, scheme);
99cdc2cd 303 case DAMOS_LRU_DEPRIO:
18250e78 304 return damon_pa_deactivate_pages(r, scheme);
f82e70e2
SP
305 case DAMOS_STAT:
306 break;
0e93e8bf 307 default:
f82e70e2 308 /* DAMOS actions that not yet supported by 'paddr'. */
0e93e8bf
SP
309 break;
310 }
311 return 0;
312}
313
cdeed009
XH
314static int damon_pa_scheme_score(struct damon_ctx *context,
315 struct damon_target *t, struct damon_region *r,
316 struct damos *scheme)
198f0f4c
SP
317{
318 switch (scheme->action) {
319 case DAMOS_PAGEOUT:
e3e486e6 320 return damon_cold_score(context, r, scheme);
8cdcc532
SP
321 case DAMOS_LRU_PRIO:
322 return damon_hot_score(context, r, scheme);
99cdc2cd 323 case DAMOS_LRU_DEPRIO:
e3e486e6 324 return damon_cold_score(context, r, scheme);
198f0f4c
SP
325 default:
326 break;
327 }
328
329 return DAMOS_MAX_SCORE;
330}
331
7752925f
SP
332static int __init damon_pa_initcall(void)
333{
334 struct damon_operations ops = {
335 .id = DAMON_OPS_PADDR,
336 .init = NULL,
337 .update = NULL,
338 .prepare_access_checks = damon_pa_prepare_access_checks,
339 .check_accesses = damon_pa_check_accesses,
340 .reset_aggregated = NULL,
85104056 341 .target_valid = NULL,
7752925f
SP
342 .cleanup = NULL,
343 .apply_scheme = damon_pa_apply_scheme,
344 .get_scheme_score = damon_pa_scheme_score,
345 };
346
347 return damon_register_ops(&ops);
348};
349
350subsys_initcall(damon_pa_initcall);