Commit | Line | Data |
---|---|---|
2224d848 SP |
1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* | |
3 | * DAMON api | |
4 | * | |
5 | * Author: SeongJae Park <sjpark@amazon.de> | |
6 | */ | |
7 | ||
8 | #ifndef _DAMON_H_ | |
9 | #define _DAMON_H_ | |
10 | ||
98def236 | 11 | #include <linux/memcontrol.h> |
2224d848 SP |
12 | #include <linux/mutex.h> |
13 | #include <linux/time64.h> | |
14 | #include <linux/types.h> | |
9b2a38d6 | 15 | #include <linux/random.h> |
2224d848 | 16 | |
b9a6ac4e SP |
17 | /* Minimal region size. Every damon_region is aligned by this. */ |
18 | #define DAMON_MIN_REGION PAGE_SIZE | |
38683e00 SP |
19 | /* Max priority score for DAMON-based operation schemes */ |
20 | #define DAMOS_MAX_SCORE (99) | |
b9a6ac4e | 21 | |
9b2a38d6 | 22 | /* Get a random number in [l, r) */ |
234d6873 XH |
23 | static inline unsigned long damon_rand(unsigned long l, unsigned long r) |
24 | { | |
8032bf12 | 25 | return l + get_random_u32_below(r - l); |
234d6873 | 26 | } |
9b2a38d6 | 27 | |
f23b8eee SP |
28 | /** |
29 | * struct damon_addr_range - Represents an address region of [@start, @end). | |
30 | * @start: Start address of the region (inclusive). | |
31 | * @end: End address of the region (exclusive). | |
32 | */ | |
33 | struct damon_addr_range { | |
34 | unsigned long start; | |
35 | unsigned long end; | |
36 | }; | |
37 | ||
38 | /** | |
39 | * struct damon_region - Represents a monitoring target region. | |
40 | * @ar: The address range of the region. | |
41 | * @sampling_addr: Address of the sample for the next access check. | |
42 | * @nr_accesses: Access frequency of this region. | |
43 | * @list: List head for siblings. | |
fda504fa SP |
44 | * @age: Age of this region. |
45 | * | |
d896073f SP |
46 | * @nr_accesses is reset to zero for every &damon_attrs->aggr_interval and be |
47 | * increased for every &damon_attrs->sample_interval if an access to the region | |
48 | * during the last sampling interval is found. | |
49 | * | |
fda504fa SP |
50 | * @age is initially zero, increased for each aggregation interval, and reset |
51 | * to zero again if the access frequency is significantly changed. If two | |
52 | * regions are merged into a new region, both @nr_accesses and @age of the new | |
53 | * region are set as region size-weighted average of those of the two regions. | |
f23b8eee SP |
54 | */ |
55 | struct damon_region { | |
56 | struct damon_addr_range ar; | |
57 | unsigned long sampling_addr; | |
58 | unsigned int nr_accesses; | |
59 | struct list_head list; | |
fda504fa SP |
60 | |
61 | unsigned int age; | |
62 | /* private: Internal value for age calculation. */ | |
63 | unsigned int last_nr_accesses; | |
f23b8eee SP |
64 | }; |
65 | ||
66 | /** | |
67 | * struct damon_target - Represents a monitoring target. | |
1971bd63 | 68 | * @pid: The PID of the virtual address space to monitor. |
b9a6ac4e | 69 | * @nr_regions: Number of monitoring target regions of this target. |
f23b8eee SP |
70 | * @regions_list: Head of the monitoring target regions of this target. |
71 | * @list: List head for siblings. | |
72 | * | |
73 | * Each monitoring context could have multiple targets. For example, a context | |
74 | * for virtual memory address spaces could have multiple target processes. The | |
f7d911c3 SP |
75 | * @pid should be set for appropriate &struct damon_operations including the |
76 | * virtual address spaces monitoring operations. | |
f23b8eee SP |
77 | */ |
78 | struct damon_target { | |
1971bd63 | 79 | struct pid *pid; |
b9a6ac4e | 80 | unsigned int nr_regions; |
f23b8eee SP |
81 | struct list_head regions_list; |
82 | struct list_head list; | |
83 | }; | |
84 | ||
1f366e42 SP |
85 | /** |
86 | * enum damos_action - Represents an action of a Data Access Monitoring-based | |
87 | * Operation Scheme. | |
88 | * | |
89 | * @DAMOS_WILLNEED: Call ``madvise()`` for the region with MADV_WILLNEED. | |
90 | * @DAMOS_COLD: Call ``madvise()`` for the region with MADV_COLD. | |
91 | * @DAMOS_PAGEOUT: Call ``madvise()`` for the region with MADV_PAGEOUT. | |
92 | * @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE. | |
93 | * @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE. | |
8cdcc532 | 94 | * @DAMOS_LRU_PRIO: Prioritize the region on its LRU lists. |
99cdc2cd | 95 | * @DAMOS_LRU_DEPRIO: Deprioritize the region on its LRU lists. |
2f0b548c | 96 | * @DAMOS_STAT: Do nothing but count the stat. |
5257f36e | 97 | * @NR_DAMOS_ACTIONS: Total number of DAMOS actions |
fb6f026b SP |
98 | * |
99 | * The support of each action is up to running &struct damon_operations. | |
100 | * &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR supports all actions except | |
101 | * &enum DAMOS_LRU_PRIO and &enum DAMOS_LRU_DEPRIO. &enum DAMON_OPS_PADDR | |
102 | * supports only &enum DAMOS_PAGEOUT, &enum DAMOS_LRU_PRIO, &enum | |
103 | * DAMOS_LRU_DEPRIO, and &DAMOS_STAT. | |
1f366e42 SP |
104 | */ |
105 | enum damos_action { | |
106 | DAMOS_WILLNEED, | |
107 | DAMOS_COLD, | |
108 | DAMOS_PAGEOUT, | |
109 | DAMOS_HUGEPAGE, | |
110 | DAMOS_NOHUGEPAGE, | |
8cdcc532 | 111 | DAMOS_LRU_PRIO, |
99cdc2cd | 112 | DAMOS_LRU_DEPRIO, |
2f0b548c | 113 | DAMOS_STAT, /* Do nothing but only record the stat */ |
5257f36e | 114 | NR_DAMOS_ACTIONS, |
1f366e42 SP |
115 | }; |
116 | ||
2b8a248d SP |
117 | /** |
118 | * struct damos_quota - Controls the aggressiveness of the given scheme. | |
1cd24303 | 119 | * @ms: Maximum milliseconds that the scheme can use. |
2b8a248d SP |
120 | * @sz: Maximum bytes of memory that the action can be applied. |
121 | * @reset_interval: Charge reset interval in milliseconds. | |
122 | * | |
38683e00 SP |
123 | * @weight_sz: Weight of the region's size for prioritization. |
124 | * @weight_nr_accesses: Weight of the region's nr_accesses for prioritization. | |
125 | * @weight_age: Weight of the region's age for prioritization. | |
126 | * | |
2b8a248d | 127 | * To avoid consuming too much CPU time or IO resources for applying the |
1cd24303 SP |
128 | * &struct damos->action to large memory, DAMON allows users to set time and/or |
129 | * size quotas. The quotas can be set by writing non-zero values to &ms and | |
130 | * &sz, respectively. If the time quota is set, DAMON tries to use only up to | |
131 | * &ms milliseconds within &reset_interval for applying the action. If the | |
132 | * size quota is set, DAMON tries to apply the action only up to &sz bytes | |
133 | * within &reset_interval. | |
134 | * | |
135 | * Internally, the time quota is transformed to a size quota using estimated | |
136 | * throughput of the scheme's action. DAMON then compares it against &sz and | |
137 | * uses smaller one as the effective quota. | |
38683e00 SP |
138 | * |
139 | * For selecting regions within the quota, DAMON prioritizes current scheme's | |
f7d911c3 | 140 | * target memory regions using the &struct damon_operations->get_scheme_score. |
38683e00 | 141 | * You could customize the prioritization logic by setting &weight_sz, |
f7d911c3 | 142 | * &weight_nr_accesses, and &weight_age, because monitoring operations are |
38683e00 | 143 | * encouraged to respect those. |
2b8a248d SP |
144 | */ |
145 | struct damos_quota { | |
1cd24303 | 146 | unsigned long ms; |
2b8a248d SP |
147 | unsigned long sz; |
148 | unsigned long reset_interval; | |
149 | ||
38683e00 SP |
150 | unsigned int weight_sz; |
151 | unsigned int weight_nr_accesses; | |
152 | unsigned int weight_age; | |
153 | ||
1cd24303 SP |
154 | /* private: */ |
155 | /* For throughput estimation */ | |
156 | unsigned long total_charged_sz; | |
157 | unsigned long total_charged_ns; | |
158 | ||
159 | unsigned long esz; /* Effective size quota in bytes */ | |
160 | ||
161 | /* For charging the quota */ | |
2b8a248d SP |
162 | unsigned long charged_sz; |
163 | unsigned long charged_from; | |
50585192 SP |
164 | struct damon_target *charge_target_from; |
165 | unsigned long charge_addr_from; | |
38683e00 SP |
166 | |
167 | /* For prioritization */ | |
168 | unsigned long histogram[DAMOS_MAX_SCORE + 1]; | |
169 | unsigned int min_score; | |
2b8a248d SP |
170 | }; |
171 | ||
ee801b7d SP |
172 | /** |
173 | * enum damos_wmark_metric - Represents the watermark metric. | |
174 | * | |
175 | * @DAMOS_WMARK_NONE: Ignore the watermarks of the given scheme. | |
176 | * @DAMOS_WMARK_FREE_MEM_RATE: Free memory rate of the system in [0,1000]. | |
5257f36e | 177 | * @NR_DAMOS_WMARK_METRICS: Total number of DAMOS watermark metrics |
ee801b7d SP |
178 | */ |
179 | enum damos_wmark_metric { | |
180 | DAMOS_WMARK_NONE, | |
181 | DAMOS_WMARK_FREE_MEM_RATE, | |
5257f36e | 182 | NR_DAMOS_WMARK_METRICS, |
ee801b7d SP |
183 | }; |
184 | ||
185 | /** | |
186 | * struct damos_watermarks - Controls when a given scheme should be activated. | |
187 | * @metric: Metric for the watermarks. | |
188 | * @interval: Watermarks check time interval in microseconds. | |
189 | * @high: High watermark. | |
190 | * @mid: Middle watermark. | |
191 | * @low: Low watermark. | |
192 | * | |
193 | * If &metric is &DAMOS_WMARK_NONE, the scheme is always active. Being active | |
194 | * means DAMON does monitoring and applying the action of the scheme to | |
195 | * appropriate memory regions. Else, DAMON checks &metric of the system for at | |
196 | * least every &interval microseconds and works as below. | |
197 | * | |
198 | * If &metric is higher than &high, the scheme is inactivated. If &metric is | |
199 | * between &mid and &low, the scheme is activated. If &metric is lower than | |
200 | * &low, the scheme is inactivated. | |
201 | */ | |
202 | struct damos_watermarks { | |
203 | enum damos_wmark_metric metric; | |
204 | unsigned long interval; | |
205 | unsigned long high; | |
206 | unsigned long mid; | |
207 | unsigned long low; | |
208 | ||
209 | /* private: */ | |
210 | bool activated; | |
211 | }; | |
212 | ||
0e92c2ee SP |
213 | /** |
214 | * struct damos_stat - Statistics on a given scheme. | |
215 | * @nr_tried: Total number of regions that the scheme is tried to be applied. | |
216 | * @sz_tried: Total size of regions that the scheme is tried to be applied. | |
217 | * @nr_applied: Total number of regions that the scheme is applied. | |
218 | * @sz_applied: Total size of regions that the scheme is applied. | |
6268eac3 | 219 | * @qt_exceeds: Total number of times the quota of the scheme has exceeded. |
0e92c2ee SP |
220 | */ |
221 | struct damos_stat { | |
222 | unsigned long nr_tried; | |
223 | unsigned long sz_tried; | |
224 | unsigned long nr_applied; | |
225 | unsigned long sz_applied; | |
6268eac3 | 226 | unsigned long qt_exceeds; |
0e92c2ee SP |
227 | }; |
228 | ||
98def236 SP |
229 | /** |
230 | * enum damos_filter_type - Type of memory for &struct damos_filter | |
231 | * @DAMOS_FILTER_TYPE_ANON: Anonymous pages. | |
232 | * @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages. | |
ab9bda00 | 233 | * @DAMOS_FILTER_TYPE_ADDR: Address range. |
17e7c724 | 234 | * @DAMOS_FILTER_TYPE_TARGET: Data Access Monitoring target. |
98def236 | 235 | * @NR_DAMOS_FILTER_TYPES: Number of filter types. |
55901e89 | 236 | * |
ab9bda00 SP |
237 | * The anon pages type and memcg type filters are handled by underlying |
238 | * &struct damon_operations as a part of scheme action trying, and therefore | |
239 | * accounted as 'tried'. In contrast, other types are handled by core layer | |
240 | * before trying of the action and therefore not accounted as 'tried'. | |
241 | * | |
242 | * The support of the filters that handled by &struct damon_operations depend | |
243 | * on the running &struct damon_operations. | |
244 | * &enum DAMON_OPS_PADDR supports both anon pages type and memcg type filters, | |
245 | * while &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR don't support any of | |
246 | * the two types. | |
98def236 SP |
247 | */ |
248 | enum damos_filter_type { | |
249 | DAMOS_FILTER_TYPE_ANON, | |
250 | DAMOS_FILTER_TYPE_MEMCG, | |
ab9bda00 | 251 | DAMOS_FILTER_TYPE_ADDR, |
17e7c724 | 252 | DAMOS_FILTER_TYPE_TARGET, |
98def236 SP |
253 | NR_DAMOS_FILTER_TYPES, |
254 | }; | |
255 | ||
256 | /** | |
257 | * struct damos_filter - DAMOS action target memory filter. | |
258 | * @type: Type of the page. | |
259 | * @matching: If the matching page should filtered out or in. | |
260 | * @memcg_id: Memcg id of the question if @type is DAMOS_FILTER_MEMCG. | |
ab9bda00 | 261 | * @addr_range: Address range if @type is DAMOS_FILTER_TYPE_ADDR. |
17e7c724 SP |
262 | * @target_idx: Index of the &struct damon_target of |
263 | * &damon_ctx->adaptive_targets if @type is | |
264 | * DAMOS_FILTER_TYPE_TARGET. | |
98def236 SP |
265 | * @list: List head for siblings. |
266 | * | |
267 | * Before applying the &damos->action to a memory region, DAMOS checks if each | |
268 | * page of the region matches to this and avoid applying the action if so. | |
ab9bda00 SP |
269 | * Support of each filter type depends on the running &struct damon_operations |
270 | and the type. Refer to &enum damos_filter_type for more detail. |
98def236 SP |
271 | */ |
272 | struct damos_filter { | |
273 | enum damos_filter_type type; | |
274 | bool matching; | |
275 | union { | |
276 | unsigned short memcg_id; | |
ab9bda00 | 277 | struct damon_addr_range addr_range; |
17e7c724 | 278 | int target_idx; |
98def236 SP |
279 | }; |
280 | struct list_head list; | |
281 | }; | |
282 | ||
1f366e42 | 283 | /** |
f5a79d7c | 284 | * struct damos_access_pattern - Target access pattern of the given scheme. |
1f366e42 SP |
285 | * @min_sz_region: Minimum size of target regions. |
286 | * @max_sz_region: Maximum size of target regions. | |
287 | * @min_nr_accesses: Minimum ``->nr_accesses`` of target regions. | |
288 | * @max_nr_accesses: Maximum ``->nr_accesses`` of target regions. | |
289 | * @min_age_region: Minimum age of target regions. | |
290 | * @max_age_region: Maximum age of target regions. | |
f5a79d7c YD |
291 | */ |
292 | struct damos_access_pattern { | |
293 | unsigned long min_sz_region; | |
294 | unsigned long max_sz_region; | |
295 | unsigned int min_nr_accesses; | |
296 | unsigned int max_nr_accesses; | |
297 | unsigned int min_age_region; | |
298 | unsigned int max_age_region; | |
299 | }; | |
300 | ||
301 | /** | |
302 | * struct damos - Represents a Data Access Monitoring-based Operation Scheme. | |
303 | * @pattern: Access pattern of target regions. | |
1f366e42 | 304 | * @action: &damos_action to be applied to the target regions. |
2b8a248d | 305 | * @quota: Control the aggressiveness of this scheme. |
ee801b7d | 306 | * @wmarks: Watermarks for automated (in)activation of this scheme. |
98def236 | 307 | * @filters: Additional set of &struct damos_filter for &action. |
0e92c2ee | 308 | * @stat: Statistics of this scheme. |
1f366e42 SP |
309 | * @list: List head for siblings. |
310 | * | |
2b8a248d | 311 | * For each aggregation interval, DAMON finds regions which fit in the |
f5a79d7c YD |
312 | * &pattern and applies &action to those. To avoid consuming too much |
313 | * CPU time or IO resources for the &action, &quota is used. |
2b8a248d | 314 | * |
ee801b7d SP |
315 | * To do the work only when needed, schemes can be activated for specific |
316 | * system situations using &wmarks. If all schemes that registered to the | |
317 | * monitoring context are inactive, DAMON stops monitoring either, and just | |
318 | * repeatedly checks the watermarks. | |
319 | * | |
98def236 SP |
320 | * Before applying the &action to a memory region, &struct damon_operations |
321 | * implementation could check pages of the region and skip &action to respect | |
322 | * &filters. |
323 | * | |
2b8a248d SP |
324 | * After applying the &action to each region, &stat is |
325 | * updated to reflect the number of regions and total size of regions that the | |
326 | * &action is applied. | |
1f366e42 SP |
327 | */ |
328 | struct damos { | |
f5a79d7c | 329 | struct damos_access_pattern pattern; |
1f366e42 | 330 | enum damos_action action; |
2b8a248d | 331 | struct damos_quota quota; |
ee801b7d | 332 | struct damos_watermarks wmarks; |
98def236 | 333 | struct list_head filters; |
0e92c2ee | 334 | struct damos_stat stat; |
1f366e42 SP |
335 | struct list_head list; |
336 | }; | |
337 | ||
9f7b053a SP |
338 | /** |
339 | * enum damon_ops_id - Identifier for each monitoring operations implementation | |
340 | * | |
341 | * @DAMON_OPS_VADDR: Monitoring operations for virtual address spaces | |
de6d0154 SP |
342 | * @DAMON_OPS_FVADDR: Monitoring operations for only fixed ranges of virtual |
343 | * address spaces | |
9f7b053a | 344 | * @DAMON_OPS_PADDR: Monitoring operations for the physical address space |
d4a157f5 | 345 | * @NR_DAMON_OPS: Number of monitoring operations implementations |
9f7b053a SP |
346 | */ |
347 | enum damon_ops_id { | |
348 | DAMON_OPS_VADDR, | |
de6d0154 | 349 | DAMON_OPS_FVADDR, |
9f7b053a SP |
350 | DAMON_OPS_PADDR, |
351 | NR_DAMON_OPS, | |
352 | }; | |
353 | ||
2224d848 SP |
354 | struct damon_ctx; |
355 | ||
356 | /** | |
f7d911c3 | 357 | * struct damon_operations - Monitoring operations for given use cases. |
2224d848 | 358 | * |
9f7b053a | 359 | * @id: Identifier of this operations set. |
f7d911c3 SP |
360 | * @init: Initialize operations-related data structures. |
361 | * @update: Update operations-related data structures. | |
2224d848 SP |
362 | * @prepare_access_checks: Prepare next access check of target regions. |
363 | * @check_accesses: Check the accesses to target regions. | |
364 | * @reset_aggregated: Reset aggregated accesses monitoring results. | |
38683e00 | 365 | * @get_scheme_score: Get the score of a region for a scheme. |
1f366e42 | 366 | * @apply_scheme: Apply a DAMON-based operation scheme. |
2224d848 SP |
367 | * @target_valid: Determine if the target is valid. |
368 | * @cleanup: Clean up the context. | |
369 | * | |
370 | * DAMON can be extended for various address spaces and usages. For this, | |
f7d911c3 SP |
371 | * users should register the low level operations for their target address |
372 | * space and usecase via the &damon_ctx.ops. Then, the monitoring thread | |
2224d848 | 373 | * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting |
6b3f013b | 374 | * the monitoring, @update after each &damon_attrs.ops_update_interval, and |
2224d848 | 375 | * @check_accesses, @target_valid and @prepare_access_checks after each |
6b3f013b SP |
376 | * &damon_attrs.sample_interval. Finally, @reset_aggregated is called after |
377 | * each &damon_attrs.aggr_interval. | |
2224d848 | 378 | * |
9f7b053a SP |
379 | * Each &struct damon_operations instance having valid @id can be registered |
380 | * via damon_register_ops() and selected by damon_select_ops() later. | |
f7d911c3 | 381 | * @init should initialize operations-related data structures. For example, |
2224d848 | 382 | * this could be used to construct proper monitoring target regions and link |
f23b8eee | 383 | * those to @damon_ctx.adaptive_targets. |
f7d911c3 | 384 | * @update should update the operations-related data structures. For example, |
2224d848 SP |
385 | * this could be used to update monitoring target regions for current status. |
386 | * @prepare_access_checks should manipulate the monitoring regions to be | |
387 | * prepared for the next access check. | |
388 | * @check_accesses should check the accesses to each region that made after the | |
389 | * last preparation and update the number of observed accesses of each region. | |
b9a6ac4e SP |
390 | * It should also return max number of observed accesses that made as a result |
391 | * of its update. The value will be used for regions adjustment threshold. | |
2224d848 SP |
392 | * @reset_aggregated should reset the access monitoring results that aggregated |
393 | * by @check_accesses. | |
38683e00 SP |
394 | * @get_scheme_score should return the priority score of a region for a scheme |
395 | * as an integer in [0, &DAMOS_MAX_SCORE]. | |
1f366e42 SP |
396 | * @apply_scheme is called from @kdamond when a region for user provided |
397 | * DAMON-based operation scheme is found. It should apply the scheme's action | |
0e92c2ee SP |
398 | * to the region and return bytes of the region that the action is successfully |
399 | * applied. | |
2224d848 SP |
400 | * @target_valid should check whether the target is still valid for the |
401 | * monitoring. | |
402 | * @cleanup is called from @kdamond just before its termination. | |
403 | */ | |
f7d911c3 | 404 | struct damon_operations { |
9f7b053a | 405 | enum damon_ops_id id; |
2224d848 SP |
406 | void (*init)(struct damon_ctx *context); |
407 | void (*update)(struct damon_ctx *context); | |
408 | void (*prepare_access_checks)(struct damon_ctx *context); | |
b9a6ac4e | 409 | unsigned int (*check_accesses)(struct damon_ctx *context); |
2224d848 | 410 | void (*reset_aggregated)(struct damon_ctx *context); |
38683e00 SP |
411 | int (*get_scheme_score)(struct damon_ctx *context, |
412 | struct damon_target *t, struct damon_region *r, | |
413 | struct damos *scheme); | |
0e92c2ee SP |
414 | unsigned long (*apply_scheme)(struct damon_ctx *context, |
415 | struct damon_target *t, struct damon_region *r, | |
416 | struct damos *scheme); | |
16bc1b0f | 417 | bool (*target_valid)(struct damon_target *t); |
2224d848 SP |
418 | void (*cleanup)(struct damon_ctx *context); |
419 | }; | |
420 | ||
d2f272b3 SP |
421 | /** |
422 | * struct damon_callback - Monitoring events notification callbacks. | |
2224d848 SP |
423 | * |
424 | * @before_start: Called before starting the monitoring. | |
6e74d2bf | 425 | * @after_wmarks_check: Called after each schemes' watermarks check. |
2224d848 SP |
426 | * @after_sampling: Called after each sampling. |
427 | * @after_aggregation: Called after each aggregation. | |
44467bbb | 428 | * @before_damos_apply: Called before applying DAMOS action. |
2224d848 SP |
429 | * @before_terminate: Called before terminating the monitoring. |
430 | * @private: User private data. | |
431 | * | |
432 | * The monitoring thread (&damon_ctx.kdamond) calls @before_start and | |
433 | * @before_terminate just before starting and finishing the monitoring, | |
434 | * respectively. Therefore, those are good places for installing and cleaning | |
435 | * @private. | |
436 | * | |
6e74d2bf SP |
437 | * The monitoring thread calls @after_wmarks_check after each DAMON-based |
438 | * operation schemes' watermarks check. If users need to make changes to the | |
439 | * attributes of the monitoring context while it's deactivated due to the | |
440 | * watermarks, this is the good place to do. | |
441 | * | |
2224d848 SP |
442 | * The monitoring thread calls @after_sampling and @after_aggregation for each |
443 | * of the sampling intervals and aggregation intervals, respectively. | |
444 | * Therefore, users can safely access the monitoring results without additional | |
445 | * protection. For the reason, users are recommended to use these callback for | |
446 | * the accesses to the results. | |
447 | * | |
448 | * If any callback returns non-zero, monitoring stops. | |
449 | */ | |
450 | struct damon_callback { | |
451 | void *private; | |
452 | ||
453 | int (*before_start)(struct damon_ctx *context); | |
6e74d2bf | 454 | int (*after_wmarks_check)(struct damon_ctx *context); |
2224d848 SP |
455 | int (*after_sampling)(struct damon_ctx *context); |
456 | int (*after_aggregation)(struct damon_ctx *context); | |
44467bbb SP |
457 | int (*before_damos_apply)(struct damon_ctx *context, |
458 | struct damon_target *target, | |
459 | struct damon_region *region, | |
460 | struct damos *scheme); | |
658f9ae7 | 461 | void (*before_terminate)(struct damon_ctx *context); |
2224d848 SP |
462 | }; |
463 | ||
464 | /** | |
cbeaa77b | 465 | * struct damon_attrs - Monitoring attributes for accuracy/overhead control. |
2224d848 SP |
466 | * |
467 | * @sample_interval: The time between access samplings. | |
468 | * @aggr_interval: The time between monitor results aggregations. | |
f7d911c3 | 469 | * @ops_update_interval: The time between monitoring operations updates. |
cbeaa77b SP |
470 | * @min_nr_regions: The minimum number of adaptive monitoring |
471 | * regions. | |
472 | * @max_nr_regions: The maximum number of adaptive monitoring | |
473 | * regions. | |
2224d848 SP |
474 | * |
475 | * For each @sample_interval, DAMON checks whether each region is accessed or | |
d896073f SP |
476 | * not during the last @sample_interval. If such access is found, DAMON |
477 | * aggregates the information by increasing &damon_region->nr_accesses for | |
478 | * @aggr_interval time. For each @aggr_interval, the count is reset. DAMON | |
479 | * also checks whether the target memory regions need update (e.g., by | |
480 | * ``mmap()`` calls from the application, in case of virtual memory monitoring) | |
481 | * and applies the changes for each @ops_update_interval. All time intervals | |
482 | * are in micro-seconds. Please refer to &struct damon_operations and &struct | |
483 | * damon_callback for more detail. | |
cbeaa77b SP |
484 | */ |
485 | struct damon_attrs { | |
486 | unsigned long sample_interval; | |
487 | unsigned long aggr_interval; | |
488 | unsigned long ops_update_interval; | |
489 | unsigned long min_nr_regions; | |
490 | unsigned long max_nr_regions; | |
491 | }; | |
492 | ||
493 | /** | |
494 | * struct damon_ctx - Represents a context for each monitoring. This is the | |
495 | * main interface that allows users to set the attributes and get the results | |
496 | * of the monitoring. | |
2224d848 | 497 | * |
cbeaa77b | 498 | * @attrs: Monitoring attributes for accuracy/overhead control. |
2224d848 | 499 | * @kdamond: Kernel thread who does the monitoring. |
2224d848 SP |
500 | * @kdamond_lock: Mutex for the synchronizations with @kdamond. |
501 | * | |
502 | * For each monitoring context, one kernel thread for the monitoring is | |
503 | * created. The pointer to the thread is stored in @kdamond. | |
504 | * | |
505 | * Once started, the monitoring thread runs until explicitly required to be | |
506 | * terminated or every monitoring target is invalid. The validity of the | |
f7d911c3 | 507 | * targets is checked via the &damon_operations.target_valid of @ops. The |
bcc728eb CZ |
508 | * termination can also be explicitly requested by calling damon_stop(). |
509 | * The thread sets @kdamond to NULL when it terminates. Therefore, users can | |
510 | * know whether the monitoring is ongoing or terminated by reading @kdamond. | |
511 | * Reads and writes to @kdamond from outside of the monitoring thread must | |
512 | * be protected by @kdamond_lock. | |
513 | * | |
514 | * Note that the monitoring thread protects only @kdamond via @kdamond_lock. | |
515 | * Accesses to other fields must be protected by themselves. | |
2224d848 | 516 | * |
f7d911c3 | 517 | * @ops: Set of monitoring operations for given use cases. |
2224d848 SP |
518 | * @callback: Set of callbacks for monitoring events notifications. |
519 | * | |
b9a6ac4e | 520 | * @adaptive_targets: Head of monitoring targets (&damon_target) list. |
1f366e42 | 521 | * @schemes: Head of schemes (&damos) list. |
2224d848 SP |
522 | */ |
523 | struct damon_ctx { | |
cbeaa77b | 524 | struct damon_attrs attrs; |
2224d848 SP |
525 | |
526 | /* private: internal use only */ | |
4472edf6 SP |
527 | /* number of sample intervals that passed since this context started */ |
528 | unsigned long passed_sample_intervals; | |
529 | /* | |
530 | * number of sample intervals that should be passed before next | |
531 | * aggregation | |
532 | */ | |
533 | unsigned long next_aggregation_sis; | |
534 | /* | |
535 | * number of sample intervals that should be passed before next ops | |
536 | * update | |
537 | */ | |
538 | unsigned long next_ops_update_sis; | |
2224d848 SP |
539 | |
540 | /* public: */ | |
541 | struct task_struct *kdamond; | |
2224d848 SP |
542 | struct mutex kdamond_lock; |
543 | ||
f7d911c3 | 544 | struct damon_operations ops; |
2224d848 SP |
545 | struct damon_callback callback; |
546 | ||
b9a6ac4e | 547 | struct list_head adaptive_targets; |
1f366e42 | 548 | struct list_head schemes; |
2224d848 SP |
549 | }; |
550 | ||
88f86dcf SP |
551 | static inline struct damon_region *damon_next_region(struct damon_region *r) |
552 | { | |
553 | return container_of(r->list.next, struct damon_region, list); | |
554 | } | |
f23b8eee | 555 | |
88f86dcf SP |
556 | static inline struct damon_region *damon_prev_region(struct damon_region *r) |
557 | { | |
558 | return container_of(r->list.prev, struct damon_region, list); | |
559 | } | |
f23b8eee | 560 | |
88f86dcf SP |
561 | static inline struct damon_region *damon_last_region(struct damon_target *t) |
562 | { | |
563 | return list_last_entry(&t->regions_list, struct damon_region, list); | |
564 | } | |
50585192 | 565 | |
36001cba KX |
566 | static inline struct damon_region *damon_first_region(struct damon_target *t) |
567 | { | |
568 | return list_first_entry(&t->regions_list, struct damon_region, list); | |
569 | } | |
570 | ||
652e0446 XH |
571 | static inline unsigned long damon_sz_region(struct damon_region *r) |
572 | { | |
573 | return r->ar.end - r->ar.start; | |
574 | } | |
575 | ||
576 | ||
f23b8eee SP |
577 | #define damon_for_each_region(r, t) \ |
578 | list_for_each_entry(r, &t->regions_list, list) | |
579 | ||
36001cba KX |
580 | #define damon_for_each_region_from(r, t) \ |
581 | list_for_each_entry_from(r, &t->regions_list, list) | |
582 | ||
f23b8eee SP |
583 | #define damon_for_each_region_safe(r, next, t) \ |
584 | list_for_each_entry_safe(r, next, &t->regions_list, list) | |
585 | ||
586 | #define damon_for_each_target(t, ctx) \ | |
b9a6ac4e | 587 | list_for_each_entry(t, &(ctx)->adaptive_targets, list) |
f23b8eee SP |
588 | |
589 | #define damon_for_each_target_safe(t, next, ctx) \ | |
b9a6ac4e | 590 | list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list) |
f23b8eee | 591 | |
1f366e42 SP |
592 | #define damon_for_each_scheme(s, ctx) \ |
593 | list_for_each_entry(s, &(ctx)->schemes, list) | |
594 | ||
595 | #define damon_for_each_scheme_safe(s, next, ctx) \ | |
596 | list_for_each_entry_safe(s, next, &(ctx)->schemes, list) | |
597 | ||
98def236 SP |
598 | #define damos_for_each_filter(f, scheme) \ |
599 | list_for_each_entry(f, &(scheme)->filters, list) | |
600 | ||
601 | #define damos_for_each_filter_safe(f, next, scheme) \ | |
602 | list_for_each_entry_safe(f, next, &(scheme)->filters, list) | |
603 | ||
2224d848 SP |
604 | #ifdef CONFIG_DAMON |
605 | ||
f23b8eee | 606 | struct damon_region *damon_new_region(unsigned long start, unsigned long end); |
2cd4b8e1 GJ |
607 | |
608 | /* | |
609 | * Add a region between two other regions | |
610 | */ | |
611 | static inline void damon_insert_region(struct damon_region *r, | |
b9a6ac4e | 612 | struct damon_region *prev, struct damon_region *next, |
2cd4b8e1 GJ |
613 | struct damon_target *t) |
614 | { | |
615 | __list_add(&r->list, &prev->list, &next->list); | |
616 | t->nr_regions++; | |
617 | } | |
618 | ||
f23b8eee | 619 | void damon_add_region(struct damon_region *r, struct damon_target *t); |
b9a6ac4e | 620 | void damon_destroy_region(struct damon_region *r, struct damon_target *t); |
d0723bc0 SP |
621 | int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges, |
622 | unsigned int nr_ranges); | |
f23b8eee | 623 | |
98def236 SP |
624 | struct damos_filter *damos_new_filter(enum damos_filter_type type, |
625 | bool matching); | |
626 | void damos_add_filter(struct damos *s, struct damos_filter *f); | |
627 | void damos_destroy_filter(struct damos_filter *f); | |
628 | ||
f5a79d7c YD |
629 | struct damos *damon_new_scheme(struct damos_access_pattern *pattern, |
630 | enum damos_action action, struct damos_quota *quota, | |
631 | struct damos_watermarks *wmarks); | |
1f366e42 SP |
632 | void damon_add_scheme(struct damon_ctx *ctx, struct damos *s); |
633 | void damon_destroy_scheme(struct damos *s); | |
634 | ||
1971bd63 | 635 | struct damon_target *damon_new_target(void); |
f23b8eee | 636 | void damon_add_target(struct damon_ctx *ctx, struct damon_target *t); |
b5ca3e83 | 637 | bool damon_targets_empty(struct damon_ctx *ctx); |
f23b8eee SP |
638 | void damon_free_target(struct damon_target *t); |
639 | void damon_destroy_target(struct damon_target *t); | |
b9a6ac4e | 640 | unsigned int damon_nr_regions(struct damon_target *t); |
f23b8eee | 641 | |
2224d848 SP |
642 | struct damon_ctx *damon_new_ctx(void); |
643 | void damon_destroy_ctx(struct damon_ctx *ctx); | |
bead3b00 | 644 | int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs); |
cc713520 | 645 | void damon_set_schemes(struct damon_ctx *ctx, |
1f366e42 | 646 | struct damos **schemes, ssize_t nr_schemes); |
4bc05954 | 647 | int damon_nr_running_ctxs(void); |
152e5617 | 648 | bool damon_is_registered_ops(enum damon_ops_id id); |
9f7b053a SP |
649 | int damon_register_ops(struct damon_operations *ops); |
650 | int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id); | |
2224d848 | 651 | |
c9e124e0 SP |
652 | static inline bool damon_target_has_pid(const struct damon_ctx *ctx) |
653 | { | |
654 | return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR; | |
655 | } | |
656 | ||
657 | ||
8b9b0d33 | 658 | int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive); |
2224d848 SP |
659 | int damon_stop(struct damon_ctx **ctxs, int nr_ctxs); |
660 | ||
233f0b31 KX |
661 | int damon_set_region_biggest_system_ram_default(struct damon_target *t, |
662 | unsigned long *start, unsigned long *end); | |
0d83b2d8 | 663 | |
2224d848 SP |
664 | #endif /* CONFIG_DAMON */ |
665 | ||
666 | #endif /* _DAMON_H_ */