| 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Data Access Monitor |
| 4 | * |
| 5 | * Author: SeongJae Park <sj@kernel.org> |
| 6 | */ |
| 7 | |
| 8 | #define pr_fmt(fmt) "damon: " fmt |
| 9 | |
| 10 | #include <linux/damon.h> |
| 11 | #include <linux/delay.h> |
| 12 | #include <linux/kthread.h> |
| 13 | #include <linux/mm.h> |
| 14 | #include <linux/psi.h> |
| 15 | #include <linux/slab.h> |
| 16 | #include <linux/string.h> |
| 17 | #include <linux/string_choices.h> |
| 18 | |
| 19 | #define CREATE_TRACE_POINTS |
| 20 | #include <trace/events/damon.h> |
| 21 | |
| 22 | #ifdef CONFIG_DAMON_KUNIT_TEST |
| 23 | #undef DAMON_MIN_REGION |
| 24 | #define DAMON_MIN_REGION 1 |
| 25 | #endif |
| 26 | |
| 27 | static DEFINE_MUTEX(damon_lock); |
| 28 | static int nr_running_ctxs; |
| 29 | static bool running_exclusive_ctxs; |
| 30 | |
| 31 | static DEFINE_MUTEX(damon_ops_lock); |
| 32 | static struct damon_operations damon_registered_ops[NR_DAMON_OPS]; |
| 33 | |
| 34 | static struct kmem_cache *damon_region_cache __ro_after_init; |
| 35 | |
| 36 | /* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */ |
| 37 | static bool __damon_is_registered_ops(enum damon_ops_id id) |
| 38 | { |
| 39 | struct damon_operations empty_ops = {}; |
| 40 | |
| 41 | if (!memcmp(&empty_ops, &damon_registered_ops[id], sizeof(empty_ops))) |
| 42 | return false; |
| 43 | return true; |
| 44 | } |
| 45 | |
| 46 | /** |
| 47 | * damon_is_registered_ops() - Check if a given damon_operations is registered. |
| 48 | * @id: Id of the damon_operations to check if registered. |
| 49 | * |
| 50 | * Return: true if the ops is set, false otherwise. |
| 51 | */ |
| 52 | bool damon_is_registered_ops(enum damon_ops_id id) |
| 53 | { |
| 54 | bool registered; |
| 55 | |
| 56 | if (id >= NR_DAMON_OPS) |
| 57 | return false; |
| 58 | mutex_lock(&damon_ops_lock); |
| 59 | registered = __damon_is_registered_ops(id); |
| 60 | mutex_unlock(&damon_ops_lock); |
| 61 | return registered; |
| 62 | } |
| 63 | |
| 64 | /** |
| 65 | * damon_register_ops() - Register a monitoring operations set to DAMON. |
| 66 | * @ops: monitoring operations set to register. |
| 67 | * |
 * This function registers a monitoring operations set having a valid &struct
 * damon_operations->id, so that others can find and use it later.
| 70 | * |
| 71 | * Return: 0 on success, negative error code otherwise. |
| 72 | */ |
| 73 | int damon_register_ops(struct damon_operations *ops) |
| 74 | { |
| 75 | int err = 0; |
| 76 | |
| 77 | if (ops->id >= NR_DAMON_OPS) |
| 78 | return -EINVAL; |
| 79 | |
| 80 | mutex_lock(&damon_ops_lock); |
| 81 | /* Fail for already registered ops */ |
| 82 | if (__damon_is_registered_ops(ops->id)) |
| 83 | err = -EINVAL; |
| 84 | else |
| 85 | damon_registered_ops[ops->id] = *ops; |
| 86 | mutex_unlock(&damon_ops_lock); |
| 87 | return err; |
| 88 | } |
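
/*
 * Example (illustrative sketch, not a definitive usage): how an operations
 * set implementation could register itself from its initialization routine.
 * The 'foo' names are hypothetical; only ->id must be a valid, not yet
 * registered enum damon_ops_id value.
 *
 *	static struct damon_operations foo_ops = {
 *		.id = DAMON_OPS_VADDR,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return damon_register_ops(&foo_ops);
 *	}
 */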
| 89 | |
| 90 | /** |
 * damon_select_ops() - Select a monitoring operations set to use with the
 * context.
| 92 | * @ctx: monitoring context to use the operations. |
| 93 | * @id: id of the registered monitoring operations to select. |
| 94 | * |
 * This function finds the registered monitoring operations set of @id and
 * makes @ctx use it.
| 97 | * |
| 98 | * Return: 0 on success, negative error code otherwise. |
| 99 | */ |
| 100 | int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id) |
| 101 | { |
| 102 | int err = 0; |
| 103 | |
| 104 | if (id >= NR_DAMON_OPS) |
| 105 | return -EINVAL; |
| 106 | |
| 107 | mutex_lock(&damon_ops_lock); |
| 108 | if (!__damon_is_registered_ops(id)) |
| 109 | err = -EINVAL; |
| 110 | else |
| 111 | ctx->ops = damon_registered_ops[id]; |
| 112 | mutex_unlock(&damon_ops_lock); |
| 113 | return err; |
| 114 | } |
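
/*
 * Example (sketch): making a new context use an already registered
 * operations set. Error handling is elided for brevity.
 *
 *	struct damon_ctx *ctx = damon_new_ctx();
 *	int err;
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	err = damon_select_ops(ctx, DAMON_OPS_VADDR);
 *	if (err)
 *		damon_destroy_ctx(ctx);
 */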
| 115 | |
| 116 | /* |
| 117 | * Construct a damon_region struct |
| 118 | * |
 * Returns the pointer to the new struct on success, or NULL otherwise
| 120 | */ |
| 121 | struct damon_region *damon_new_region(unsigned long start, unsigned long end) |
| 122 | { |
| 123 | struct damon_region *region; |
| 124 | |
| 125 | region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL); |
| 126 | if (!region) |
| 127 | return NULL; |
| 128 | |
| 129 | region->ar.start = start; |
| 130 | region->ar.end = end; |
| 131 | region->nr_accesses = 0; |
| 132 | region->nr_accesses_bp = 0; |
| 133 | INIT_LIST_HEAD(®ion->list); |
| 134 | |
| 135 | region->age = 0; |
| 136 | region->last_nr_accesses = 0; |
| 137 | |
| 138 | return region; |
| 139 | } |
| 140 | |
| 141 | void damon_add_region(struct damon_region *r, struct damon_target *t) |
| 142 | { |
| 143 | list_add_tail(&r->list, &t->regions_list); |
| 144 | t->nr_regions++; |
| 145 | } |
| 146 | |
| 147 | static void damon_del_region(struct damon_region *r, struct damon_target *t) |
| 148 | { |
| 149 | list_del(&r->list); |
| 150 | t->nr_regions--; |
| 151 | } |
| 152 | |
| 153 | static void damon_free_region(struct damon_region *r) |
| 154 | { |
| 155 | kmem_cache_free(damon_region_cache, r); |
| 156 | } |
| 157 | |
| 158 | void damon_destroy_region(struct damon_region *r, struct damon_target *t) |
| 159 | { |
| 160 | damon_del_region(r, t); |
| 161 | damon_free_region(r); |
| 162 | } |
| 163 | |
| 164 | /* |
| 165 | * Check whether a region is intersecting an address range |
| 166 | * |
| 167 | * Returns true if it is. |
| 168 | */ |
| 169 | static bool damon_intersect(struct damon_region *r, |
| 170 | struct damon_addr_range *re) |
| 171 | { |
| 172 | return !(r->ar.end <= re->start || re->end <= r->ar.start); |
| 173 | } |
| 174 | |
| 175 | /* |
| 176 | * Fill holes in regions with new regions. |
| 177 | */ |
| 178 | static int damon_fill_regions_holes(struct damon_region *first, |
| 179 | struct damon_region *last, struct damon_target *t) |
| 180 | { |
| 181 | struct damon_region *r = first; |
| 182 | |
| 183 | damon_for_each_region_from(r, t) { |
| 184 | struct damon_region *next, *newr; |
| 185 | |
| 186 | if (r == last) |
| 187 | break; |
| 188 | next = damon_next_region(r); |
| 189 | if (r->ar.end != next->ar.start) { |
| 190 | newr = damon_new_region(r->ar.end, next->ar.start); |
| 191 | if (!newr) |
| 192 | return -ENOMEM; |
| 193 | damon_insert_region(newr, r, next, t); |
| 194 | } |
| 195 | } |
| 196 | return 0; |
| 197 | } |
| 198 | |
| 199 | /* |
| 200 | * damon_set_regions() - Set regions of a target for given address ranges. |
| 201 | * @t: the given target. |
| 202 | * @ranges: array of new monitoring target ranges. |
| 203 | * @nr_ranges: length of @ranges. |
| 204 | * |
 * This function adds new regions to, or modifies existing regions of, a
 * monitoring target to fit in the given ranges.
| 207 | * |
| 208 | * Return: 0 if success, or negative error code otherwise. |
| 209 | */ |
| 210 | int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges, |
| 211 | unsigned int nr_ranges) |
| 212 | { |
| 213 | struct damon_region *r, *next; |
| 214 | unsigned int i; |
| 215 | int err; |
| 216 | |
| 217 | /* Remove regions which are not in the new ranges */ |
| 218 | damon_for_each_region_safe(r, next, t) { |
| 219 | for (i = 0; i < nr_ranges; i++) { |
| 220 | if (damon_intersect(r, &ranges[i])) |
| 221 | break; |
| 222 | } |
| 223 | if (i == nr_ranges) |
| 224 | damon_destroy_region(r, t); |
| 225 | } |
| 226 | |
| 227 | r = damon_first_region(t); |
| 228 | /* Add new regions or resize existing regions to fit in the ranges */ |
| 229 | for (i = 0; i < nr_ranges; i++) { |
| 230 | struct damon_region *first = NULL, *last, *newr; |
| 231 | struct damon_addr_range *range; |
| 232 | |
| 233 | range = &ranges[i]; |
| 234 | /* Get the first/last regions intersecting with the range */ |
| 235 | damon_for_each_region_from(r, t) { |
| 236 | if (damon_intersect(r, range)) { |
| 237 | if (!first) |
| 238 | first = r; |
| 239 | last = r; |
| 240 | } |
| 241 | if (r->ar.start >= range->end) |
| 242 | break; |
| 243 | } |
| 244 | if (!first) { |
| 245 | /* no region intersects with this range */ |
| 246 | newr = damon_new_region( |
| 247 | ALIGN_DOWN(range->start, |
| 248 | DAMON_MIN_REGION), |
| 249 | ALIGN(range->end, DAMON_MIN_REGION)); |
| 250 | if (!newr) |
| 251 | return -ENOMEM; |
| 252 | damon_insert_region(newr, damon_prev_region(r), r, t); |
| 253 | } else { |
| 254 | /* resize intersecting regions to fit in this range */ |
| 255 | first->ar.start = ALIGN_DOWN(range->start, |
| 256 | DAMON_MIN_REGION); |
| 257 | last->ar.end = ALIGN(range->end, DAMON_MIN_REGION); |
| 258 | |
| 259 | /* fill possible holes in the range */ |
| 260 | err = damon_fill_regions_holes(first, last, t); |
| 261 | if (err) |
| 262 | return err; |
| 263 | } |
| 264 | } |
| 265 | return 0; |
| 266 | } |
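
/*
 * Example (sketch): restricting a target to two address ranges. The
 * addresses are arbitrary; damon_set_regions() aligns them to
 * DAMON_MIN_REGION boundaries.
 *
 *	struct damon_addr_range ranges[] = {
 *		{ .start = 0x100000, .end = 0x200000 },
 *		{ .start = 0x500000, .end = 0x700000 },
 *	};
 *	int err;
 *
 *	err = damon_set_regions(t, ranges, ARRAY_SIZE(ranges));
 */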
| 267 | |
| 268 | struct damos_filter *damos_new_filter(enum damos_filter_type type, |
| 269 | bool matching, bool allow) |
| 270 | { |
| 271 | struct damos_filter *filter; |
| 272 | |
| 273 | filter = kmalloc(sizeof(*filter), GFP_KERNEL); |
| 274 | if (!filter) |
| 275 | return NULL; |
| 276 | filter->type = type; |
| 277 | filter->matching = matching; |
| 278 | filter->allow = allow; |
| 279 | INIT_LIST_HEAD(&filter->list); |
| 280 | return filter; |
| 281 | } |
| 282 | |
| 283 | /** |
 * damos_filter_for_ops() - Return if the filter is an ops-handled one.
| 285 | * @type: type of the filter. |
| 286 | * |
| 287 | * Return: true if the filter of @type needs to be handled by ops layer, false |
| 288 | * otherwise. |
| 289 | */ |
| 290 | bool damos_filter_for_ops(enum damos_filter_type type) |
| 291 | { |
| 292 | switch (type) { |
| 293 | case DAMOS_FILTER_TYPE_ADDR: |
| 294 | case DAMOS_FILTER_TYPE_TARGET: |
| 295 | return false; |
| 296 | default: |
| 297 | break; |
| 298 | } |
| 299 | return true; |
| 300 | } |
| 301 | |
| 302 | void damos_add_filter(struct damos *s, struct damos_filter *f) |
| 303 | { |
| 304 | if (damos_filter_for_ops(f->type)) |
| 305 | list_add_tail(&f->list, &s->ops_filters); |
| 306 | else |
| 307 | list_add_tail(&f->list, &s->filters); |
| 308 | } |
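
/*
 * Example (sketch): installing a filter that makes a scheme not apply its
 * action to a specific address range. Since DAMOS_FILTER_TYPE_ADDR is
 * handled by the core layer, damos_add_filter() links it to &damos->filters.
 *
 *	struct damos_filter *f;
 *
 *	f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false);
 *	if (!f)
 *		return -ENOMEM;
 *	f->addr_range.start = 0x100000;
 *	f->addr_range.end = 0x200000;
 *	damos_add_filter(scheme, f);
 */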
| 309 | |
| 310 | static void damos_del_filter(struct damos_filter *f) |
| 311 | { |
| 312 | list_del(&f->list); |
| 313 | } |
| 314 | |
| 315 | static void damos_free_filter(struct damos_filter *f) |
| 316 | { |
| 317 | kfree(f); |
| 318 | } |
| 319 | |
| 320 | void damos_destroy_filter(struct damos_filter *f) |
| 321 | { |
| 322 | damos_del_filter(f); |
| 323 | damos_free_filter(f); |
| 324 | } |
| 325 | |
| 326 | struct damos_quota_goal *damos_new_quota_goal( |
| 327 | enum damos_quota_goal_metric metric, |
| 328 | unsigned long target_value) |
| 329 | { |
| 330 | struct damos_quota_goal *goal; |
| 331 | |
| 332 | goal = kmalloc(sizeof(*goal), GFP_KERNEL); |
| 333 | if (!goal) |
| 334 | return NULL; |
| 335 | goal->metric = metric; |
| 336 | goal->target_value = target_value; |
| 337 | INIT_LIST_HEAD(&goal->list); |
| 338 | return goal; |
| 339 | } |
| 340 | |
| 341 | void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g) |
| 342 | { |
| 343 | list_add_tail(&g->list, &q->goals); |
| 344 | } |
| 345 | |
| 346 | static void damos_del_quota_goal(struct damos_quota_goal *g) |
| 347 | { |
| 348 | list_del(&g->list); |
| 349 | } |
| 350 | |
| 351 | static void damos_free_quota_goal(struct damos_quota_goal *g) |
| 352 | { |
| 353 | kfree(g); |
| 354 | } |
| 355 | |
| 356 | void damos_destroy_quota_goal(struct damos_quota_goal *g) |
| 357 | { |
| 358 | damos_del_quota_goal(g); |
| 359 | damos_free_quota_goal(g); |
| 360 | } |
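
/*
 * Example (sketch): adding a quota auto-tuning goal that is driven by
 * user-provided feedback. The target value of 10000 is arbitrary; for
 * DAMOS_QUOTA_USER_INPUT, users are assumed to keep ->current_value updated
 * so that the quota can be adjusted toward the target.
 *
 *	struct damos_quota_goal *goal;
 *
 *	goal = damos_new_quota_goal(DAMOS_QUOTA_USER_INPUT, 10000);
 *	if (!goal)
 *		return -ENOMEM;
 *	damos_add_quota_goal(&scheme->quota, goal);
 */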
| 361 | |
| 362 | /* initialize fields of @quota that normally API users wouldn't set */ |
| 363 | static struct damos_quota *damos_quota_init(struct damos_quota *quota) |
| 364 | { |
| 365 | quota->esz = 0; |
| 366 | quota->total_charged_sz = 0; |
| 367 | quota->total_charged_ns = 0; |
| 368 | quota->charged_sz = 0; |
| 369 | quota->charged_from = 0; |
| 370 | quota->charge_target_from = NULL; |
| 371 | quota->charge_addr_from = 0; |
| 372 | quota->esz_bp = 0; |
| 373 | return quota; |
| 374 | } |
| 375 | |
| 376 | struct damos *damon_new_scheme(struct damos_access_pattern *pattern, |
| 377 | enum damos_action action, |
| 378 | unsigned long apply_interval_us, |
| 379 | struct damos_quota *quota, |
| 380 | struct damos_watermarks *wmarks, |
| 381 | int target_nid) |
| 382 | { |
| 383 | struct damos *scheme; |
| 384 | |
| 385 | scheme = kmalloc(sizeof(*scheme), GFP_KERNEL); |
| 386 | if (!scheme) |
| 387 | return NULL; |
| 388 | scheme->pattern = *pattern; |
| 389 | scheme->action = action; |
| 390 | scheme->apply_interval_us = apply_interval_us; |
| 391 | /* |
	 * next_apply_sis will be set when kdamond starts. While kdamond is
	 * running, it will also be updated when the scheme is added to the
	 * DAMON context, or when damon_attrs are updated.
| 395 | */ |
| 396 | scheme->next_apply_sis = 0; |
| 397 | scheme->walk_completed = false; |
| 398 | INIT_LIST_HEAD(&scheme->filters); |
| 399 | INIT_LIST_HEAD(&scheme->ops_filters); |
| 400 | scheme->stat = (struct damos_stat){}; |
| 401 | INIT_LIST_HEAD(&scheme->list); |
| 402 | |
| 403 | scheme->quota = *(damos_quota_init(quota)); |
| 404 | /* quota.goals should be separately set by caller */ |
| 405 | INIT_LIST_HEAD(&scheme->quota.goals); |
| 406 | |
| 407 | scheme->wmarks = *wmarks; |
| 408 | scheme->wmarks.activated = true; |
| 409 | |
| 410 | scheme->target_nid = target_nid; |
| 411 | |
| 412 | return scheme; |
| 413 | } |
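
/*
 * Example (sketch): constructing a scheme that pages out regions that were
 * not accessed for at least ten aggregation intervals. The quota is left
 * unset (unlimited) and watermarks are disabled; the field values are
 * illustrative assumptions.
 *
 *	struct damos_access_pattern pattern = {
 *		.min_sz_region = PAGE_SIZE,
 *		.max_sz_region = ULONG_MAX,
 *		.min_nr_accesses = 0,
 *		.max_nr_accesses = 0,
 *		.min_age_region = 10,
 *		.max_age_region = UINT_MAX,
 *	};
 *	struct damos_quota quota = {};
 *	struct damos_watermarks wmarks = { .metric = DAMOS_WMARK_NONE, };
 *	struct damos *s;
 *
 *	s = damon_new_scheme(&pattern, DAMOS_PAGEOUT, 0, &quota, &wmarks,
 *			NUMA_NO_NODE);
 */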
| 414 | |
| 415 | static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx) |
| 416 | { |
| 417 | unsigned long sample_interval = ctx->attrs.sample_interval ? |
| 418 | ctx->attrs.sample_interval : 1; |
| 419 | unsigned long apply_interval = s->apply_interval_us ? |
| 420 | s->apply_interval_us : ctx->attrs.aggr_interval; |
| 421 | |
| 422 | s->next_apply_sis = ctx->passed_sample_intervals + |
| 423 | apply_interval / sample_interval; |
| 424 | } |
| 425 | |
| 426 | void damon_add_scheme(struct damon_ctx *ctx, struct damos *s) |
| 427 | { |
| 428 | list_add_tail(&s->list, &ctx->schemes); |
| 429 | damos_set_next_apply_sis(s, ctx); |
| 430 | } |
| 431 | |
| 432 | static void damon_del_scheme(struct damos *s) |
| 433 | { |
| 434 | list_del(&s->list); |
| 435 | } |
| 436 | |
| 437 | static void damon_free_scheme(struct damos *s) |
| 438 | { |
| 439 | kfree(s); |
| 440 | } |
| 441 | |
| 442 | void damon_destroy_scheme(struct damos *s) |
| 443 | { |
| 444 | struct damos_quota_goal *g, *g_next; |
| 445 | struct damos_filter *f, *next; |
| 446 | |
| 447 | damos_for_each_quota_goal_safe(g, g_next, &s->quota) |
| 448 | damos_destroy_quota_goal(g); |
| 449 | |
| 450 | damos_for_each_filter_safe(f, next, s) |
| 451 | damos_destroy_filter(f); |
| 452 | damon_del_scheme(s); |
| 453 | damon_free_scheme(s); |
| 454 | } |
| 455 | |
| 456 | /* |
| 457 | * Construct a damon_target struct |
| 458 | * |
 * Returns the pointer to the new struct on success, or NULL otherwise
| 460 | */ |
| 461 | struct damon_target *damon_new_target(void) |
| 462 | { |
| 463 | struct damon_target *t; |
| 464 | |
| 465 | t = kmalloc(sizeof(*t), GFP_KERNEL); |
| 466 | if (!t) |
| 467 | return NULL; |
| 468 | |
| 469 | t->pid = NULL; |
| 470 | t->nr_regions = 0; |
| 471 | INIT_LIST_HEAD(&t->regions_list); |
| 472 | INIT_LIST_HEAD(&t->list); |
| 473 | |
| 474 | return t; |
| 475 | } |
| 476 | |
| 477 | void damon_add_target(struct damon_ctx *ctx, struct damon_target *t) |
| 478 | { |
| 479 | list_add_tail(&t->list, &ctx->adaptive_targets); |
| 480 | } |
| 481 | |
| 482 | bool damon_targets_empty(struct damon_ctx *ctx) |
| 483 | { |
| 484 | return list_empty(&ctx->adaptive_targets); |
| 485 | } |
| 486 | |
| 487 | static void damon_del_target(struct damon_target *t) |
| 488 | { |
| 489 | list_del(&t->list); |
| 490 | } |
| 491 | |
| 492 | void damon_free_target(struct damon_target *t) |
| 493 | { |
| 494 | struct damon_region *r, *next; |
| 495 | |
| 496 | damon_for_each_region_safe(r, next, t) |
| 497 | damon_free_region(r); |
| 498 | kfree(t); |
| 499 | } |
| 500 | |
| 501 | void damon_destroy_target(struct damon_target *t) |
| 502 | { |
| 503 | damon_del_target(t); |
| 504 | damon_free_target(t); |
| 505 | } |
| 506 | |
| 507 | unsigned int damon_nr_regions(struct damon_target *t) |
| 508 | { |
| 509 | return t->nr_regions; |
| 510 | } |
| 511 | |
| 512 | struct damon_ctx *damon_new_ctx(void) |
| 513 | { |
| 514 | struct damon_ctx *ctx; |
| 515 | |
| 516 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
| 517 | if (!ctx) |
| 518 | return NULL; |
| 519 | |
| 520 | init_completion(&ctx->kdamond_started); |
| 521 | |
| 522 | ctx->attrs.sample_interval = 5 * 1000; |
| 523 | ctx->attrs.aggr_interval = 100 * 1000; |
| 524 | ctx->attrs.ops_update_interval = 60 * 1000 * 1000; |
| 525 | |
| 526 | ctx->passed_sample_intervals = 0; |
| 527 | /* These will be set from kdamond_init_ctx() */ |
| 528 | ctx->next_aggregation_sis = 0; |
| 529 | ctx->next_ops_update_sis = 0; |
| 530 | |
| 531 | mutex_init(&ctx->kdamond_lock); |
| 532 | mutex_init(&ctx->call_control_lock); |
| 533 | mutex_init(&ctx->walk_control_lock); |
| 534 | |
| 535 | ctx->attrs.min_nr_regions = 10; |
| 536 | ctx->attrs.max_nr_regions = 1000; |
| 537 | |
| 538 | INIT_LIST_HEAD(&ctx->adaptive_targets); |
| 539 | INIT_LIST_HEAD(&ctx->schemes); |
| 540 | |
| 541 | return ctx; |
| 542 | } |
| 543 | |
| 544 | static void damon_destroy_targets(struct damon_ctx *ctx) |
| 545 | { |
| 546 | struct damon_target *t, *next_t; |
| 547 | |
| 548 | if (ctx->ops.cleanup) { |
| 549 | ctx->ops.cleanup(ctx); |
| 550 | return; |
| 551 | } |
| 552 | |
| 553 | damon_for_each_target_safe(t, next_t, ctx) |
| 554 | damon_destroy_target(t); |
| 555 | } |
| 556 | |
| 557 | void damon_destroy_ctx(struct damon_ctx *ctx) |
| 558 | { |
| 559 | struct damos *s, *next_s; |
| 560 | |
| 561 | damon_destroy_targets(ctx); |
| 562 | |
| 563 | damon_for_each_scheme_safe(s, next_s, ctx) |
| 564 | damon_destroy_scheme(s); |
| 565 | |
| 566 | kfree(ctx); |
| 567 | } |
| 568 | |
| 569 | static unsigned int damon_age_for_new_attrs(unsigned int age, |
| 570 | struct damon_attrs *old_attrs, struct damon_attrs *new_attrs) |
| 571 | { |
| 572 | return age * old_attrs->aggr_interval / new_attrs->aggr_interval; |
| 573 | } |
| 574 | |
| 575 | /* convert access ratio in bp (per 10,000) to nr_accesses */ |
| 576 | static unsigned int damon_accesses_bp_to_nr_accesses( |
| 577 | unsigned int accesses_bp, struct damon_attrs *attrs) |
| 578 | { |
| 579 | return accesses_bp * damon_max_nr_accesses(attrs) / 10000; |
| 580 | } |
| 581 | |
| 582 | /* |
| 583 | * Convert nr_accesses to access ratio in bp (per 10,000). |
| 584 | * |
| 585 | * Callers should ensure attrs.aggr_interval is not zero, like |
 * damon_update_monitoring_results() does. Otherwise, divide-by-zero would
| 587 | * happen. |
| 588 | */ |
| 589 | static unsigned int damon_nr_accesses_to_accesses_bp( |
| 590 | unsigned int nr_accesses, struct damon_attrs *attrs) |
| 591 | { |
| 592 | return nr_accesses * 10000 / damon_max_nr_accesses(attrs); |
| 593 | } |
| 594 | |
| 595 | static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses, |
| 596 | struct damon_attrs *old_attrs, struct damon_attrs *new_attrs) |
| 597 | { |
| 598 | return damon_accesses_bp_to_nr_accesses( |
| 599 | damon_nr_accesses_to_accesses_bp( |
| 600 | nr_accesses, old_attrs), |
| 601 | new_attrs); |
| 602 | } |
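
/*
 * Worked example of the above conversion: with old attrs of 5 ms sampling
 * and 100 ms aggregation, damon_max_nr_accesses() is 20, so a nr_accesses of
 * 10 converts to 5,000 bp (50%). If the new attrs double the sampling
 * interval to 10 ms while keeping the aggregation interval, the maximum
 * becomes 10, and the 5,000 bp converts back to a nr_accesses of 5, so the
 * represented access ratio is preserved.
 */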
| 603 | |
| 604 | static void damon_update_monitoring_result(struct damon_region *r, |
| 605 | struct damon_attrs *old_attrs, struct damon_attrs *new_attrs, |
| 606 | bool aggregating) |
| 607 | { |
| 608 | if (!aggregating) { |
| 609 | r->nr_accesses = damon_nr_accesses_for_new_attrs( |
| 610 | r->nr_accesses, old_attrs, new_attrs); |
| 611 | r->nr_accesses_bp = r->nr_accesses * 10000; |
| 612 | } else { |
| 613 | /* |
		 * If this is called in the middle of an aggregation, reset
		 * the aggregations we made so far for this aggregation
		 * interval. In other words, make the status as if
		 * kdamond_reset_aggregated() was just called.
| 618 | */ |
| 619 | r->last_nr_accesses = damon_nr_accesses_for_new_attrs( |
| 620 | r->last_nr_accesses, old_attrs, new_attrs); |
| 621 | r->nr_accesses_bp = r->last_nr_accesses * 10000; |
| 622 | r->nr_accesses = 0; |
| 623 | } |
| 624 | r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs); |
| 625 | } |
| 626 | |
| 627 | /* |
 * region->nr_accesses is the number of sampling intervals in the last
 * aggregation interval in which access to the region was found, and
 * region->age is the number of aggregation intervals that its access pattern
 * has maintained. For this reason, the real meaning of the two fields depends
 * on the current sampling interval and aggregation interval. This function
 * updates ->nr_accesses and ->age of given damon_ctx's regions for new
 * damon_attrs.
| 634 | */ |
| 635 | static void damon_update_monitoring_results(struct damon_ctx *ctx, |
| 636 | struct damon_attrs *new_attrs, bool aggregating) |
| 637 | { |
| 638 | struct damon_attrs *old_attrs = &ctx->attrs; |
| 639 | struct damon_target *t; |
| 640 | struct damon_region *r; |
| 641 | |
	/* if any interval is zero, simply skip the conversion */
| 643 | if (!old_attrs->sample_interval || !old_attrs->aggr_interval || |
| 644 | !new_attrs->sample_interval || |
| 645 | !new_attrs->aggr_interval) |
| 646 | return; |
| 647 | |
| 648 | damon_for_each_target(t, ctx) |
| 649 | damon_for_each_region(r, t) |
| 650 | damon_update_monitoring_result( |
| 651 | r, old_attrs, new_attrs, aggregating); |
| 652 | } |
| 653 | |
| 654 | /* |
| 655 | * damon_valid_intervals_goal() - return if the intervals goal of @attrs is |
| 656 | * valid. |
| 657 | */ |
| 658 | static bool damon_valid_intervals_goal(struct damon_attrs *attrs) |
| 659 | { |
| 660 | struct damon_intervals_goal *goal = &attrs->intervals_goal; |
| 661 | |
| 662 | /* tuning is disabled */ |
| 663 | if (!goal->aggrs) |
| 664 | return true; |
| 665 | if (goal->min_sample_us > goal->max_sample_us) |
| 666 | return false; |
| 667 | if (attrs->sample_interval < goal->min_sample_us || |
| 668 | goal->max_sample_us < attrs->sample_interval) |
| 669 | return false; |
| 670 | return true; |
| 671 | } |
| 672 | |
| 673 | /** |
| 674 | * damon_set_attrs() - Set attributes for the monitoring. |
| 675 | * @ctx: monitoring context |
| 676 | * @attrs: monitoring attributes |
| 677 | * |
 * This function should be called while the kdamond is not running, while an
 * aggregation of access check results is not ongoing (e.g., from &struct
 * damon_callback->after_aggregation or &struct
 * damon_callback->after_wmarks_check callbacks), or from damon_call().
| 682 | * |
| 683 | * Every time interval is in micro-seconds. |
| 684 | * |
| 685 | * Return: 0 on success, negative error code otherwise. |
| 686 | */ |
| 687 | int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs) |
| 688 | { |
| 689 | unsigned long sample_interval = attrs->sample_interval ? |
| 690 | attrs->sample_interval : 1; |
| 691 | struct damos *s; |
| 692 | bool aggregating = ctx->passed_sample_intervals < |
| 693 | ctx->next_aggregation_sis; |
| 694 | |
| 695 | if (!damon_valid_intervals_goal(attrs)) |
| 696 | return -EINVAL; |
| 697 | |
| 698 | if (attrs->min_nr_regions < 3) |
| 699 | return -EINVAL; |
| 700 | if (attrs->min_nr_regions > attrs->max_nr_regions) |
| 701 | return -EINVAL; |
| 702 | if (attrs->sample_interval > attrs->aggr_interval) |
| 703 | return -EINVAL; |
| 704 | |
	/* Calls from outside of the core logic don't set this. */
| 706 | if (!attrs->aggr_samples) |
| 707 | attrs->aggr_samples = attrs->aggr_interval / sample_interval; |
| 708 | |
| 709 | ctx->next_aggregation_sis = ctx->passed_sample_intervals + |
| 710 | attrs->aggr_interval / sample_interval; |
| 711 | ctx->next_ops_update_sis = ctx->passed_sample_intervals + |
| 712 | attrs->ops_update_interval / sample_interval; |
| 713 | |
| 714 | damon_update_monitoring_results(ctx, attrs, aggregating); |
| 715 | ctx->attrs = *attrs; |
| 716 | |
| 717 | damon_for_each_scheme(s, ctx) |
| 718 | damos_set_next_apply_sis(s, ctx); |
| 719 | |
| 720 | return 0; |
| 721 | } |
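
/*
 * Example (sketch): updating the monitoring intervals and regions limits of
 * a context. The values below are arbitrary; all intervals are in
 * microseconds.
 *
 *	struct damon_attrs attrs = {
 *		.sample_interval = 5000,
 *		.aggr_interval = 100000,
 *		.ops_update_interval = 60000000,
 *		.min_nr_regions = 10,
 *		.max_nr_regions = 1000,
 *	};
 *
 *	err = damon_set_attrs(ctx, &attrs);
 */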
| 722 | |
| 723 | /** |
| 724 | * damon_set_schemes() - Set data access monitoring based operation schemes. |
| 725 | * @ctx: monitoring context |
| 726 | * @schemes: array of the schemes |
| 727 | * @nr_schemes: number of entries in @schemes |
| 728 | * |
| 729 | * This function should not be called while the kdamond of the context is |
| 730 | * running. |
| 731 | */ |
| 732 | void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes, |
| 733 | ssize_t nr_schemes) |
| 734 | { |
| 735 | struct damos *s, *next; |
| 736 | ssize_t i; |
| 737 | |
| 738 | damon_for_each_scheme_safe(s, next, ctx) |
| 739 | damon_destroy_scheme(s); |
| 740 | for (i = 0; i < nr_schemes; i++) |
| 741 | damon_add_scheme(ctx, schemes[i]); |
| 742 | } |
| 743 | |
| 744 | static struct damos_quota_goal *damos_nth_quota_goal( |
| 745 | int n, struct damos_quota *q) |
| 746 | { |
| 747 | struct damos_quota_goal *goal; |
| 748 | int i = 0; |
| 749 | |
| 750 | damos_for_each_quota_goal(goal, q) { |
| 751 | if (i++ == n) |
| 752 | return goal; |
| 753 | } |
| 754 | return NULL; |
| 755 | } |
| 756 | |
| 757 | static void damos_commit_quota_goal( |
| 758 | struct damos_quota_goal *dst, struct damos_quota_goal *src) |
| 759 | { |
| 760 | dst->metric = src->metric; |
| 761 | dst->target_value = src->target_value; |
| 762 | if (dst->metric == DAMOS_QUOTA_USER_INPUT) |
| 763 | dst->current_value = src->current_value; |
| 764 | /* keep last_psi_total as is, since it will be updated in next cycle */ |
| 765 | } |
| 766 | |
| 767 | /** |
| 768 | * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota. |
| 769 | * @dst: The commit destination DAMOS quota. |
| 770 | * @src: The commit source DAMOS quota. |
| 771 | * |
 * Copies user-specified parameters for quota goals from @src to @dst. Users
 * should use this function for quota goals-level parameters update of a
 * running DAMON context, instead of manual in-place updates.
 *
 * This function should be called from parameters-update safe context, like
 * DAMON callbacks.
 *
 * Return: 0 on success, negative error code otherwise.
 */
| 779 | int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src) |
| 780 | { |
| 781 | struct damos_quota_goal *dst_goal, *next, *src_goal, *new_goal; |
| 782 | int i = 0, j = 0; |
| 783 | |
| 784 | damos_for_each_quota_goal_safe(dst_goal, next, dst) { |
| 785 | src_goal = damos_nth_quota_goal(i++, src); |
| 786 | if (src_goal) |
| 787 | damos_commit_quota_goal(dst_goal, src_goal); |
| 788 | else |
| 789 | damos_destroy_quota_goal(dst_goal); |
| 790 | } |
| 791 | damos_for_each_quota_goal_safe(src_goal, next, src) { |
| 792 | if (j++ < i) |
| 793 | continue; |
| 794 | new_goal = damos_new_quota_goal( |
| 795 | src_goal->metric, src_goal->target_value); |
| 796 | if (!new_goal) |
| 797 | return -ENOMEM; |
| 798 | damos_add_quota_goal(dst, new_goal); |
| 799 | } |
| 800 | return 0; |
| 801 | } |
| 802 | |
| 803 | static int damos_commit_quota(struct damos_quota *dst, struct damos_quota *src) |
| 804 | { |
| 805 | int err; |
| 806 | |
| 807 | dst->reset_interval = src->reset_interval; |
| 808 | dst->ms = src->ms; |
| 809 | dst->sz = src->sz; |
| 810 | err = damos_commit_quota_goals(dst, src); |
| 811 | if (err) |
| 812 | return err; |
| 813 | dst->weight_sz = src->weight_sz; |
| 814 | dst->weight_nr_accesses = src->weight_nr_accesses; |
| 815 | dst->weight_age = src->weight_age; |
| 816 | return 0; |
| 817 | } |
| 818 | |
| 819 | static struct damos_filter *damos_nth_filter(int n, struct damos *s) |
| 820 | { |
| 821 | struct damos_filter *filter; |
| 822 | int i = 0; |
| 823 | |
| 824 | damos_for_each_filter(filter, s) { |
| 825 | if (i++ == n) |
| 826 | return filter; |
| 827 | } |
| 828 | return NULL; |
| 829 | } |
| 830 | |
| 831 | static void damos_commit_filter_arg( |
| 832 | struct damos_filter *dst, struct damos_filter *src) |
| 833 | { |
| 834 | switch (dst->type) { |
| 835 | case DAMOS_FILTER_TYPE_MEMCG: |
| 836 | dst->memcg_id = src->memcg_id; |
| 837 | break; |
| 838 | case DAMOS_FILTER_TYPE_ADDR: |
| 839 | dst->addr_range = src->addr_range; |
| 840 | break; |
| 841 | case DAMOS_FILTER_TYPE_TARGET: |
| 842 | dst->target_idx = src->target_idx; |
| 843 | break; |
| 844 | case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: |
| 845 | dst->sz_range = src->sz_range; |
| 846 | break; |
| 847 | default: |
| 848 | break; |
| 849 | } |
| 850 | } |
| 851 | |
| 852 | static void damos_commit_filter( |
| 853 | struct damos_filter *dst, struct damos_filter *src) |
| 854 | { |
| 855 | dst->type = src->type; |
| 856 | dst->matching = src->matching; |
| 857 | damos_commit_filter_arg(dst, src); |
| 858 | } |
| 859 | |
| 860 | static int damos_commit_core_filters(struct damos *dst, struct damos *src) |
| 861 | { |
| 862 | struct damos_filter *dst_filter, *next, *src_filter, *new_filter; |
| 863 | int i = 0, j = 0; |
| 864 | |
| 865 | damos_for_each_filter_safe(dst_filter, next, dst) { |
| 866 | src_filter = damos_nth_filter(i++, src); |
| 867 | if (src_filter) |
| 868 | damos_commit_filter(dst_filter, src_filter); |
| 869 | else |
| 870 | damos_destroy_filter(dst_filter); |
| 871 | } |
| 872 | |
| 873 | damos_for_each_filter_safe(src_filter, next, src) { |
| 874 | if (j++ < i) |
| 875 | continue; |
| 876 | |
| 877 | new_filter = damos_new_filter( |
| 878 | src_filter->type, src_filter->matching, |
| 879 | src_filter->allow); |
| 880 | if (!new_filter) |
| 881 | return -ENOMEM; |
| 882 | damos_commit_filter_arg(new_filter, src_filter); |
| 883 | damos_add_filter(dst, new_filter); |
| 884 | } |
| 885 | return 0; |
| 886 | } |
| 887 | |
| 888 | static int damos_commit_ops_filters(struct damos *dst, struct damos *src) |
| 889 | { |
| 890 | struct damos_filter *dst_filter, *next, *src_filter, *new_filter; |
| 891 | int i = 0, j = 0; |
| 892 | |
| 893 | damos_for_each_ops_filter_safe(dst_filter, next, dst) { |
| 894 | src_filter = damos_nth_filter(i++, src); |
| 895 | if (src_filter) |
| 896 | damos_commit_filter(dst_filter, src_filter); |
| 897 | else |
| 898 | damos_destroy_filter(dst_filter); |
| 899 | } |
| 900 | |
| 901 | damos_for_each_ops_filter_safe(src_filter, next, src) { |
| 902 | if (j++ < i) |
| 903 | continue; |
| 904 | |
| 905 | new_filter = damos_new_filter( |
| 906 | src_filter->type, src_filter->matching, |
| 907 | src_filter->allow); |
| 908 | if (!new_filter) |
| 909 | return -ENOMEM; |
| 910 | damos_commit_filter_arg(new_filter, src_filter); |
| 911 | damos_add_filter(dst, new_filter); |
| 912 | } |
| 913 | return 0; |
| 914 | } |
| 915 | |
| 916 | /** |
 * damos_filters_default_reject() - Decide whether to reject memory that did
 * not match any given filter.
| 919 | * @filters: Given DAMOS filters of a group. |
| 920 | */ |
| 921 | static bool damos_filters_default_reject(struct list_head *filters) |
| 922 | { |
| 923 | struct damos_filter *last_filter; |
| 924 | |
| 925 | if (list_empty(filters)) |
| 926 | return false; |
| 927 | last_filter = list_last_entry(filters, struct damos_filter, list); |
| 928 | return last_filter->allow; |
| 929 | } |
| 930 | |
| 931 | static void damos_set_filters_default_reject(struct damos *s) |
| 932 | { |
| 933 | if (!list_empty(&s->ops_filters)) |
| 934 | s->core_filters_default_reject = false; |
| 935 | else |
| 936 | s->core_filters_default_reject = |
| 937 | damos_filters_default_reject(&s->filters); |
| 938 | s->ops_filters_default_reject = |
| 939 | damos_filters_default_reject(&s->ops_filters); |
| 940 | } |
| 941 | |
| 942 | static int damos_commit_filters(struct damos *dst, struct damos *src) |
| 943 | { |
| 944 | int err; |
| 945 | |
| 946 | err = damos_commit_core_filters(dst, src); |
| 947 | if (err) |
| 948 | return err; |
| 949 | err = damos_commit_ops_filters(dst, src); |
| 950 | if (err) |
| 951 | return err; |
| 952 | damos_set_filters_default_reject(dst); |
| 953 | return 0; |
| 954 | } |
| 955 | |
| 956 | static struct damos *damon_nth_scheme(int n, struct damon_ctx *ctx) |
| 957 | { |
| 958 | struct damos *s; |
| 959 | int i = 0; |
| 960 | |
| 961 | damon_for_each_scheme(s, ctx) { |
| 962 | if (i++ == n) |
| 963 | return s; |
| 964 | } |
| 965 | return NULL; |
| 966 | } |
| 967 | |
| 968 | static int damos_commit(struct damos *dst, struct damos *src) |
| 969 | { |
| 970 | int err; |
| 971 | |
| 972 | dst->pattern = src->pattern; |
| 973 | dst->action = src->action; |
| 974 | dst->apply_interval_us = src->apply_interval_us; |
| 975 | |
| 976 | err = damos_commit_quota(&dst->quota, &src->quota); |
| 977 | if (err) |
| 978 | return err; |
| 979 | |
| 980 | dst->wmarks = src->wmarks; |
| 981 | |
| 982 | err = damos_commit_filters(dst, src); |
| 983 | return err; |
| 984 | } |
| 985 | |
| 986 | static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src) |
| 987 | { |
| 988 | struct damos *dst_scheme, *next, *src_scheme, *new_scheme; |
| 989 | int i = 0, j = 0, err; |
| 990 | |
| 991 | damon_for_each_scheme_safe(dst_scheme, next, dst) { |
| 992 | src_scheme = damon_nth_scheme(i++, src); |
| 993 | if (src_scheme) { |
| 994 | err = damos_commit(dst_scheme, src_scheme); |
| 995 | if (err) |
| 996 | return err; |
| 997 | } else { |
| 998 | damon_destroy_scheme(dst_scheme); |
| 999 | } |
| 1000 | } |
| 1001 | |
| 1002 | damon_for_each_scheme_safe(src_scheme, next, src) { |
| 1003 | if (j++ < i) |
| 1004 | continue; |
| 1005 | new_scheme = damon_new_scheme(&src_scheme->pattern, |
| 1006 | src_scheme->action, |
| 1007 | src_scheme->apply_interval_us, |
| 1008 | &src_scheme->quota, &src_scheme->wmarks, |
| 1009 | NUMA_NO_NODE); |
| 1010 | if (!new_scheme) |
| 1011 | return -ENOMEM; |
| 1012 | err = damos_commit(new_scheme, src_scheme); |
| 1013 | if (err) { |
| 1014 | damon_destroy_scheme(new_scheme); |
| 1015 | return err; |
| 1016 | } |
| 1017 | damon_add_scheme(dst, new_scheme); |
| 1018 | } |
| 1019 | return 0; |
| 1020 | } |
| 1021 | |
| 1022 | static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx) |
| 1023 | { |
| 1024 | struct damon_target *t; |
| 1025 | int i = 0; |
| 1026 | |
| 1027 | damon_for_each_target(t, ctx) { |
| 1028 | if (i++ == n) |
| 1029 | return t; |
| 1030 | } |
| 1031 | return NULL; |
| 1032 | } |
| 1033 | |
| 1034 | /* |
| 1035 | * The caller should ensure the regions of @src are |
 * 1. valid (end >= start) and
| 1037 | * 2. sorted by starting address. |
| 1038 | * |
| 1039 | * If @src has no region, @dst keeps current regions. |
| 1040 | */ |
| 1041 | static int damon_commit_target_regions( |
| 1042 | struct damon_target *dst, struct damon_target *src) |
| 1043 | { |
| 1044 | struct damon_region *src_region; |
| 1045 | struct damon_addr_range *ranges; |
| 1046 | int i = 0, err; |
| 1047 | |
| 1048 | damon_for_each_region(src_region, src) |
| 1049 | i++; |
| 1050 | if (!i) |
| 1051 | return 0; |
| 1052 | |
| 1053 | ranges = kmalloc_array(i, sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN); |
| 1054 | if (!ranges) |
| 1055 | return -ENOMEM; |
| 1056 | i = 0; |
| 1057 | damon_for_each_region(src_region, src) |
| 1058 | ranges[i++] = src_region->ar; |
| 1059 | err = damon_set_regions(dst, ranges, i); |
| 1060 | kfree(ranges); |
| 1061 | return err; |
| 1062 | } |
| 1063 | |
| 1064 | static int damon_commit_target( |
| 1065 | struct damon_target *dst, bool dst_has_pid, |
| 1066 | struct damon_target *src, bool src_has_pid) |
| 1067 | { |
| 1068 | int err; |
| 1069 | |
| 1070 | err = damon_commit_target_regions(dst, src); |
| 1071 | if (err) |
| 1072 | return err; |
| 1073 | if (dst_has_pid) |
| 1074 | put_pid(dst->pid); |
| 1075 | if (src_has_pid) |
| 1076 | get_pid(src->pid); |
| 1077 | dst->pid = src->pid; |
| 1078 | return 0; |
| 1079 | } |
| 1080 | |
| 1081 | static int damon_commit_targets( |
| 1082 | struct damon_ctx *dst, struct damon_ctx *src) |
| 1083 | { |
| 1084 | struct damon_target *dst_target, *next, *src_target, *new_target; |
| 1085 | int i = 0, j = 0, err; |
| 1086 | |
| 1087 | damon_for_each_target_safe(dst_target, next, dst) { |
| 1088 | src_target = damon_nth_target(i++, src); |
| 1089 | if (src_target) { |
| 1090 | err = damon_commit_target( |
| 1091 | dst_target, damon_target_has_pid(dst), |
| 1092 | src_target, damon_target_has_pid(src)); |
| 1093 | if (err) |
| 1094 | return err; |
| 1095 | } else { |
| 1096 | struct damos *s; |
| 1097 | |
| 1098 | if (damon_target_has_pid(dst)) |
| 1099 | put_pid(dst_target->pid); |
| 1100 | damon_destroy_target(dst_target); |
| 1101 | damon_for_each_scheme(s, dst) { |
| 1102 | if (s->quota.charge_target_from == dst_target) { |
| 1103 | s->quota.charge_target_from = NULL; |
| 1104 | s->quota.charge_addr_from = 0; |
| 1105 | } |
| 1106 | } |
| 1107 | } |
| 1108 | } |
| 1109 | |
| 1110 | damon_for_each_target_safe(src_target, next, src) { |
| 1111 | if (j++ < i) |
| 1112 | continue; |
| 1113 | new_target = damon_new_target(); |
| 1114 | if (!new_target) |
| 1115 | return -ENOMEM; |
| 1116 | err = damon_commit_target(new_target, false, |
| 1117 | src_target, damon_target_has_pid(src)); |
| 1118 | if (err) { |
| 1119 | damon_destroy_target(new_target); |
| 1120 | return err; |
| 1121 | } |
| 1122 | damon_add_target(dst, new_target); |
| 1123 | } |
| 1124 | return 0; |
| 1125 | } |
| 1126 | |
| 1127 | /** |
| 1128 | * damon_commit_ctx() - Commit parameters of a DAMON context to another. |
| 1129 | * @dst: The commit destination DAMON context. |
| 1130 | * @src: The commit source DAMON context. |
| 1131 | * |
 * This function copies user-specified parameters from @src to @dst and
 * updates the internal status and results accordingly. Users should use this
 * function for context-level parameters update of a running context, instead
 * of manual in-place updates.
| 1136 | * |
| 1137 | * This function should be called from parameters-update safe context, like |
| 1138 | * DAMON callbacks. |
| 1139 | */ |
| 1140 | int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src) |
| 1141 | { |
| 1142 | int err; |
| 1143 | |
| 1144 | err = damon_commit_schemes(dst, src); |
| 1145 | if (err) |
| 1146 | return err; |
| 1147 | err = damon_commit_targets(dst, src); |
| 1148 | if (err) |
| 1149 | return err; |
| 1150 | /* |
| 1151 | * schemes and targets should be updated first, since |
| 1152 | * 1. damon_set_attrs() updates monitoring results of targets and |
| 1153 | * next_apply_sis of schemes, and |
 * 2. ops update should be done after pid handling is done (target
 *    committing requires putting pids).
| 1156 | */ |
| 1157 | err = damon_set_attrs(dst, &src->attrs); |
| 1158 | if (err) |
| 1159 | return err; |
| 1160 | dst->ops = src->ops; |
| 1161 | |
| 1162 | return 0; |
| 1163 | } |
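
/*
 * Example (sketch): updating parameters of a running context using a
 * separately constructed source context. This is assumed to run from a
 * parameters-update safe context such as a damon_call()-invoked function.
 *
 *	struct damon_ctx *param_ctx = damon_new_ctx();
 *	int err;
 *
 *	if (!param_ctx)
 *		return -ENOMEM;
 *	(set up targets, schemes, and attrs of param_ctx here)
 *	err = damon_commit_ctx(running_ctx, param_ctx);
 *	damon_destroy_ctx(param_ctx);
 */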
| 1164 | |
| 1165 | /** |
 * damon_nr_running_ctxs() - Return number of currently running contexts.
 *
 * Return: The number of currently running contexts.
 */
| 1168 | int damon_nr_running_ctxs(void) |
| 1169 | { |
| 1170 | int nr_ctxs; |
| 1171 | |
| 1172 | mutex_lock(&damon_lock); |
| 1173 | nr_ctxs = nr_running_ctxs; |
| 1174 | mutex_unlock(&damon_lock); |
| 1175 | |
| 1176 | return nr_ctxs; |
| 1177 | } |
| 1178 | |
| 1179 | /* Returns the size upper limit for each monitoring region */ |
| 1180 | static unsigned long damon_region_sz_limit(struct damon_ctx *ctx) |
| 1181 | { |
| 1182 | struct damon_target *t; |
| 1183 | struct damon_region *r; |
| 1184 | unsigned long sz = 0; |
| 1185 | |
| 1186 | damon_for_each_target(t, ctx) { |
| 1187 | damon_for_each_region(r, t) |
| 1188 | sz += damon_sz_region(r); |
| 1189 | } |
| 1190 | |
| 1191 | if (ctx->attrs.min_nr_regions) |
| 1192 | sz /= ctx->attrs.min_nr_regions; |
| 1193 | if (sz < DAMON_MIN_REGION) |
| 1194 | sz = DAMON_MIN_REGION; |
| 1195 | |
| 1196 | return sz; |
| 1197 | } |
| 1198 | |
| 1199 | static int kdamond_fn(void *data); |
| 1200 | |
| 1201 | /* |
| 1202 | * __damon_start() - Starts monitoring with given context. |
| 1203 | * @ctx: monitoring context |
| 1204 | * |
 * This function should be called while damon_lock is held.
| 1206 | * |
| 1207 | * Return: 0 on success, negative error code otherwise. |
| 1208 | */ |
| 1209 | static int __damon_start(struct damon_ctx *ctx) |
| 1210 | { |
| 1211 | int err = -EBUSY; |
| 1212 | |
| 1213 | mutex_lock(&ctx->kdamond_lock); |
| 1214 | if (!ctx->kdamond) { |
| 1215 | err = 0; |
| 1216 | reinit_completion(&ctx->kdamond_started); |
| 1217 | ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d", |
| 1218 | nr_running_ctxs); |
| 1219 | if (IS_ERR(ctx->kdamond)) { |
| 1220 | err = PTR_ERR(ctx->kdamond); |
| 1221 | ctx->kdamond = NULL; |
| 1222 | } else { |
| 1223 | wait_for_completion(&ctx->kdamond_started); |
| 1224 | } |
| 1225 | } |
| 1226 | mutex_unlock(&ctx->kdamond_lock); |
| 1227 | |
| 1228 | return err; |
| 1229 | } |
| 1230 | |
| 1231 | /** |
 * damon_start() - Starts monitoring for a given group of contexts.
| 1233 | * @ctxs: an array of the pointers for contexts to start monitoring |
| 1234 | * @nr_ctxs: size of @ctxs |
| 1235 | * @exclusive: exclusiveness of this contexts group |
| 1236 | * |
 * This function starts a group of monitoring threads for a group of
 * monitoring contexts. One thread per context is created and run in
 * parallel. The caller should handle synchronization between the threads by
 * itself. If @exclusive is true and a group of threads that was created by
 * another 'damon_start()' call is currently running, this function does
 * nothing but returns -EBUSY.
| 1243 | * |
| 1244 | * Return: 0 on success, negative error code otherwise. |
| 1245 | */ |
| 1246 | int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive) |
| 1247 | { |
| 1248 | int i; |
| 1249 | int err = 0; |
| 1250 | |
| 1251 | mutex_lock(&damon_lock); |
| 1252 | if ((exclusive && nr_running_ctxs) || |
| 1253 | (!exclusive && running_exclusive_ctxs)) { |
| 1254 | mutex_unlock(&damon_lock); |
| 1255 | return -EBUSY; |
| 1256 | } |
| 1257 | |
| 1258 | for (i = 0; i < nr_ctxs; i++) { |
| 1259 | err = __damon_start(ctxs[i]); |
| 1260 | if (err) |
| 1261 | break; |
| 1262 | nr_running_ctxs++; |
| 1263 | } |
| 1264 | if (exclusive && nr_running_ctxs) |
| 1265 | running_exclusive_ctxs = true; |
| 1266 | mutex_unlock(&damon_lock); |
| 1267 | |
| 1268 | return err; |
| 1269 | } |
| 1270 | |
| 1271 | /* |
| 1272 | * __damon_stop() - Stops monitoring of a given context. |
| 1273 | * @ctx: monitoring context |
| 1274 | * |
| 1275 | * Return: 0 on success, negative error code otherwise. |
| 1276 | */ |
| 1277 | static int __damon_stop(struct damon_ctx *ctx) |
| 1278 | { |
| 1279 | struct task_struct *tsk; |
| 1280 | |
| 1281 | mutex_lock(&ctx->kdamond_lock); |
| 1282 | tsk = ctx->kdamond; |
| 1283 | if (tsk) { |
| 1284 | get_task_struct(tsk); |
| 1285 | mutex_unlock(&ctx->kdamond_lock); |
| 1286 | kthread_stop_put(tsk); |
| 1287 | return 0; |
| 1288 | } |
| 1289 | mutex_unlock(&ctx->kdamond_lock); |
| 1290 | |
| 1291 | return -EPERM; |
| 1292 | } |
| 1293 | |
| 1294 | /** |
 * damon_stop() - Stops monitoring for a given group of contexts.
| 1296 | * @ctxs: an array of the pointers for contexts to stop monitoring |
| 1297 | * @nr_ctxs: size of @ctxs |
| 1298 | * |
| 1299 | * Return: 0 on success, negative error code otherwise. |
| 1300 | */ |
| 1301 | int damon_stop(struct damon_ctx **ctxs, int nr_ctxs) |
| 1302 | { |
| 1303 | int i, err = 0; |
| 1304 | |
| 1305 | for (i = 0; i < nr_ctxs; i++) { |
| 1306 | /* nr_running_ctxs is decremented in kdamond_fn */ |
| 1307 | err = __damon_stop(ctxs[i]); |
| 1308 | if (err) |
| 1309 | break; |
| 1310 | } |
| 1311 | return err; |
| 1312 | } |
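
/*
 * Example (sketch): exclusively starting a single fully constructed context
 * and stopping it later.
 *
 *	int err;
 *
 *	err = damon_start(&ctx, 1, true);
 *	if (err)
 *		return err;
 *	(monitoring runs in the kdamond thread here)
 *	err = damon_stop(&ctx, 1);
 */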
| 1313 | |
| 1314 | static bool damon_is_running(struct damon_ctx *ctx) |
| 1315 | { |
| 1316 | bool running; |
| 1317 | |
| 1318 | mutex_lock(&ctx->kdamond_lock); |
| 1319 | running = ctx->kdamond != NULL; |
| 1320 | mutex_unlock(&ctx->kdamond_lock); |
| 1321 | return running; |
| 1322 | } |
| 1323 | |
| 1324 | /** |
| 1325 | * damon_call() - Invoke a given function on DAMON worker thread (kdamond). |
| 1326 | * @ctx: DAMON context to call the function for. |
| 1327 | * @control: Control variable of the call request. |
| 1328 | * |
| 1329 | * Ask DAMON worker thread (kdamond) of @ctx to call a function with an |
| 1330 | * argument data that respectively passed via &damon_call_control->fn and |
| 1331 | * &damon_call_control->data of @control, and wait until the kdamond finishes |
| 1332 | * handling of the request. |
| 1333 | * |
| 1334 | * The kdamond executes the function with the argument in the main loop, just |
| 1335 | * after a sampling of the iteration is finished. The function can hence |
| 1336 | * safely access the internal data of the &struct damon_ctx without additional |
| 1337 | * synchronization. The return value of the function will be saved in |
| 1338 | * &damon_call_control->return_code. |
| 1339 | * |
| 1340 | * Return: 0 on success, negative error code otherwise. |
| 1341 | */ |
| 1342 | int damon_call(struct damon_ctx *ctx, struct damon_call_control *control) |
| 1343 | { |
| 1344 | init_completion(&control->completion); |
| 1345 | control->canceled = false; |
| 1346 | |
| 1347 | mutex_lock(&ctx->call_control_lock); |
| 1348 | if (ctx->call_control) { |
| 1349 | mutex_unlock(&ctx->call_control_lock); |
| 1350 | return -EBUSY; |
| 1351 | } |
| 1352 | ctx->call_control = control; |
| 1353 | mutex_unlock(&ctx->call_control_lock); |
| 1354 | if (!damon_is_running(ctx)) |
| 1355 | return -EINVAL; |
| 1356 | wait_for_completion(&control->completion); |
| 1357 | if (control->canceled) |
| 1358 | return -ECANCELED; |
| 1359 | return 0; |
| 1360 | } |
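
/*
 * Example (sketch): running a function on the kdamond of a running context.
 * get_status() is a hypothetical caller-defined function; the control
 * structure fields other than ->fn and ->data are managed by damon_call().
 *
 *	static int get_status(void *data)
 *	{
 *		(safely read or update DAMON internal data here)
 *		return 0;
 *	}
 *
 *	struct damon_call_control control = {
 *		.fn = get_status,
 *		.data = ctx,
 *	};
 *	int err;
 *
 *	err = damon_call(ctx, &control);
 */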
| 1361 | |
| 1362 | /** |
 * damos_walk() - Invoke a given function while DAMOS walks regions.
| 1364 | * @ctx: DAMON context to call the functions for. |
| 1365 | * @control: Control variable of the walk request. |
| 1366 | * |
| 1367 | * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region |
| 1368 | * that the kdamond will apply DAMOS action to, and wait until the kdamond |
| 1369 | * finishes handling of the request. |
| 1370 | * |
| 1371 | * The kdamond executes the given function in the main loop, for each region |
| 1372 | * just after it applied any DAMOS actions of @ctx to it. The invocation is |
| 1373 | * made only within one &damos->apply_interval_us since damos_walk() |
| 1374 | * invocation, for each scheme. The given callback function can hence safely |
| 1375 | * access the internal data of &struct damon_ctx and &struct damon_region that |
| 1376 | * each of the scheme will apply the action for next interval, without |
| 1377 | * additional synchronizations against the kdamond. If every scheme of @ctx |
| 1378 | * passed at least one &damos->apply_interval_us, kdamond marks the request as |
 * completed so that damos_walk() can wake up and return.
| 1380 | * |
| 1381 | * Return: 0 on success, negative error code otherwise. |
| 1382 | */ |
| 1383 | int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control) |
| 1384 | { |
| 1385 | init_completion(&control->completion); |
| 1386 | control->canceled = false; |
| 1387 | mutex_lock(&ctx->walk_control_lock); |
| 1388 | if (ctx->walk_control) { |
| 1389 | mutex_unlock(&ctx->walk_control_lock); |
| 1390 | return -EBUSY; |
| 1391 | } |
| 1392 | ctx->walk_control = control; |
| 1393 | mutex_unlock(&ctx->walk_control_lock); |
| 1394 | if (!damon_is_running(ctx)) |
| 1395 | return -EINVAL; |
| 1396 | wait_for_completion(&control->completion); |
| 1397 | if (control->canceled) |
| 1398 | return -ECANCELED; |
| 1399 | return 0; |
| 1400 | } |
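
/*
 * Example (sketch): counting the regions that the schemes of a running
 * context will apply their actions to. count_region() is a hypothetical
 * walk function; its signature follows the damos_walk_control->walk_fn
 * invocation in damos_walk_call_walk() below.
 *
 *	static void count_region(void *data, struct damon_ctx *ctx,
 *			struct damon_target *t, struct damon_region *r,
 *			struct damos *s, unsigned long sz_filter_passed)
 *	{
 *		(*(unsigned long *)data)++;
 *	}
 *
 *	unsigned long nr_regions = 0;
 *	struct damos_walk_control control = {
 *		.walk_fn = count_region,
 *		.data = &nr_regions,
 *	};
 *
 *	err = damos_walk(ctx, &control);
 */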
| 1401 | |
| 1402 | /* |
 * Warn about and fix corrupted ->nr_accesses[_bp], for investigation and to
 * prevent the problem from being propagated.
| 1405 | */ |
| 1406 | static void damon_warn_fix_nr_accesses_corruption(struct damon_region *r) |
| 1407 | { |
| 1408 | if (r->nr_accesses_bp == r->nr_accesses * 10000) |
| 1409 | return; |
| 1410 | WARN_ONCE(true, "invalid nr_accesses_bp at reset: %u %u\n", |
| 1411 | r->nr_accesses_bp, r->nr_accesses); |
| 1412 | r->nr_accesses_bp = r->nr_accesses * 10000; |
| 1413 | } |
| 1414 | |
| 1415 | /* |
| 1416 | * Reset the aggregated monitoring results ('nr_accesses' of each region). |
| 1417 | */ |
| 1418 | static void kdamond_reset_aggregated(struct damon_ctx *c) |
| 1419 | { |
| 1420 | struct damon_target *t; |
| 1421 | unsigned int ti = 0; /* target's index */ |
| 1422 | |
| 1423 | damon_for_each_target(t, c) { |
| 1424 | struct damon_region *r; |
| 1425 | |
| 1426 | damon_for_each_region(r, t) { |
| 1427 | trace_damon_aggregated(ti, r, damon_nr_regions(t)); |
| 1428 | damon_warn_fix_nr_accesses_corruption(r); |
| 1429 | r->last_nr_accesses = r->nr_accesses; |
| 1430 | r->nr_accesses = 0; |
| 1431 | } |
| 1432 | ti++; |
| 1433 | } |
| 1434 | } |
| 1435 | |
| 1436 | static unsigned long damon_get_intervals_score(struct damon_ctx *c) |
| 1437 | { |
| 1438 | struct damon_target *t; |
| 1439 | struct damon_region *r; |
| 1440 | unsigned long sz_region, max_access_events = 0, access_events = 0; |
| 1441 | unsigned long target_access_events; |
| 1442 | unsigned long goal_bp = c->attrs.intervals_goal.access_bp; |
| 1443 | |
| 1444 | damon_for_each_target(t, c) { |
| 1445 | damon_for_each_region(r, t) { |
| 1446 | sz_region = damon_sz_region(r); |
| 1447 | max_access_events += sz_region * c->attrs.aggr_samples; |
| 1448 | access_events += sz_region * r->nr_accesses; |
| 1449 | } |
| 1450 | } |
| 1451 | target_access_events = max_access_events * goal_bp / 10000; |
| 1452 | target_access_events = target_access_events ? : 1; |
| 1453 | return access_events * 10000 / target_access_events; |
| 1454 | } |
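
/*
 * Worked example: if the intervals goal asks for 4% of the maximum possible
 * access events (access_bp of 400) while only 2% were captured in the last
 * aggregation interval, the returned score is 5,000, i.e., half of the
 * 10,000 bp target. The feedback loop below translates such deviations into
 * an adaptation factor for the sampling interval.
 */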
| 1455 | |
| 1456 | static unsigned long damon_feed_loop_next_input(unsigned long last_input, |
| 1457 | unsigned long score); |
| 1458 | |
| 1459 | static unsigned long damon_get_intervals_adaptation_bp(struct damon_ctx *c) |
| 1460 | { |
| 1461 | unsigned long score_bp, adaptation_bp; |
| 1462 | |
| 1463 | score_bp = damon_get_intervals_score(c); |
| 1464 | adaptation_bp = damon_feed_loop_next_input(100000000, score_bp) / |
| 1465 | 10000; |
| 1466 | /* |
	 * adaptation_bp ranges from 1 to 20,000. Avoid too rapid reduction of
	 * the intervals by rescaling [1, 10,000] to [5,000, 10,000].
| 1469 | */ |
| 1470 | if (adaptation_bp <= 10000) |
| 1471 | adaptation_bp = 5000 + adaptation_bp / 2; |
| 1472 | return adaptation_bp; |
| 1473 | } |
| 1474 | |
| 1475 | static void kdamond_tune_intervals(struct damon_ctx *c) |
| 1476 | { |
| 1477 | unsigned long adaptation_bp; |
| 1478 | struct damon_attrs new_attrs; |
| 1479 | struct damon_intervals_goal *goal; |
| 1480 | |
| 1481 | adaptation_bp = damon_get_intervals_adaptation_bp(c); |
| 1482 | if (adaptation_bp == 10000) |
| 1483 | return; |
| 1484 | |
| 1485 | new_attrs = c->attrs; |
| 1486 | goal = &c->attrs.intervals_goal; |
| 1487 | new_attrs.sample_interval = min(goal->max_sample_us, |
| 1488 | c->attrs.sample_interval * adaptation_bp / 10000); |
| 1489 | new_attrs.sample_interval = max(goal->min_sample_us, |
| 1490 | new_attrs.sample_interval); |
| 1491 | new_attrs.aggr_interval = new_attrs.sample_interval * |
| 1492 | c->attrs.aggr_samples; |
| 1493 | damon_set_attrs(c, &new_attrs); |
| 1494 | } |
| 1495 | |
| 1496 | static void damon_split_region_at(struct damon_target *t, |
| 1497 | struct damon_region *r, unsigned long sz_r); |
| 1498 | |
| 1499 | static bool __damos_valid_target(struct damon_region *r, struct damos *s) |
| 1500 | { |
| 1501 | unsigned long sz; |
| 1502 | unsigned int nr_accesses = r->nr_accesses_bp / 10000; |
| 1503 | |
| 1504 | sz = damon_sz_region(r); |
| 1505 | return s->pattern.min_sz_region <= sz && |
| 1506 | sz <= s->pattern.max_sz_region && |
| 1507 | s->pattern.min_nr_accesses <= nr_accesses && |
| 1508 | nr_accesses <= s->pattern.max_nr_accesses && |
| 1509 | s->pattern.min_age_region <= r->age && |
| 1510 | r->age <= s->pattern.max_age_region; |
| 1511 | } |
| 1512 | |
| 1513 | static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t, |
| 1514 | struct damon_region *r, struct damos *s) |
| 1515 | { |
| 1516 | bool ret = __damos_valid_target(r, s); |
| 1517 | |
| 1518 | if (!ret || !s->quota.esz || !c->ops.get_scheme_score) |
| 1519 | return ret; |
| 1520 | |
| 1521 | return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score; |
| 1522 | } |
| 1523 | |
| 1524 | /* |
| 1525 | * damos_skip_charged_region() - Check if the given region or starting part of |
| 1526 | * it is already charged for the DAMOS quota. |
| 1527 | * @t: The target of the region. |
| 1528 | * @rp: The pointer to the region. |
| 1529 | * @s: The scheme to be applied. |
| 1530 | * |
 * If the quota of a scheme has been exceeded within a quota charge window,
 * the scheme's action would be applied to only a part of the regions that
 * fulfill the target access pattern. To avoid applying the scheme action to
 * only already applied regions, DAMON skips applying the scheme action to
 * the regions that were charged in the previous charge window.
 *
 * This function checks if a given region should be skipped or not for the
 * reason. If only the starting part of the region has previously been
 * charged, this function splits the region into two so that the second one
 * covers the area that was not charged in the previous charge window, saves
 * the second region in *rp, and returns false, so that the caller can apply
 * the DAMOS action to the second one.
| 1543 | * |
| 1544 | * Return: true if the region should be entirely skipped, false otherwise. |
| 1545 | */ |
| 1546 | static bool damos_skip_charged_region(struct damon_target *t, |
| 1547 | struct damon_region **rp, struct damos *s) |
| 1548 | { |
| 1549 | struct damon_region *r = *rp; |
| 1550 | struct damos_quota *quota = &s->quota; |
| 1551 | unsigned long sz_to_skip; |
| 1552 | |
| 1553 | /* Skip previously charged regions */ |
| 1554 | if (quota->charge_target_from) { |
| 1555 | if (t != quota->charge_target_from) |
| 1556 | return true; |
| 1557 | if (r == damon_last_region(t)) { |
| 1558 | quota->charge_target_from = NULL; |
| 1559 | quota->charge_addr_from = 0; |
| 1560 | return true; |
| 1561 | } |
| 1562 | if (quota->charge_addr_from && |
| 1563 | r->ar.end <= quota->charge_addr_from) |
| 1564 | return true; |
| 1565 | |
| 1566 | if (quota->charge_addr_from && r->ar.start < |
| 1567 | quota->charge_addr_from) { |
| 1568 | sz_to_skip = ALIGN_DOWN(quota->charge_addr_from - |
| 1569 | r->ar.start, DAMON_MIN_REGION); |
| 1570 | if (!sz_to_skip) { |
| 1571 | if (damon_sz_region(r) <= DAMON_MIN_REGION) |
| 1572 | return true; |
| 1573 | sz_to_skip = DAMON_MIN_REGION; |
| 1574 | } |
| 1575 | damon_split_region_at(t, r, sz_to_skip); |
| 1576 | r = damon_next_region(r); |
| 1577 | *rp = r; |
| 1578 | } |
| 1579 | quota->charge_target_from = NULL; |
| 1580 | quota->charge_addr_from = 0; |
| 1581 | } |
| 1582 | return false; |
| 1583 | } |
| 1584 | |
| 1585 | static void damos_update_stat(struct damos *s, |
| 1586 | unsigned long sz_tried, unsigned long sz_applied, |
| 1587 | unsigned long sz_ops_filter_passed) |
| 1588 | { |
| 1589 | s->stat.nr_tried++; |
| 1590 | s->stat.sz_tried += sz_tried; |
| 1591 | if (sz_applied) |
| 1592 | s->stat.nr_applied++; |
| 1593 | s->stat.sz_applied += sz_applied; |
| 1594 | s->stat.sz_ops_filter_passed += sz_ops_filter_passed; |
| 1595 | } |
| 1596 | |
| 1597 | static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t, |
| 1598 | struct damon_region *r, struct damos_filter *filter) |
| 1599 | { |
| 1600 | bool matched = false; |
| 1601 | struct damon_target *ti; |
| 1602 | int target_idx = 0; |
| 1603 | unsigned long start, end; |
| 1604 | |
| 1605 | switch (filter->type) { |
| 1606 | case DAMOS_FILTER_TYPE_TARGET: |
| 1607 | damon_for_each_target(ti, ctx) { |
| 1608 | if (ti == t) |
| 1609 | break; |
| 1610 | target_idx++; |
| 1611 | } |
| 1612 | matched = target_idx == filter->target_idx; |
| 1613 | break; |
| 1614 | case DAMOS_FILTER_TYPE_ADDR: |
| 1615 | start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION); |
| 1616 | end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION); |
| 1617 | |
| 1618 | /* inside the range */ |
| 1619 | if (start <= r->ar.start && r->ar.end <= end) { |
| 1620 | matched = true; |
| 1621 | break; |
| 1622 | } |
| 1623 | /* outside of the range */ |
| 1624 | if (r->ar.end <= start || end <= r->ar.start) { |
| 1625 | matched = false; |
| 1626 | break; |
| 1627 | } |
| 1628 | /* start before the range and overlap */ |
| 1629 | if (r->ar.start < start) { |
| 1630 | damon_split_region_at(t, r, start - r->ar.start); |
| 1631 | matched = false; |
| 1632 | break; |
| 1633 | } |
| 1634 | /* start inside the range */ |
| 1635 | damon_split_region_at(t, r, end - r->ar.start); |
| 1636 | matched = true; |
| 1637 | break; |
| 1638 | default: |
| 1639 | return false; |
| 1640 | } |
| 1641 | |
| 1642 | return matched == filter->matching; |
| 1643 | } |
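
/*
 * An illustrative case for the DAMOS_FILTER_TYPE_ADDR handling above,
 * assuming the boundaries are already DAMON_MIN_REGION aligned: for a
 * filter range of [0x2000, 0x4000) and a region of [0x1000, 0x3000),
 * the region starts before the range and overlaps it, so it is split
 * into [0x1000, 0x2000) and [0x2000, 0x3000).  The first (current)
 * part is treated as not in the range, and the second part is
 * evaluated when the caller iterates to the next region.
 */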
| 1644 | |
| 1645 | static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, |
| 1646 | struct damon_region *r, struct damos *s) |
| 1647 | { |
| 1648 | struct damos_filter *filter; |
| 1649 | |
| 1650 | s->core_filters_allowed = false; |
| 1651 | damos_for_each_filter(filter, s) { |
| 1652 | if (damos_filter_match(ctx, t, r, filter)) { |
| 1653 | if (filter->allow) |
| 1654 | s->core_filters_allowed = true; |
| 1655 | return !filter->allow; |
| 1656 | } |
| 1657 | } |
| 1658 | return s->core_filters_default_reject; |
| 1659 | } |
| 1660 | |
| 1661 | /* |
| 1662 | * damos_walk_call_walk() - Call &damos_walk_control->walk_fn. |
| 1663 | * @ctx: The context of &damon_ctx->walk_control. |
 * @t: The monitoring target of @r to which @s will be applied.
 * @r: The region of @t to which @s will be applied.
 * @s: The scheme of @ctx that will be applied to @r.
 * @sz_filter_passed: Size of the part of @r that passed the ops layer
 * filters.
| 1667 | * |
 * This function is called from kdamond whenever it asks the operations set
 * to apply a DAMOS scheme action to a region. If a DAMOS walk request has
 * been installed by damos_walk() and not yet uninstalled, invoke it.
| 1671 | */ |
| 1672 | static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t, |
| 1673 | struct damon_region *r, struct damos *s, |
| 1674 | unsigned long sz_filter_passed) |
| 1675 | { |
| 1676 | struct damos_walk_control *control; |
| 1677 | |
| 1678 | if (s->walk_completed) |
| 1679 | return; |
| 1680 | |
| 1681 | control = ctx->walk_control; |
| 1682 | if (!control) |
| 1683 | return; |
| 1684 | |
| 1685 | control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed); |
| 1686 | } |
| 1687 | |
| 1688 | /* |
| 1689 | * damos_walk_complete() - Complete DAMOS walk request if all walks are done. |
| 1690 | * @ctx: The context of &damon_ctx->walk_control. |
| 1691 | * @s: A scheme of @ctx that all walks are now done. |
| 1692 | * |
 * This function is called when kdamond finishes applying the action of a
 * DAMOS scheme to all regions that are eligible for the given
 * &damos->apply_interval_us. If every scheme of @ctx, including @s, has now
 * finished walking for at least one &damos->apply_interval_us, this function
 * marks the handling of the given DAMOS walk request as done, so that
 * damos_walk() can wake up and return.
| 1698 | */ |
| 1699 | static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s) |
| 1700 | { |
| 1701 | struct damos *siter; |
| 1702 | struct damos_walk_control *control; |
| 1703 | |
| 1704 | control = ctx->walk_control; |
| 1705 | if (!control) |
| 1706 | return; |
| 1707 | |
| 1708 | s->walk_completed = true; |
| 1709 | /* if all schemes completed, signal completion to walker */ |
| 1710 | damon_for_each_scheme(siter, ctx) { |
| 1711 | if (!siter->walk_completed) |
| 1712 | return; |
| 1713 | } |
| 1714 | damon_for_each_scheme(siter, ctx) |
| 1715 | siter->walk_completed = false; |
| 1716 | |
| 1717 | complete(&control->completion); |
| 1718 | ctx->walk_control = NULL; |
| 1719 | } |
| 1720 | |
| 1721 | /* |
| 1722 | * damos_walk_cancel() - Cancel the current DAMOS walk request. |
| 1723 | * @ctx: The context of &damon_ctx->walk_control. |
| 1724 | * |
 * This function is called when @ctx is deactivated by DAMOS watermarks, when
 * a DAMOS walk is requested but there is no DAMOS scheme to walk for, or
 * when the kdamond is already out of the main loop and therefore going to be
 * terminated, and hence cannot continue the walks. This function therefore
 * marks the walk request as canceled, so that damos_walk() can wake up and
 * return.
| 1730 | */ |
| 1731 | static void damos_walk_cancel(struct damon_ctx *ctx) |
| 1732 | { |
| 1733 | struct damos_walk_control *control; |
| 1734 | |
| 1735 | mutex_lock(&ctx->walk_control_lock); |
| 1736 | control = ctx->walk_control; |
| 1737 | mutex_unlock(&ctx->walk_control_lock); |
| 1738 | |
| 1739 | if (!control) |
| 1740 | return; |
| 1741 | control->canceled = true; |
| 1742 | complete(&control->completion); |
| 1743 | mutex_lock(&ctx->walk_control_lock); |
| 1744 | ctx->walk_control = NULL; |
| 1745 | mutex_unlock(&ctx->walk_control_lock); |
| 1746 | } |
| 1747 | |
| 1748 | static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, |
| 1749 | struct damon_region *r, struct damos *s) |
| 1750 | { |
| 1751 | struct damos_quota *quota = &s->quota; |
| 1752 | unsigned long sz = damon_sz_region(r); |
| 1753 | struct timespec64 begin, end; |
| 1754 | unsigned long sz_applied = 0; |
| 1755 | unsigned long sz_ops_filter_passed = 0; |
	/*
	 * We plan to support multiple contexts per kdamond, as the DAMON
	 * sysfs interface implies with its 'nr_contexts' file. Nevertheless,
	 * only a single context per kdamond is supported for now. So, we can
	 * simply use the '0' context index here.
	 */
| 1762 | unsigned int cidx = 0; |
| 1763 | struct damos *siter; /* schemes iterator */ |
| 1764 | unsigned int sidx = 0; |
| 1765 | struct damon_target *titer; /* targets iterator */ |
| 1766 | unsigned int tidx = 0; |
| 1767 | bool do_trace = false; |
| 1768 | |
| 1769 | /* get indices for trace_damos_before_apply() */ |
| 1770 | if (trace_damos_before_apply_enabled()) { |
| 1771 | damon_for_each_scheme(siter, c) { |
| 1772 | if (siter == s) |
| 1773 | break; |
| 1774 | sidx++; |
| 1775 | } |
| 1776 | damon_for_each_target(titer, c) { |
| 1777 | if (titer == t) |
| 1778 | break; |
| 1779 | tidx++; |
| 1780 | } |
| 1781 | do_trace = true; |
| 1782 | } |
| 1783 | |
| 1784 | if (c->ops.apply_scheme) { |
| 1785 | if (quota->esz && quota->charged_sz + sz > quota->esz) { |
| 1786 | sz = ALIGN_DOWN(quota->esz - quota->charged_sz, |
| 1787 | DAMON_MIN_REGION); |
| 1788 | if (!sz) |
| 1789 | goto update_stat; |
| 1790 | damon_split_region_at(t, r, sz); |
| 1791 | } |
| 1792 | if (damos_filter_out(c, t, r, s)) |
| 1793 | return; |
| 1794 | ktime_get_coarse_ts64(&begin); |
| 1795 | trace_damos_before_apply(cidx, sidx, tidx, r, |
| 1796 | damon_nr_regions(t), do_trace); |
| 1797 | sz_applied = c->ops.apply_scheme(c, t, r, s, |
| 1798 | &sz_ops_filter_passed); |
| 1799 | damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed); |
| 1800 | ktime_get_coarse_ts64(&end); |
| 1801 | quota->total_charged_ns += timespec64_to_ns(&end) - |
| 1802 | timespec64_to_ns(&begin); |
| 1803 | quota->charged_sz += sz; |
| 1804 | if (quota->esz && quota->charged_sz >= quota->esz) { |
| 1805 | quota->charge_target_from = t; |
| 1806 | quota->charge_addr_from = r->ar.end + 1; |
| 1807 | } |
| 1808 | } |
| 1809 | if (s->action != DAMOS_STAT) |
| 1810 | r->age = 0; |
| 1811 | |
| 1812 | update_stat: |
| 1813 | damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed); |
| 1814 | } |
| 1815 | |
| 1816 | static void damon_do_apply_schemes(struct damon_ctx *c, |
| 1817 | struct damon_target *t, |
| 1818 | struct damon_region *r) |
| 1819 | { |
| 1820 | struct damos *s; |
| 1821 | |
| 1822 | damon_for_each_scheme(s, c) { |
| 1823 | struct damos_quota *quota = &s->quota; |
| 1824 | |
| 1825 | if (c->passed_sample_intervals < s->next_apply_sis) |
| 1826 | continue; |
| 1827 | |
| 1828 | if (!s->wmarks.activated) |
| 1829 | continue; |
| 1830 | |
| 1831 | /* Check the quota */ |
| 1832 | if (quota->esz && quota->charged_sz >= quota->esz) |
| 1833 | continue; |
| 1834 | |
| 1835 | if (damos_skip_charged_region(t, &r, s)) |
| 1836 | continue; |
| 1837 | |
| 1838 | if (!damos_valid_target(c, t, r, s)) |
| 1839 | continue; |
| 1840 | |
| 1841 | damos_apply_scheme(c, t, r, s); |
| 1842 | } |
| 1843 | } |
| 1844 | |
| 1845 | /* |
| 1846 | * damon_feed_loop_next_input() - get next input to achieve a target score. |
 * @last_input:	The last input.
 * @score:	Current score that was made with @last_input.
| 1849 | * |
 * Calculate the next input to achieve the target score, based on the last
 * input and the current score. Assuming the input and the score are
 * positively proportional, calculate how much compensation should be added
 * to or subtracted from the last input, as a proportion of the last input.
 * To keep the next input from collapsing to zero, the result is always made
 * non-zero. In short form (assuming support for floating point and signed
 * calculations), the algorithm is as below.
| 1857 | * |
| 1858 | * next_input = max(last_input * ((goal - current) / goal + 1), 1) |
| 1859 | * |
 * For a simple implementation, we assume the target score is always 10,000.
 * The caller should adjust @score accordingly.
 *
 * Returns the next input that is assumed to achieve the target score.
| 1864 | */ |
| 1865 | static unsigned long damon_feed_loop_next_input(unsigned long last_input, |
| 1866 | unsigned long score) |
| 1867 | { |
| 1868 | const unsigned long goal = 10000; |
	/* Set the minimum input as 10000 to keep the compensation non-zero */
| 1870 | const unsigned long min_input = 10000; |
| 1871 | unsigned long score_goal_diff, compensation; |
| 1872 | bool over_achieving = score > goal; |
| 1873 | |
| 1874 | if (score == goal) |
| 1875 | return last_input; |
| 1876 | if (score >= goal * 2) |
| 1877 | return min_input; |
| 1878 | |
| 1879 | if (over_achieving) |
| 1880 | score_goal_diff = score - goal; |
| 1881 | else |
| 1882 | score_goal_diff = goal - score; |
| 1883 | |
| 1884 | if (last_input < ULONG_MAX / score_goal_diff) |
| 1885 | compensation = last_input * score_goal_diff / goal; |
| 1886 | else |
| 1887 | compensation = last_input / goal * score_goal_diff; |
| 1888 | |
| 1889 | if (over_achieving) |
| 1890 | return max(last_input - compensation, min_input); |
| 1891 | if (last_input < ULONG_MAX - compensation) |
| 1892 | return last_input + compensation; |
| 1893 | return ULONG_MAX; |
| 1894 | } |
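
/*
 * Worked example of the feedback above: if the last input was
 * 1,000,000 and the current score is 5,000 (half of the 10,000 goal),
 * the compensation is 1,000,000 * 5,000 / 10,000 == 500,000, so the
 * next input becomes 1,500,000.  Conversely, a score of 15,000 shrinks
 * the next input to 500,000, and a score of 20,000 or more falls back
 * to min_input.
 */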
| 1895 | |
| 1896 | #ifdef CONFIG_PSI |
| 1897 | |
| 1898 | static u64 damos_get_some_mem_psi_total(void) |
| 1899 | { |
| 1900 | if (static_branch_likely(&psi_disabled)) |
| 1901 | return 0; |
| 1902 | return div_u64(psi_system.total[PSI_AVGS][PSI_MEM * 2], |
| 1903 | NSEC_PER_USEC); |
| 1904 | } |
| 1905 | |
| 1906 | #else /* CONFIG_PSI */ |
| 1907 | |
| 1908 | static inline u64 damos_get_some_mem_psi_total(void) |
| 1909 | { |
| 1910 | return 0; |
}
| 1912 | |
| 1913 | #endif /* CONFIG_PSI */ |
| 1914 | |
| 1915 | #ifdef CONFIG_NUMA |
| 1916 | static __kernel_ulong_t damos_get_node_mem_bp( |
| 1917 | struct damos_quota_goal *goal) |
| 1918 | { |
| 1919 | struct sysinfo i; |
| 1920 | __kernel_ulong_t numerator; |
| 1921 | |
| 1922 | si_meminfo_node(&i, goal->nid); |
| 1923 | if (goal->metric == DAMOS_QUOTA_NODE_MEM_USED_BP) |
| 1924 | numerator = i.totalram - i.freeram; |
| 1925 | else /* DAMOS_QUOTA_NODE_MEM_FREE_BP */ |
| 1926 | numerator = i.freeram; |
| 1927 | return numerator * 10000 / i.totalram; |
| 1928 | } |
| 1929 | #else |
| 1930 | static __kernel_ulong_t damos_get_node_mem_bp( |
| 1931 | struct damos_quota_goal *goal) |
| 1932 | { |
| 1933 | return 0; |
| 1934 | } |
| 1935 | #endif |
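
/*
 * For example, on a NUMA node with 16 GiB total and 4 GiB free memory,
 * damos_get_node_mem_bp() returns 2500 basis points (25%) for
 * DAMOS_QUOTA_NODE_MEM_FREE_BP, and 7500 for
 * DAMOS_QUOTA_NODE_MEM_USED_BP.
 */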
| 1938 | static void damos_set_quota_goal_current_value(struct damos_quota_goal *goal) |
| 1939 | { |
| 1940 | u64 now_psi_total; |
| 1941 | |
| 1942 | switch (goal->metric) { |
| 1943 | case DAMOS_QUOTA_USER_INPUT: |
| 1944 | /* User should already set goal->current_value */ |
| 1945 | break; |
| 1946 | case DAMOS_QUOTA_SOME_MEM_PSI_US: |
| 1947 | now_psi_total = damos_get_some_mem_psi_total(); |
| 1948 | goal->current_value = now_psi_total - goal->last_psi_total; |
| 1949 | goal->last_psi_total = now_psi_total; |
| 1950 | break; |
| 1951 | case DAMOS_QUOTA_NODE_MEM_USED_BP: |
| 1952 | case DAMOS_QUOTA_NODE_MEM_FREE_BP: |
| 1953 | goal->current_value = damos_get_node_mem_bp(goal); |
| 1954 | break; |
| 1955 | default: |
| 1956 | break; |
| 1957 | } |
| 1958 | } |
| 1959 | |
| 1960 | /* Return the highest score since it makes schemes least aggressive */ |
| 1961 | static unsigned long damos_quota_score(struct damos_quota *quota) |
| 1962 | { |
| 1963 | struct damos_quota_goal *goal; |
| 1964 | unsigned long highest_score = 0; |
| 1965 | |
| 1966 | damos_for_each_quota_goal(goal, quota) { |
| 1967 | damos_set_quota_goal_current_value(goal); |
| 1968 | highest_score = max(highest_score, |
| 1969 | goal->current_value * 10000 / |
| 1970 | goal->target_value); |
| 1971 | } |
| 1972 | |
| 1973 | return highest_score; |
| 1974 | } |
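
/*
 * Example of the score selection above: with two goals whose
 * current/target values are 500/1,000 and 3,000/10,000, the per-goal
 * scores are 5,000 and 3,000, so 5,000 is returned and the quota is
 * tuned against the goal that is closest to being exceeded.
 */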
| 1975 | |
| 1976 | /* |
 * Called only if quota->ms or quota->sz is set, or quota->goals is not empty
| 1978 | */ |
| 1979 | static void damos_set_effective_quota(struct damos_quota *quota) |
| 1980 | { |
| 1981 | unsigned long throughput; |
| 1982 | unsigned long esz = ULONG_MAX; |
| 1983 | |
| 1984 | if (!quota->ms && list_empty("a->goals)) { |
| 1985 | quota->esz = quota->sz; |
| 1986 | return; |
| 1987 | } |
| 1988 | |
| 1989 | if (!list_empty("a->goals)) { |
| 1990 | unsigned long score = damos_quota_score(quota); |
| 1991 | |
| 1992 | quota->esz_bp = damon_feed_loop_next_input( |
| 1993 | max(quota->esz_bp, 10000UL), |
| 1994 | score); |
| 1995 | esz = quota->esz_bp / 10000; |
| 1996 | } |
| 1997 | |
| 1998 | if (quota->ms) { |
| 1999 | if (quota->total_charged_ns) |
| 2000 | throughput = quota->total_charged_sz * 1000000 / |
| 2001 | quota->total_charged_ns; |
| 2002 | else |
| 2003 | throughput = PAGE_SIZE * 1024; |
| 2004 | esz = min(throughput * quota->ms, esz); |
| 2005 | } |
| 2006 | |
| 2007 | if (quota->sz && quota->sz < esz) |
| 2008 | esz = quota->sz; |
| 2009 | |
| 2010 | quota->esz = esz; |
| 2011 | } |
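
/*
 * Example of the time quota conversion above: if 100 MiB were charged
 * while consuming one second (1e9 ns) of apply time in total, the
 * estimated throughput is 100 MiB * 1000000 / 1e9, or about 0.1 MiB
 * per millisecond.  With quota->ms == 10, the effective size quota is
 * then capped at about 1 MiB per reset interval, unless quota->sz or
 * the goal-driven feedback sets a smaller value.
 */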
| 2012 | |
| 2013 | static void damos_adjust_quota(struct damon_ctx *c, struct damos *s) |
| 2014 | { |
| 2015 | struct damos_quota *quota = &s->quota; |
| 2016 | struct damon_target *t; |
| 2017 | struct damon_region *r; |
| 2018 | unsigned long cumulated_sz; |
| 2019 | unsigned int score, max_score = 0; |
| 2020 | |
| 2021 | if (!quota->ms && !quota->sz && list_empty("a->goals)) |
| 2022 | return; |
| 2023 | |
| 2024 | /* New charge window starts */ |
| 2025 | if (time_after_eq(jiffies, quota->charged_from + |
| 2026 | msecs_to_jiffies(quota->reset_interval))) { |
| 2027 | if (quota->esz && quota->charged_sz >= quota->esz) |
| 2028 | s->stat.qt_exceeds++; |
| 2029 | quota->total_charged_sz += quota->charged_sz; |
| 2030 | quota->charged_from = jiffies; |
| 2031 | quota->charged_sz = 0; |
| 2032 | damos_set_effective_quota(quota); |
| 2033 | } |
| 2034 | |
| 2035 | if (!c->ops.get_scheme_score) |
| 2036 | return; |
| 2037 | |
| 2038 | /* Fill up the score histogram */ |
| 2039 | memset(c->regions_score_histogram, 0, |
| 2040 | sizeof(*c->regions_score_histogram) * |
| 2041 | (DAMOS_MAX_SCORE + 1)); |
| 2042 | damon_for_each_target(t, c) { |
| 2043 | damon_for_each_region(r, t) { |
| 2044 | if (!__damos_valid_target(r, s)) |
| 2045 | continue; |
| 2046 | score = c->ops.get_scheme_score(c, t, r, s); |
| 2047 | c->regions_score_histogram[score] += |
| 2048 | damon_sz_region(r); |
| 2049 | if (score > max_score) |
| 2050 | max_score = score; |
| 2051 | } |
| 2052 | } |
| 2053 | |
| 2054 | /* Set the min score limit */ |
| 2055 | for (cumulated_sz = 0, score = max_score; ; score--) { |
| 2056 | cumulated_sz += c->regions_score_histogram[score]; |
| 2057 | if (cumulated_sz >= quota->esz || !score) |
| 2058 | break; |
| 2059 | } |
| 2060 | quota->min_score = score; |
| 2061 | } |
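
/*
 * Example of the min_score selection above: if quota->esz is 10 MiB
 * and the histogram holds 4 MiB worth of regions at score 90 and 8 MiB
 * at score 80, the cumulated size reaches 12 MiB at score 80, so
 * quota->min_score becomes 80 and only regions of score 80 or higher
 * are applied under the quota.
 */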
| 2062 | |
| 2063 | static void kdamond_apply_schemes(struct damon_ctx *c) |
| 2064 | { |
| 2065 | struct damon_target *t; |
| 2066 | struct damon_region *r, *next_r; |
| 2067 | struct damos *s; |
| 2068 | unsigned long sample_interval = c->attrs.sample_interval ? |
| 2069 | c->attrs.sample_interval : 1; |
| 2070 | bool has_schemes_to_apply = false; |
| 2071 | |
| 2072 | damon_for_each_scheme(s, c) { |
| 2073 | if (c->passed_sample_intervals < s->next_apply_sis) |
| 2074 | continue; |
| 2075 | |
| 2076 | if (!s->wmarks.activated) |
| 2077 | continue; |
| 2078 | |
| 2079 | has_schemes_to_apply = true; |
| 2080 | |
| 2081 | damos_adjust_quota(c, s); |
| 2082 | } |
| 2083 | |
| 2084 | if (!has_schemes_to_apply) |
| 2085 | return; |
| 2086 | |
| 2087 | mutex_lock(&c->walk_control_lock); |
| 2088 | damon_for_each_target(t, c) { |
| 2089 | damon_for_each_region_safe(r, next_r, t) |
| 2090 | damon_do_apply_schemes(c, t, r); |
| 2091 | } |
| 2092 | |
| 2093 | damon_for_each_scheme(s, c) { |
| 2094 | if (c->passed_sample_intervals < s->next_apply_sis) |
| 2095 | continue; |
| 2096 | damos_walk_complete(c, s); |
| 2097 | s->next_apply_sis = c->passed_sample_intervals + |
| 2098 | (s->apply_interval_us ? s->apply_interval_us : |
| 2099 | c->attrs.aggr_interval) / sample_interval; |
| 2100 | s->last_applied = NULL; |
| 2101 | } |
| 2102 | mutex_unlock(&c->walk_control_lock); |
| 2103 | } |
| 2104 | |
| 2105 | /* |
| 2106 | * Merge two adjacent regions into one region |
| 2107 | */ |
| 2108 | static void damon_merge_two_regions(struct damon_target *t, |
| 2109 | struct damon_region *l, struct damon_region *r) |
| 2110 | { |
| 2111 | unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r); |
| 2112 | |
| 2113 | l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) / |
| 2114 | (sz_l + sz_r); |
| 2115 | l->nr_accesses_bp = l->nr_accesses * 10000; |
| 2116 | l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r); |
| 2117 | l->ar.end = r->ar.end; |
| 2118 | damon_destroy_region(r, t); |
| 2119 | } |
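
/*
 * For example, merging a 2 MiB region with nr_accesses 10 into an
 * adjacent 1 MiB region with nr_accesses 4 yields a 3 MiB region with
 * nr_accesses (10 * 2 + 4 * 1) / 3 == 8, the size-weighted average.
 */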
| 2120 | |
| 2121 | /* |
| 2122 | * Merge adjacent regions having similar access frequencies |
| 2123 | * |
| 2124 | * t target affected by this merge operation |
| 2125 | * thres '->nr_accesses' diff threshold for the merge |
| 2126 | * sz_limit size upper limit of each region |
| 2127 | */ |
| 2128 | static void damon_merge_regions_of(struct damon_target *t, unsigned int thres, |
| 2129 | unsigned long sz_limit) |
| 2130 | { |
| 2131 | struct damon_region *r, *prev = NULL, *next; |
| 2132 | |
| 2133 | damon_for_each_region_safe(r, next, t) { |
| 2134 | if (abs(r->nr_accesses - r->last_nr_accesses) > thres) |
| 2135 | r->age = 0; |
| 2136 | else |
| 2137 | r->age++; |
| 2138 | |
| 2139 | if (prev && prev->ar.end == r->ar.start && |
| 2140 | abs(prev->nr_accesses - r->nr_accesses) <= thres && |
| 2141 | damon_sz_region(prev) + damon_sz_region(r) <= sz_limit) |
| 2142 | damon_merge_two_regions(t, prev, r); |
| 2143 | else |
| 2144 | prev = r; |
| 2145 | } |
| 2146 | } |
| 2147 | |
| 2148 | /* |
| 2149 | * Merge adjacent regions having similar access frequencies |
| 2150 | * |
| 2151 | * threshold '->nr_accesses' diff threshold for the merge |
| 2152 | * sz_limit size upper limit of each region |
| 2153 | * |
| 2154 | * This function merges monitoring target regions which are adjacent and their |
| 2155 | * access frequencies are similar. This is for minimizing the monitoring |
| 2156 | * overhead under the dynamically changeable access pattern. If a merge was |
| 2157 | * unnecessarily made, later 'kdamond_split_regions()' will revert it. |
| 2158 | * |
 * The total number of regions could be higher than the user-defined limit,
 * max_nr_regions, in some cases. For example, the user can update
 * max_nr_regions to a number lower than the current number of regions while
 * DAMON is running. For such a case, repeat the merging until the limit is
 * met while increasing @threshold up to the possible maximum level.
| 2164 | */ |
| 2165 | static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold, |
| 2166 | unsigned long sz_limit) |
| 2167 | { |
| 2168 | struct damon_target *t; |
| 2169 | unsigned int nr_regions; |
| 2170 | unsigned int max_thres; |
| 2171 | |
| 2172 | max_thres = c->attrs.aggr_interval / |
| 2173 | (c->attrs.sample_interval ? c->attrs.sample_interval : 1); |
| 2174 | do { |
| 2175 | nr_regions = 0; |
| 2176 | damon_for_each_target(t, c) { |
| 2177 | damon_merge_regions_of(t, threshold, sz_limit); |
| 2178 | nr_regions += damon_nr_regions(t); |
| 2179 | } |
| 2180 | threshold = max(1, threshold * 2); |
| 2181 | } while (nr_regions > c->attrs.max_nr_regions && |
| 2182 | threshold / 2 < max_thres); |
| 2183 | } |
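
/*
 * For example, with a 100 ms aggregation interval and a 5 ms sampling
 * interval, max_thres is 20.  If the number of regions stays above
 * max_nr_regions, the merge is retried with the threshold doubled on
 * each iteration (e.g., 2, 4, 8, 16, 32) until the limit is met or the
 * threshold exceeds the maximum observable access count.
 */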
| 2184 | |
| 2185 | /* |
| 2186 | * Split a region in two |
| 2187 | * |
| 2188 | * r the region to be split |
| 2189 | * sz_r size of the first sub-region that will be made |
| 2190 | */ |
| 2191 | static void damon_split_region_at(struct damon_target *t, |
| 2192 | struct damon_region *r, unsigned long sz_r) |
| 2193 | { |
| 2194 | struct damon_region *new; |
| 2195 | |
| 2196 | new = damon_new_region(r->ar.start + sz_r, r->ar.end); |
| 2197 | if (!new) |
| 2198 | return; |
| 2199 | |
| 2200 | r->ar.end = new->ar.start; |
| 2201 | |
| 2202 | new->age = r->age; |
| 2203 | new->last_nr_accesses = r->last_nr_accesses; |
| 2204 | new->nr_accesses_bp = r->nr_accesses_bp; |
| 2205 | new->nr_accesses = r->nr_accesses; |
| 2206 | |
| 2207 | damon_insert_region(new, r, damon_next_region(r), t); |
| 2208 | } |
| 2209 | |
| 2210 | /* Split every region in the given target into 'nr_subs' regions */ |
| 2211 | static void damon_split_regions_of(struct damon_target *t, int nr_subs) |
| 2212 | { |
| 2213 | struct damon_region *r, *next; |
| 2214 | unsigned long sz_region, sz_sub = 0; |
| 2215 | int i; |
| 2216 | |
| 2217 | damon_for_each_region_safe(r, next, t) { |
| 2218 | sz_region = damon_sz_region(r); |
| 2219 | |
| 2220 | for (i = 0; i < nr_subs - 1 && |
| 2221 | sz_region > 2 * DAMON_MIN_REGION; i++) { |
			/*
			 * Randomly select the size of the left sub-region to
			 * be at least 10% and at most 90% of the original
			 * region.
			 */
| 2226 | sz_sub = ALIGN_DOWN(damon_rand(1, 10) * |
| 2227 | sz_region / 10, DAMON_MIN_REGION); |
| 2228 | /* Do not allow blank region */ |
| 2229 | if (sz_sub == 0 || sz_sub >= sz_region) |
| 2230 | continue; |
| 2231 | |
| 2232 | damon_split_region_at(t, r, sz_sub); |
| 2233 | sz_region = sz_sub; |
| 2234 | } |
| 2235 | } |
| 2236 | } |
| 2237 | |
| 2238 | /* |
| 2239 | * Split every target region into randomly-sized small regions |
| 2240 | * |
 * This function splits every target region into randomly-sized small regions
 * if the current total number of regions is equal to or smaller than half of
 * the user-specified maximum number of regions. This is for maximizing the
 * monitoring accuracy under dynamically changeable access patterns. If a
 * split was unnecessarily made, later 'kdamond_merge_regions()' will revert
 * it.
| 2247 | */ |
| 2248 | static void kdamond_split_regions(struct damon_ctx *ctx) |
| 2249 | { |
| 2250 | struct damon_target *t; |
| 2251 | unsigned int nr_regions = 0; |
| 2252 | static unsigned int last_nr_regions; |
| 2253 | int nr_subregions = 2; |
| 2254 | |
| 2255 | damon_for_each_target(t, ctx) |
| 2256 | nr_regions += damon_nr_regions(t); |
| 2257 | |
| 2258 | if (nr_regions > ctx->attrs.max_nr_regions / 2) |
| 2259 | return; |
| 2260 | |
| 2261 | /* Maybe the middle of the region has different access frequency */ |
| 2262 | if (last_nr_regions == nr_regions && |
| 2263 | nr_regions < ctx->attrs.max_nr_regions / 3) |
| 2264 | nr_subregions = 3; |
| 2265 | |
| 2266 | damon_for_each_target(t, ctx) |
| 2267 | damon_split_regions_of(t, nr_subregions); |
| 2268 | |
| 2269 | last_nr_regions = nr_regions; |
| 2270 | } |
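
/*
 * For example, with max_nr_regions == 1000 and 400 current regions,
 * each region is split in two.  If the number of regions stays the
 * same across two consecutive calls while being below a third of the
 * limit (e.g., 300 regions), each region is instead split in three to
 * probe whether the middle of the regions has a different access
 * frequency.
 */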
| 2271 | |
| 2272 | /* |
| 2273 | * Check whether current monitoring should be stopped |
| 2274 | * |
| 2275 | * The monitoring is stopped when either the user requested to stop, or all |
| 2276 | * monitoring targets are invalid. |
| 2277 | * |
 * Returns true if the current monitoring should be stopped.
| 2279 | */ |
| 2280 | static bool kdamond_need_stop(struct damon_ctx *ctx) |
| 2281 | { |
| 2282 | struct damon_target *t; |
| 2283 | |
| 2284 | if (kthread_should_stop()) |
| 2285 | return true; |
| 2286 | |
| 2287 | if (!ctx->ops.target_valid) |
| 2288 | return false; |
| 2289 | |
| 2290 | damon_for_each_target(t, ctx) { |
| 2291 | if (ctx->ops.target_valid(t)) |
| 2292 | return false; |
| 2293 | } |
| 2294 | |
| 2295 | return true; |
| 2296 | } |
| 2297 | |
| 2298 | static int damos_get_wmark_metric_value(enum damos_wmark_metric metric, |
| 2299 | unsigned long *metric_value) |
| 2300 | { |
| 2301 | switch (metric) { |
| 2302 | case DAMOS_WMARK_FREE_MEM_RATE: |
| 2303 | *metric_value = global_zone_page_state(NR_FREE_PAGES) * 1000 / |
| 2304 | totalram_pages(); |
| 2305 | return 0; |
| 2306 | default: |
| 2307 | break; |
| 2308 | } |
| 2309 | return -EINVAL; |
| 2310 | } |
| 2311 | |
| 2312 | /* |
 * Returns zero if the scheme is active. Otherwise, returns the time to wait
 * for the next watermark check, in microseconds.
| 2315 | */ |
| 2316 | static unsigned long damos_wmark_wait_us(struct damos *scheme) |
| 2317 | { |
| 2318 | unsigned long metric; |
| 2319 | |
| 2320 | if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric)) |
| 2321 | return 0; |
| 2322 | |
| 2323 | /* higher than high watermark or lower than low watermark */ |
| 2324 | if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) { |
| 2325 | if (scheme->wmarks.activated) |
| 2326 | pr_debug("deactivate a scheme (%d) for %s wmark\n", |
| 2327 | scheme->action, |
| 2328 | str_high_low(metric > scheme->wmarks.high)); |
| 2329 | scheme->wmarks.activated = false; |
| 2330 | return scheme->wmarks.interval; |
| 2331 | } |
| 2332 | |
| 2333 | /* inactive and higher than middle watermark */ |
| 2334 | if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) && |
| 2335 | !scheme->wmarks.activated) |
| 2336 | return scheme->wmarks.interval; |
| 2337 | |
| 2338 | if (!scheme->wmarks.activated) |
| 2339 | pr_debug("activate a scheme (%d)\n", scheme->action); |
| 2340 | scheme->wmarks.activated = true; |
| 2341 | return 0; |
| 2342 | } |
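
/*
 * Example of the hysteresis above, for DAMOS_WMARK_FREE_MEM_RATE with
 * high == 500, mid == 400 and low == 200 (per-thousand): the scheme is
 * deactivated while the free memory rate is above 50% or below 20%.  A
 * deactivated scheme is re-activated only once the rate falls below
 * 40%, while an already active scheme keeps running anywhere between
 * 20% and 50%.
 */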
| 2343 | |
| 2344 | static void kdamond_usleep(unsigned long usecs) |
| 2345 | { |
| 2346 | if (usecs >= USLEEP_RANGE_UPPER_BOUND) |
| 2347 | schedule_timeout_idle(usecs_to_jiffies(usecs)); |
| 2348 | else |
| 2349 | usleep_range_idle(usecs, usecs + 1); |
| 2350 | } |
| 2351 | |
| 2352 | /* |
| 2353 | * kdamond_call() - handle damon_call_control. |
| 2354 | * @ctx: The &struct damon_ctx of the kdamond. |
| 2355 | * @cancel: Whether to cancel the invocation of the function. |
| 2356 | * |
 * If there is a &struct damon_call_control request that was registered via
 * damon_call() on @ctx, invoke or cancel the invocation of its function,
 * depending on @cancel. @cancel is set when the kdamond is already out of
 * the main loop and therefore will be terminated.
| 2361 | */ |
| 2362 | static void kdamond_call(struct damon_ctx *ctx, bool cancel) |
| 2363 | { |
| 2364 | struct damon_call_control *control; |
| 2365 | int ret = 0; |
| 2366 | |
| 2367 | mutex_lock(&ctx->call_control_lock); |
| 2368 | control = ctx->call_control; |
| 2369 | mutex_unlock(&ctx->call_control_lock); |
| 2370 | if (!control) |
| 2371 | return; |
| 2372 | if (cancel) { |
| 2373 | control->canceled = true; |
| 2374 | } else { |
| 2375 | ret = control->fn(control->data); |
| 2376 | control->return_code = ret; |
| 2377 | } |
| 2378 | complete(&control->completion); |
| 2379 | mutex_lock(&ctx->call_control_lock); |
| 2380 | ctx->call_control = NULL; |
| 2381 | mutex_unlock(&ctx->call_control_lock); |
| 2382 | } |
| 2383 | |
| 2384 | /* Returns negative error code if it's not activated but should return */ |
| 2385 | static int kdamond_wait_activation(struct damon_ctx *ctx) |
| 2386 | { |
| 2387 | struct damos *s; |
| 2388 | unsigned long wait_time; |
| 2389 | unsigned long min_wait_time = 0; |
| 2390 | bool init_wait_time = false; |
| 2391 | |
| 2392 | while (!kdamond_need_stop(ctx)) { |
| 2393 | damon_for_each_scheme(s, ctx) { |
| 2394 | wait_time = damos_wmark_wait_us(s); |
| 2395 | if (!init_wait_time || wait_time < min_wait_time) { |
| 2396 | init_wait_time = true; |
| 2397 | min_wait_time = wait_time; |
| 2398 | } |
| 2399 | } |
| 2400 | if (!min_wait_time) |
| 2401 | return 0; |
| 2402 | |
| 2403 | kdamond_usleep(min_wait_time); |
| 2404 | |
| 2405 | if (ctx->callback.after_wmarks_check && |
| 2406 | ctx->callback.after_wmarks_check(ctx)) |
| 2407 | break; |
| 2408 | kdamond_call(ctx, false); |
| 2409 | damos_walk_cancel(ctx); |
| 2410 | } |
| 2411 | return -EBUSY; |
| 2412 | } |
| 2413 | |
| 2414 | static void kdamond_init_ctx(struct damon_ctx *ctx) |
| 2415 | { |
| 2416 | unsigned long sample_interval = ctx->attrs.sample_interval ? |
| 2417 | ctx->attrs.sample_interval : 1; |
| 2418 | unsigned long apply_interval; |
| 2419 | struct damos *scheme; |
| 2420 | |
| 2421 | ctx->passed_sample_intervals = 0; |
| 2422 | ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval; |
| 2423 | ctx->next_ops_update_sis = ctx->attrs.ops_update_interval / |
| 2424 | sample_interval; |
| 2425 | ctx->next_intervals_tune_sis = ctx->next_aggregation_sis * |
| 2426 | ctx->attrs.intervals_goal.aggrs; |
| 2427 | |
| 2428 | damon_for_each_scheme(scheme, ctx) { |
| 2429 | apply_interval = scheme->apply_interval_us ? |
| 2430 | scheme->apply_interval_us : ctx->attrs.aggr_interval; |
| 2431 | scheme->next_apply_sis = apply_interval / sample_interval; |
| 2432 | damos_set_filters_default_reject(scheme); |
| 2433 | } |
| 2434 | } |
| 2435 | |
| 2436 | /* |
| 2437 | * The monitoring daemon that runs as a kernel thread |
| 2438 | */ |
| 2439 | static int kdamond_fn(void *data) |
| 2440 | { |
| 2441 | struct damon_ctx *ctx = data; |
| 2442 | struct damon_target *t; |
| 2443 | struct damon_region *r, *next; |
| 2444 | unsigned int max_nr_accesses = 0; |
| 2445 | unsigned long sz_limit = 0; |
| 2446 | |
| 2447 | pr_debug("kdamond (%d) starts\n", current->pid); |
| 2448 | |
| 2449 | complete(&ctx->kdamond_started); |
| 2450 | kdamond_init_ctx(ctx); |
| 2451 | |
| 2452 | if (ctx->ops.init) |
| 2453 | ctx->ops.init(ctx); |
| 2454 | ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1, |
| 2455 | sizeof(*ctx->regions_score_histogram), GFP_KERNEL); |
| 2456 | if (!ctx->regions_score_histogram) |
| 2457 | goto done; |
| 2458 | |
| 2459 | sz_limit = damon_region_sz_limit(ctx); |
| 2460 | |
| 2461 | while (!kdamond_need_stop(ctx)) { |
| 2462 | /* |
		 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
		 * be changed by the after_wmarks_check() or
		 * after_aggregation() callbacks. Read the values here, and
		 * use those for this iteration. That is, new values updated
		 * via damon_set_attrs() are respected from the next
		 * iteration.
| 2468 | */ |
| 2469 | unsigned long next_aggregation_sis = ctx->next_aggregation_sis; |
| 2470 | unsigned long next_ops_update_sis = ctx->next_ops_update_sis; |
| 2471 | unsigned long sample_interval = ctx->attrs.sample_interval; |
| 2472 | |
| 2473 | if (kdamond_wait_activation(ctx)) |
| 2474 | break; |
| 2475 | |
| 2476 | if (ctx->ops.prepare_access_checks) |
| 2477 | ctx->ops.prepare_access_checks(ctx); |
| 2478 | |
| 2479 | kdamond_usleep(sample_interval); |
| 2480 | ctx->passed_sample_intervals++; |
| 2481 | |
| 2482 | if (ctx->ops.check_accesses) |
| 2483 | max_nr_accesses = ctx->ops.check_accesses(ctx); |
| 2484 | |
| 2485 | if (ctx->passed_sample_intervals >= next_aggregation_sis) { |
| 2486 | kdamond_merge_regions(ctx, |
| 2487 | max_nr_accesses / 10, |
| 2488 | sz_limit); |
| 2489 | if (ctx->callback.after_aggregation && |
| 2490 | ctx->callback.after_aggregation(ctx)) |
| 2491 | break; |
| 2492 | } |
| 2493 | |
| 2494 | /* |
| 2495 | * do kdamond_call() and kdamond_apply_schemes() after |
| 2496 | * kdamond_merge_regions() if possible, to reduce overhead |
| 2497 | */ |
| 2498 | kdamond_call(ctx, false); |
| 2499 | if (!list_empty(&ctx->schemes)) |
| 2500 | kdamond_apply_schemes(ctx); |
| 2501 | else |
| 2502 | damos_walk_cancel(ctx); |
| 2503 | |
| 2504 | sample_interval = ctx->attrs.sample_interval ? |
| 2505 | ctx->attrs.sample_interval : 1; |
| 2506 | if (ctx->passed_sample_intervals >= next_aggregation_sis) { |
| 2507 | if (ctx->attrs.intervals_goal.aggrs && |
| 2508 | ctx->passed_sample_intervals >= |
| 2509 | ctx->next_intervals_tune_sis) { |
| 2510 | /* |
				 * ctx->next_aggregation_sis might be updated
				 * from kdamond_call(). In that case,
				 * damon_set_attrs(), which will be called from
				 * kdamond_tune_intervals(), may wrongly think
				 * this is in the middle of the current
				 * aggregation, and make the aggregation
				 * information reset for all regions. Then,
				 * the following kdamond_reset_aggregated()
				 * call will make the region information
				 * invalid, particularly for ->nr_accesses_bp.
				 *
				 * Reset ->next_aggregation_sis to avoid that.
				 * It will be correctly updated after this if
				 * clause anyway.
| 2525 | */ |
| 2526 | ctx->next_aggregation_sis = |
| 2527 | next_aggregation_sis; |
| 2528 | ctx->next_intervals_tune_sis += |
| 2529 | ctx->attrs.aggr_samples * |
| 2530 | ctx->attrs.intervals_goal.aggrs; |
| 2531 | kdamond_tune_intervals(ctx); |
| 2532 | sample_interval = ctx->attrs.sample_interval ? |
| 2533 | ctx->attrs.sample_interval : 1; |
| 2534 | |
| 2535 | } |
| 2536 | ctx->next_aggregation_sis = next_aggregation_sis + |
| 2537 | ctx->attrs.aggr_interval / sample_interval; |
| 2538 | |
| 2539 | kdamond_reset_aggregated(ctx); |
| 2540 | kdamond_split_regions(ctx); |
| 2541 | } |
| 2542 | |
| 2543 | if (ctx->passed_sample_intervals >= next_ops_update_sis) { |
| 2544 | ctx->next_ops_update_sis = next_ops_update_sis + |
| 2545 | ctx->attrs.ops_update_interval / |
| 2546 | sample_interval; |
| 2547 | if (ctx->ops.update) |
| 2548 | ctx->ops.update(ctx); |
| 2549 | sz_limit = damon_region_sz_limit(ctx); |
| 2550 | } |
| 2551 | } |
| 2552 | done: |
| 2553 | damon_for_each_target(t, ctx) { |
| 2554 | damon_for_each_region_safe(r, next, t) |
| 2555 | damon_destroy_region(r, t); |
| 2556 | } |
| 2557 | |
| 2558 | if (ctx->callback.before_terminate) |
| 2559 | ctx->callback.before_terminate(ctx); |
| 2560 | if (ctx->ops.cleanup) |
| 2561 | ctx->ops.cleanup(ctx); |
| 2562 | kfree(ctx->regions_score_histogram); |
| 2563 | |
| 2564 | pr_debug("kdamond (%d) finishes\n", current->pid); |
| 2565 | mutex_lock(&ctx->kdamond_lock); |
| 2566 | ctx->kdamond = NULL; |
| 2567 | mutex_unlock(&ctx->kdamond_lock); |
| 2568 | |
| 2569 | kdamond_call(ctx, true); |
| 2570 | damos_walk_cancel(ctx); |
| 2571 | |
| 2572 | mutex_lock(&damon_lock); |
| 2573 | nr_running_ctxs--; |
| 2574 | if (!nr_running_ctxs && running_exclusive_ctxs) |
| 2575 | running_exclusive_ctxs = false; |
| 2576 | mutex_unlock(&damon_lock); |
| 2577 | |
| 2578 | return 0; |
| 2579 | } |
| 2580 | |
| 2581 | /* |
| 2582 | * struct damon_system_ram_region - System RAM resource address region of |
| 2583 | * [@start, @end). |
| 2584 | * @start: Start address of the region (inclusive). |
| 2585 | * @end: End address of the region (exclusive). |
| 2586 | */ |
| 2587 | struct damon_system_ram_region { |
| 2588 | unsigned long start; |
| 2589 | unsigned long end; |
| 2590 | }; |
| 2591 | |
| 2592 | static int walk_system_ram(struct resource *res, void *arg) |
| 2593 | { |
| 2594 | struct damon_system_ram_region *a = arg; |
| 2595 | |
| 2596 | if (a->end - a->start < resource_size(res)) { |
| 2597 | a->start = res->start; |
| 2598 | a->end = res->end; |
| 2599 | } |
| 2600 | return 0; |
| 2601 | } |
| 2602 | |
| 2603 | /* |
 * Find the biggest 'System RAM' resource and store its start and end address
 * in @start and @end, respectively. If no System RAM is found, returns false.
| 2606 | */ |
| 2607 | static bool damon_find_biggest_system_ram(unsigned long *start, |
		unsigned long *end)
{
| 2611 | struct damon_system_ram_region arg = {}; |
| 2612 | |
| 2613 | walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram); |
| 2614 | if (arg.end <= arg.start) |
| 2615 | return false; |
| 2616 | |
| 2617 | *start = arg.start; |
| 2618 | *end = arg.end; |
| 2619 | return true; |
| 2620 | } |
| 2621 | |
| 2622 | /** |
| 2623 | * damon_set_region_biggest_system_ram_default() - Set the region of the given |
| 2624 | * monitoring target as requested, or biggest 'System RAM'. |
| 2625 | * @t: The monitoring target to set the region. |
| 2626 | * @start: The pointer to the start address of the region. |
| 2627 | * @end: The pointer to the end address of the region. |
| 2628 | * |
| 2629 | * This function sets the region of @t as requested by @start and @end. If the |
| 2630 | * values of @start and @end are zero, however, this function finds the biggest |
| 2631 | * 'System RAM' resource and sets the region to cover the resource. In the |
| 2632 | * latter case, this function saves the start and end addresses of the resource |
| 2633 | * in @start and @end, respectively. |
| 2634 | * |
| 2635 | * Return: 0 on success, negative error code otherwise. |
| 2636 | */ |
| 2637 | int damon_set_region_biggest_system_ram_default(struct damon_target *t, |
| 2638 | unsigned long *start, unsigned long *end) |
| 2639 | { |
| 2640 | struct damon_addr_range addr_range; |
| 2641 | |
| 2642 | if (*start > *end) |
| 2643 | return -EINVAL; |
| 2644 | |
| 2645 | if (!*start && !*end && |
| 2646 | !damon_find_biggest_system_ram(start, end)) |
| 2647 | return -EINVAL; |
| 2648 | |
| 2649 | addr_range.start = *start; |
| 2650 | addr_range.end = *end; |
| 2651 | return damon_set_regions(t, &addr_range, 1); |
| 2652 | } |
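
/*
 * A minimal usage sketch of the above (hypothetical caller code; 't'
 * is assumed to be an already constructed &struct damon_target):
 *
 *	unsigned long start = 0, end = 0;
 *	int err;
 *
 *	err = damon_set_region_biggest_system_ram_default(t, &start, &end);
 *	if (!err)
 *		pr_info("monitoring PA range [%lu, %lu)\n", start, end);
 */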
| 2653 | |
| 2654 | /* |
| 2655 | * damon_moving_sum() - Calculate an inferred moving sum value. |
| 2656 | * @mvsum: Inferred sum of the last @len_window values. |
| 2657 | * @nomvsum: Non-moving sum of the last discrete @len_window window values. |
| 2658 | * @len_window: The number of last values to take care of. |
| 2659 | * @new_value: New value that will be added to the pseudo moving sum. |
| 2660 | * |
 * Moving sum (moving average * window size) is good for handling noise, but
 * the cost of keeping past values can be high for an arbitrary window size.
 * This function implements a lightweight pseudo moving sum that doesn't keep
 * the past window values.
 *
 * It simply assumes there was no noise in the past, and gets the no-noise
 * assumed past value to drop from @nomvsum and @len_window. @nomvsum is a
 * non-moving sum of the last window. For example, if @len_window is 10 and
 * we have 25 values, @nomvsum is the sum of the 11th to 20th of the 25
 * values. Hence, this function simply drops @nomvsum / @len_window from the
 * given @mvsum and adds @new_value.
| 2672 | * |
 * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values
 * for the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20.
 * For calculating the next moving sum with a new value, we should drop 0
 * from 50 and add the new value. However, this function assumes it got
 * value 5 for each of the last ten times. Based on the assumption, when the
 * next value is measured, it drops the assumed past value, 5, from the
 * current sum, and adds the new value to get the updated pseudo-moving sum.
 *
 * This means the value could have errors, but the errors disappear for
 * every @len_window-aligned calls. For example, if @len_window is 10, the
 * pseudo moving sum with the 11th to 19th values would have an error, but
 * the sum with the 20th value will not have the error.
| 2685 | * |
 * Return: Pseudo-moving sum after getting the @new_value.
| 2687 | */ |
| 2688 | static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum, |
| 2689 | unsigned int len_window, unsigned int new_value) |
| 2690 | { |
| 2691 | return mvsum - nomvsum / len_window + new_value; |
| 2692 | } |
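
/*
 * For instance, with mvsum == 55, nomvsum == 50, len_window == 10 and
 * new_value == 10, the function returns 55 - 50 / 10 + 10 == 60.
 */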
| 2693 | |
| 2694 | /** |
| 2695 | * damon_update_region_access_rate() - Update the access rate of a region. |
| 2696 | * @r: The DAMON region to update for its access check result. |
 * @accessed: Whether the region was accessed during the last sampling interval.
| 2698 | * @attrs: The damon_attrs of the DAMON context. |
| 2699 | * |
| 2700 | * Update the access rate of a region with the region's last sampling interval |
| 2701 | * access check result. |
| 2702 | * |
| 2703 | * Usually this will be called by &damon_operations->check_accesses callback. |
| 2704 | */ |
| 2705 | void damon_update_region_access_rate(struct damon_region *r, bool accessed, |
| 2706 | struct damon_attrs *attrs) |
| 2707 | { |
| 2708 | unsigned int len_window = 1; |
| 2709 | |
| 2710 | /* |
| 2711 | * sample_interval can be zero, but cannot be larger than |
| 2712 | * aggr_interval, owing to validation of damon_set_attrs(). |
| 2713 | */ |
| 2714 | if (attrs->sample_interval) |
| 2715 | len_window = damon_max_nr_accesses(attrs); |
| 2716 | r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp, |
| 2717 | r->last_nr_accesses * 10000, len_window, |
| 2718 | accessed ? 10000 : 0); |
| 2719 | |
| 2720 | if (accessed) |
| 2721 | r->nr_accesses++; |
| 2722 | } |
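
/*
 * Worked example of the update above, assuming a 5 ms sampling and a
 * 100 ms aggregation interval, i.e., a len_window of 20: if the region
 * scored last_nr_accesses == 4 in the previous aggregation (nomvsum ==
 * 40,000 bp) and is found accessed now, nr_accesses_bp grows by
 * 10,000 - 40,000 / 20 == 8,000 basis points.
 */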
| 2723 | |
| 2724 | static int __init damon_init(void) |
| 2725 | { |
| 2726 | damon_region_cache = KMEM_CACHE(damon_region, 0); |
| 2727 | if (unlikely(!damon_region_cache)) { |
| 2728 | pr_err("creating damon_region_cache fails\n"); |
| 2729 | return -ENOMEM; |
| 2730 | } |
| 2731 | |
| 2732 | return 0; |
| 2733 | } |
| 2734 | |
| 2735 | subsys_initcall(damon_init); |
| 2736 | |
| 2737 | #include "tests/core-kunit.h" |