1// SPDX-License-Identifier: GPL-2.0
2/*
3 * DAMON sysfs Interface
4 *
5 * Copyright (c) 2022 SeongJae Park <sj@kernel.org>
6 */
7
8#include <linux/damon.h>
9#include <linux/kobject.h>
10#include <linux/pid.h>
11#include <linux/sched.h>
12#include <linux/slab.h>
13
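/*
 * Serializes DAMON sysfs interface user inputs.  The 'nr_*' file store
 * handlers below take this with mutex_trylock() and return -EBUSY when
 * another request is in progress.  It also protects damon_sysfs_cmd_request.
 */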
14static DEFINE_MUTEX(damon_sysfs_lock);
15
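/*
 * Each kdamond directory contains a "contexts" directory.  Each context
 * directory provides the "avail_operations" and "operations" files and the
 * "monitoring_attrs", "targets", and "schemes" directories, all of which are
 * built bottom-up by the per-directory sections below.
 */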
16/*
17 * unsigned long range directory
18 */
19
20struct damon_sysfs_ul_range {
21 struct kobject kobj;
22 unsigned long min;
23 unsigned long max;
24};
25
26static struct damon_sysfs_ul_range *damon_sysfs_ul_range_alloc(
27 unsigned long min,
28 unsigned long max)
29{
30 struct damon_sysfs_ul_range *range = kmalloc(sizeof(*range),
31 GFP_KERNEL);
32
33 if (!range)
34 return NULL;
35 range->kobj = (struct kobject){};
36 range->min = min;
37 range->max = max;
38
39 return range;
40}
41
42static ssize_t min_show(struct kobject *kobj, struct kobj_attribute *attr,
43 char *buf)
44{
45 struct damon_sysfs_ul_range *range = container_of(kobj,
46 struct damon_sysfs_ul_range, kobj);
47
48 return sysfs_emit(buf, "%lu\n", range->min);
49}
50
51static ssize_t min_store(struct kobject *kobj, struct kobj_attribute *attr,
52 const char *buf, size_t count)
53{
54 struct damon_sysfs_ul_range *range = container_of(kobj,
55 struct damon_sysfs_ul_range, kobj);
56 unsigned long min;
57 int err;
58
59 err = kstrtoul(buf, 0, &min);
60 if (err)
61 return -EINVAL;
62
63 range->min = min;
64 return count;
65}
66
67static ssize_t max_show(struct kobject *kobj, struct kobj_attribute *attr,
68 char *buf)
69{
70 struct damon_sysfs_ul_range *range = container_of(kobj,
71 struct damon_sysfs_ul_range, kobj);
72
73 return sysfs_emit(buf, "%lu\n", range->max);
74}
75
76static ssize_t max_store(struct kobject *kobj, struct kobj_attribute *attr,
77 const char *buf, size_t count)
78{
79 struct damon_sysfs_ul_range *range = container_of(kobj,
80 struct damon_sysfs_ul_range, kobj);
81 unsigned long max;
82 int err;
83
84 err = kstrtoul(buf, 0, &max);
85 if (err)
86 return -EINVAL;
87
88 range->max = max;
89 return count;
90}
91
92static void damon_sysfs_ul_range_release(struct kobject *kobj)
93{
94 kfree(container_of(kobj, struct damon_sysfs_ul_range, kobj));
95}
96
97static struct kobj_attribute damon_sysfs_ul_range_min_attr =
98 __ATTR_RW_MODE(min, 0600);
99
100static struct kobj_attribute damon_sysfs_ul_range_max_attr =
101 __ATTR_RW_MODE(max, 0600);
102
103static struct attribute *damon_sysfs_ul_range_attrs[] = {
104 &damon_sysfs_ul_range_min_attr.attr,
105 &damon_sysfs_ul_range_max_attr.attr,
106 NULL,
107};
108ATTRIBUTE_GROUPS(damon_sysfs_ul_range);
109
110static struct kobj_type damon_sysfs_ul_range_ktype = {
111 .release = damon_sysfs_ul_range_release,
112 .sysfs_ops = &kobj_sysfs_ops,
113 .default_groups = damon_sysfs_ul_range_groups,
114};
115
116/*
117 * schemes/stats directory
118 */
119
120struct damon_sysfs_stats {
121 struct kobject kobj;
122 unsigned long nr_tried;
123 unsigned long sz_tried;
124 unsigned long nr_applied;
125 unsigned long sz_applied;
126 unsigned long qt_exceeds;
127};
128
129static struct damon_sysfs_stats *damon_sysfs_stats_alloc(void)
130{
131 return kzalloc(sizeof(struct damon_sysfs_stats), GFP_KERNEL);
132}
133
134static ssize_t nr_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
135 char *buf)
136{
137 struct damon_sysfs_stats *stats = container_of(kobj,
138 struct damon_sysfs_stats, kobj);
139
140 return sysfs_emit(buf, "%lu\n", stats->nr_tried);
141}
142
143static ssize_t sz_tried_show(struct kobject *kobj, struct kobj_attribute *attr,
144 char *buf)
145{
146 struct damon_sysfs_stats *stats = container_of(kobj,
147 struct damon_sysfs_stats, kobj);
148
149 return sysfs_emit(buf, "%lu\n", stats->sz_tried);
150}
151
152static ssize_t nr_applied_show(struct kobject *kobj,
153 struct kobj_attribute *attr, char *buf)
154{
155 struct damon_sysfs_stats *stats = container_of(kobj,
156 struct damon_sysfs_stats, kobj);
157
158 return sysfs_emit(buf, "%lu\n", stats->nr_applied);
159}
160
161static ssize_t sz_applied_show(struct kobject *kobj,
162 struct kobj_attribute *attr, char *buf)
163{
164 struct damon_sysfs_stats *stats = container_of(kobj,
165 struct damon_sysfs_stats, kobj);
166
167 return sysfs_emit(buf, "%lu\n", stats->sz_applied);
168}
169
170static ssize_t qt_exceeds_show(struct kobject *kobj,
171 struct kobj_attribute *attr, char *buf)
172{
173 struct damon_sysfs_stats *stats = container_of(kobj,
174 struct damon_sysfs_stats, kobj);
175
176 return sysfs_emit(buf, "%lu\n", stats->qt_exceeds);
177}
178
179static void damon_sysfs_stats_release(struct kobject *kobj)
180{
181 kfree(container_of(kobj, struct damon_sysfs_stats, kobj));
182}
183
184static struct kobj_attribute damon_sysfs_stats_nr_tried_attr =
185 __ATTR_RO_MODE(nr_tried, 0400);
186
187static struct kobj_attribute damon_sysfs_stats_sz_tried_attr =
188 __ATTR_RO_MODE(sz_tried, 0400);
189
190static struct kobj_attribute damon_sysfs_stats_nr_applied_attr =
191 __ATTR_RO_MODE(nr_applied, 0400);
192
193static struct kobj_attribute damon_sysfs_stats_sz_applied_attr =
194 __ATTR_RO_MODE(sz_applied, 0400);
195
196static struct kobj_attribute damon_sysfs_stats_qt_exceeds_attr =
197 __ATTR_RO_MODE(qt_exceeds, 0400);
198
199static struct attribute *damon_sysfs_stats_attrs[] = {
200 &damon_sysfs_stats_nr_tried_attr.attr,
201 &damon_sysfs_stats_sz_tried_attr.attr,
202 &damon_sysfs_stats_nr_applied_attr.attr,
203 &damon_sysfs_stats_sz_applied_attr.attr,
204 &damon_sysfs_stats_qt_exceeds_attr.attr,
205 NULL,
206};
207ATTRIBUTE_GROUPS(damon_sysfs_stats);
208
209static struct kobj_type damon_sysfs_stats_ktype = {
210 .release = damon_sysfs_stats_release,
211 .sysfs_ops = &kobj_sysfs_ops,
212 .default_groups = damon_sysfs_stats_groups,
213};
214
215/*
216 * watermarks directory
217 */
218
219struct damon_sysfs_watermarks {
220 struct kobject kobj;
221 enum damos_wmark_metric metric;
222 unsigned long interval_us;
223 unsigned long high;
224 unsigned long mid;
225 unsigned long low;
226};
227
228static struct damon_sysfs_watermarks *damon_sysfs_watermarks_alloc(
229 enum damos_wmark_metric metric, unsigned long interval_us,
230 unsigned long high, unsigned long mid, unsigned long low)
231{
232 struct damon_sysfs_watermarks *watermarks = kmalloc(
233 sizeof(*watermarks), GFP_KERNEL);
234
235 if (!watermarks)
236 return NULL;
237 watermarks->kobj = (struct kobject){};
238 watermarks->metric = metric;
239 watermarks->interval_us = interval_us;
240 watermarks->high = high;
241 watermarks->mid = mid;
242 watermarks->low = low;
243 return watermarks;
244}
245
246/* Should match with enum damos_wmark_metric */
247static const char * const damon_sysfs_wmark_metric_strs[] = {
248 "none",
249 "free_mem_rate",
250};
251
252static ssize_t metric_show(struct kobject *kobj, struct kobj_attribute *attr,
253 char *buf)
254{
255 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
256 struct damon_sysfs_watermarks, kobj);
257
258 return sysfs_emit(buf, "%s\n",
259 damon_sysfs_wmark_metric_strs[watermarks->metric]);
260}
261
262static ssize_t metric_store(struct kobject *kobj, struct kobj_attribute *attr,
263 const char *buf, size_t count)
264{
265 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
266 struct damon_sysfs_watermarks, kobj);
267 enum damos_wmark_metric metric;
268
269 for (metric = 0; metric < NR_DAMOS_WMARK_METRICS; metric++) {
270 if (sysfs_streq(buf, damon_sysfs_wmark_metric_strs[metric])) {
271 watermarks->metric = metric;
272 return count;
273 }
274 }
275 return -EINVAL;
276}
277
278static ssize_t interval_us_show(struct kobject *kobj,
279 struct kobj_attribute *attr, char *buf)
280{
281 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
282 struct damon_sysfs_watermarks, kobj);
283
284 return sysfs_emit(buf, "%lu\n", watermarks->interval_us);
285}
286
287static ssize_t interval_us_store(struct kobject *kobj,
288 struct kobj_attribute *attr, const char *buf, size_t count)
289{
290 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
291 struct damon_sysfs_watermarks, kobj);
292 int err = kstrtoul(buf, 0, &watermarks->interval_us);
293
294 if (err)
295 return -EINVAL;
296 return count;
297}
298
299static ssize_t high_show(struct kobject *kobj,
300 struct kobj_attribute *attr, char *buf)
301{
302 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
303 struct damon_sysfs_watermarks, kobj);
304
305 return sysfs_emit(buf, "%lu\n", watermarks->high);
306}
307
308static ssize_t high_store(struct kobject *kobj,
309 struct kobj_attribute *attr, const char *buf, size_t count)
310{
311 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
312 struct damon_sysfs_watermarks, kobj);
313 int err = kstrtoul(buf, 0, &watermarks->high);
314
315 if (err)
316 return -EINVAL;
317 return count;
318}
319
320static ssize_t mid_show(struct kobject *kobj,
321 struct kobj_attribute *attr, char *buf)
322{
323 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
324 struct damon_sysfs_watermarks, kobj);
325
326 return sysfs_emit(buf, "%lu\n", watermarks->mid);
327}
328
329static ssize_t mid_store(struct kobject *kobj,
330 struct kobj_attribute *attr, const char *buf, size_t count)
331{
332 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
333 struct damon_sysfs_watermarks, kobj);
334 int err = kstrtoul(buf, 0, &watermarks->mid);
335
336 if (err)
337 return -EINVAL;
338 return count;
339}
340
341static ssize_t low_show(struct kobject *kobj,
342 struct kobj_attribute *attr, char *buf)
343{
344 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
345 struct damon_sysfs_watermarks, kobj);
346
347 return sysfs_emit(buf, "%lu\n", watermarks->low);
348}
349
350static ssize_t low_store(struct kobject *kobj,
351 struct kobj_attribute *attr, const char *buf, size_t count)
352{
353 struct damon_sysfs_watermarks *watermarks = container_of(kobj,
354 struct damon_sysfs_watermarks, kobj);
355 int err = kstrtoul(buf, 0, &watermarks->low);
356
357 if (err)
358 return -EINVAL;
359 return count;
360}
361
362static void damon_sysfs_watermarks_release(struct kobject *kobj)
363{
364 kfree(container_of(kobj, struct damon_sysfs_watermarks, kobj));
365}
366
367static struct kobj_attribute damon_sysfs_watermarks_metric_attr =
368 __ATTR_RW_MODE(metric, 0600);
369
370static struct kobj_attribute damon_sysfs_watermarks_interval_us_attr =
371 __ATTR_RW_MODE(interval_us, 0600);
372
373static struct kobj_attribute damon_sysfs_watermarks_high_attr =
374 __ATTR_RW_MODE(high, 0600);
375
376static struct kobj_attribute damon_sysfs_watermarks_mid_attr =
377 __ATTR_RW_MODE(mid, 0600);
378
379static struct kobj_attribute damon_sysfs_watermarks_low_attr =
380 __ATTR_RW_MODE(low, 0600);
381
382static struct attribute *damon_sysfs_watermarks_attrs[] = {
383 &damon_sysfs_watermarks_metric_attr.attr,
384 &damon_sysfs_watermarks_interval_us_attr.attr,
385 &damon_sysfs_watermarks_high_attr.attr,
386 &damon_sysfs_watermarks_mid_attr.attr,
387 &damon_sysfs_watermarks_low_attr.attr,
388 NULL,
389};
390ATTRIBUTE_GROUPS(damon_sysfs_watermarks);
391
392static struct kobj_type damon_sysfs_watermarks_ktype = {
393 .release = damon_sysfs_watermarks_release,
394 .sysfs_ops = &kobj_sysfs_ops,
395 .default_groups = damon_sysfs_watermarks_groups,
396};
397
398/*
399 * scheme/weights directory
400 */
401
402struct damon_sysfs_weights {
403 struct kobject kobj;
404 unsigned int sz;
405 unsigned int nr_accesses;
406 unsigned int age;
407};
408
409static struct damon_sysfs_weights *damon_sysfs_weights_alloc(unsigned int sz,
410 unsigned int nr_accesses, unsigned int age)
411{
412 struct damon_sysfs_weights *weights = kmalloc(sizeof(*weights),
413 GFP_KERNEL);
414
415 if (!weights)
416 return NULL;
417 weights->kobj = (struct kobject){};
418 weights->sz = sz;
419 weights->nr_accesses = nr_accesses;
420 weights->age = age;
421 return weights;
422}
423
424static ssize_t sz_permil_show(struct kobject *kobj,
425 struct kobj_attribute *attr, char *buf)
426{
427 struct damon_sysfs_weights *weights = container_of(kobj,
428 struct damon_sysfs_weights, kobj);
429
430 return sysfs_emit(buf, "%u\n", weights->sz);
431}
432
433static ssize_t sz_permil_store(struct kobject *kobj,
434 struct kobj_attribute *attr, const char *buf, size_t count)
435{
436 struct damon_sysfs_weights *weights = container_of(kobj,
437 struct damon_sysfs_weights, kobj);
438 int err = kstrtouint(buf, 0, &weights->sz);
439
440 if (err)
441 return -EINVAL;
442 return count;
443}
444
445static ssize_t nr_accesses_permil_show(struct kobject *kobj,
446 struct kobj_attribute *attr, char *buf)
447{
448 struct damon_sysfs_weights *weights = container_of(kobj,
449 struct damon_sysfs_weights, kobj);
450
451 return sysfs_emit(buf, "%u\n", weights->nr_accesses);
452}
453
454static ssize_t nr_accesses_permil_store(struct kobject *kobj,
455 struct kobj_attribute *attr, const char *buf, size_t count)
456{
457 struct damon_sysfs_weights *weights = container_of(kobj,
458 struct damon_sysfs_weights, kobj);
459 int err = kstrtouint(buf, 0, &weights->nr_accesses);
460
461 if (err)
462 return -EINVAL;
463 return count;
464}
465
466static ssize_t age_permil_show(struct kobject *kobj,
467 struct kobj_attribute *attr, char *buf)
468{
469 struct damon_sysfs_weights *weights = container_of(kobj,
470 struct damon_sysfs_weights, kobj);
471
472 return sysfs_emit(buf, "%u\n", weights->age);
473}
474
475static ssize_t age_permil_store(struct kobject *kobj,
476 struct kobj_attribute *attr, const char *buf, size_t count)
477{
478 struct damon_sysfs_weights *weights = container_of(kobj,
479 struct damon_sysfs_weights, kobj);
480 int err = kstrtouint(buf, 0, &weights->age);
481
482 if (err)
483 return -EINVAL;
484 return count;
485}
486
487static void damon_sysfs_weights_release(struct kobject *kobj)
488{
489 kfree(container_of(kobj, struct damon_sysfs_weights, kobj));
490}
491
492static struct kobj_attribute damon_sysfs_weights_sz_attr =
493 __ATTR_RW_MODE(sz_permil, 0600);
494
495static struct kobj_attribute damon_sysfs_weights_nr_accesses_attr =
496 __ATTR_RW_MODE(nr_accesses_permil, 0600);
497
498static struct kobj_attribute damon_sysfs_weights_age_attr =
499 __ATTR_RW_MODE(age_permil, 0600);
500
501static struct attribute *damon_sysfs_weights_attrs[] = {
502 &damon_sysfs_weights_sz_attr.attr,
503 &damon_sysfs_weights_nr_accesses_attr.attr,
504 &damon_sysfs_weights_age_attr.attr,
505 NULL,
506};
507ATTRIBUTE_GROUPS(damon_sysfs_weights);
508
509static struct kobj_type damon_sysfs_weights_ktype = {
510 .release = damon_sysfs_weights_release,
511 .sysfs_ops = &kobj_sysfs_ops,
512 .default_groups = damon_sysfs_weights_groups,
513};
514
515/*
516 * quotas directory
517 */
518
519struct damon_sysfs_quotas {
520 struct kobject kobj;
521 struct damon_sysfs_weights *weights;
522 unsigned long ms;
523 unsigned long sz;
524 unsigned long reset_interval_ms;
525};
526
527static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void)
528{
529 return kzalloc(sizeof(struct damon_sysfs_quotas), GFP_KERNEL);
530}
531
532static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas)
533{
534 struct damon_sysfs_weights *weights;
535 int err;
536
537 weights = damon_sysfs_weights_alloc(0, 0, 0);
538 if (!weights)
539 return -ENOMEM;
540
541 err = kobject_init_and_add(&weights->kobj, &damon_sysfs_weights_ktype,
542 &quotas->kobj, "weights");
543 if (err)
544 kobject_put(&weights->kobj);
545 else
546 quotas->weights = weights;
547 return err;
548}
549
550static void damon_sysfs_quotas_rm_dirs(struct damon_sysfs_quotas *quotas)
551{
552 kobject_put(&quotas->weights->kobj);
553}
554
555static ssize_t ms_show(struct kobject *kobj, struct kobj_attribute *attr,
556 char *buf)
557{
558 struct damon_sysfs_quotas *quotas = container_of(kobj,
559 struct damon_sysfs_quotas, kobj);
560
561 return sysfs_emit(buf, "%lu\n", quotas->ms);
562}
563
564static ssize_t ms_store(struct kobject *kobj, struct kobj_attribute *attr,
565 const char *buf, size_t count)
566{
567 struct damon_sysfs_quotas *quotas = container_of(kobj,
568 struct damon_sysfs_quotas, kobj);
569 int err = kstrtoul(buf, 0, &quotas->ms);
570
571 if (err)
572 return -EINVAL;
573 return count;
574}
575
576static ssize_t bytes_show(struct kobject *kobj, struct kobj_attribute *attr,
577 char *buf)
578{
579 struct damon_sysfs_quotas *quotas = container_of(kobj,
580 struct damon_sysfs_quotas, kobj);
581
582 return sysfs_emit(buf, "%lu\n", quotas->sz);
583}
584
585static ssize_t bytes_store(struct kobject *kobj,
586 struct kobj_attribute *attr, const char *buf, size_t count)
587{
588 struct damon_sysfs_quotas *quotas = container_of(kobj,
589 struct damon_sysfs_quotas, kobj);
590 int err = kstrtoul(buf, 0, &quotas->sz);
591
592 if (err)
593 return -EINVAL;
594 return count;
595}
596
597static ssize_t reset_interval_ms_show(struct kobject *kobj,
598 struct kobj_attribute *attr, char *buf)
599{
600 struct damon_sysfs_quotas *quotas = container_of(kobj,
601 struct damon_sysfs_quotas, kobj);
602
603 return sysfs_emit(buf, "%lu\n", quotas->reset_interval_ms);
604}
605
606static ssize_t reset_interval_ms_store(struct kobject *kobj,
607 struct kobj_attribute *attr, const char *buf, size_t count)
608{
609 struct damon_sysfs_quotas *quotas = container_of(kobj,
610 struct damon_sysfs_quotas, kobj);
611 int err = kstrtoul(buf, 0, &quotas->reset_interval_ms);
612
613 if (err)
614 return -EINVAL;
615 return count;
616}
617
618static void damon_sysfs_quotas_release(struct kobject *kobj)
619{
620 kfree(container_of(kobj, struct damon_sysfs_quotas, kobj));
621}
622
623static struct kobj_attribute damon_sysfs_quotas_ms_attr =
624 __ATTR_RW_MODE(ms, 0600);
625
626static struct kobj_attribute damon_sysfs_quotas_sz_attr =
627 __ATTR_RW_MODE(bytes, 0600);
628
629static struct kobj_attribute damon_sysfs_quotas_reset_interval_ms_attr =
630 __ATTR_RW_MODE(reset_interval_ms, 0600);
631
632static struct attribute *damon_sysfs_quotas_attrs[] = {
633 &damon_sysfs_quotas_ms_attr.attr,
634 &damon_sysfs_quotas_sz_attr.attr,
635 &damon_sysfs_quotas_reset_interval_ms_attr.attr,
636 NULL,
637};
638ATTRIBUTE_GROUPS(damon_sysfs_quotas);
639
640static struct kobj_type damon_sysfs_quotas_ktype = {
641 .release = damon_sysfs_quotas_release,
642 .sysfs_ops = &kobj_sysfs_ops,
643 .default_groups = damon_sysfs_quotas_groups,
644};
645
646/*
647 * access_pattern directory
648 */
649
650struct damon_sysfs_access_pattern {
651 struct kobject kobj;
652 struct damon_sysfs_ul_range *sz;
653 struct damon_sysfs_ul_range *nr_accesses;
654 struct damon_sysfs_ul_range *age;
655};
656
657static
658struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void)
659{
660 struct damon_sysfs_access_pattern *access_pattern =
661 kmalloc(sizeof(*access_pattern), GFP_KERNEL);
662
663 if (!access_pattern)
664 return NULL;
665 access_pattern->kobj = (struct kobject){};
666 return access_pattern;
667}
668
669static int damon_sysfs_access_pattern_add_range_dir(
670 struct damon_sysfs_access_pattern *access_pattern,
671 struct damon_sysfs_ul_range **range_dir_ptr,
672 char *name)
673{
674 struct damon_sysfs_ul_range *range = damon_sysfs_ul_range_alloc(0, 0);
675 int err;
676
677 if (!range)
678 return -ENOMEM;
679 err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype,
680 &access_pattern->kobj, name);
681 if (err)
682 kobject_put(&range->kobj);
683 else
684 *range_dir_ptr = range;
685 return err;
686}
687
688static int damon_sysfs_access_pattern_add_dirs(
689 struct damon_sysfs_access_pattern *access_pattern)
690{
691 int err;
692
693 err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
694 &access_pattern->sz, "sz");
695 if (err)
696 goto put_sz_out;
697
698 err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
699 &access_pattern->nr_accesses, "nr_accesses");
700 if (err)
701 goto put_nr_accesses_sz_out;
702
703 err = damon_sysfs_access_pattern_add_range_dir(access_pattern,
704 &access_pattern->age, "age");
705 if (err)
706 goto put_age_nr_accesses_sz_out;
707 return 0;
708
709put_age_nr_accesses_sz_out:
710 kobject_put(&access_pattern->age->kobj);
711 access_pattern->age = NULL;
712put_nr_accesses_sz_out:
713 kobject_put(&access_pattern->nr_accesses->kobj);
714 access_pattern->nr_accesses = NULL;
715put_sz_out:
716 kobject_put(&access_pattern->sz->kobj);
717 access_pattern->sz = NULL;
718 return err;
719}
720
721static void damon_sysfs_access_pattern_rm_dirs(
722 struct damon_sysfs_access_pattern *access_pattern)
723{
724 kobject_put(&access_pattern->sz->kobj);
725 kobject_put(&access_pattern->nr_accesses->kobj);
726 kobject_put(&access_pattern->age->kobj);
727}
728
729static void damon_sysfs_access_pattern_release(struct kobject *kobj)
730{
731 kfree(container_of(kobj, struct damon_sysfs_access_pattern, kobj));
732}
733
734static struct attribute *damon_sysfs_access_pattern_attrs[] = {
735 NULL,
736};
737ATTRIBUTE_GROUPS(damon_sysfs_access_pattern);
738
739static struct kobj_type damon_sysfs_access_pattern_ktype = {
740 .release = damon_sysfs_access_pattern_release,
741 .sysfs_ops = &kobj_sysfs_ops,
742 .default_groups = damon_sysfs_access_pattern_groups,
743};
744
745/*
746 * scheme directory
747 */
748
749struct damon_sysfs_scheme {
750 struct kobject kobj;
751 enum damos_action action;
752 struct damon_sysfs_access_pattern *access_pattern;
753 struct damon_sysfs_quotas *quotas;
754 struct damon_sysfs_watermarks *watermarks;
755 struct damon_sysfs_stats *stats;
756};
757
758/* This should match with enum damos_action */
759static const char * const damon_sysfs_damos_action_strs[] = {
760 "willneed",
761 "cold",
762 "pageout",
763 "hugepage",
764 "nohugepage",
765 "lru_prio",
766 "lru_deprio",
767 "stat",
768};
769
770static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(
771 enum damos_action action)
772{
773 struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme),
774 GFP_KERNEL);
775
776 if (!scheme)
777 return NULL;
778 scheme->kobj = (struct kobject){};
779 scheme->action = action;
780 return scheme;
781}
782
783static int damon_sysfs_scheme_set_access_pattern(
784 struct damon_sysfs_scheme *scheme)
785{
786 struct damon_sysfs_access_pattern *access_pattern;
787 int err;
788
789 access_pattern = damon_sysfs_access_pattern_alloc();
790 if (!access_pattern)
791 return -ENOMEM;
792 err = kobject_init_and_add(&access_pattern->kobj,
793 &damon_sysfs_access_pattern_ktype, &scheme->kobj,
794 "access_pattern");
795 if (err)
796 goto out;
797 err = damon_sysfs_access_pattern_add_dirs(access_pattern);
798 if (err)
799 goto out;
800 scheme->access_pattern = access_pattern;
801 return 0;
802
803out:
804 kobject_put(&access_pattern->kobj);
805 return err;
806}
807
808static int damon_sysfs_scheme_set_quotas(struct damon_sysfs_scheme *scheme)
809{
810 struct damon_sysfs_quotas *quotas = damon_sysfs_quotas_alloc();
811 int err;
812
813 if (!quotas)
814 return -ENOMEM;
815 err = kobject_init_and_add(&quotas->kobj, &damon_sysfs_quotas_ktype,
816 &scheme->kobj, "quotas");
817 if (err)
818 goto out;
819 err = damon_sysfs_quotas_add_dirs(quotas);
820 if (err)
821 goto out;
822 scheme->quotas = quotas;
823 return 0;
824
825out:
826 kobject_put(&quotas->kobj);
827 return err;
828}
829
830static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme)
831{
832 struct damon_sysfs_watermarks *watermarks =
833 damon_sysfs_watermarks_alloc(DAMOS_WMARK_NONE, 0, 0, 0, 0);
834 int err;
835
836 if (!watermarks)
837 return -ENOMEM;
838 err = kobject_init_and_add(&watermarks->kobj,
839 &damon_sysfs_watermarks_ktype, &scheme->kobj,
840 "watermarks");
841 if (err)
842 kobject_put(&watermarks->kobj);
843 else
844 scheme->watermarks = watermarks;
845 return err;
846}
847
848static int damon_sysfs_scheme_set_stats(struct damon_sysfs_scheme *scheme)
849{
850 struct damon_sysfs_stats *stats = damon_sysfs_stats_alloc();
851 int err;
852
853 if (!stats)
854 return -ENOMEM;
855 err = kobject_init_and_add(&stats->kobj, &damon_sysfs_stats_ktype,
856 &scheme->kobj, "stats");
857 if (err)
858 kobject_put(&stats->kobj);
859 else
860 scheme->stats = stats;
861 return err;
862}
863
864static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
865{
866 int err;
867
868 err = damon_sysfs_scheme_set_access_pattern(scheme);
869 if (err)
870 return err;
871 err = damon_sysfs_scheme_set_quotas(scheme);
872 if (err)
873 goto put_access_pattern_out;
874 err = damon_sysfs_scheme_set_watermarks(scheme);
875 if (err)
876 goto put_quotas_access_pattern_out;
877 err = damon_sysfs_scheme_set_stats(scheme);
878 if (err)
879 goto put_watermarks_quotas_access_pattern_out;
880 return 0;
881
882put_watermarks_quotas_access_pattern_out:
883 kobject_put(&scheme->watermarks->kobj);
884 scheme->watermarks = NULL;
885put_quotas_access_pattern_out:
886 kobject_put(&scheme->quotas->kobj);
887 scheme->quotas = NULL;
888put_access_pattern_out:
889 kobject_put(&scheme->access_pattern->kobj);
890 scheme->access_pattern = NULL;
891 return err;
892}
893
894static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme)
895{
896 damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern);
897 kobject_put(&scheme->access_pattern->kobj);
898 damon_sysfs_quotas_rm_dirs(scheme->quotas);
899 kobject_put(&scheme->quotas->kobj);
900 kobject_put(&scheme->watermarks->kobj);
901 kobject_put(&scheme->stats->kobj);
902}
903
904static ssize_t action_show(struct kobject *kobj, struct kobj_attribute *attr,
905 char *buf)
906{
907 struct damon_sysfs_scheme *scheme = container_of(kobj,
908 struct damon_sysfs_scheme, kobj);
909
910 return sysfs_emit(buf, "%s\n",
911 damon_sysfs_damos_action_strs[scheme->action]);
912}
913
914static ssize_t action_store(struct kobject *kobj, struct kobj_attribute *attr,
915 const char *buf, size_t count)
916{
917 struct damon_sysfs_scheme *scheme = container_of(kobj,
918 struct damon_sysfs_scheme, kobj);
919 enum damos_action action;
920
921 for (action = 0; action < NR_DAMOS_ACTIONS; action++) {
922 if (sysfs_streq(buf, damon_sysfs_damos_action_strs[action])) {
923 scheme->action = action;
924 return count;
925 }
926 }
927 return -EINVAL;
928}
929
930static void damon_sysfs_scheme_release(struct kobject *kobj)
931{
932 kfree(container_of(kobj, struct damon_sysfs_scheme, kobj));
933}
934
935static struct kobj_attribute damon_sysfs_scheme_action_attr =
936 __ATTR_RW_MODE(action, 0600);
937
938static struct attribute *damon_sysfs_scheme_attrs[] = {
939 &damon_sysfs_scheme_action_attr.attr,
940 NULL,
941};
942ATTRIBUTE_GROUPS(damon_sysfs_scheme);
943
944static struct kobj_type damon_sysfs_scheme_ktype = {
945 .release = damon_sysfs_scheme_release,
946 .sysfs_ops = &kobj_sysfs_ops,
947 .default_groups = damon_sysfs_scheme_groups,
948};
949
950/*
951 * schemes directory
952 */
953
954struct damon_sysfs_schemes {
955 struct kobject kobj;
956 struct damon_sysfs_scheme **schemes_arr;
957 int nr;
958};
959
960static struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void)
961{
962 return kzalloc(sizeof(struct damon_sysfs_schemes), GFP_KERNEL);
963}
964
965static void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes)
966{
967 struct damon_sysfs_scheme **schemes_arr = schemes->schemes_arr;
968 int i;
969
970 for (i = 0; i < schemes->nr; i++) {
971 damon_sysfs_scheme_rm_dirs(schemes_arr[i]);
972 kobject_put(&schemes_arr[i]->kobj);
973 }
974 schemes->nr = 0;
975 kfree(schemes_arr);
976 schemes->schemes_arr = NULL;
977}
978
979static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes,
980 int nr_schemes)
981{
982 struct damon_sysfs_scheme **schemes_arr, *scheme;
983 int err, i;
984
985 damon_sysfs_schemes_rm_dirs(schemes);
986 if (!nr_schemes)
987 return 0;
988
989 schemes_arr = kmalloc_array(nr_schemes, sizeof(*schemes_arr),
990 GFP_KERNEL | __GFP_NOWARN);
991 if (!schemes_arr)
992 return -ENOMEM;
993 schemes->schemes_arr = schemes_arr;
994
995 for (i = 0; i < nr_schemes; i++) {
996 scheme = damon_sysfs_scheme_alloc(DAMOS_STAT);
997 if (!scheme) {
998 damon_sysfs_schemes_rm_dirs(schemes);
999 return -ENOMEM;
1000 }
1001
1002 err = kobject_init_and_add(&scheme->kobj,
1003 &damon_sysfs_scheme_ktype, &schemes->kobj,
1004 "%d", i);
1005 if (err)
1006 goto out;
1007 err = damon_sysfs_scheme_add_dirs(scheme);
1008 if (err)
1009 goto out;
1010
1011 schemes_arr[i] = scheme;
1012 schemes->nr++;
1013 }
1014 return 0;
1015
1016out:
1017 damon_sysfs_schemes_rm_dirs(schemes);
1018 kobject_put(&scheme->kobj);
1019 return err;
1020}
1021
1022static ssize_t nr_schemes_show(struct kobject *kobj,
1023 struct kobj_attribute *attr, char *buf)
1024{
1025 struct damon_sysfs_schemes *schemes = container_of(kobj,
1026 struct damon_sysfs_schemes, kobj);
1027
1028 return sysfs_emit(buf, "%d\n", schemes->nr);
1029}
1030
1031static ssize_t nr_schemes_store(struct kobject *kobj,
1032 struct kobj_attribute *attr, const char *buf, size_t count)
1033{
1034 struct damon_sysfs_schemes *schemes = container_of(kobj,
1035 struct damon_sysfs_schemes, kobj);
1036 int nr, err = kstrtoint(buf, 0, &nr);
1037
1038 if (err)
1039 return err;
1040 if (nr < 0)
1041 return -EINVAL;
1042
1043 if (!mutex_trylock(&damon_sysfs_lock))
1044 return -EBUSY;
1045 err = damon_sysfs_schemes_add_dirs(schemes, nr);
1046 mutex_unlock(&damon_sysfs_lock);
1047 if (err)
1048 return err;
1049 return count;
1050}
1051
1052static void damon_sysfs_schemes_release(struct kobject *kobj)
1053{
1054 kfree(container_of(kobj, struct damon_sysfs_schemes, kobj));
1055}
1056
1057static struct kobj_attribute damon_sysfs_schemes_nr_attr =
1058 __ATTR_RW_MODE(nr_schemes, 0600);
1059
1060static struct attribute *damon_sysfs_schemes_attrs[] = {
1061 &damon_sysfs_schemes_nr_attr.attr,
1062 NULL,
1063};
1064ATTRIBUTE_GROUPS(damon_sysfs_schemes);
1065
1066static struct kobj_type damon_sysfs_schemes_ktype = {
1067 .release = damon_sysfs_schemes_release,
1068 .sysfs_ops = &kobj_sysfs_ops,
1069 .default_groups = damon_sysfs_schemes_groups,
1070};
1071
1072/*
1073 * init region directory
1074 */
1075
1076struct damon_sysfs_region {
1077 struct kobject kobj;
1078 unsigned long start;
1079 unsigned long end;
1080};
1081
1082static struct damon_sysfs_region *damon_sysfs_region_alloc(
1083 unsigned long start,
1084 unsigned long end)
1085{
1086 struct damon_sysfs_region *region = kmalloc(sizeof(*region),
1087 GFP_KERNEL);
1088
1089 if (!region)
1090 return NULL;
1091 region->kobj = (struct kobject){};
1092 region->start = start;
1093 region->end = end;
1094 return region;
1095}
1096
1097static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr,
1098 char *buf)
1099{
1100 struct damon_sysfs_region *region = container_of(kobj,
1101 struct damon_sysfs_region, kobj);
1102
1103 return sysfs_emit(buf, "%lu\n", region->start);
1104}
1105
1106static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
1107 const char *buf, size_t count)
1108{
1109 struct damon_sysfs_region *region = container_of(kobj,
1110 struct damon_sysfs_region, kobj);
1111 int err = kstrtoul(buf, 0, &region->start);
1112
1113 if (err)
1114 return -EINVAL;
1115 return count;
1116}
1117
1118static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
1119 char *buf)
1120{
1121 struct damon_sysfs_region *region = container_of(kobj,
1122 struct damon_sysfs_region, kobj);
1123
1124 return sysfs_emit(buf, "%lu\n", region->end);
1125}
1126
1127static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
1128 const char *buf, size_t count)
1129{
1130 struct damon_sysfs_region *region = container_of(kobj,
1131 struct damon_sysfs_region, kobj);
1132 int err = kstrtoul(buf, 0, &region->end);
1133
1134 if (err)
1135 return -EINVAL;
1136 return count;
1137}
1138
1139static void damon_sysfs_region_release(struct kobject *kobj)
1140{
1141 kfree(container_of(kobj, struct damon_sysfs_region, kobj));
1142}
1143
1144static struct kobj_attribute damon_sysfs_region_start_attr =
1145 __ATTR_RW_MODE(start, 0600);
1146
1147static struct kobj_attribute damon_sysfs_region_end_attr =
1148 __ATTR_RW_MODE(end, 0600);
1149
1150static struct attribute *damon_sysfs_region_attrs[] = {
1151 &damon_sysfs_region_start_attr.attr,
1152 &damon_sysfs_region_end_attr.attr,
1153 NULL,
1154};
1155ATTRIBUTE_GROUPS(damon_sysfs_region);
1156
1157static struct kobj_type damon_sysfs_region_ktype = {
1158 .release = damon_sysfs_region_release,
1159 .sysfs_ops = &kobj_sysfs_ops,
1160 .default_groups = damon_sysfs_region_groups,
1161};
1162
1163/*
1164 * init_regions directory
1165 */
1166
1167struct damon_sysfs_regions {
1168 struct kobject kobj;
1169 struct damon_sysfs_region **regions_arr;
1170 int nr;
1171};
1172
1173static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void)
1174{
1175 return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL);
1176}
1177
1178static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions)
1179{
1180 struct damon_sysfs_region **regions_arr = regions->regions_arr;
1181 int i;
1182
1183 for (i = 0; i < regions->nr; i++)
1184 kobject_put(&regions_arr[i]->kobj);
1185 regions->nr = 0;
1186 kfree(regions_arr);
1187 regions->regions_arr = NULL;
1188}
1189
1190static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions,
1191 int nr_regions)
1192{
1193 struct damon_sysfs_region **regions_arr, *region;
1194 int err, i;
1195
1196 damon_sysfs_regions_rm_dirs(regions);
1197 if (!nr_regions)
1198 return 0;
1199
1200 regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr),
1201 GFP_KERNEL | __GFP_NOWARN);
1202 if (!regions_arr)
1203 return -ENOMEM;
1204 regions->regions_arr = regions_arr;
1205
1206 for (i = 0; i < nr_regions; i++) {
1207 region = damon_sysfs_region_alloc(0, 0);
1208 if (!region) {
1209 damon_sysfs_regions_rm_dirs(regions);
1210 return -ENOMEM;
1211 }
1212
1213 err = kobject_init_and_add(&region->kobj,
1214 &damon_sysfs_region_ktype, &regions->kobj,
1215 "%d", i);
1216 if (err) {
1217 kobject_put(&region->kobj);
1218 damon_sysfs_regions_rm_dirs(regions);
1219 return err;
1220 }
1221
1222 regions_arr[i] = region;
1223 regions->nr++;
1224 }
1225 return 0;
1226}
1227
1228static ssize_t nr_regions_show(struct kobject *kobj,
1229 struct kobj_attribute *attr, char *buf)
1230{
1231 struct damon_sysfs_regions *regions = container_of(kobj,
1232 struct damon_sysfs_regions, kobj);
1233
1234 return sysfs_emit(buf, "%d\n", regions->nr);
1235}
1236
1237static ssize_t nr_regions_store(struct kobject *kobj,
1238 struct kobj_attribute *attr, const char *buf, size_t count)
1239{
1240 struct damon_sysfs_regions *regions = container_of(kobj,
1241 struct damon_sysfs_regions, kobj);
1242 int nr, err = kstrtoint(buf, 0, &nr);
1243
1244 if (err)
1245 return err;
1246 if (nr < 0)
1247 return -EINVAL;
1248
1249 if (!mutex_trylock(&damon_sysfs_lock))
1250 return -EBUSY;
1251 err = damon_sysfs_regions_add_dirs(regions, nr);
1252 mutex_unlock(&damon_sysfs_lock);
1253 if (err)
1254 return err;
1255
1256 return count;
1257}
1258
1259static void damon_sysfs_regions_release(struct kobject *kobj)
1260{
1261 kfree(container_of(kobj, struct damon_sysfs_regions, kobj));
1262}
1263
1264static struct kobj_attribute damon_sysfs_regions_nr_attr =
1265 __ATTR_RW_MODE(nr_regions, 0600);
1266
1267static struct attribute *damon_sysfs_regions_attrs[] = {
1268 &damon_sysfs_regions_nr_attr.attr,
1269 NULL,
1270};
1271ATTRIBUTE_GROUPS(damon_sysfs_regions);
1272
1273static struct kobj_type damon_sysfs_regions_ktype = {
1274 .release = damon_sysfs_regions_release,
1275 .sysfs_ops = &kobj_sysfs_ops,
1276 .default_groups = damon_sysfs_regions_groups,
1277};
1278
1279/*
1280 * target directory
1281 */
1282
1283struct damon_sysfs_target {
1284 struct kobject kobj;
1285 struct damon_sysfs_regions *regions;
1286 int pid;
1287};
1288
1289static struct damon_sysfs_target *damon_sysfs_target_alloc(void)
1290{
1291 return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL);
1292}
1293
1294static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target)
1295{
1296 struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc();
1297 int err;
1298
1299 if (!regions)
1300 return -ENOMEM;
1301
1302 err = kobject_init_and_add(&regions->kobj, &damon_sysfs_regions_ktype,
1303 &target->kobj, "regions");
1304 if (err)
1305 kobject_put(&regions->kobj);
1306 else
1307 target->regions = regions;
1308 return err;
1309}
1310
1311static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target)
1312{
1313 damon_sysfs_regions_rm_dirs(target->regions);
1314 kobject_put(&target->regions->kobj);
1315}
1316
1317static ssize_t pid_target_show(struct kobject *kobj,
1318 struct kobj_attribute *attr, char *buf)
1319{
1320 struct damon_sysfs_target *target = container_of(kobj,
1321 struct damon_sysfs_target, kobj);
1322
1323 return sysfs_emit(buf, "%d\n", target->pid);
1324}
1325
1326static ssize_t pid_target_store(struct kobject *kobj,
1327 struct kobj_attribute *attr, const char *buf, size_t count)
1328{
1329 struct damon_sysfs_target *target = container_of(kobj,
1330 struct damon_sysfs_target, kobj);
1331 int err = kstrtoint(buf, 0, &target->pid);
1332
1333 if (err)
1334 return -EINVAL;
1335 return count;
1336}
1337
1338static void damon_sysfs_target_release(struct kobject *kobj)
1339{
1340 kfree(container_of(kobj, struct damon_sysfs_target, kobj));
1341}
1342
1343static struct kobj_attribute damon_sysfs_target_pid_attr =
1344 __ATTR_RW_MODE(pid_target, 0600);
1345
1346static struct attribute *damon_sysfs_target_attrs[] = {
1347 &damon_sysfs_target_pid_attr.attr,
1348 NULL,
1349};
1350ATTRIBUTE_GROUPS(damon_sysfs_target);
1351
1352static struct kobj_type damon_sysfs_target_ktype = {
1353 .release = damon_sysfs_target_release,
1354 .sysfs_ops = &kobj_sysfs_ops,
1355 .default_groups = damon_sysfs_target_groups,
1356};
1357
1358/*
1359 * targets directory
1360 */
1361
1362struct damon_sysfs_targets {
1363 struct kobject kobj;
1364 struct damon_sysfs_target **targets_arr;
1365 int nr;
1366};
1367
1368static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void)
1369{
1370 return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL);
1371}
1372
1373static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets)
1374{
1375 struct damon_sysfs_target **targets_arr = targets->targets_arr;
1376 int i;
1377
1378 for (i = 0; i < targets->nr; i++) {
1379 damon_sysfs_target_rm_dirs(targets_arr[i]);
1380 kobject_put(&targets_arr[i]->kobj);
1381 }
1382 targets->nr = 0;
1383 kfree(targets_arr);
1384 targets->targets_arr = NULL;
1385}
1386
1387static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets,
1388 int nr_targets)
1389{
1390 struct damon_sysfs_target **targets_arr, *target;
1391 int err, i;
1392
1393 damon_sysfs_targets_rm_dirs(targets);
1394 if (!nr_targets)
1395 return 0;
1396
1397 targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr),
1398 GFP_KERNEL | __GFP_NOWARN);
1399 if (!targets_arr)
1400 return -ENOMEM;
1401 targets->targets_arr = targets_arr;
1402
1403 for (i = 0; i < nr_targets; i++) {
1404 target = damon_sysfs_target_alloc();
1405 if (!target) {
1406 damon_sysfs_targets_rm_dirs(targets);
1407 return -ENOMEM;
1408 }
1409
1410 err = kobject_init_and_add(&target->kobj,
1411 &damon_sysfs_target_ktype, &targets->kobj,
1412 "%d", i);
1413 if (err)
1414 goto out;
1415
1416 err = damon_sysfs_target_add_dirs(target);
1417 if (err)
1418 goto out;
1419
1420 targets_arr[i] = target;
1421 targets->nr++;
1422 }
1423 return 0;
1424
1425out:
1426 damon_sysfs_targets_rm_dirs(targets);
1427 kobject_put(&target->kobj);
1428 return err;
1429}
1430
1431static ssize_t nr_targets_show(struct kobject *kobj,
1432 struct kobj_attribute *attr, char *buf)
1433{
1434 struct damon_sysfs_targets *targets = container_of(kobj,
1435 struct damon_sysfs_targets, kobj);
1436
1437 return sysfs_emit(buf, "%d\n", targets->nr);
1438}
1439
1440static ssize_t nr_targets_store(struct kobject *kobj,
1441 struct kobj_attribute *attr, const char *buf, size_t count)
1442{
1443 struct damon_sysfs_targets *targets = container_of(kobj,
1444 struct damon_sysfs_targets, kobj);
1445 int nr, err = kstrtoint(buf, 0, &nr);
1446
1447 if (err)
1448 return err;
1449 if (nr < 0)
1450 return -EINVAL;
1451
1452 if (!mutex_trylock(&damon_sysfs_lock))
1453 return -EBUSY;
1454 err = damon_sysfs_targets_add_dirs(targets, nr);
1455 mutex_unlock(&damon_sysfs_lock);
1456 if (err)
1457 return err;
1458
1459 return count;
1460}
1461
1462static void damon_sysfs_targets_release(struct kobject *kobj)
1463{
1464 kfree(container_of(kobj, struct damon_sysfs_targets, kobj));
1465}
1466
1467static struct kobj_attribute damon_sysfs_targets_nr_attr =
1468 __ATTR_RW_MODE(nr_targets, 0600);
1469
1470static struct attribute *damon_sysfs_targets_attrs[] = {
1471 &damon_sysfs_targets_nr_attr.attr,
1472 NULL,
1473};
1474ATTRIBUTE_GROUPS(damon_sysfs_targets);
1475
1476static struct kobj_type damon_sysfs_targets_ktype = {
1477 .release = damon_sysfs_targets_release,
1478 .sysfs_ops = &kobj_sysfs_ops,
1479 .default_groups = damon_sysfs_targets_groups,
1480};
1481
1482/*
1483 * intervals directory
1484 */
1485
1486struct damon_sysfs_intervals {
1487 struct kobject kobj;
1488 unsigned long sample_us;
1489 unsigned long aggr_us;
1490 unsigned long update_us;
1491};
1492
1493static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc(
1494 unsigned long sample_us, unsigned long aggr_us,
1495 unsigned long update_us)
1496{
1497 struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals),
1498 GFP_KERNEL);
1499
1500 if (!intervals)
1501 return NULL;
1502
1503 intervals->kobj = (struct kobject){};
1504 intervals->sample_us = sample_us;
1505 intervals->aggr_us = aggr_us;
1506 intervals->update_us = update_us;
1507 return intervals;
1508}
1509
1510static ssize_t sample_us_show(struct kobject *kobj,
1511 struct kobj_attribute *attr, char *buf)
1512{
1513 struct damon_sysfs_intervals *intervals = container_of(kobj,
1514 struct damon_sysfs_intervals, kobj);
1515
1516 return sysfs_emit(buf, "%lu\n", intervals->sample_us);
1517}
1518
1519static ssize_t sample_us_store(struct kobject *kobj,
1520 struct kobj_attribute *attr, const char *buf, size_t count)
1521{
1522 struct damon_sysfs_intervals *intervals = container_of(kobj,
1523 struct damon_sysfs_intervals, kobj);
1524 unsigned long us;
1525 int err = kstrtoul(buf, 0, &us);
1526
1527 if (err)
1528 return -EINVAL;
1529
1530 intervals->sample_us = us;
1531 return count;
1532}
1533
1534static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr,
1535 char *buf)
1536{
1537 struct damon_sysfs_intervals *intervals = container_of(kobj,
1538 struct damon_sysfs_intervals, kobj);
1539
1540 return sysfs_emit(buf, "%lu\n", intervals->aggr_us);
1541}
1542
1543static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
1544 const char *buf, size_t count)
1545{
1546 struct damon_sysfs_intervals *intervals = container_of(kobj,
1547 struct damon_sysfs_intervals, kobj);
1548 unsigned long us;
1549 int err = kstrtoul(buf, 0, &us);
1550
1551 if (err)
1552 return -EINVAL;
1553
1554 intervals->aggr_us = us;
1555 return count;
1556}
1557
1558static ssize_t update_us_show(struct kobject *kobj,
1559 struct kobj_attribute *attr, char *buf)
1560{
1561 struct damon_sysfs_intervals *intervals = container_of(kobj,
1562 struct damon_sysfs_intervals, kobj);
1563
1564 return sysfs_emit(buf, "%lu\n", intervals->update_us);
1565}
1566
1567static ssize_t update_us_store(struct kobject *kobj,
1568 struct kobj_attribute *attr, const char *buf, size_t count)
1569{
1570 struct damon_sysfs_intervals *intervals = container_of(kobj,
1571 struct damon_sysfs_intervals, kobj);
1572 unsigned long us;
1573 int err = kstrtoul(buf, 0, &us);
1574
1575 if (err)
1576 return -EINVAL;
1577
1578 intervals->update_us = us;
1579 return count;
1580}
1581
1582static void damon_sysfs_intervals_release(struct kobject *kobj)
1583{
1584 kfree(container_of(kobj, struct damon_sysfs_intervals, kobj));
1585}
1586
1587static struct kobj_attribute damon_sysfs_intervals_sample_us_attr =
1588 __ATTR_RW_MODE(sample_us, 0600);
1589
1590static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr =
1591 __ATTR_RW_MODE(aggr_us, 0600);
1592
1593static struct kobj_attribute damon_sysfs_intervals_update_us_attr =
1594 __ATTR_RW_MODE(update_us, 0600);
1595
1596static struct attribute *damon_sysfs_intervals_attrs[] = {
1597 &damon_sysfs_intervals_sample_us_attr.attr,
1598 &damon_sysfs_intervals_aggr_us_attr.attr,
1599 &damon_sysfs_intervals_update_us_attr.attr,
1600 NULL,
1601};
1602ATTRIBUTE_GROUPS(damon_sysfs_intervals);
1603
1604static struct kobj_type damon_sysfs_intervals_ktype = {
1605 .release = damon_sysfs_intervals_release,
1606 .sysfs_ops = &kobj_sysfs_ops,
1607 .default_groups = damon_sysfs_intervals_groups,
1608};
1609
1610/*
1611 * monitoring_attrs directory
1612 */
1613
1614struct damon_sysfs_attrs {
1615 struct kobject kobj;
1616 struct damon_sysfs_intervals *intervals;
1617 struct damon_sysfs_ul_range *nr_regions_range;
1618};
1619
1620static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void)
1621{
1622 struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL);
1623
1624 if (!attrs)
1625 return NULL;
1626 attrs->kobj = (struct kobject){};
1627 return attrs;
1628}
1629
1630static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs)
1631{
1632 struct damon_sysfs_intervals *intervals;
1633 struct damon_sysfs_ul_range *nr_regions_range;
1634 int err;
1635
1636 intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000);
1637 if (!intervals)
1638 return -ENOMEM;
1639
1640 err = kobject_init_and_add(&intervals->kobj,
1641 &damon_sysfs_intervals_ktype, &attrs->kobj,
1642 "intervals");
1643 if (err)
1644 goto put_intervals_out;
1645 attrs->intervals = intervals;
1646
1647 nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000);
1648 if (!nr_regions_range) {
1649 err = -ENOMEM;
1650 goto put_intervals_out;
1651 }
1652
1653 err = kobject_init_and_add(&nr_regions_range->kobj,
1654 &damon_sysfs_ul_range_ktype, &attrs->kobj,
1655 "nr_regions");
1656 if (err)
1657 goto put_nr_regions_intervals_out;
1658 attrs->nr_regions_range = nr_regions_range;
1659 return 0;
1660
1661put_nr_regions_intervals_out:
1662 kobject_put(&nr_regions_range->kobj);
1663 attrs->nr_regions_range = NULL;
1664put_intervals_out:
1665 kobject_put(&intervals->kobj);
1666 attrs->intervals = NULL;
1667 return err;
1668}
1669
1670static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs)
1671{
1672 kobject_put(&attrs->nr_regions_range->kobj);
1673 kobject_put(&attrs->intervals->kobj);
1674}
1675
1676static void damon_sysfs_attrs_release(struct kobject *kobj)
1677{
1678 kfree(container_of(kobj, struct damon_sysfs_attrs, kobj));
1679}
1680
1681static struct attribute *damon_sysfs_attrs_attrs[] = {
1682 NULL,
1683};
1684ATTRIBUTE_GROUPS(damon_sysfs_attrs);
1685
1686static struct kobj_type damon_sysfs_attrs_ktype = {
1687 .release = damon_sysfs_attrs_release,
1688 .sysfs_ops = &kobj_sysfs_ops,
1689 .default_groups = damon_sysfs_attrs_groups,
1690};
1691
1692/*
1693 * context directory
1694 */
1695
1696/* This should match with enum damon_ops_id */
1697static const char * const damon_sysfs_ops_strs[] = {
1698 "vaddr",
1699 "fvaddr",
1700 "paddr",
1701};
1702
1703struct damon_sysfs_context {
1704 struct kobject kobj;
1705 enum damon_ops_id ops_id;
1706 struct damon_sysfs_attrs *attrs;
1707 struct damon_sysfs_targets *targets;
1708 struct damon_sysfs_schemes *schemes;
1709};
1710
1711static struct damon_sysfs_context *damon_sysfs_context_alloc(
1712 enum damon_ops_id ops_id)
1713{
1714 struct damon_sysfs_context *context = kmalloc(sizeof(*context),
1715 GFP_KERNEL);
1716
1717 if (!context)
1718 return NULL;
1719 context->kobj = (struct kobject){};
1720 context->ops_id = ops_id;
1721 return context;
1722}
1723
1724static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context)
1725{
1726 struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc();
1727 int err;
1728
1729 if (!attrs)
1730 return -ENOMEM;
1731 err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype,
1732 &context->kobj, "monitoring_attrs");
1733 if (err)
1734 goto out;
1735 err = damon_sysfs_attrs_add_dirs(attrs);
1736 if (err)
1737 goto out;
1738 context->attrs = attrs;
1739 return 0;
1740
1741out:
1742 kobject_put(&attrs->kobj);
1743 return err;
1744}
1745
1746static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context)
1747{
1748 struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc();
1749 int err;
1750
1751 if (!targets)
1752 return -ENOMEM;
1753 err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype,
1754 &context->kobj, "targets");
1755 if (err) {
1756 kobject_put(&targets->kobj);
1757 return err;
1758 }
1759 context->targets = targets;
1760 return 0;
1761}
1762
1763static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context)
1764{
1765 struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc();
1766 int err;
1767
1768 if (!schemes)
1769 return -ENOMEM;
1770 err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype,
1771 &context->kobj, "schemes");
1772 if (err) {
1773 kobject_put(&schemes->kobj);
1774 return err;
1775 }
1776 context->schemes = schemes;
1777 return 0;
1778}
1779
1780static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context)
1781{
1782 int err;
1783
1784 err = damon_sysfs_context_set_attrs(context);
1785 if (err)
1786 return err;
1787
1788 err = damon_sysfs_context_set_targets(context);
1789 if (err)
1790 goto put_attrs_out;
1791
1792 err = damon_sysfs_context_set_schemes(context);
1793 if (err)
1794 goto put_targets_attrs_out;
1795 return 0;
1796
1797put_targets_attrs_out:
1798 kobject_put(&context->targets->kobj);
1799 context->targets = NULL;
1800put_attrs_out:
1801 kobject_put(&context->attrs->kobj);
1802 context->attrs = NULL;
1803 return err;
1804}
1805
1806static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context)
1807{
1808 damon_sysfs_attrs_rm_dirs(context->attrs);
1809 kobject_put(&context->attrs->kobj);
1810 damon_sysfs_targets_rm_dirs(context->targets);
1811 kobject_put(&context->targets->kobj);
1812 damon_sysfs_schemes_rm_dirs(context->schemes);
1813 kobject_put(&context->schemes->kobj);
1814}
1815
1816static ssize_t avail_operations_show(struct kobject *kobj,
1817 struct kobj_attribute *attr, char *buf)
1818{
1819 enum damon_ops_id id;
1820 int len = 0;
1821
1822 for (id = 0; id < NR_DAMON_OPS; id++) {
1823 if (!damon_is_registered_ops(id))
1824 continue;
1825 len += sysfs_emit_at(buf, len, "%s\n",
1826 damon_sysfs_ops_strs[id]);
1827 }
1828 return len;
1829}
1830
1831static ssize_t operations_show(struct kobject *kobj,
1832 struct kobj_attribute *attr, char *buf)
1833{
1834 struct damon_sysfs_context *context = container_of(kobj,
1835 struct damon_sysfs_context, kobj);
1836
1837 return sysfs_emit(buf, "%s\n", damon_sysfs_ops_strs[context->ops_id]);
1838}
1839
1840static ssize_t operations_store(struct kobject *kobj,
1841 struct kobj_attribute *attr, const char *buf, size_t count)
1842{
1843 struct damon_sysfs_context *context = container_of(kobj,
1844 struct damon_sysfs_context, kobj);
1845 enum damon_ops_id id;
1846
1847 for (id = 0; id < NR_DAMON_OPS; id++) {
1848 if (sysfs_streq(buf, damon_sysfs_ops_strs[id])) {
1849 context->ops_id = id;
1850 return count;
1851 }
1852 }
1853 return -EINVAL;
1854}
1855
1856static void damon_sysfs_context_release(struct kobject *kobj)
1857{
1858 kfree(container_of(kobj, struct damon_sysfs_context, kobj));
1859}
1860
1861static struct kobj_attribute damon_sysfs_context_avail_operations_attr =
1862 __ATTR_RO_MODE(avail_operations, 0400);
1863
1864static struct kobj_attribute damon_sysfs_context_operations_attr =
1865 __ATTR_RW_MODE(operations, 0600);
1866
1867static struct attribute *damon_sysfs_context_attrs[] = {
1868 &damon_sysfs_context_avail_operations_attr.attr,
1869 &damon_sysfs_context_operations_attr.attr,
1870 NULL,
1871};
1872ATTRIBUTE_GROUPS(damon_sysfs_context);
1873
1874static struct kobj_type damon_sysfs_context_ktype = {
1875 .release = damon_sysfs_context_release,
1876 .sysfs_ops = &kobj_sysfs_ops,
1877 .default_groups = damon_sysfs_context_groups,
1878};
1879
1880/*
1881 * contexts directory
1882 */
1883
1884struct damon_sysfs_contexts {
1885 struct kobject kobj;
1886 struct damon_sysfs_context **contexts_arr;
1887 int nr;
1888};
1889
1890static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void)
1891{
1892 return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL);
1893}
1894
1895static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts)
1896{
1897 struct damon_sysfs_context **contexts_arr = contexts->contexts_arr;
1898 int i;
1899
1900 for (i = 0; i < contexts->nr; i++) {
1901 damon_sysfs_context_rm_dirs(contexts_arr[i]);
1902 kobject_put(&contexts_arr[i]->kobj);
1903 }
1904 contexts->nr = 0;
1905 kfree(contexts_arr);
1906 contexts->contexts_arr = NULL;
1907}
1908
1909static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts,
1910 int nr_contexts)
1911{
1912 struct damon_sysfs_context **contexts_arr, *context;
1913 int err, i;
1914
1915 damon_sysfs_contexts_rm_dirs(contexts);
1916 if (!nr_contexts)
1917 return 0;
1918
1919 contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr),
1920 GFP_KERNEL | __GFP_NOWARN);
1921 if (!contexts_arr)
1922 return -ENOMEM;
1923 contexts->contexts_arr = contexts_arr;
1924
1925 for (i = 0; i < nr_contexts; i++) {
1926 context = damon_sysfs_context_alloc(DAMON_OPS_VADDR);
1927 if (!context) {
1928 damon_sysfs_contexts_rm_dirs(contexts);
1929 return -ENOMEM;
1930 }
1931
1932 err = kobject_init_and_add(&context->kobj,
1933 &damon_sysfs_context_ktype, &contexts->kobj,
1934 "%d", i);
1935 if (err)
1936 goto out;
1937
1938 err = damon_sysfs_context_add_dirs(context);
1939 if (err)
1940 goto out;
1941
1942 contexts_arr[i] = context;
1943 contexts->nr++;
1944 }
1945 return 0;
1946
1947out:
1948 damon_sysfs_contexts_rm_dirs(contexts);
1949 kobject_put(&context->kobj);
1950 return err;
1951}
1952
1953static ssize_t nr_contexts_show(struct kobject *kobj,
1954 struct kobj_attribute *attr, char *buf)
1955{
1956 struct damon_sysfs_contexts *contexts = container_of(kobj,
1957 struct damon_sysfs_contexts, kobj);
1958
1959 return sysfs_emit(buf, "%d\n", contexts->nr);
1960}
1961
1962static ssize_t nr_contexts_store(struct kobject *kobj,
1963 struct kobj_attribute *attr, const char *buf, size_t count)
1964{
1965 struct damon_sysfs_contexts *contexts = container_of(kobj,
1966 struct damon_sysfs_contexts, kobj);
1967 int nr, err;
1968
1969 err = kstrtoint(buf, 0, &nr);
1970 if (err)
1971 return err;
1972 /* TODO: support multiple contexts per kdamond */
1973 if (nr < 0 || 1 < nr)
1974 return -EINVAL;
1975
1976 if (!mutex_trylock(&damon_sysfs_lock))
1977 return -EBUSY;
1978 err = damon_sysfs_contexts_add_dirs(contexts, nr);
1979 mutex_unlock(&damon_sysfs_lock);
1980 if (err)
1981 return err;
1982
1983 return count;
1984}
1985
1986static void damon_sysfs_contexts_release(struct kobject *kobj)
1987{
1988 kfree(container_of(kobj, struct damon_sysfs_contexts, kobj));
1989}
1990
1991static struct kobj_attribute damon_sysfs_contexts_nr_attr
1992 = __ATTR_RW_MODE(nr_contexts, 0600);
1993
1994static struct attribute *damon_sysfs_contexts_attrs[] = {
1995 &damon_sysfs_contexts_nr_attr.attr,
1996 NULL,
1997};
1998ATTRIBUTE_GROUPS(damon_sysfs_contexts);
1999
2000static struct kobj_type damon_sysfs_contexts_ktype = {
2001 .release = damon_sysfs_contexts_release,
2002 .sysfs_ops = &kobj_sysfs_ops,
2003 .default_groups = damon_sysfs_contexts_groups,
2004};
2005
2006/*
2007 * kdamond directory
2008 */
2009
2010struct damon_sysfs_kdamond {
2011 struct kobject kobj;
2012 struct damon_sysfs_contexts *contexts;
2013 struct damon_ctx *damon_ctx;
2014};
2015
2016static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void)
2017{
2018 return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL);
2019}
2020
2021static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond)
2022{
2023 struct damon_sysfs_contexts *contexts;
2024 int err;
2025
2026 contexts = damon_sysfs_contexts_alloc();
2027 if (!contexts)
2028 return -ENOMEM;
2029
2030 err = kobject_init_and_add(&contexts->kobj,
2031 &damon_sysfs_contexts_ktype, &kdamond->kobj,
2032 "contexts");
2033 if (err) {
2034 kobject_put(&contexts->kobj);
2035 return err;
2036 }
2037 kdamond->contexts = contexts;
2038
2039 return err;
2040}
2041
2042static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond)
2043{
2044 damon_sysfs_contexts_rm_dirs(kdamond->contexts);
2045 kobject_put(&kdamond->contexts->kobj);
2046}
2047
2048static bool damon_sysfs_ctx_running(struct damon_ctx *ctx)
2049{
2050 bool running;
2051
2052 mutex_lock(&ctx->kdamond_lock);
2053 running = ctx->kdamond != NULL;
2054 mutex_unlock(&ctx->kdamond_lock);
2055 return running;
2056}
2057
2058/*
2059 * enum damon_sysfs_cmd - Commands for a specific kdamond.
2060 */
2061enum damon_sysfs_cmd {
2062 /* @DAMON_SYSFS_CMD_ON: Turn the kdamond on. */
2063 DAMON_SYSFS_CMD_ON,
2064 /* @DAMON_SYSFS_CMD_OFF: Turn the kdamond off. */
2065 DAMON_SYSFS_CMD_OFF,
2066 /* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */
2067 DAMON_SYSFS_CMD_COMMIT,
2068 /*
2069 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs
2070 * files.
2071 */
2072 DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS,
2073 /*
2074 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands.
2075 */
2076 NR_DAMON_SYSFS_CMDS,
2077};
2078
2079 /* Should match enum damon_sysfs_cmd */
2080static const char * const damon_sysfs_cmd_strs[] = {
2081 "on",
2082 "off",
2083 "commit",
2084 "update_schemes_stats",
2085};
2086
2087/*
2088 * struct damon_sysfs_cmd_request - A request to the DAMON callback.
2089 * @cmd: The command that needs to be handled by the callback.
2090 * @kdamond: The kobject wrapper that is associated with the kdamond thread.
2091 *
2092 * This structure represents a sysfs command request that needs to access some
2093 * DAMON context-internal data. Because DAMON context-internal data can be
2094 * safely accessed from DAMON callbacks without additional synchronization, the
2095 * request is handled by the DAMON callback. A non-``NULL`` @kdamond means
2096 * the request is valid.
2097 */
2098struct damon_sysfs_cmd_request {
2099 enum damon_sysfs_cmd cmd;
2100 struct damon_sysfs_kdamond *kdamond;
2101};
2102
2103/* Current DAMON callback request. Protected by damon_sysfs_lock. */
2104static struct damon_sysfs_cmd_request damon_sysfs_cmd_request;
2105
2106static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
2107 char *buf)
2108{
2109 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2110 struct damon_sysfs_kdamond, kobj);
2111 struct damon_ctx *ctx = kdamond->damon_ctx;
2112 bool running;
2113
2114 if (!ctx)
2115 running = false;
2116 else
2117 running = damon_sysfs_ctx_running(ctx);
2118
2119 return sysfs_emit(buf, "%s\n", running ?
2120 damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
2121 damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
2122}
2123
2124static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
2125 struct damon_sysfs_attrs *sys_attrs)
2126{
2127 struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
2128 struct damon_sysfs_ul_range *sys_nr_regions =
2129 sys_attrs->nr_regions_range;
2130
2131 return damon_set_attrs(ctx, sys_intervals->sample_us,
2132 sys_intervals->aggr_us, sys_intervals->update_us,
2133 sys_nr_regions->min, sys_nr_regions->max);
2134}
2135
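/*
 * Destroy every monitoring target of @ctx, dropping the pid reference of each
 * target if the monitoring operations set identifies targets by pid.
 */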
2136static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
2137{
2138 struct damon_target *t, *next;
2139
2140 damon_for_each_target_safe(t, next, ctx) {
2141 if (damon_target_has_pid(ctx))
2142 put_pid(t->pid);
2143 damon_destroy_target(t);
2144 }
2145}
2146
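/*
 * Convert the sysfs regions of @sysfs_regions into a damon_addr_range array
 * and apply it to @t via damon_set_regions().  The regions must be sorted by
 * address and must not overlap; otherwise -EINVAL is returned.
 */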
2147static int damon_sysfs_set_regions(struct damon_target *t,
2148 struct damon_sysfs_regions *sysfs_regions)
2149{
2150 struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr,
2151 sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN);
2152 int i, err = -EINVAL;
2153
2154 if (!ranges)
2155 return -ENOMEM;
2156 for (i = 0; i < sysfs_regions->nr; i++) {
2157 struct damon_sysfs_region *sys_region =
2158 sysfs_regions->regions_arr[i];
2159
2160 if (sys_region->start > sys_region->end)
2161 goto out;
2162
2163 ranges[i].start = sys_region->start;
2164 ranges[i].end = sys_region->end;
2165 if (i == 0)
2166 continue;
2167 if (ranges[i - 1].end > ranges[i].start)
2168 goto out;
2169 }
2170 err = damon_set_regions(t, ranges, sysfs_regions->nr);
2171out:
2172 kfree(ranges);
2173 return err;
2174
2175}
2176
2177static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target,
2178 struct damon_ctx *ctx)
2179{
2180 struct damon_target *t = damon_new_target();
2181 int err = -EINVAL;
2182
2183 if (!t)
2184 return -ENOMEM;
2185 if (damon_target_has_pid(ctx)) {
2186 t->pid = find_get_pid(sys_target->pid);
2187 if (!t->pid)
2188 goto destroy_targets_out;
2189 }
2190 damon_add_target(ctx, t);
2191 err = damon_sysfs_set_regions(t, sys_target->regions);
2192 if (err)
2193 goto destroy_targets_out;
2194 return 0;
2195
2196destroy_targets_out:
2197 damon_sysfs_destroy_targets(ctx);
2198 return err;
2199}
2200
2201/*
2202 * Search a target in a context that corresponds to the sysfs target input.
2203 *
2204 * Return: pointer to the target if found, NULL if not found, or an
2205 * ERR_PTR()-encoded negative error code if the search failed.
2206 */
2207static struct damon_target *damon_sysfs_existing_target(
2208 struct damon_sysfs_target *sys_target, struct damon_ctx *ctx)
2209{
2210 struct pid *pid;
2211 struct damon_target *t;
2212
2213 if (!damon_target_has_pid(ctx)) {
2214 /* At most one target can exist for paddr */
2215 damon_for_each_target(t, ctx)
2216 return t;
2217 return NULL;
2218 }
2219
2220 /* ops.id should be DAMON_OPS_VADDR or DAMON_OPS_FVADDR */
2221 pid = find_get_pid(sys_target->pid);
2222 if (!pid)
2223 return ERR_PTR(-EINVAL);
2224 damon_for_each_target(t, ctx) {
2225 if (t->pid == pid) {
2226 put_pid(pid);
2227 return t;
2228 }
2229 }
2230 put_pid(pid);
2231 return NULL;
2232}
2233
2234static int damon_sysfs_set_targets(struct damon_ctx *ctx,
2235 struct damon_sysfs_targets *sysfs_targets)
2236{
2237 int i, err;
2238
2239 /* Multiple physical address space monitoring targets make no sense */
2240 if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
2241 return -EINVAL;
2242
a61ea561 2243 for (i = 0; i < sysfs_targets->nr; i++) {
2244 struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];
2245 struct damon_target *t = damon_sysfs_existing_target(st, ctx);
2246
2247 if (IS_ERR(t))
2248 return PTR_ERR(t);
2249 if (!t)
2250 err = damon_sysfs_add_target(st, ctx);
2251 else
2252 err = damon_sysfs_set_regions(t, st->regions);
2253 if (err)
2254 return err;
2255 }
2256 return 0;
2257}
2258
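/*
 * Build a 'struct damos' scheme from the user inputs under a sysfs scheme
 * directory (access_pattern, quotas, watermarks, and action).
 */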
2259static struct damos *damon_sysfs_mk_scheme(
2260 struct damon_sysfs_scheme *sysfs_scheme)
2261{
2262 struct damon_sysfs_access_pattern *access_pattern =
2263 sysfs_scheme->access_pattern;
2264 struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
2265 struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
2266 struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
2267
2268 struct damos_access_pattern pattern = {
2269 .min_sz_region = access_pattern->sz->min,
2270 .max_sz_region = access_pattern->sz->max,
2271 .min_nr_accesses = access_pattern->nr_accesses->min,
2272 .max_nr_accesses = access_pattern->nr_accesses->max,
2273 .min_age_region = access_pattern->age->min,
2274 .max_age_region = access_pattern->age->max,
2275 };
2276 struct damos_quota quota = {
2277 .ms = sysfs_quotas->ms,
2278 .sz = sysfs_quotas->sz,
2279 .reset_interval = sysfs_quotas->reset_interval_ms,
2280 .weight_sz = sysfs_weights->sz,
2281 .weight_nr_accesses = sysfs_weights->nr_accesses,
2282 .weight_age = sysfs_weights->age,
2283 };
2284 struct damos_watermarks wmarks = {
2285 .metric = sysfs_wmarks->metric,
2286 .interval = sysfs_wmarks->interval_us,
2287 .high = sysfs_wmarks->high,
2288 .mid = sysfs_wmarks->mid,
2289 .low = sysfs_wmarks->low,
2290 };
2291
2292 return damon_new_scheme(&pattern, sysfs_scheme->action, &quota,
2293 &wmarks);
2294}
2295
2296static int damon_sysfs_set_schemes(struct damon_ctx *ctx,
2297 struct damon_sysfs_schemes *sysfs_schemes)
2298{
2299 int i;
2300
2301 for (i = 0; i < sysfs_schemes->nr; i++) {
2302 struct damos *scheme, *next;
2303
2304 scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[i]);
2305 if (!scheme) {
2306 damon_for_each_scheme_safe(scheme, next, ctx)
2307 damon_destroy_scheme(scheme);
2308 return -ENOMEM;
2309 }
2310 damon_add_scheme(ctx, scheme);
2311 }
2312 return 0;
2313}
2314
2315static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
2316{
2317 struct damon_target *t, *next;
2318
2319 if (!damon_target_has_pid(ctx))
2320 return;
2321
2322 mutex_lock(&ctx->kdamond_lock);
2323 damon_for_each_target_safe(t, next, ctx) {
2324 put_pid(t->pid);
2325 damon_destroy_target(t);
2326 }
2327 mutex_unlock(&ctx->kdamond_lock);
2328}
2329
2330/*
2331 * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
2332 * @kdamond: The kobject wrapper that is associated with the kdamond thread.
2333 *
2334 * This function reads the schemes stats of a specific kdamond and updates the
2335 * related values for the sysfs files. This function should be called from
2336 * DAMON callbacks while holding ``damon_sysfs_lock``, to safely access the
2337 * DAMON context-internal data and DAMON sysfs variables.
2338 */
2339static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *kdamond)
2340{
2341 struct damon_ctx *ctx = kdamond->damon_ctx;
2342 struct damon_sysfs_schemes *sysfs_schemes;
2343 struct damos *scheme;
2344 int schemes_idx = 0;
2345
2346 if (!ctx)
2347 return -EINVAL;
2348 sysfs_schemes = kdamond->contexts->contexts_arr[0]->schemes;
2349 damon_for_each_scheme(scheme, ctx) {
2350 struct damon_sysfs_stats *sysfs_stats;
2351
2352 sysfs_stats = sysfs_schemes->schemes_arr[schemes_idx++]->stats;
2353 sysfs_stats->nr_tried = scheme->stat.nr_tried;
2354 sysfs_stats->sz_tried = scheme->stat.sz_tried;
2355 sysfs_stats->nr_applied = scheme->stat.nr_applied;
2356 sysfs_stats->sz_applied = scheme->stat.sz_applied;
2357 sysfs_stats->qt_exceeds = scheme->stat.qt_exceeds;
2358 }
2359 return 0;
2360}
2361
2362static inline bool damon_sysfs_kdamond_running(
2363 struct damon_sysfs_kdamond *kdamond)
2364{
2365 return kdamond->damon_ctx &&
2366 damon_sysfs_ctx_running(kdamond->damon_ctx);
2367}
2368
2369static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
2370 struct damon_sysfs_context *sys_ctx)
2371{
2372 int err;
2373
2374 err = damon_select_ops(ctx, sys_ctx->ops_id);
2375 if (err)
2376 return err;
2377 err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
2378 if (err)
2379 return err;
2380 err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
2381 if (err)
2382 return err;
2383 return damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
2384}
2385
2386/*
2387 * damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
2388 * @kdamond: The kobject wrapper for the associated kdamond.
2389 *
2390 * If the sysfs input is wrong, the kdamond will be terminated.
2391 */
2392static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
2393{
2394 if (!damon_sysfs_kdamond_running(kdamond))
2395 return -EINVAL;
2396 /* TODO: Support multiple contexts per kdamond */
2397 if (kdamond->contexts->nr != 1)
2398 return -EINVAL;
2399
2400 return damon_sysfs_apply_inputs(kdamond->damon_ctx,
2401 kdamond->contexts->contexts_arr[0]);
2402}
2403
2404/*
2405 * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
2406 * @c: The DAMON context of the callback.
2407 *
2408 * This function is periodically called back from the kdamond thread for @c.
2409 * Then, it checks if there is a waiting DAMON sysfs request and handles it.
2410 */
2411static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
2412{
2413 struct damon_sysfs_kdamond *kdamond;
2414 int err = 0;
2415
2416 /* avoid deadlock due to concurrent state_store('off') */
2417 if (!mutex_trylock(&damon_sysfs_lock))
2418 return 0;
2419 kdamond = damon_sysfs_cmd_request.kdamond;
2420 if (!kdamond || kdamond->damon_ctx != c)
2421 goto out;
2422 switch (damon_sysfs_cmd_request.cmd) {
2423 case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS:
2424 err = damon_sysfs_upd_schemes_stats(kdamond);
2425 break;
2426 case DAMON_SYSFS_CMD_COMMIT:
2427 err = damon_sysfs_commit_input(kdamond);
2428 break;
2429 default:
2430 break;
2431 }
2432 /* Mark the request as invalid now. */
2433 damon_sysfs_cmd_request.kdamond = NULL;
2434out:
2435 mutex_unlock(&damon_sysfs_lock);
2436 return err;
2437}
2438
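/*
 * Allocate a DAMON context, apply the sysfs inputs of @sys_ctx to it, and
 * install the sysfs callbacks.  Returns an ERR_PTR() on failure.
 */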
2439static struct damon_ctx *damon_sysfs_build_ctx(
2440 struct damon_sysfs_context *sys_ctx)
2441{
2442 struct damon_ctx *ctx = damon_new_ctx();
2443 int err;
2444
2445 if (!ctx)
2446 return ERR_PTR(-ENOMEM);
2447
2448 err = damon_sysfs_apply_inputs(ctx, sys_ctx);
2449 if (err) {
2450 damon_destroy_ctx(ctx);
2451 return ERR_PTR(err);
2452 }
2453
2454 ctx->callback.after_wmarks_check = damon_sysfs_cmd_request_callback;
2455 ctx->callback.after_aggregation = damon_sysfs_cmd_request_callback;
2456 ctx->callback.before_terminate = damon_sysfs_before_terminate;
2457 return ctx;
2458}
2459
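/*
 * Build a fresh DAMON context from the kdamond's first (and for now only)
 * context directory and start a kdamond thread for it via damon_start().
 */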
2460static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
2461{
2462 struct damon_ctx *ctx;
2463 int err;
2464
2465 if (kdamond->damon_ctx &&
2466 damon_sysfs_ctx_running(kdamond->damon_ctx))
2467 return -EBUSY;
2468 if (damon_sysfs_cmd_request.kdamond == kdamond)
2469 return -EBUSY;
2470 /* TODO: support multiple contexts per kdamond */
2471 if (kdamond->contexts->nr != 1)
2472 return -EINVAL;
2473
2474 if (kdamond->damon_ctx)
2475 damon_destroy_ctx(kdamond->damon_ctx);
2476 kdamond->damon_ctx = NULL;
2477
2478 ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
2479 if (IS_ERR(ctx))
2480 return PTR_ERR(ctx);
2481 err = damon_start(&ctx, 1, false);
2482 if (err) {
2483 damon_destroy_ctx(ctx);
2484 return err;
2485 }
2486 kdamond->damon_ctx = ctx;
2487 return err;
2488}
2489
2490static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
2491{
2492 if (!kdamond->damon_ctx)
2493 return -EINVAL;
2494 return damon_stop(&kdamond->damon_ctx, 1);
2495 /*
2496 * To allow users to read the final monitoring results of an already
2497 * turned-off DAMON, we free kdamond->damon_ctx in the next
2498 * damon_sysfs_turn_damon_on() or kdamonds_nr_store().
2499 */
2500}
2501
2502/*
2503 * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
2504 * @cmd: The command to handle.
2505 * @kdamond: The kobject wrapper for the associated kdamond.
2506 *
2507 * This function handles a DAMON sysfs command for a kdamond. For commands
2508 * that need to access running DAMON context-internal data, it requests the
2509 * DAMON callback (@damon_sysfs_cmd_request_callback()) to handle the
2510 * command and waits until the request is handled or the kdamond has
2511 * finished.
2512 *
2513 * Return: 0 on success, negative error code otherwise.
2514 */
2515static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
2516 struct damon_sysfs_kdamond *kdamond)
2517{
2518 bool need_wait = true;
2519
2520 /* Handle commands that don't access DAMON context-internal data */
2521 switch (cmd) {
2522 case DAMON_SYSFS_CMD_ON:
2523 return damon_sysfs_turn_damon_on(kdamond);
2524 case DAMON_SYSFS_CMD_OFF:
2525 return damon_sysfs_turn_damon_off(kdamond);
2526 default:
2527 break;
2528 }
2529
2530 /* Pass the command to the DAMON callback for safe DAMON context access */
2531 if (damon_sysfs_cmd_request.kdamond)
2532 return -EBUSY;
2533 if (!damon_sysfs_kdamond_running(kdamond))
2534 return -EINVAL;
2535 damon_sysfs_cmd_request.cmd = cmd;
2536 damon_sysfs_cmd_request.kdamond = kdamond;
2537
2538 /*
2539 * wait until damon_sysfs_cmd_request_callback() handles the request
2540 * from kdamond context
2541 */
2542 mutex_unlock(&damon_sysfs_lock);
2543 while (need_wait) {
2544 schedule_timeout_idle(msecs_to_jiffies(100));
2545 if (!mutex_trylock(&damon_sysfs_lock))
2546 continue;
2547 if (!damon_sysfs_cmd_request.kdamond) {
2548 /* damon_sysfs_cmd_request_callback() handled */
2549 need_wait = false;
2550 } else if (!damon_sysfs_kdamond_running(kdamond)) {
2551 /* kdamond has already finished */
2552 need_wait = false;
2553 damon_sysfs_cmd_request.kdamond = NULL;
2554 }
2555 mutex_unlock(&damon_sysfs_lock);
2556 }
2557 mutex_lock(&damon_sysfs_lock);
2558 return 0;
2559}
2560
2561static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
2562 const char *buf, size_t count)
2563{
2564 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2565 struct damon_sysfs_kdamond, kobj);
2566 enum damon_sysfs_cmd cmd;
2567 ssize_t ret = -EINVAL;
2568
2569 if (!mutex_trylock(&damon_sysfs_lock))
2570 return -EBUSY;
2571 for (cmd = 0; cmd < NR_DAMON_SYSFS_CMDS; cmd++) {
2572 if (sysfs_streq(buf, damon_sysfs_cmd_strs[cmd])) {
2573 ret = damon_sysfs_handle_cmd(cmd, kdamond);
2574 break;
2575 }
2576 }
2577 mutex_unlock(&damon_sysfs_lock);
2578 if (!ret)
2579 ret = count;
2580 return ret;
2581}
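/*
 * Example usage from user space (path assumes sysfs mounted at /sys):
 *     # echo on > /sys/kernel/mm/damon/admin/kdamonds/0/state
 *     # cat /sys/kernel/mm/damon/admin/kdamonds/0/state
 *     on
 * Valid inputs are the strings in damon_sysfs_cmd_strs[], e.g. 'commit' and
 * 'update_schemes_stats'.
 */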
2582
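/* Show the pid of the kdamond thread, or -1 if it is not running. */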
2583static ssize_t pid_show(struct kobject *kobj,
2584 struct kobj_attribute *attr, char *buf)
2585{
2586 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2587 struct damon_sysfs_kdamond, kobj);
2588 struct damon_ctx *ctx;
2589 int pid;
2590
2591 if (!mutex_trylock(&damon_sysfs_lock))
2592 return -EBUSY;
2593 ctx = kdamond->damon_ctx;
2594 if (!ctx) {
2595 pid = -1;
2596 goto out;
2597 }
2598 mutex_lock(&ctx->kdamond_lock);
2599 if (!ctx->kdamond)
2600 pid = -1;
2601 else
2602 pid = ctx->kdamond->pid;
2603 mutex_unlock(&ctx->kdamond_lock);
2604out:
2605 mutex_unlock(&damon_sysfs_lock);
2606 return sysfs_emit(buf, "%d\n", pid);
2607}
2608
2609static void damon_sysfs_kdamond_release(struct kobject *kobj)
2610{
2611 struct damon_sysfs_kdamond *kdamond = container_of(kobj,
2612 struct damon_sysfs_kdamond, kobj);
2613
2614 if (kdamond->damon_ctx)
2615 damon_destroy_ctx(kdamond->damon_ctx);
2616 kfree(kdamond);
2617}
2618
2619static struct kobj_attribute damon_sysfs_kdamond_state_attr =
2620 __ATTR_RW_MODE(state, 0600);
2621
2622static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
2623 __ATTR_RO_MODE(pid, 0400);
2624
2625static struct attribute *damon_sysfs_kdamond_attrs[] = {
2626 &damon_sysfs_kdamond_state_attr.attr,
2627 &damon_sysfs_kdamond_pid_attr.attr,
2628 NULL,
2629};
2630ATTRIBUTE_GROUPS(damon_sysfs_kdamond);
2631
2632static struct kobj_type damon_sysfs_kdamond_ktype = {
2633 .release = damon_sysfs_kdamond_release,
2634 .sysfs_ops = &kobj_sysfs_ops,
2635 .default_groups = damon_sysfs_kdamond_groups,
2636};
2637
2638/*
2639 * kdamonds directory
2640 */
2641
2642struct damon_sysfs_kdamonds {
2643 struct kobject kobj;
2644 struct damon_sysfs_kdamond **kdamonds_arr;
2645 int nr;
2646};
2647
2648static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
2649{
2650 return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
2651}
2652
2653static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
2654{
2655 struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
2656 int i;
2657
2658 for (i = 0; i < kdamonds->nr; i++) {
2659 damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
2660 kobject_put(&kdamonds_arr[i]->kobj);
2661 }
2662 kdamonds->nr = 0;
2663 kfree(kdamonds_arr);
2664 kdamonds->kdamonds_arr = NULL;
2665}
2666
2667 static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond **kdamonds,
2668 int nr_kdamonds)
2669{
2670 int i;
2671
2672 for (i = 0; i < nr_kdamonds; i++) {
2673 if (damon_sysfs_kdamond_running(kdamonds[i]) ||
2674 damon_sysfs_cmd_request.kdamond == kdamonds[i])
2675 return true;
2676 }
2677
2678 return false;
2679}
2680
2681static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
2682 int nr_kdamonds)
2683{
2684 struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
2685 int err, i;
2686
2687 if (damon_sysfs_kdamonds_busy(kdamonds->kdamonds_arr, kdamonds->nr))
2688 return -EBUSY;
2689
2690 damon_sysfs_kdamonds_rm_dirs(kdamonds);
2691 if (!nr_kdamonds)
2692 return 0;
2693
2694 kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
2695 GFP_KERNEL | __GFP_NOWARN);
2696 if (!kdamonds_arr)
2697 return -ENOMEM;
2698 kdamonds->kdamonds_arr = kdamonds_arr;
2699
2700 for (i = 0; i < nr_kdamonds; i++) {
2701 kdamond = damon_sysfs_kdamond_alloc();
2702 if (!kdamond) {
2703 damon_sysfs_kdamonds_rm_dirs(kdamonds);
2704 return -ENOMEM;
2705 }
2706
2707 err = kobject_init_and_add(&kdamond->kobj,
2708 &damon_sysfs_kdamond_ktype, &kdamonds->kobj,
2709 "%d", i);
2710 if (err)
2711 goto out;
2712
2713 err = damon_sysfs_kdamond_add_dirs(kdamond);
2714 if (err)
2715 goto out;
2716
2717 kdamonds_arr[i] = kdamond;
2718 kdamonds->nr++;
2719 }
2720 return 0;
2721
2722out:
2723 damon_sysfs_kdamonds_rm_dirs(kdamonds);
2724 kobject_put(&kdamond->kobj);
2725 return err;
2726}
2727
2728static ssize_t nr_kdamonds_show(struct kobject *kobj,
2729 struct kobj_attribute *attr, char *buf)
2730{
2731 struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2732 struct damon_sysfs_kdamonds, kobj);
2733
2734 return sysfs_emit(buf, "%d\n", kdamonds->nr);
2735}
2736
2737static ssize_t nr_kdamonds_store(struct kobject *kobj,
2738 struct kobj_attribute *attr, const char *buf, size_t count)
2739{
2740 struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
2741 struct damon_sysfs_kdamonds, kobj);
2742 int nr, err;
2743
2744 err = kstrtoint(buf, 0, &nr);
2745 if (err)
2746 return err;
2747 if (nr < 0)
2748 return -EINVAL;
2749
2750 if (!mutex_trylock(&damon_sysfs_lock))
2751 return -EBUSY;
2752 err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
2753 mutex_unlock(&damon_sysfs_lock);
2754 if (err)
2755 return err;
2756
2757 return count;
2758}
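/*
 * Example usage from user space (path assumes sysfs mounted at /sys):
 *     # echo 2 > /sys/kernel/mm/damon/admin/kdamonds/nr_kdamonds
 * creates the kdamond directories '0' and '1'.  The write fails with -EBUSY
 * if any existing kdamond is running or has a pending command request.
 */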
2759
2760static void damon_sysfs_kdamonds_release(struct kobject *kobj)
2761{
2762 kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
2763}
2764
2765static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
2766 __ATTR_RW_MODE(nr_kdamonds, 0600);
2767
2768static struct attribute *damon_sysfs_kdamonds_attrs[] = {
2769 &damon_sysfs_kdamonds_nr_attr.attr,
2770 NULL,
2771};
2772ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);
2773
2774static struct kobj_type damon_sysfs_kdamonds_ktype = {
2775 .release = damon_sysfs_kdamonds_release,
2776 .sysfs_ops = &kobj_sysfs_ops,
2777 .default_groups = damon_sysfs_kdamonds_groups,
2778};
2779
2780/*
2781 * damon user interface directory
2782 */
2783
2784struct damon_sysfs_ui_dir {
2785 struct kobject kobj;
2786 struct damon_sysfs_kdamonds *kdamonds;
2787};
2788
2789static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
2790{
2791 return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
2792}
2793
2794static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
2795{
2796 struct damon_sysfs_kdamonds *kdamonds;
2797 int err;
2798
2799 kdamonds = damon_sysfs_kdamonds_alloc();
2800 if (!kdamonds)
2801 return -ENOMEM;
2802
2803 err = kobject_init_and_add(&kdamonds->kobj,
2804 &damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
2805 "kdamonds");
2806 if (err) {
2807 kobject_put(&kdamonds->kobj);
2808 return err;
2809 }
2810 ui_dir->kdamonds = kdamonds;
2811 return err;
2812}
2813
2814static void damon_sysfs_ui_dir_release(struct kobject *kobj)
2815{
2816 kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
2817}
2818
2819static struct attribute *damon_sysfs_ui_dir_attrs[] = {
2820 NULL,
2821};
2822ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);
2823
2824static struct kobj_type damon_sysfs_ui_dir_ktype = {
2825 .release = damon_sysfs_ui_dir_release,
2826 .sysfs_ops = &kobj_sysfs_ops,
2827 .default_groups = damon_sysfs_ui_dir_groups,
2828};
2829
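/*
 * Create the DAMON sysfs root ('damon' under mm_kobj) and the 'admin'
 * directory hierarchy beneath it, i.e. '/sys/kernel/mm/damon/admin' when
 * sysfs is mounted at the usual location.
 */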
2830static int __init damon_sysfs_init(void)
2831{
2832 struct kobject *damon_sysfs_root;
2833 struct damon_sysfs_ui_dir *admin;
2834 int err;
2835
2836 damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
2837 if (!damon_sysfs_root)
2838 return -ENOMEM;
2839
2840 admin = damon_sysfs_ui_dir_alloc();
2841 if (!admin) {
2842 kobject_put(damon_sysfs_root);
2843 return -ENOMEM;
2844 }
2845 err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
2846 damon_sysfs_root, "admin");
2847 if (err)
2848 goto out;
2849 err = damon_sysfs_ui_dir_add_dirs(admin);
2850 if (err)
2851 goto out;
2852 return 0;
2853
2854out:
2855 kobject_put(&admin->kobj);
2856 kobject_put(damon_sysfs_root);
2857 return err;
2858}
2859subsys_initcall(damon_sysfs_init);