/*
 * User interface for Resource Allocation in Resource Director Technology(RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>

#include <uapi/linux/magic.h>

#include <asm/intel_rdt_sched.h>
#include "intel_rdt.h"
DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
static struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/* Kernel fs node for "mon_groups" directory under root */
static struct kernfs_node *kn_mongrp;

/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;

static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];
void rdt_last_cmd_clear(void)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_clear(&last_cmd_status);
}

void rdt_last_cmd_puts(const char *s)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_puts(&last_cmd_status, s);
}

void rdt_last_cmd_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_vprintf(&last_cmd_status, fmt, ap);
	va_end(ap);
}
/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set "current->closid" to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
static int closid_free_map;

static void closid_init(void)
{
	struct rdt_resource *r;
	int rdt_min_closid = 32;

	/* Compute rdt_min_closid across all resources */
	for_each_alloc_enabled_rdt_resource(r)
		rdt_min_closid = min(rdt_min_closid, r->num_closid);

	closid_free_map = BIT_MASK(rdt_min_closid) - 1;

	/* CLOSID 0 is always reserved for the default group */
	closid_free_map &= ~1;
}
static int closid_alloc(void)
{
	u32 closid = ffs(closid_free_map);

	if (closid == 0)
		return -ENOSPC;
	closid--;
	closid_free_map &= ~(1 << closid);

	return closid;
}

static void closid_free(int closid)
{
	closid_free_map |= 1 << closid;
}
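
/*
 * Illustrative walk-through of the allocator above (values hypothetical):
 * with rdt_min_closid = 4, closid_init() leaves closid_free_map = 0b1110
 * (CLOSID 0 reserved for the default group). A first closid_alloc() then
 * finds bit 1 via ffs(), returns 1 and leaves 0b1100; closid_free(1)
 * restores 0b1110.
 */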
/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
				.ia_uid = current_fsuid(),
				.ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
	struct kernfs_node *kn;
	int ret;

	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
				  0, rft->kf_ops, rft, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return 0;
}

static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rftype *rft = of->kn->priv;

	if (rft->seq_show)
		return rft->seq_show(of, m, arg);

	return 0;
}

static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct rftype *rft = of->kn->priv;

	if (rft->write)
		return rft->write(of, buf, nbytes, off);

	return -EINVAL;
}

static struct kernfs_ops rdtgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= rdtgroup_file_write,
	.seq_show		= rdtgroup_seqfile_show,
};

static struct kernfs_ops kf_mondata_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.seq_show		= rdtgroup_mondata_show,
};
static bool is_cpu_list(struct kernfs_open_file *of)
{
	struct rftype *rft = of->kn->priv;

	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
}

static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp)
		seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
			   cpumask_pr_args(&rdtgrp->cpu_mask));
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
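
/*
 * Example (illustrative; actual values depend on the machine): reading
 * these files from the mounted hierarchy shows a group's CPU ownership
 * in mask or list format:
 *	# cat /sys/fs/resctrl/cpus		->  fff
 *	# cat /sys/fs/resctrl/cpus_list		->  0-11
 */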
/*
 * This is safe against intel_rdt_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
static void update_cpu_closid_rmid(void *info)
{
	struct rdtgroup *r = info;

	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the current
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	intel_rdt_sched_in();
}

/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
 *
 * Per task closids/rmids must have been set up before calling this function.
 */
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, cpu_mask))
		update_cpu_closid_rmid(r);
	smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
	put_cpu();
}
static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			  cpumask_var_t tmpmask)
{
	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
	struct list_head *head;

	/* Check whether cpus belong to parent ctrl group */
	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n");
		return -EINVAL;
	}

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Give any dropped cpus to parent rdtgroup */
		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, prgrp);
	}

	/*
	 * If we added cpus, remove them from previous group that owned them
	 * and update per-cpu rmid
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			if (crgrp == rdtgrp)
				continue;
			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
				       tmpmask);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	return 0;
}
static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
	struct rdtgroup *crgrp;

	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
	/* update the child mon group masks as well */
	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}
static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
	struct rdtgroup *r, *crgrp;
	struct list_head *head;

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Can't drop from default group */
		if (rdtgrp == &rdtgroup_default) {
			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
			return -EINVAL;
		}

		/* Give any dropped cpus to rdtgroup_default */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, &rdtgroup_default);
	}

	/*
	 * If we added cpus, remove them from previous group and
	 * the prev group's child groups that owned them
	 * and update per-cpu closid/rmid.
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
			if (r == rdtgrp)
				continue;
			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
			if (cpumask_weight(tmpmask1))
				cpumask_rdtgrp_clear(r, tmpmask1);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	/*
	 * Clear child mon group masks since there is a new parent mask
	 * now and update the rmid for the cpus the child lost.
	 */
	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
		update_closid_rmid(tmpmask, rdtgrp);
		cpumask_clear(&crgrp->cpu_mask);
	}

	return 0;
}
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	cpumask_var_t tmpmask, newmask, tmpmask1;
	struct rdtgroup *rdtgrp;
	int ret;

	if (!buf)
		return -EINVAL;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		return -ENOMEM;
	}
	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		free_cpumask_var(newmask);
		return -ENOMEM;
	}

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	rdt_last_cmd_clear();
	if (!rdtgrp) {
		ret = -ENOENT;
		rdt_last_cmd_puts("directory was removed\n");
		goto unlock;
	}

	if (is_cpu_list(of))
		ret = cpulist_parse(buf, newmask);
	else
		ret = cpumask_parse(buf, newmask);

	if (ret) {
		rdt_last_cmd_puts("bad cpu list/mask\n");
		goto unlock;
	}

	/* check that user didn't specify any offline cpus */
	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
	if (cpumask_weight(tmpmask)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("can only assign online cpus\n");
		goto unlock;
	}

	if (rdtgrp->type == RDTCTRL_GROUP)
		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
	else if (rdtgrp->type == RDTMON_GROUP)
		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
	else
		ret = -EINVAL;

unlock:
	rdtgroup_kn_unlock(of->kn);
	free_cpumask_var(tmpmask);
	free_cpumask_var(newmask);
	free_cpumask_var(tmpmask1);

	return ret ?: nbytes;
}
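
/*
 * Example (illustrative): moving CPUs 2-3 into control group "p1" pulls
 * them out of whichever group owned them before (the default group,
 * unless they were assigned elsewhere):
 *	# echo 2-3 > /sys/fs/resctrl/p1/cpus_list
 * Writing offline CPUs, or, for a mon group, CPUs outside the parent
 * ctrl_mon group, fails and the reason is recorded in
 * info/last_cmd_status.
 */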
struct task_move_callback {
	struct callback_head	work;
	struct rdtgroup		*rdtgrp;
};

static void move_myself(struct callback_head *head)
{
	struct task_move_callback *callback;
	struct rdtgroup *rdtgrp;

	callback = container_of(head, struct task_move_callback, work);
	rdtgrp = callback->rdtgrp;

	/*
	 * If resource group was deleted before this task work callback
	 * was invoked, then assign the task to root group and free the
	 * resource group.
	 */
	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		current->closid = 0;
		current->rmid = 0;
		kfree(rdtgrp);
	}

	preempt_disable();
	/* update PQR_ASSOC MSR to make resource group go into effect */
	intel_rdt_sched_in();
	preempt_enable();

	kfree(callback);
}
static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{
	struct task_move_callback *callback;
	int ret;

	callback = kzalloc(sizeof(*callback), GFP_KERNEL);
	if (!callback)
		return -ENOMEM;
	callback->work.func = move_myself;
	callback->rdtgrp = rdtgrp;

	/*
	 * Take a refcount, so rdtgrp cannot be freed before the
	 * callback has been invoked.
	 */
	atomic_inc(&rdtgrp->waitcount);
	ret = task_work_add(tsk, &callback->work, true);
	if (ret) {
		/*
		 * Task is exiting. Drop the refcount and free the callback.
		 * No need to check the refcount as the group cannot be
		 * deleted before the write function unlocks rdtgroup_mutex.
		 */
		atomic_dec(&rdtgrp->waitcount);
		kfree(callback);
		rdt_last_cmd_puts("task exited\n");
	} else {
		/*
		 * For ctrl_mon groups move both closid and rmid.
		 * For monitor groups, can move the tasks only from
		 * their parent CTRL group.
		 */
		if (rdtgrp->type == RDTCTRL_GROUP) {
			tsk->closid = rdtgrp->closid;
			tsk->rmid = rdtgrp->mon.rmid;
		} else if (rdtgrp->type == RDTMON_GROUP) {
			if (rdtgrp->mon.parent->closid == tsk->closid) {
				tsk->rmid = rdtgrp->mon.rmid;
			} else {
				rdt_last_cmd_puts("Can't move task to different control group\n");
				ret = -EINVAL;
			}
		}
	}
	return ret;
}
static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{
	const struct cred *tcred = get_task_cred(task);
	const struct cred *cred = current_cred();
	int ret = 0;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid)) {
		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
		ret = -EPERM;
	}

	put_cred(tcred);
	return ret;
}

static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{
	struct task_struct *tsk;
	int ret;

	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			rdt_last_cmd_printf("No task %d\n", pid);
			return -ESRCH;
		}
	} else {
		tsk = current;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = rdtgroup_task_write_permission(tsk, of);
	if (!ret)
		ret = __rdtgroup_move_task(tsk, rdtgrp);

	put_task_struct(tsk);
	return ret;
}
static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;
	pid_t pid;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	rdt_last_cmd_clear();

	if (rdtgrp)
		ret = rdtgroup_move_task(pid, rdtgrp, of);
	else
		ret = -ENOENT;

	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}
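
/*
 * Example (illustrative): binding PID 1234 to control group "p1":
 *	# echo 1234 > /sys/fs/resctrl/p1/tasks
 * On failure, info/last_cmd_status reports the cause, e.g. "task exited"
 * or "No permission to move task 1234".
 */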
static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
	struct task_struct *p, *t;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
		    (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid))
			seq_printf(s, "%d\n", t->pid);
	}
	rcu_read_unlock();
}

static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		show_rdt_tasks(rdtgrp, s);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
				    struct seq_file *seq, void *v)
{
	int len;

	mutex_lock(&rdtgroup_mutex);
	len = seq_buf_used(&last_cmd_status);
	if (len)
		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
	else
		seq_puts(seq, "ok\n");
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}
static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_closid);
	return 0;
}

static int rdt_default_ctrl_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%x\n", r->default_ctrl);
	return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
	return 0;
}

static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%x\n", r->cache.shareable_bits);
	return 0;
}

static int rdt_min_bw_show(struct kernfs_open_file *of,
			   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.min_bw);
	return 0;
}

static int rdt_num_rmids_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_rmid);

	return 0;
}

static int rdt_mon_features_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct mon_evt *mevt;

	list_for_each_entry(mevt, &r->evt_list, list)
		seq_printf(seq, "%s\n", mevt->name);

	return 0;
}

static int rdt_bw_gran_show(struct kernfs_open_file *of,
			    struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.bw_gran);
	return 0;
}

static int rdt_delay_linear_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.delay_linear);
	return 0;
}

static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale);

	return 0;
}
static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{
	struct rdt_resource *r = of->kn->parent->priv;
	unsigned int bytes;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
		return -EINVAL;

	intel_cqm_threshold = bytes / r->mon_scale;

	return nbytes;
}
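
/*
 * Example (illustrative): lowering the occupancy threshold below which a
 * limbo RMID is considered free for reuse to 64KB:
 *	# echo 65536 > /sys/fs/resctrl/info/L3_MON/max_threshold_occupancy
 * The integer division above rounds the value down to a multiple of the
 * counter scale (r->mon_scale bytes).
 */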
/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
	{
		.name		= "last_cmd_status",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_last_cmd_status_show,
		.fflags		= RF_TOP_INFO,
	},
	{
		.name		= "num_closids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_closids_show,
		.fflags		= RF_CTRL_INFO,
	},
	{
		.name		= "mon_features",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_mon_features_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "num_rmids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_rmids_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "cbm_mask",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_default_ctrl_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_cbm_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_cbm_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "shareable_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_shareable_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_bandwidth",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_bw_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "bandwidth_gran",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bw_gran_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "delay_linear",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_delay_linear_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "max_threshold_occupancy",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= max_threshold_occ_write,
		.seq_show	= max_threshold_occ_show,
		.fflags		= RF_MON_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "cpus",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "cpus_list",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.flags		= RFTYPE_FLAGS_CPUS_LIST,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "tasks",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_tasks_write,
		.seq_show	= rdtgroup_tasks_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "schemata",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_schemata_write,
		.seq_show	= rdtgroup_schemata_show,
		.fflags		= RF_CTRL_BASE,
	},
};
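
/*
 * The fflags above determine where each file appears. Illustratively, on
 * an L3/MBA system with monitoring enabled, the mounted hierarchy holds:
 *	/sys/fs/resctrl/		cpus, cpus_list, tasks, schemata
 *	/sys/fs/resctrl/info/		last_cmd_status
 *	/sys/fs/resctrl/info/L3/	num_closids, cbm_mask, min_cbm_bits,
 *					shareable_bits
 *	/sys/fs/resctrl/info/MB/	num_closids, min_bandwidth,
 *					bandwidth_gran, delay_linear
 *	/sys/fs/resctrl/info/L3_MON/	num_rmids, mon_features,
 *					max_threshold_occupancy
 */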
static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
{
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	lockdep_assert_held(&rdtgroup_mutex);

	for (rft = rfts; rft < rfts + len; rft++) {
		if ((fflags & rft->fflags) == rft->fflags) {
			ret = rdtgroup_add_file(kn, rft);
			if (ret)
				goto error;
		}
	}

	return 0;
error:
	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
	while (--rft >= rfts) {
		if ((fflags & rft->fflags) == rft->fflags)
			kernfs_remove_by_name(kn, rft->name);
	}
	return ret;
}

static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
				      unsigned long fflags)
{
	struct kernfs_node *kn_subdir;
	int ret;

	kn_subdir = kernfs_create_dir(kn_info, name,
				      kn_info->mode, r);
	if (IS_ERR(kn_subdir))
		return PTR_ERR(kn_subdir);

	kernfs_get(kn_subdir);
	ret = rdtgroup_kn_set_ugid(kn_subdir);
	if (ret)
		return ret;

	ret = rdtgroup_add_files(kn_subdir, fflags);
	if (!ret)
		kernfs_activate(kn_subdir);

	return ret;
}
static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
	struct rdt_resource *r;
	unsigned long fflags;
	char name[32];
	int ret;

	/* create the directory */
	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
	if (IS_ERR(kn_info))
		return PTR_ERR(kn_info);

	ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
	if (ret)
		goto out_destroy;

	for_each_alloc_enabled_rdt_resource(r) {
		fflags = r->fflags | RF_CTRL_INFO;
		ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
		if (ret)
			goto out_destroy;
	}

	for_each_mon_enabled_rdt_resource(r) {
		fflags = r->fflags | RF_MON_INFO;
		sprintf(name, "%s_MON", r->name);
		ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
		if (ret)
			goto out_destroy;
	}

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that @rdtgrp->kn is always accessible.
	 */
	kernfs_get(kn_info);

	ret = rdtgroup_kn_set_ugid(kn_info);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn_info);

	return 0;

out_destroy:
	kernfs_remove(kn_info);
	return ret;
}
static int
mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
		    char *name, struct kernfs_node **dest_kn)
{
	struct kernfs_node *kn;
	int ret;

	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	if (dest_kn)
		*dest_kn = kn;

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that @rdtgrp->kn is always accessible.
	 */
	kernfs_get(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn);

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}
static void l3_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}

static void l2_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}

static inline bool is_mba_linear(void)
{
	return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear;
}

static int set_cache_qos_cfg(int level, bool enable)
{
	void (*update)(void *arg);
	struct rdt_resource *r_l;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	if (level == RDT_RESOURCE_L3)
		update = l3_qos_cfg_update;
	else if (level == RDT_RESOURCE_L2)
		update = l2_qos_cfg_update;
	else
		return -EINVAL;

	r_l = &rdt_resources_all[level];
	list_for_each_entry(d, &r_l->domains, list) {
		/* Pick one CPU from each domain instance to update MSR */
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
	}
	cpu = get_cpu();
	/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		update(&enable);
	/* Update QOS_CFG MSR on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, update, &enable, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}
/*
 * Enable or disable the MBA software controller
 * which helps user specify bandwidth in MBps.
 * MBA software controller is supported only if
 * MBM is supported and MBA is in linear scale.
 */
static int set_mba_sc(bool mba_sc)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA];
	struct rdt_domain *d;

	if (!is_mbm_enabled() || !is_mba_linear() ||
	    mba_sc == is_mba_sc(r))
		return -EINVAL;

	r->membw.mba_sc = mba_sc;
	list_for_each_entry(d, &r->domains, list)
		setup_default_ctrlval(r, d->ctrl_val, d->mbps_val);

	return 0;
}
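
/*
 * Example (illustrative): selecting the software controller at mount time
 * switches the MB schemata line from percentages to MBps values:
 *	# mount -t resctrl resctrl -o mba_MBps /sys/fs/resctrl
 *	# echo "MB:0=1024;1=2048" > /sys/fs/resctrl/schemata
 */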
static int cdp_enable(int level, int data_type, int code_type)
{
	struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
	struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
	struct rdt_resource *r_l = &rdt_resources_all[level];
	int ret;

	if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
	    !r_lcode->alloc_capable)
		return -EINVAL;

	ret = set_cache_qos_cfg(level, true);
	if (!ret) {
		r_l->alloc_enabled = false;
		r_ldata->alloc_enabled = true;
		r_lcode->alloc_enabled = true;
	}
	return ret;
}

static int cdpl3_enable(void)
{
	return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
			  RDT_RESOURCE_L3CODE);
}

static int cdpl2_enable(void)
{
	return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
			  RDT_RESOURCE_L2CODE);
}

static void cdp_disable(int level, int data_type, int code_type)
{
	struct rdt_resource *r = &rdt_resources_all[level];

	r->alloc_enabled = r->alloc_capable;

	if (rdt_resources_all[data_type].alloc_enabled) {
		rdt_resources_all[data_type].alloc_enabled = false;
		rdt_resources_all[code_type].alloc_enabled = false;
		set_cache_qos_cfg(level, false);
	}
}

static void cdpl3_disable(void)
{
	cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
}

static void cdpl2_disable(void)
{
	cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
}

static void cdp_disable_all(void)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
		cdpl3_disable();
	if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
		cdpl2_disable();
}
static int parse_rdtgroupfs_options(char *data)
{
	char *token, *o = data;
	int ret = 0;

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token) {
			ret = -EINVAL;
			goto out;
		}

		if (!strcmp(token, "cdp")) {
			ret = cdpl3_enable();
			if (ret)
				goto out;
		} else if (!strcmp(token, "cdpl2")) {
			ret = cdpl2_enable();
			if (ret)
				goto out;
		} else if (!strcmp(token, "mba_MBps")) {
			ret = set_mba_sc(true);
			if (ret)
				goto out;
		} else {
			ret = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	pr_err("Invalid mount option \"%s\"\n", token);

	return ret;
}
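
/*
 * Example (illustrative) mount invocations accepted by the parser above:
 *	# mount -t resctrl resctrl /sys/fs/resctrl
 *	# mount -t resctrl resctrl -o cdp /sys/fs/resctrl
 *	# mount -t resctrl resctrl -o cdp,cdpl2,mba_MBps /sys/fs/resctrl
 * Unknown or empty options make the mount fail with -EINVAL.
 */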
/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus when looking for the rdtgroup
 * structure for a kernfs node we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field, otherwise we have a file, and need only look to the parent
 * to find the rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
	if (kernfs_type(kn) == KERNFS_DIR) {
		/*
		 * All the resource directories use "kn->priv"
		 * to point to the "struct rdtgroup" for the
		 * resource. "info" and its subdirectories don't
		 * have rdtgroup structures, so return NULL here.
		 */
		if (kn == kn_info || kn->parent == kn_info)
			return NULL;
		else
			return kn->priv;
	} else {
		return kn->parent->priv;
	}
}
struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return NULL;

	atomic_inc(&rdtgrp->waitcount);
	kernfs_break_active_protection(kn);

	mutex_lock(&rdtgroup_mutex);

	/* Was this group deleted while we waited? */
	if (rdtgrp->flags & RDT_DELETED)
		return NULL;

	return rdtgrp;
}

void rdtgroup_kn_unlock(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return;

	mutex_unlock(&rdtgroup_mutex);

	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		kernfs_unbreak_active_protection(kn);
		kernfs_put(rdtgrp->kn);
		kfree(rdtgrp);
	} else {
		kernfs_unbreak_active_protection(kn);
	}
}
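
/*
 * Typical usage pattern for the pair above (sketch; mirrors the callers
 * in this file):
 *
 *	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 *	if (rdtgrp)
 *		... operate on the live group ...
 *	else
 *		ret = -ENOENT;
 *	rdtgroup_kn_unlock(of->kn);
 *
 * rdtgroup_kn_unlock() is called even when the lock function returned
 * NULL: for a deleted group the mutex and the extra waitcount reference
 * taken above still have to be dropped.
 */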
static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **mon_data_kn);

static struct dentry *rdt_mount(struct file_system_type *fs_type,
				int flags, const char *unused_dev_name,
				void *data)
{
	struct rdt_domain *dom;
	struct rdt_resource *r;
	struct dentry *dentry;
	int ret;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);
	/*
	 * resctrl file system can only be mounted once.
	 */
	if (static_branch_unlikely(&rdt_enable_key)) {
		dentry = ERR_PTR(-EBUSY);
		goto out;
	}

	ret = parse_rdtgroupfs_options(data);
	if (ret) {
		dentry = ERR_PTR(ret);
		goto out_cdp;
	}

	closid_init();

	ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
	if (ret) {
		dentry = ERR_PTR(ret);
		goto out_cdp;
	}

	if (rdt_mon_capable) {
		ret = mongroup_create_dir(rdtgroup_default.kn,
					  NULL, "mon_groups",
					  &kn_mongrp);
		if (ret) {
			dentry = ERR_PTR(ret);
			goto out_info;
		}
		kernfs_get(kn_mongrp);

		ret = mkdir_mondata_all(rdtgroup_default.kn,
					&rdtgroup_default, &kn_mondata);
		if (ret) {
			dentry = ERR_PTR(ret);
			goto out_mongrp;
		}
		kernfs_get(kn_mondata);
		rdtgroup_default.mon.mon_data_kn = kn_mondata;
	}

	dentry = kernfs_mount(fs_type, flags, rdt_root,
			      RDTGROUP_SUPER_MAGIC, NULL);
	if (IS_ERR(dentry))
		goto out_mondata;

	if (rdt_alloc_capable)
		static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
	if (rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_mon_enable_key);

	if (rdt_alloc_capable || rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_enable_key);

	if (is_mbm_enabled()) {
		r = &rdt_resources_all[RDT_RESOURCE_L3];
		list_for_each_entry(dom, &r->domains, list)
			mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
	}

	goto out;

out_mondata:
	if (rdt_mon_capable)
		kernfs_remove(kn_mondata);
out_mongrp:
	if (rdt_mon_capable)
		kernfs_remove(kn_mongrp);
out_info:
	kernfs_remove(kn_info);
out_cdp:
	cdp_disable_all();
out:
	rdt_last_cmd_clear();
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();

	return dentry;
}
static int reset_all_ctrls(struct rdt_resource *r)
{
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int i, cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = r;
	msr_param.low = 0;
	msr_param.high = r->num_closid;

	/*
	 * Disable resource control for this resource by setting all
	 * CBMs in all domains to the maximum mask value. Pick one CPU
	 * from each domain to update the MSRs below.
	 */
	list_for_each_entry(d, &r->domains, list) {
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

		for (i = 0; i < r->num_closid; i++)
			d->ctrl_val[i] = r->default_ctrl;
	}
	cpu = get_cpu();
	/* Update CBM on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_ctrl_update(&msr_param);
	/* Update CBM on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}
static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_alloc_capable &&
	       (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
}

static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_mon_capable &&
	       (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
}
/*
 * Move tasks from one to the other group. If @from is NULL, then all tasks
 * in the systems are moved unconditionally (used for teardown).
 *
 * If @mask is not NULL the cpus on which moved tasks are running are set
 * in that mask so the update smp function call is restricted to affected
 * cpus.
 */
static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
				 struct cpumask *mask)
{
	struct task_struct *p, *t;

	read_lock(&tasklist_lock);
	for_each_process_thread(p, t) {
		if (!from || is_closid_match(t, from) ||
		    is_rmid_match(t, from)) {
			t->closid = to->closid;
			t->rmid = to->mon.rmid;

#ifdef CONFIG_SMP
			/*
			 * This is safe on x86 w/o barriers as the ordering
			 * of writing to task_cpu() and t->on_cpu is
			 * reverse to the reading here. The detection is
			 * inaccurate as tasks might move or schedule
			 * before the smp function call takes place. In
			 * such a case the function call is pointless, but
			 * there is no other side effect.
			 */
			if (mask && t->on_cpu)
				cpumask_set_cpu(task_cpu(t), mask);
#endif
		}
	}
	read_unlock(&tasklist_lock);
}

static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
{
	struct rdtgroup *sentry, *stmp;
	struct list_head *head;

	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
		free_rmid(sentry->mon.rmid);
		list_del(&sentry->mon.crdtgrp_list);
		kfree(sentry);
	}
}
/*
 * Forcibly remove all of subdirectories under root.
 */
static void rmdir_all_sub(void)
{
	struct rdtgroup *rdtgrp, *tmp;

	/* Move all tasks to the default resource group */
	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);

	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
		/* Free any child rmids */
		free_all_child_rdtgrp(rdtgrp);

		/* Remove each rdtgroup other than root */
		if (rdtgrp == &rdtgroup_default)
			continue;

		/*
		 * Give any CPUs back to the default group. We cannot copy
		 * cpu_online_mask because a CPU might have executed the
		 * offline callback already, but is still marked online.
		 */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

		free_rmid(rdtgrp->mon.rmid);

		kernfs_remove(rdtgrp->kn);
		list_del(&rdtgrp->rdtgroup_list);
		kfree(rdtgrp);
	}
	/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
	update_closid_rmid(cpu_online_mask, &rdtgroup_default);

	kernfs_remove(kn_info);
	kernfs_remove(kn_mongrp);
	kernfs_remove(kn_mondata);
}
static void rdt_kill_sb(struct super_block *sb)
{
	struct rdt_resource *r;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	set_mba_sc(false);

	/* Put everything back to default values. */
	for_each_alloc_enabled_rdt_resource(r)
		reset_all_ctrls(r);
	cdp_disable_all();
	rmdir_all_sub();
	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
	static_branch_disable_cpuslocked(&rdt_enable_key);
	kernfs_kill_sb(sb);
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
}

static struct file_system_type rdt_fs_type = {
	.name    = "resctrl",
	.mount   = rdt_mount,
	.kill_sb = rdt_kill_sb,
};
static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
		       void *priv)
{
	struct kernfs_node *kn;
	int ret = 0;

	kn = __kernfs_create_file(parent_kn, name, 0444, 0,
				  &kf_mondata_ops, priv, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return ret;
}

/*
 * Remove all subdirectories of mon_data of ctrl_mon groups
 * and monitor groups with given domain id.
 */
void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id)
{
	struct rdtgroup *prgrp, *crgrp;
	char name[32];

	if (!r->mon_enabled)
		return;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		sprintf(name, "mon_%s_%02d", r->name, dom_id);
		kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);

		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
			kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
	}
}
static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
				struct rdt_domain *d,
				struct rdt_resource *r, struct rdtgroup *prgrp)
{
	union mon_data_bits priv;
	struct kernfs_node *kn;
	struct mon_evt *mevt;
	struct rmid_read rr;
	char name[32];
	int ret;

	sprintf(name, "mon_%s_%02d", r->name, d->id);
	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that kn is always accessible.
	 */
	kernfs_get(kn);
	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	if (WARN_ON(list_empty(&r->evt_list))) {
		ret = -EPERM;
		goto out_destroy;
	}

	priv.u.rid = r->rid;
	priv.u.domid = d->id;
	list_for_each_entry(mevt, &r->evt_list, list) {
		priv.u.evtid = mevt->evtid;
		ret = mon_addfile(kn, mevt->name, priv.priv);
		if (ret)
			goto out_destroy;

		if (is_mbm_event(mevt->evtid))
			mon_event_read(&rr, d, prgrp, mevt->evtid, true);
	}
	kernfs_activate(kn);
	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}
/*
 * Add all subdirectories of mon_data for "ctrl_mon" groups
 * and "monitor" groups with given domain id.
 */
void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
				    struct rdt_domain *d)
{
	struct kernfs_node *parent_kn;
	struct rdtgroup *prgrp, *crgrp;
	struct list_head *head;

	if (!r->mon_enabled)
		return;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		parent_kn = prgrp->mon.mon_data_kn;
		mkdir_mondata_subdir(parent_kn, d, r, prgrp);

		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			parent_kn = crgrp->mon.mon_data_kn;
			mkdir_mondata_subdir(parent_kn, d, r, crgrp);
		}
	}
}

static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
				       struct rdt_resource *r,
				       struct rdtgroup *prgrp)
{
	struct rdt_domain *dom;
	int ret;

	list_for_each_entry(dom, &r->domains, list) {
		ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * This creates a directory mon_data which contains the monitored data.
 *
 * mon_data has one directory for each domain which are named
 * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data
 * with L3 domain looks as below:
 * ./mon_data:
 * mon_L3_00
 * mon_L3_01
 * mon_L3_02
 * ...
 *
 * Each domain directory has one file per event:
 * ./mon_L3_00/:
 * llc_occupancy
 *
 */
static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **dest_kn)
{
	struct rdt_resource *r;
	struct kernfs_node *kn;
	int ret;

	/*
	 * Create the mon_data directory first.
	 */
	ret = mongroup_create_dir(parent_kn, NULL, "mon_data", &kn);
	if (ret)
		return ret;

	if (dest_kn)
		*dest_kn = kn;

	/*
	 * Create the subdirectories for each domain. Note that all events
	 * in a domain like L3 are grouped into a resource whose domain is L3
	 */
	for_each_mon_enabled_rdt_resource(r) {
		ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
		if (ret)
			goto out_destroy;
	}

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}
static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
			     struct kernfs_node *prgrp_kn,
			     const char *name, umode_t mode,
			     enum rdt_group_type rtype, struct rdtgroup **r)
{
	struct rdtgroup *prdtgrp, *rdtgrp;
	struct kernfs_node *kn;
	uint files = 0;
	int ret;

	prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
	rdt_last_cmd_clear();
	if (!prdtgrp) {
		ret = -ENODEV;
		rdt_last_cmd_puts("directory was removed\n");
		goto out_unlock;
	}

	/* allocate the rdtgroup. */
	rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
	if (!rdtgrp) {
		ret = -ENOSPC;
		rdt_last_cmd_puts("kernel out of memory\n");
		goto out_unlock;
	}
	*r = rdtgrp;
	rdtgrp->mon.parent = prdtgrp;
	rdtgrp->type = rtype;
	INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);

	/* kernfs creates the directory for rdtgrp */
	kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		rdt_last_cmd_puts("kernfs create error\n");
		goto out_free_rgrp;
	}
	rdtgrp->kn = kn;

	/*
	 * kernfs_remove() will drop the reference count on "kn" which
	 * will free it. But we still need it to stick around for the
	 * rdtgroup_kn_unlock(kn) call below. Take one extra reference
	 * here, which will be dropped inside rdtgroup_kn_unlock().
	 */
	kernfs_get(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		rdt_last_cmd_puts("kernfs perm error\n");
		goto out_destroy;
	}

	files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
	ret = rdtgroup_add_files(kn, files);
	if (ret) {
		rdt_last_cmd_puts("kernfs fill error\n");
		goto out_destroy;
	}

	if (rdt_mon_capable) {
		ret = alloc_rmid();
		if (ret < 0) {
			rdt_last_cmd_puts("out of RMIDs\n");
			goto out_destroy;
		}
		rdtgrp->mon.rmid = ret;

		ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
		if (ret) {
			rdt_last_cmd_puts("kernfs subdir error\n");
			goto out_idfree;
		}
	}
	kernfs_activate(kn);

	/*
	 * The caller unlocks the prgrp_kn upon success.
	 */
	return 0;

out_idfree:
	free_rmid(rdtgrp->mon.rmid);
out_destroy:
	kernfs_remove(rdtgrp->kn);
out_free_rgrp:
	kfree(rdtgrp);
out_unlock:
	rdtgroup_kn_unlock(prgrp_kn);
	return ret;
}

static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
{
	kernfs_remove(rgrp->kn);
	free_rmid(rgrp->mon.rmid);
	kfree(rgrp);
}
/*
 * Create a monitor group under "mon_groups" directory of a control
 * and monitor group(ctrl_mon). This is a resource group
 * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
 */
static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
			      struct kernfs_node *prgrp_kn,
			      const char *name,
			      umode_t mode)
{
	struct rdtgroup *rdtgrp, *prgrp;
	int ret;

	ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTMON_GROUP,
				&rdtgrp);
	if (ret)
		return ret;

	prgrp = rdtgrp->mon.parent;
	rdtgrp->closid = prgrp->closid;

	/*
	 * Add the rdtgrp to the list of rdtgrps the parent
	 * ctrl_mon group has to track.
	 */
	list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);

	rdtgroup_kn_unlock(prgrp_kn);
	return ret;
}
/*
 * These are rdtgroups created under the root directory. Can be used
 * to allocate and monitor resources.
 */
static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
				   struct kernfs_node *prgrp_kn,
				   const char *name, umode_t mode)
{
	struct rdtgroup *rdtgrp;
	struct kernfs_node *kn;
	u32 closid;
	int ret;

	ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTCTRL_GROUP,
				&rdtgrp);
	if (ret)
		return ret;

	kn = rdtgrp->kn;
	ret = closid_alloc();
	if (ret < 0) {
		rdt_last_cmd_puts("out of CLOSIDs\n");
		goto out_common_fail;
	}
	closid = ret;
	ret = 0;

	rdtgrp->closid = closid;
	list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);

	if (rdt_mon_capable) {
		/*
		 * Create an empty mon_groups directory to hold the subset
		 * of tasks and cpus to monitor.
		 */
		ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL);
		if (ret) {
			rdt_last_cmd_puts("kernfs subdir error\n");
			goto out_id_free;
		}
	}

	goto out_unlock;

out_id_free:
	closid_free(closid);
	list_del(&rdtgrp->rdtgroup_list);
out_common_fail:
	mkdir_rdt_prepare_clean(rdtgrp);
out_unlock:
	rdtgroup_kn_unlock(prgrp_kn);
	return ret;
}
/*
 * We allow creating mon groups only within a directory called "mon_groups"
 * which is present in every ctrl_mon group. Check if this is a valid
 * "mon_groups" directory.
 *
 * 1. The directory should be named "mon_groups".
 * 2. The mon group itself should "not" be named "mon_groups".
 *   This makes sure "mon_groups" directory always has a ctrl_mon group
 *   as parent.
 */
static bool is_mon_groups(struct kernfs_node *kn, const char *name)
{
	return (!strcmp(kn->name, "mon_groups") &&
		strcmp(name, "mon_groups"));
}

static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			  umode_t mode)
{
	/* Do not accept '\n' to avoid unparsable situation. */
	if (strchr(name, '\n'))
		return -EINVAL;

	/*
	 * If the parent directory is the root directory and RDT
	 * allocation is supported, add a control and monitoring
	 * group.
	 */
	if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
		return rdtgroup_mkdir_ctrl_mon(parent_kn, parent_kn, name, mode);

	/*
	 * If RDT monitoring is supported and the parent directory is a valid
	 * "mon_groups" directory, add a monitoring subdirectory.
	 */
	if (rdt_mon_capable && is_mon_groups(parent_kn, name))
		return rdtgroup_mkdir_mon(parent_kn, parent_kn->parent, name, mode);

	return -EPERM;
}
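
/*
 * Example (illustrative) of the resulting mkdir policy:
 *	# mkdir /sys/fs/resctrl/p0			- new ctrl_mon group
 *	# mkdir /sys/fs/resctrl/p0/mon_groups/m0	- new mon group under p0
 *	# mkdir /sys/fs/resctrl/p0/m1			- rejected (-EPERM)
 *	# mkdir /sys/fs/resctrl/p0/mon_groups/mon_groups - rejected
 */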
static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
			      cpumask_var_t tmpmask)
{
	struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
	int cpu;

	/* Give any tasks back to the parent group */
	rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);

	/* Update per cpu rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask)
		per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
	/*
	 * Update the MSR on moved CPUs and CPUs which have moved
	 * task running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	rdtgrp->flags = RDT_DELETED;
	free_rmid(rdtgrp->mon.rmid);

	/*
	 * Remove the rdtgrp from the parent ctrl_mon group's list
	 */
	WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
	list_del(&rdtgrp->mon.crdtgrp_list);

	/*
	 * one extra hold on this, will drop when we kfree(rdtgrp)
	 * in rdtgroup_kn_unlock()
	 */
	kernfs_get(kn);
	kernfs_remove(rdtgrp->kn);

	return 0;
}

static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
			       cpumask_var_t tmpmask)
{
	int cpu;

	/* Give any tasks back to the default group */
	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);

	/* Give any CPUs back to the default group */
	cpumask_or(&rdtgroup_default.cpu_mask,
		   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

	/* Update per cpu closid and rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask) {
		per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
		per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
	}

	/*
	 * Update the MSR on moved CPUs and CPUs which have moved
	 * task running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	rdtgrp->flags = RDT_DELETED;
	closid_free(rdtgrp->closid);
	free_rmid(rdtgrp->mon.rmid);

	/*
	 * Free all the child monitor group rmids.
	 */
	free_all_child_rdtgrp(rdtgrp);

	list_del(&rdtgrp->rdtgroup_list);

	/*
	 * one extra hold on this, will drop when we kfree(rdtgrp)
	 * in rdtgroup_kn_unlock()
	 */
	kernfs_get(kn);
	kernfs_remove(rdtgrp->kn);

	return 0;
}
static int rdtgroup_rmdir(struct kernfs_node *kn)
{
	struct kernfs_node *parent_kn = kn->parent;
	struct rdtgroup *rdtgrp;
	cpumask_var_t tmpmask;
	int ret = 0;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	rdtgrp = rdtgroup_kn_lock_live(kn);
	if (!rdtgrp) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * If the rdtgroup is a ctrl_mon group and parent directory
	 * is the root directory, remove the ctrl_mon group.
	 *
	 * If the rdtgroup is a mon group and parent directory
	 * is a valid "mon_groups" directory, remove the mon group.
	 */
	if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn)
		ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
	else if (rdtgrp->type == RDTMON_GROUP &&
		 is_mon_groups(parent_kn, kn->name))
		ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
	else
		ret = -EPERM;

out:
	rdtgroup_kn_unlock(kn);
	free_cpumask_var(tmpmask);
	return ret;
}
static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
		seq_puts(seq, ",cdp");
	return 0;
}

static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
	.mkdir		= rdtgroup_mkdir,
	.rmdir		= rdtgroup_rmdir,
	.show_options	= rdtgroup_show_options,
};

static int __init rdtgroup_setup_root(void)
{
	int ret;

	rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
				      KERNFS_ROOT_CREATE_DEACTIVATED,
				      &rdtgroup_default);
	if (IS_ERR(rdt_root))
		return PTR_ERR(rdt_root);

	mutex_lock(&rdtgroup_mutex);

	rdtgroup_default.closid = 0;
	rdtgroup_default.mon.rmid = 0;
	rdtgroup_default.type = RDTCTRL_GROUP;
	INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);

	list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);

	ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE);
	if (ret) {
		kernfs_destroy_root(rdt_root);
		goto out;
	}

	rdtgroup_default.kn = rdt_root->kn;
	kernfs_activate(rdtgroup_default.kn);

out:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}
/*
 * rdtgroup_init - rdtgroup initialization
 *
 * Setup resctrl file system including set up root, create mount point,
 * register rdtgroup filesystem, and initialize files under root directory.
 *
 * Return: 0 on success or -errno
 */
int __init rdtgroup_init(void)
{
	int ret = 0;

	seq_buf_init(&last_cmd_status, last_cmd_status_buf,
		     sizeof(last_cmd_status_buf));

	ret = rdtgroup_setup_root();
	if (ret)
		return ret;

	ret = sysfs_create_mount_point(fs_kobj, "resctrl");
	if (ret)
		goto cleanup_root;

	ret = register_filesystem(&rdt_fs_type);
	if (ret)
		goto cleanup_mountpoint;

	return 0;

cleanup_mountpoint:
	sysfs_remove_mount_point(fs_kobj, "resctrl");
cleanup_root:
	kernfs_destroy_root(rdt_root);

	return ret;
}
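
/*
 * End-to-end usage sketch (illustrative; group names, masks and values
 * depend on the system and its schemata format):
 *	# mount -t resctrl resctrl /sys/fs/resctrl
 *	# mkdir /sys/fs/resctrl/p0
 *	# echo "L3:0=0x0f;1=0xf0" > /sys/fs/resctrl/p0/schemata
 *	# echo 0-3 > /sys/fs/resctrl/p0/cpus_list
 *	# echo $$ > /sys/fs/resctrl/p0/tasks
 *	# cat /sys/fs/resctrl/p0/mon_data/mon_L3_00/llc_occupancy
 */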