// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology (RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/user_namespace.h>

#include <uapi/linux/magic.h>

#include <asm/resctrl.h>
#include "internal.h"
DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
static struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);

/* list of entries for the schemata file */
LIST_HEAD(resctrl_schema_all);

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/* Kernel fs node for "mon_groups" directory under root */
static struct kernfs_node *kn_mongrp;

/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;

static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];

static int rdtgroup_setup_root(struct rdt_fs_context *ctx);
static void rdtgroup_destroy_root(void);

struct dentry *debugfs_resctrl;

static bool resctrl_debug;
void rdt_last_cmd_clear(void)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_clear(&last_cmd_status);
}

void rdt_last_cmd_puts(const char *s)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_puts(&last_cmd_status, s);
}

void rdt_last_cmd_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_vprintf(&last_cmd_status, fmt, ap);
	va_end(ap);
}
void rdt_staged_configs_clear(void)
{
	struct rdt_resource *r;
	struct rdt_domain *dom;

	lockdep_assert_held(&rdtgroup_mutex);

	for_each_alloc_capable_rdt_resource(r) {
		list_for_each_entry(dom, &r->domains, list)
			memset(dom->staged_config, 0, sizeof(dom->staged_config));
	}
}
/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set "current->closid" to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR.
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
static int closid_free_map;
static int closid_free_map_len;

int closids_supported(void)
{
	return closid_free_map_len;
}
static void closid_init(void)
{
	struct resctrl_schema *s;
	u32 rdt_min_closid = 32;

	/* Compute rdt_min_closid across all resources */
	list_for_each_entry(s, &resctrl_schema_all, list)
		rdt_min_closid = min(rdt_min_closid, s->num_closid);

	closid_free_map = BIT_MASK(rdt_min_closid) - 1;

	/* CLOSID 0 is always reserved for the default group */
	closid_free_map &= ~1;
	closid_free_map_len = rdt_min_closid;
}
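/*
 * Illustrative walk-through of closid_init() (hypothetical numbers): if
 * the smallest resource supports four CLOSIDs, BIT_MASK(4) - 1 yields
 * 0b1111, so CLOSIDs 0-3 start out free. Reserving CLOSID 0 for the
 * default group leaves closid_free_map = 0b1110; closid_alloc() below
 * then hands out CLOSID 1 first (lowest set bit via ffs()).
 */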
static int closid_alloc(void)
{
	u32 closid = ffs(closid_free_map);

	if (closid == 0)
		return -ENOSPC;
	closid--;
	closid_free_map &= ~(1 << closid);

	return closid;
}

void closid_free(int closid)
{
	closid_free_map |= 1 << closid;
}
/**
 * closid_allocated - test if provided closid is in use
 * @closid: closid to be tested
 *
 * Return: true if @closid is currently associated with a resource group,
 * false if @closid is free
 */
static bool closid_allocated(unsigned int closid)
{
	return (closid_free_map & (1 << closid)) == 0;
}
/**
 * rdtgroup_mode_by_closid - Return mode of resource group with closid
 * @closid: closid of the resource group
 *
 * Each resource group is associated with a @closid. Here the mode
 * of a resource group can be queried by searching for it using its closid.
 *
 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
 */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
{
	struct rdtgroup *rdtgrp;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->closid == closid)
			return rdtgrp->mode;
	}

	return RDT_NUM_MODES;
}
static const char * const rdt_mode_str[] = {
	[RDT_MODE_SHAREABLE]		= "shareable",
	[RDT_MODE_EXCLUSIVE]		= "exclusive",
	[RDT_MODE_PSEUDO_LOCKSETUP]	= "pseudo-locksetup",
	[RDT_MODE_PSEUDO_LOCKED]	= "pseudo-locked",
};

/**
 * rdtgroup_mode_str - Return the string representation of mode
 * @mode: the resource group mode as &enum rdtgrp_mode
 *
 * Return: string representation of valid mode, "unknown" otherwise
 */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
{
	if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
		return "unknown";

	return rdt_mode_str[mode];
}
/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
				.ia_uid = current_fsuid(),
				.ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}
static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
	struct kernfs_node *kn;
	int ret;

	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				  0, rft->kf_ops, rft, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return 0;
}
static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rftype *rft = of->kn->priv;

	if (rft->seq_show)
		return rft->seq_show(of, m, arg);
	return 0;
}

static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct rftype *rft = of->kn->priv;

	if (rft->write)
		return rft->write(of, buf, nbytes, off);

	return -EINVAL;
}

static const struct kernfs_ops rdtgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= rdtgroup_file_write,
	.seq_show		= rdtgroup_seqfile_show,
};

static const struct kernfs_ops kf_mondata_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.seq_show		= rdtgroup_mondata_show,
};
static bool is_cpu_list(struct kernfs_open_file *of)
{
	struct rftype *rft = of->kn->priv;

	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
}

static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct cpumask *mask;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				mask = &rdtgrp->plr->d->cpu_mask;
				seq_printf(s, is_cpu_list(of) ?
					   "%*pbl\n" : "%*pb\n",
					   cpumask_pr_args(mask));
			}
		} else {
			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
				   cpumask_pr_args(&rdtgrp->cpu_mask));
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
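/*
 * Illustrative only (hypothetical 4-CPU system with CPUs 0-2 in the
 * group): "cpus" uses the "%*pb" bitmask format and would read back as
 * "7", while "cpus_list" uses the "%*pbl" range format and would read
 * back as "0-2".
 */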
/*
 * This is safe against resctrl_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
static void update_cpu_closid_rmid(void *info)
{
	struct rdtgroup *r = info;

	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the current
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	resctrl_sched_in(current);
}

/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
 *
 * Per task closids/rmids must have been set up before calling this function.
 */
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
	on_each_cpu_mask(cpu_mask, update_cpu_closid_rmid, r, 1);
}
static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			  cpumask_var_t tmpmask)
{
	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
	struct list_head *head;

	/* Check whether cpus belong to parent ctrl group */
	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
		return -EINVAL;
	}

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (!cpumask_empty(tmpmask)) {
		/* Give any dropped cpus to parent rdtgroup */
		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, prgrp);
	}

	/*
	 * If we added cpus, remove them from previous group that owned them
	 * and update per-cpu rmid
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			if (crgrp == rdtgrp)
				continue;
			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
				       tmpmask);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	return 0;
}
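/*
 * Worked example of the mask handling in cpus_mon_write() (hypothetical
 * masks): the parent control group owns CPUs 0-3, this monitor group
 * owns CPUs 0-1, and "1-2" is written. CPU 0 is dropped back to the
 * parent, CPU 2 is pulled from any sibling monitor group that owned it,
 * and the group ends up with exactly CPUs 1-2.
 */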
static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
	struct rdtgroup *crgrp;

	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
	/* update the child mon group masks as well */
	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}
static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
	struct rdtgroup *r, *crgrp;
	struct list_head *head;

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (!cpumask_empty(tmpmask)) {
		/* Can't drop from default group */
		if (rdtgrp == &rdtgroup_default) {
			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
			return -EINVAL;
		}

		/* Give any dropped cpus to rdtgroup_default */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, &rdtgroup_default);
	}

	/*
	 * If we added cpus, remove them from previous group and
	 * the prev group's child groups that owned them
	 * and update per-cpu closid/rmid.
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
			if (r == rdtgrp)
				continue;
			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
			if (!cpumask_empty(tmpmask1))
				cpumask_rdtgrp_clear(r, tmpmask1);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	/*
	 * Clear child mon group masks since there is a new parent mask
	 * now and update the rmid for the cpus the child lost.
	 */
	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
		update_closid_rmid(tmpmask, rdtgrp);
		cpumask_clear(&crgrp->cpu_mask);
	}

	return 0;
}
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	cpumask_var_t tmpmask, newmask, tmpmask1;
	struct rdtgroup *rdtgrp;
	int ret;

	if (!buf)
		return -EINVAL;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		return -ENOMEM;
	}
	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		free_cpumask_var(newmask);
		return -ENOMEM;
	}

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto unlock;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	if (is_cpu_list(of))
		ret = cpulist_parse(buf, newmask);
	else
		ret = cpumask_parse(buf, newmask);
	if (ret) {
		rdt_last_cmd_puts("Bad CPU list/mask\n");
		goto unlock;
	}

	/* check that user didn't specify any offline cpus */
	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
	if (!cpumask_empty(tmpmask)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Can only assign online CPUs\n");
		goto unlock;
	}

	if (rdtgrp->type == RDTCTRL_GROUP)
		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
	else if (rdtgrp->type == RDTMON_GROUP)
		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
	else
		ret = -EINVAL;

unlock:
	rdtgroup_kn_unlock(of->kn);
	free_cpumask_var(tmpmask);
	free_cpumask_var(newmask);
	free_cpumask_var(tmpmask1);

	return ret ?: nbytes;
}
/**
 * rdtgroup_remove - the helper to remove resource group safely
 * @rdtgrp: resource group to remove
 *
 * On resource group creation via a mkdir, an extra kernfs_node reference is
 * taken to ensure that the rdtgroup structure remains accessible for the
 * rdtgroup_kn_unlock() calls where it is removed.
 *
 * Drop the extra reference here, then free the rdtgroup structure.
 *
 * Return: void
 */
static void rdtgroup_remove(struct rdtgroup *rdtgrp)
{
	kernfs_put(rdtgrp->kn);
	kfree(rdtgrp);
}
static void _update_task_closid_rmid(void *task)
{
	/*
	 * If the task is still current on this CPU, update PQR_ASSOC MSR.
	 * Otherwise, the MSR is updated when the task is scheduled in.
	 */
	if (task == current)
		resctrl_sched_in(task);
}

static void update_task_closid_rmid(struct task_struct *t)
{
	if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
		smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
	else
		_update_task_closid_rmid(t);
}
static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{
	/* If the task is already in rdtgrp, no need to move the task. */
	if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
	     tsk->rmid == rdtgrp->mon.rmid) ||
	    (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
	     tsk->closid == rdtgrp->mon.parent->closid))
		return 0;

	/*
	 * Set the task's closid/rmid before the PQR_ASSOC MSR can be
	 * updated by them.
	 *
	 * For ctrl_mon groups, move both closid and rmid.
	 * For monitor groups, can move the tasks only from
	 * their parent CTRL group.
	 */
	if (rdtgrp->type == RDTCTRL_GROUP) {
		WRITE_ONCE(tsk->closid, rdtgrp->closid);
		WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
	} else if (rdtgrp->type == RDTMON_GROUP) {
		if (rdtgrp->mon.parent->closid == tsk->closid) {
			WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
		} else {
			rdt_last_cmd_puts("Can't move task to different control group\n");
			return -EINVAL;
		}
	}

	/*
	 * Ensure the task's closid and rmid are written before determining if
	 * the task is current, which will decide if it will be interrupted.
	 * This pairs with the full barrier between the rq->curr update and
	 * resctrl_sched_in() during context switch.
	 */
	smp_mb();

	/*
	 * By now, the task's closid and rmid are set. If the task is current
	 * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
	 * group go into effect. If the task is not current, the MSR will be
	 * updated when the task is scheduled in.
	 */
	update_task_closid_rmid(tsk);

	return 0;
}
static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_alloc_capable &&
		(r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
}

static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_mon_capable &&
		(r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
}

/**
 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
 * @r: Resource group
 *
 * Return: 1 if tasks have been assigned to @r, 0 otherwise
 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
{
	struct task_struct *p, *t;
	int ret = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}
static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{
	const struct cred *tcred = get_task_cred(task);
	const struct cred *cred = current_cred();
	int ret = 0;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid)) {
		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
		ret = -EPERM;
	}

	put_cred(tcred);
	return ret;
}
static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{
	struct task_struct *tsk;
	int ret;

	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			rdt_last_cmd_printf("No task %d\n", pid);
			return -ESRCH;
		}
	} else {
		tsk = current;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = rdtgroup_task_write_permission(tsk, of);
	if (!ret)
		ret = __rdtgroup_move_task(tsk, rdtgrp);

	put_task_struct(tsk);
	return ret;
}
static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	char *pid_str;
	int ret = 0;
	pid_t pid;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	while (buf && buf[0] != '\0' && buf[0] != '\n') {
		pid_str = strim(strsep(&buf, ","));

		if (kstrtoint(pid_str, 0, &pid)) {
			rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str);
			ret = -EINVAL;
			break;
		}

		if (pid < 0) {
			rdt_last_cmd_printf("Invalid pid %d\n", pid);
			ret = -EINVAL;
			break;
		}

		ret = rdtgroup_move_task(pid, rdtgrp, of);
		if (ret) {
			rdt_last_cmd_printf("Error while processing task %d\n", pid);
			break;
		}
	}

unlock:
	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}
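/*
 * Illustrative usage (hypothetical PIDs): several tasks can be moved
 * with one write using the comma-separated format parsed above, e.g.
 *
 *	# echo "1234,5678" > /sys/fs/resctrl/group0/tasks
 *
 * Processing stops at the first PID that fails; tasks earlier in the
 * list will already have been moved at that point.
 */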
static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
	struct task_struct *p, *t;
	pid_t pid;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
			pid = task_pid_vnr(t);
			if (pid)
				seq_printf(s, "%d\n", pid);
		}
	}
	rcu_read_unlock();
}

static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		show_rdt_tasks(rdtgrp, s);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
static int rdtgroup_closid_show(struct kernfs_open_file *of,
				struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		seq_printf(s, "%u\n", rdtgrp->closid);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

static int rdtgroup_rmid_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		seq_printf(s, "%u\n", rdtgrp->mon.rmid);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
#ifdef CONFIG_PROC_CPU_RESCTRL

/*
 * A task can only be part of one resctrl control group and of one monitor
 * group which is associated to that control group.
 *
 * 1)   res:
 *      mon:
 *
 *    resctrl is not available.
 *
 * 2)   res:/
 *      mon:
 *
 *    Task is part of the root resctrl control group, and it is not associated
 *    to any monitor group.
 *
 * 3)  res:/
 *     mon:mon0
 *
 *    Task is part of the root resctrl control group and monitor group mon0.
 *
 * 4)  res:group0
 *     mon:
 *
 *    Task is part of resctrl control group group0, and it is not associated
 *    to any monitor group.
 *
 * 5) res:group0
 *    mon:mon1
 *
 *    Task is part of resctrl control group group0 and monitor group mon1.
 */
int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
		      struct pid *pid, struct task_struct *tsk)
{
	struct rdtgroup *rdtg;
	int ret = 0;

	mutex_lock(&rdtgroup_mutex);

	/* Return empty if resctrl has not been mounted. */
	if (!static_branch_unlikely(&rdt_enable_key)) {
		seq_puts(s, "res:\nmon:\n");
		goto unlock;
	}

	list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) {
		struct rdtgroup *crg;

		/*
		 * Task information is only relevant for shareable
		 * and exclusive groups.
		 */
		if (rdtg->mode != RDT_MODE_SHAREABLE &&
		    rdtg->mode != RDT_MODE_EXCLUSIVE)
			continue;

		if (rdtg->closid != tsk->closid)
			continue;

		seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
			   rdtg->kn->name);
		seq_puts(s, "mon:");
		list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
				    mon.crdtgrp_list) {
			if (tsk->rmid != crg->mon.rmid)
				continue;
			seq_printf(s, "%s", crg->kn->name);
			break;
		}
		seq_putc(s, '\n');
		goto unlock;
	}
	/*
	 * The above search should succeed. Otherwise return
	 * with an error.
	 */
	ret = -ENOENT;
unlock:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}
#endif
static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
				    struct seq_file *seq, void *v)
{
	int len;

	mutex_lock(&rdtgroup_mutex);
	len = seq_buf_used(&last_cmd_status);
	if (len)
		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
	else
		seq_puts(seq, "ok\n");
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}
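/*
 * Illustrative only: last_cmd_status is how the messages queued via
 * rdt_last_cmd_puts()/rdt_last_cmd_printf() reach user space, e.g.
 *
 *	# echo "invalid input" > /sys/fs/resctrl/group0/schemata
 *	# cat /sys/fs/resctrl/info/last_cmd_status
 *
 * reports why the write was rejected; with no pending message the file
 * reads "ok".
 */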
static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;

	seq_printf(seq, "%u\n", s->num_closid);
	return 0;
}

static int rdt_default_ctrl_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", r->default_ctrl);
	return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
	return 0;
}

static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", r->cache.shareable_bits);
	return 0;
}
/**
 * rdt_bit_usage_show - Display current usage of resources
 *
 * A domain is a shared resource that can now be allocated differently. Here
 * we display the current regions of the domain as an annotated bitmask.
 * For each domain of this resource, its allocation bitmask
 * is annotated as below to indicate the current usage of the corresponding bit:
 *   0 - currently unused
 *   X - currently available for sharing and used by software and hardware
 *   H - currently used by hardware only but available for software use
 *   S - currently used and shareable by software only
 *   E - currently used exclusively by one resource group
 *   P - currently pseudo-locked by one resource group
 */
static int rdt_bit_usage_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	/*
	 * Use unsigned long even though only 32 bits are used to ensure
	 * test_bit() is used safely.
	 */
	unsigned long sw_shareable = 0, hw_shareable = 0;
	unsigned long exclusive = 0, pseudo_locked = 0;
	struct rdt_resource *r = s->res;
	struct rdt_domain *dom;
	int i, hwb, swb, excl, psl;
	enum rdtgrp_mode mode;
	bool sep = false;
	u32 ctrl_val;

	mutex_lock(&rdtgroup_mutex);
	hw_shareable = r->cache.shareable_bits;
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_putc(seq, ';');
		sw_shareable = 0;
		exclusive = 0;
		seq_printf(seq, "%d=", dom->id);
		for (i = 0; i < closids_supported(); i++) {
			if (!closid_allocated(i))
				continue;
			ctrl_val = resctrl_arch_get_config(r, dom, i,
							   s->conf_type);
			mode = rdtgroup_mode_by_closid(i);
			switch (mode) {
			case RDT_MODE_SHAREABLE:
				sw_shareable |= ctrl_val;
				break;
			case RDT_MODE_EXCLUSIVE:
				exclusive |= ctrl_val;
				break;
			case RDT_MODE_PSEUDO_LOCKSETUP:
				/*
				 * RDT_MODE_PSEUDO_LOCKSETUP is possible
				 * here but not included since the CBM
				 * associated with this CLOSID in this mode
				 * is not initialized and no task or cpu can be
				 * assigned this CLOSID.
				 */
				break;
			case RDT_MODE_PSEUDO_LOCKED:
			case RDT_NUM_MODES:
				WARN(1,
				     "invalid mode for closid %d\n", i);
				break;
			}
		}
		for (i = r->cache.cbm_len - 1; i >= 0; i--) {
			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
			hwb = test_bit(i, &hw_shareable);
			swb = test_bit(i, &sw_shareable);
			excl = test_bit(i, &exclusive);
			psl = test_bit(i, &pseudo_locked);
			if (hwb && swb)
				seq_putc(seq, 'X');
			else if (hwb && !swb)
				seq_putc(seq, 'H');
			else if (!hwb && swb)
				seq_putc(seq, 'S');
			else if (excl)
				seq_putc(seq, 'E');
			else if (psl)
				seq_putc(seq, 'P');
			else /* Unused bits remain */
				seq_putc(seq, '0');
		}
		sep = true;
	}
	seq_putc(seq, '\n');
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}
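/*
 * Illustrative only (hypothetical 8-bit CBM, two cache domains): a read
 * of bit_usage could produce
 *
 *	0=SSSSSSSS;1=EEEEXXXX
 *
 * i.e. all of domain 0 is in use by software and shareable, while in
 * domain 1 the upper four bits belong to an exclusive group and the
 * lower four are shared between hardware and software.
 */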
static int rdt_min_bw_show(struct kernfs_open_file *of,
			   struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.min_bw);
	return 0;
}

static int rdt_num_rmids_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_rmid);

	return 0;
}

static int rdt_mon_features_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct mon_evt *mevt;

	list_for_each_entry(mevt, &r->evt_list, list) {
		seq_printf(seq, "%s\n", mevt->name);
		if (mevt->configurable)
			seq_printf(seq, "%s_config\n", mevt->name);
	}

	return 0;
}

static int rdt_bw_gran_show(struct kernfs_open_file *of,
			    struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.bw_gran);
	return 0;
}

static int rdt_delay_linear_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.delay_linear);
	return 0;
}

static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{
	seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold);

	return 0;
}

static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
					 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD)
		seq_puts(seq, "per-thread\n");
	else
		seq_puts(seq, "max\n");

	return 0;
}

static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{
	unsigned int bytes;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	if (bytes > resctrl_rmid_realloc_limit)
		return -EINVAL;

	resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes);

	return nbytes;
}
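/*
 * Illustrative only: the threshold is written in bytes and rounded by
 * resctrl_arch_round_mon_val(), so it may read back slightly changed:
 *
 *	# echo 131072 > /sys/fs/resctrl/info/L3_MON/max_threshold_occupancy
 *
 * Values above resctrl_rmid_realloc_limit are rejected with -EINVAL.
 */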
/**
 * rdtgroup_mode_show - Display mode of this resource group
 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));

	rdtgroup_kn_unlock(of->kn);
	return 0;
}

static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
{
	switch (my_type) {
	case CDP_CODE:
		return CDP_DATA;
	case CDP_DATA:
		return CDP_CODE;
	default:
	case CDP_NONE:
		return CDP_NONE;
	}
}
static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of,
					struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks);

	return 0;
}
/**
 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @type: CDP type of @r.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Checks if provided @cbm intended to be used for @closid on domain
 * @d overlaps with any other closids or other hardware usage associated
 * with this domain. If @exclusive is true then only overlaps with
 * resource groups in exclusive mode will be considered. If @exclusive
 * is false then overlaps with any resource group or hardware entities
 * will be considered.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: false if CBM does not overlap, true if it does.
 */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
				    unsigned long cbm, int closid,
				    enum resctrl_conf_type type, bool exclusive)
{
	enum rdtgrp_mode mode;
	unsigned long ctrl_b;
	int i;

	/* Check for any overlap with regions used by hardware directly */
	if (!exclusive) {
		ctrl_b = r->cache.shareable_bits;
		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
			return true;
	}

	/* Check for overlap with other resource groups */
	for (i = 0; i < closids_supported(); i++) {
		ctrl_b = resctrl_arch_get_config(r, d, i, type);
		mode = rdtgroup_mode_by_closid(i);
		if (closid_allocated(i) && i != closid &&
		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
				if (exclusive) {
					if (mode == RDT_MODE_EXCLUSIVE)
						return true;
					continue;
				}
				return true;
			}
		}
	}

	return false;
}
/**
 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
 * @s: Schema for the resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Resources that can be allocated using a CBM can use the CBM to control
 * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
 * for overlap. The overlap test is not limited to the specific resource for
 * which the CBM is intended though - when dealing with CDP resources that
 * share the underlying hardware the overlap check should be performed on
 * the CDP resource sharing the hardware also.
 *
 * Refer to the description of __rdtgroup_cbm_overlaps() for the details of the
 * overlap test.
 *
 * Return: true if CBM overlap detected, false if there is no overlap
 */
bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
			   unsigned long cbm, int closid, bool exclusive)
{
	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
	struct rdt_resource *r = s->res;

	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
				    exclusive))
		return true;

	if (!resctrl_arch_get_cdp_enabled(r->rid))
		return false;
	return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
}
/**
 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
 * @rdtgrp: Resource group identified through its closid.
 *
 * An exclusive resource group implies that there should be no sharing of
 * its allocated resources. At the time this group is considered to be
 * exclusive this test can determine if its current schemata supports this
 * setting by testing for overlap with all other resource groups.
 *
 * Return: true if resource group can be exclusive, false if there is overlap
 * with allocations of other resource groups and thus this resource group
 * cannot be exclusive.
 */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
	int closid = rdtgrp->closid;
	struct resctrl_schema *s;
	struct rdt_resource *r;
	bool has_cache = false;
	struct rdt_domain *d;
	u32 ctrl;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)
			continue;
		has_cache = true;
		list_for_each_entry(d, &r->domains, list) {
			ctrl = resctrl_arch_get_config(r, d, closid,
						       s->conf_type);
			if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) {
				rdt_last_cmd_puts("Schemata overlaps\n");
				return false;
			}
		}
	}

	if (!has_cache) {
		rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
		return false;
	}

	return true;
}
/**
 * rdtgroup_mode_write - Modify the resource group's mode
 */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	enum rdtgrp_mode mode;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	rdt_last_cmd_clear();

	mode = rdtgrp->mode;

	if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
	    (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
	    (!strcmp(buf, "pseudo-locksetup") &&
	     mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
	    (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
		goto out;

	if (mode == RDT_MODE_PSEUDO_LOCKED) {
		rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(buf, "shareable")) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_SHAREABLE;
	} else if (!strcmp(buf, "exclusive")) {
		if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
			ret = -EINVAL;
			goto out;
		}
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_EXCLUSIVE;
	} else if (!strcmp(buf, "pseudo-locksetup")) {
		ret = rdtgroup_locksetup_enter(rdtgrp);
		if (ret)
			goto out;
		rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
	} else {
		rdt_last_cmd_puts("Unknown or unsupported mode\n");
		ret = -EINVAL;
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}
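/*
 * Illustrative usage: a group is switched to exclusive mode only if
 * rdtgroup_mode_test_exclusive() above finds no overlap, e.g.
 *
 *	# echo exclusive > /sys/fs/resctrl/group0/mode
 *	# cat /sys/fs/resctrl/group0/mode
 *	exclusive
 */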
/**
 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
 * @r: RDT resource to which @d belongs.
 * @d: RDT domain instance.
 * @cbm: bitmask for which the size should be computed.
 *
 * The bitmask provided associated with the RDT domain instance @d will be
 * translated into how many bytes it represents. The size in bytes is
 * computed by first dividing the total cache size by the CBM length to
 * determine how many bytes each bit in the bitmask represents. The result
 * is multiplied with the number of bits set in the bitmask.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
				  struct rdt_domain *d, unsigned long cbm)
{
	struct cpu_cacheinfo *ci;
	unsigned int size = 0;
	int num_b, i;

	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == r->cache_level) {
			size = ci->info_list[i].size / r->cache.cbm_len * num_b;
			break;
		}
	}

	return size;
}
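/*
 * Worked example of the computation above (hypothetical cache): for a
 * 16 MB cache with a 16-bit CBM, each bit represents 16 MB / 16 = 1 MB,
 * so a CBM of 0x00f0 (4 bits set) translates to 4 * 1 MB = 4 MB.
 */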
/**
 * rdtgroup_size_show - Display size in bytes of allocated regions
 *
 * The "size" file mirrors the layout of the "schemata" file, printing the
 * size in bytes of each region instead of the capacity bitmask.
 */
static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	enum resctrl_conf_type type;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	struct rdt_domain *d;
	unsigned int size;
	int ret = 0;
	u32 closid;
	bool sep;
	u32 ctrl;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		if (!rdtgrp->plr->d) {
			rdt_last_cmd_clear();
			rdt_last_cmd_puts("Cache domain offline\n");
			ret = -ENODEV;
		} else {
			seq_printf(s, "%*s:", max_name_width,
				   rdtgrp->plr->s->name);
			size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res,
						    rdtgrp->plr->d,
						    rdtgrp->plr->cbm);
			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
		}
		goto out;
	}

	closid = rdtgrp->closid;

	list_for_each_entry(schema, &resctrl_schema_all, list) {
		r = schema->res;
		type = schema->conf_type;
		sep = false;
		seq_printf(s, "%*s:", max_name_width, schema->name);
		list_for_each_entry(d, &r->domains, list) {
			if (sep)
				seq_putc(s, ';');
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
				if (is_mba_sc(r))
					ctrl = d->mbps_val[closid];
				else
					ctrl = resctrl_arch_get_config(r, d,
								       closid,
								       type);
				if (r->rid == RDT_RESOURCE_MBA ||
				    r->rid == RDT_RESOURCE_SMBA)
					size = ctrl;
				else
					size = rdtgroup_cbm_to_size(r, d, ctrl);
			}
			seq_printf(s, "%d=%u", d->id, size);
			sep = true;
		}
		seq_putc(s, '\n');
	}

out:
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
struct mon_config_info {
	u32 evtid;
	u32 mon_config;
};

#define INVALID_CONFIG_INDEX   UINT_MAX

/**
 * mon_event_config_index_get - get the hardware index for the
 *				configurable event
 * @evtid: event id.
 *
 * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID
 *	   1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID
 *	   INVALID_CONFIG_INDEX for invalid evtid
 */
static inline unsigned int mon_event_config_index_get(u32 evtid)
{
	switch (evtid) {
	case QOS_L3_MBM_TOTAL_EVENT_ID:
		return 0;
	case QOS_L3_MBM_LOCAL_EVENT_ID:
		return 1;
	default:
		/* Should never reach here */
		return INVALID_CONFIG_INDEX;
	}
}

static void mon_event_config_read(void *info)
{
	struct mon_config_info *mon_info = info;
	unsigned int index;
	u64 msrval;

	index = mon_event_config_index_get(mon_info->evtid);
	if (index == INVALID_CONFIG_INDEX) {
		pr_warn_once("Invalid event id %d\n", mon_info->evtid);
		return;
	}
	rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval);

	/* Report only the valid event configuration bits */
	mon_info->mon_config = msrval & MAX_EVT_CONFIG_BITS;
}

static void mondata_config_read(struct rdt_domain *d, struct mon_config_info *mon_info)
{
	smp_call_function_any(&d->cpu_mask, mon_event_config_read, mon_info, 1);
}
static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid)
{
	struct mon_config_info mon_info = {0};
	struct rdt_domain *dom;
	bool sep = false;

	mutex_lock(&rdtgroup_mutex);

	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_puts(s, ";");

		memset(&mon_info, 0, sizeof(struct mon_config_info));
		mon_info.evtid = evtid;
		mondata_config_read(dom, &mon_info);

		seq_printf(s, "%d=0x%02x", dom->id, mon_info.mon_config);
		sep = true;
	}
	seq_puts(s, "\n");

	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

static int mbm_total_bytes_config_show(struct kernfs_open_file *of,
				       struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID);

	return 0;
}

static int mbm_local_bytes_config_show(struct kernfs_open_file *of,
				       struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID);

	return 0;
}
static void mon_event_config_write(void *info)
{
	struct mon_config_info *mon_info = info;
	unsigned int index;

	index = mon_event_config_index_get(mon_info->evtid);
	if (index == INVALID_CONFIG_INDEX) {
		pr_warn_once("Invalid event id %d\n", mon_info->evtid);
		return;
	}
	wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0);
}

static void mbm_config_write_domain(struct rdt_resource *r,
				    struct rdt_domain *d, u32 evtid, u32 val)
{
	struct mon_config_info mon_info = {0};

	/*
	 * Read the current config value first. If both are the same then
	 * no need to write it again.
	 */
	mon_info.evtid = evtid;
	mondata_config_read(d, &mon_info);
	if (mon_info.mon_config == val)
		return;

	mon_info.mon_config = val;

	/*
	 * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the
	 * domain. The MSRs offset from MSR_IA32_EVT_CFG_BASE
	 * are scoped at the domain level. Writing any of these MSRs
	 * on one CPU is observed by all the CPUs in the domain.
	 */
	smp_call_function_any(&d->cpu_mask, mon_event_config_write,
			      &mon_info, 1);

	/*
	 * When an Event Configuration is changed, the bandwidth counters
	 * for all RMIDs and Events will be cleared by the hardware. The
	 * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for
	 * every RMID on the next read to any event for every RMID.
	 * Subsequent reads will have MSR_IA32_QM_CTR.Unavailable (bit 62)
	 * cleared while it is tracked by the hardware. Clear the
	 * mbm_local and mbm_total counts for all the RMIDs.
	 */
	resctrl_arch_reset_rmid_all(r, d);
}
static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	char *dom_str = NULL, *id_str;
	unsigned long dom_id, val;
	struct rdt_domain *d;

next:
	if (!tok || tok[0] == '\0')
		return 0;

	/* Start processing the strings for each domain */
	dom_str = strim(strsep(&tok, ";"));
	id_str = strsep(&dom_str, "=");

	if (!id_str || kstrtoul(id_str, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n");
		return -EINVAL;
	}

	if (!dom_str || kstrtoul(dom_str, 16, &val)) {
		rdt_last_cmd_puts("Non-numeric event configuration value\n");
		return -EINVAL;
	}

	/* Value from user cannot be more than the supported set of events */
	if ((val & hw_res->mbm_cfg_mask) != val) {
		rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n",
				    hw_res->mbm_cfg_mask);
		return -EINVAL;
	}

	list_for_each_entry(d, &r->domains, list) {
		if (d->id == dom_id) {
			mbm_config_write_domain(r, d, evtid, val);
			goto next;
		}
	}

	return -EINVAL;
}

static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct rdt_resource *r = of->kn->parent->priv;
	int ret;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;

	mutex_lock(&rdtgroup_mutex);

	rdt_last_cmd_clear();

	buf[nbytes - 1] = '\0';

	ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID);

	mutex_unlock(&rdtgroup_mutex);

	return ret ?: nbytes;
}

static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct rdt_resource *r = of->kn->parent->priv;
	int ret;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;

	mutex_lock(&rdtgroup_mutex);

	rdt_last_cmd_clear();

	buf[nbytes - 1] = '\0';

	ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID);

	mutex_unlock(&rdtgroup_mutex);

	return ret ?: nbytes;
}
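/*
 * Illustrative only (hypothetical configuration value): both files
 * accept "<domain>=<hex value>" pairs separated by ';', as parsed by
 * mon_config_write() above, e.g.
 *
 *	# echo "0=0x30;1=0x30" > /sys/fs/resctrl/info/L3_MON/mbm_total_bytes_config
 *
 * Values with bits outside hw_res->mbm_cfg_mask are rejected.
 */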
/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
	{
		.name		= "last_cmd_status",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_last_cmd_status_show,
		.fflags		= RFTYPE_TOP_INFO,
	},
	{
		.name		= "num_closids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_closids_show,
		.fflags		= RFTYPE_CTRL_INFO,
	},
	{
		.name		= "mon_features",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_mon_features_show,
		.fflags		= RFTYPE_MON_INFO,
	},
	{
		.name		= "num_rmids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_rmids_show,
		.fflags		= RFTYPE_MON_INFO,
	},
	{
		.name		= "cbm_mask",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_default_ctrl_show,
		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_cbm_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_cbm_bits_show,
		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "shareable_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_shareable_bits_show,
		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "bit_usage",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bit_usage_show,
		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_bandwidth",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_bw_show,
		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "bandwidth_gran",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bw_gran_show,
		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "delay_linear",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_delay_linear_show,
		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_MB,
	},
	/*
	 * It is platform specific which (if any) capabilities are provided
	 * by thread_throttle_mode. Defer "fflags" initialization to platform
	 * discovery.
	 */
	{
		.name		= "thread_throttle_mode",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_thread_throttle_mode_show,
	},
	{
		.name		= "max_threshold_occupancy",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= max_threshold_occ_write,
		.seq_show	= max_threshold_occ_show,
		.fflags		= RFTYPE_MON_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "mbm_total_bytes_config",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= mbm_total_bytes_config_show,
		.write		= mbm_total_bytes_config_write,
	},
	{
		.name		= "mbm_local_bytes_config",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= mbm_local_bytes_config_show,
		.write		= mbm_local_bytes_config_write,
	},
	{
		.name		= "cpus",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "cpus_list",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.flags		= RFTYPE_FLAGS_CPUS_LIST,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "tasks",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_tasks_write,
		.seq_show	= rdtgroup_tasks_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "mon_hw_id",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdtgroup_rmid_show,
		.fflags		= RFTYPE_MON_BASE | RFTYPE_DEBUG,
	},
	{
		.name		= "schemata",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_schemata_write,
		.seq_show	= rdtgroup_schemata_show,
		.fflags		= RFTYPE_CTRL_BASE,
	},
	{
		.name		= "mode",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_mode_write,
		.seq_show	= rdtgroup_mode_show,
		.fflags		= RFTYPE_CTRL_BASE,
	},
	{
		.name		= "size",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdtgroup_size_show,
		.fflags		= RFTYPE_CTRL_BASE,
	},
	{
		.name		= "sparse_masks",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_has_sparse_bitmasks_show,
		.fflags		= RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "ctrl_hw_id",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdtgroup_closid_show,
		.fflags		= RFTYPE_CTRL_BASE | RFTYPE_DEBUG,
	},
};
static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
{
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	lockdep_assert_held(&rdtgroup_mutex);

	if (resctrl_debug)
		fflags |= RFTYPE_DEBUG;

	for (rft = rfts; rft < rfts + len; rft++) {
		if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) {
			ret = rdtgroup_add_file(kn, rft);
			if (ret)
				goto error;
		}
	}

	return 0;
error:
	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
	while (--rft >= rfts) {
		if ((fflags & rft->fflags) == rft->fflags)
			kernfs_remove_by_name(kn, rft->name);
	}
	return ret;
}
static struct rftype *rdtgroup_get_rftype_by_name(const char *name)
{
	struct rftype *rfts, *rft;
	int len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);
	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			return rft;
	}

	return NULL;
}

void __init thread_throttle_mode_init(void)
{
	struct rftype *rft;

	rft = rdtgroup_get_rftype_by_name("thread_throttle_mode");
	if (!rft)
		return;

	rft->fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB;
}

void __init mbm_config_rftype_init(const char *config)
{
	struct rftype *rft;

	rft = rdtgroup_get_rftype_by_name(config);
	if (rft)
		rft->fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE;
}
/**
 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 *
 * The permissions of named resctrl file, directory, or link are modified
 * to not allow read, write, or execute by any user.
 *
 * WARNING: This function is intended to communicate to the user that the
 * resctrl file has been locked down - that it is not relevant to the
 * particular state the system finds itself in. It should not be relied
 * on to protect from user access because after the file's permissions
 * are restricted the user can still change the permissions using chmod
 * from the command line.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn;
	int ret = 0;

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		iattr.ia_mode = S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode = S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode = S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}
/**
 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 * @mask: Mask of permissions that should be restored
 *
 * Restore the permissions of the named file. If @name is a directory the
 * permissions of its parent will be used.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
			     umode_t mask)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn, *parent;
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			iattr.ia_mode = rft->mode & mask;
	}

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		parent = kernfs_get_parent(kn);
		if (parent) {
			iattr.ia_mode |= parent->mode;
			kernfs_put(parent);
		}
		iattr.ia_mode |= S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode |= S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode |= S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}
static int rdtgroup_mkdir_info_resdir(void *priv, char *name,
				      unsigned long fflags)
{
	struct kernfs_node *kn_subdir;
	int ret;

	kn_subdir = kernfs_create_dir(kn_info, name,
				      kn_info->mode, priv);
	if (IS_ERR(kn_subdir))
		return PTR_ERR(kn_subdir);

	ret = rdtgroup_kn_set_ugid(kn_subdir);
	if (ret)
		return ret;

	ret = rdtgroup_add_files(kn_subdir, fflags);
	if (!ret)
		kernfs_activate(kn_subdir);

	return ret;
}
static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
	struct resctrl_schema *s;
	struct rdt_resource *r;
	unsigned long fflags;
	char name[32];
	int ret;

	/* create the directory */
	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
	if (IS_ERR(kn_info))
		return PTR_ERR(kn_info);

	ret = rdtgroup_add_files(kn_info, RFTYPE_TOP_INFO);
	if (ret)
		goto out_destroy;

	/* loop over enabled controls, these are all alloc_capable */
	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		fflags = r->fflags | RFTYPE_CTRL_INFO;
		ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags);
		if (ret)
			goto out_destroy;
	}

	for_each_mon_capable_rdt_resource(r) {
		fflags = r->fflags | RFTYPE_MON_INFO;
		sprintf(name, "%s_MON", r->name);
		ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
		if (ret)
			goto out_destroy;
	}

	ret = rdtgroup_kn_set_ugid(kn_info);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn_info);

	return 0;

out_destroy:
	kernfs_remove(kn_info);
	return ret;
}
static int
mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
		    char *name, struct kernfs_node **dest_kn)
{
	struct kernfs_node *kn;
	int ret;

	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	if (dest_kn)
		*dest_kn = kn;

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn);

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}
static void l3_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}

static void l2_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}

static inline bool is_mba_linear(void)
{
	return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear;
}
static int set_cache_qos_cfg(int level, bool enable)
{
	void (*update)(void *arg);
	struct rdt_resource *r_l;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;

	if (level == RDT_RESOURCE_L3)
		update = l3_qos_cfg_update;
	else if (level == RDT_RESOURCE_L2)
		update = l2_qos_cfg_update;
	else
		return -EINVAL;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	r_l = &rdt_resources_all[level].r_resctrl;
	list_for_each_entry(d, &r_l->domains, list) {
		if (r_l->cache.arch_has_per_cpu_cfg)
			/* Pick all the CPUs in the domain instance */
			for_each_cpu(cpu, &d->cpu_mask)
				cpumask_set_cpu(cpu, cpu_mask);
		else
			/* Pick one CPU from each domain instance to update MSR */
			cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
	}

	/* Update QOS_CFG MSR on all the CPUs in cpu_mask */
	on_each_cpu_mask(cpu_mask, update, &enable, 1);

	free_cpumask_var(cpu_mask);

	return 0;
}
/* Restore the qos cfg state when a domain comes online */
void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	if (!r->cdp_capable)
		return;

	if (r->rid == RDT_RESOURCE_L2)
		l2_qos_cfg_update(&hw_res->cdp_enabled);

	if (r->rid == RDT_RESOURCE_L3)
		l3_qos_cfg_update(&hw_res->cdp_enabled);
}
static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_domain *d)
{
	u32 num_closid = resctrl_arch_get_num_closid(r);
	int cpu = cpumask_any(&d->cpu_mask);
	int i;

	d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val),
				   GFP_KERNEL, cpu_to_node(cpu));
	if (!d->mbps_val)
		return -ENOMEM;

	for (i = 0; i < num_closid; i++)
		d->mbps_val[i] = MBA_MAX_MBPS;

	return 0;
}

static void mba_sc_domain_destroy(struct rdt_resource *r,
				  struct rdt_domain *d)
{
	kfree(d->mbps_val);
	d->mbps_val = NULL;
}

/*
 * MBA software controller is supported only if
 * MBM is supported and MBA is in linear scale.
 */
static bool supports_mba_mbps(void)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;

	return (is_mbm_local_enabled() &&
		r->alloc_capable && is_mba_linear());
}

/*
 * Enable or disable the MBA software controller
 * which helps user specify bandwidth in MBps.
 */
static int set_mba_sc(bool mba_sc)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
	u32 num_closid = resctrl_arch_get_num_closid(r);
	struct rdt_domain *d;
	int i;

	if (!supports_mba_mbps() || mba_sc == is_mba_sc(r))
		return -EINVAL;

	r->membw.mba_sc = mba_sc;

	list_for_each_entry(d, &r->domains, list) {
		for (i = 0; i < num_closid; i++)
			d->mbps_val[i] = MBA_MAX_MBPS;
	}

	return 0;
}
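/*
 * Illustrative only: with the software controller enabled (the
 * "mba_MBps" mount option), each CLOSID's bandwidth value starts at the
 * unconstrained MBA_MAX_MBPS set above and user space then specifies
 * bandwidth in MBps instead of the hardware's native granularity.
 */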
static int cdp_enable(int level)
{
	struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl;
	int ret;

	if (!r_l->alloc_capable)
		return -EINVAL;

	ret = set_cache_qos_cfg(level, true);
	if (!ret)
		rdt_resources_all[level].cdp_enabled = true;

	return ret;
}

static void cdp_disable(int level)
{
	struct rdt_hw_resource *r_hw = &rdt_resources_all[level];

	if (r_hw->cdp_enabled) {
		set_cache_qos_cfg(level, false);
		r_hw->cdp_enabled = false;
	}
}

int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[l];

	if (!hw_res->r_resctrl.cdp_capable)
		return -EINVAL;

	if (enable)
		return cdp_enable(l);

	cdp_disable(l);

	return 0;
}
/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus when looking for the rdtgroup
 * structure for a kernfs node we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field, otherwise we have a file, and need only look to the parent
 * to find the rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
	if (kernfs_type(kn) == KERNFS_DIR) {
		/*
		 * All the resource directories use "kn->priv"
		 * to point to the "struct rdtgroup" for the
		 * resource. "info" and its subdirectories don't
		 * have rdtgroup structures, so return NULL here.
		 */
		if (kn == kn_info || kn->parent == kn_info)
			return NULL;
		else
			return kn->priv;
	} else {
		return kn->parent->priv;
	}
}
static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn)
{
	atomic_inc(&rdtgrp->waitcount);
	kernfs_break_active_protection(kn);
}

static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn)
{
	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);
		kernfs_unbreak_active_protection(kn);
		rdtgroup_remove(rdtgrp);
	} else {
		kernfs_unbreak_active_protection(kn);
	}
}

struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return NULL;

	rdtgroup_kn_get(rdtgrp, kn);

	mutex_lock(&rdtgroup_mutex);

	/* Was this group deleted while we waited? */
	if (rdtgrp->flags & RDT_DELETED)
		return NULL;

	return rdtgrp;
}

void rdtgroup_kn_unlock(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return;

	mutex_unlock(&rdtgroup_mutex);
	rdtgroup_kn_put(rdtgrp, kn);
}
static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **mon_data_kn);

static void rdt_disable_ctx(void)
{
	resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
	resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
	set_mba_sc(false);

	resctrl_debug = false;
}

static int rdt_enable_ctx(struct rdt_fs_context *ctx)
{
	int ret = 0;

	if (ctx->enable_cdpl2) {
		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true);
		if (ret)
			goto out_done;
	}

	if (ctx->enable_cdpl3) {
		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true);
		if (ret)
			goto out_cdpl2;
	}

	if (ctx->enable_mba_mbps) {
		ret = set_mba_sc(true);
		if (ret)
			goto out_cdpl3;
	}

	if (ctx->enable_debug)
		resctrl_debug = true;

	return 0;

out_cdpl3:
	resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
out_cdpl2:
	resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
out_done:
	return ret;
}
static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type)
{
	struct resctrl_schema *s;
	const char *suffix = "";
	int ret, cl;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->res = r;
	s->num_closid = resctrl_arch_get_num_closid(r);
	if (resctrl_arch_get_cdp_enabled(r->rid))
		s->num_closid /= 2;

	s->conf_type = type;
	switch (type) {
	case CDP_CODE:
		suffix = "CODE";
		break;
	case CDP_DATA:
		suffix = "DATA";
		break;
	case CDP_NONE:
		suffix = "";
		break;
	}

	ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix);
	if (ret >= sizeof(s->name)) {
		kfree(s);
		return -EINVAL;
	}

	cl = strlen(s->name);

	/*
	 * If CDP is supported by this resource, but not enabled,
	 * include the suffix. This ensures the tabular format of the
	 * schemata file does not change between mounts of the filesystem.
	 */
	if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid))
		cl += 4;

	if (cl > max_name_width)
		max_name_width = cl;

	INIT_LIST_HEAD(&s->list);
	list_add(&s->list, &resctrl_schema_all);

	return 0;
}
static int schemata_list_create(void)
{
	struct rdt_resource *r;
	int ret = 0;

	for_each_alloc_capable_rdt_resource(r) {
		if (resctrl_arch_get_cdp_enabled(r->rid)) {
			ret = schemata_list_add(r, CDP_CODE);
			if (ret)
				break;

			ret = schemata_list_add(r, CDP_DATA);
		} else {
			ret = schemata_list_add(r, CDP_NONE);
		}

		if (ret)
			break;
	}

	return ret;
}

static void schemata_list_destroy(void)
{
	struct resctrl_schema *s, *tmp;

	list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) {
		list_del(&s->list);
		kfree(s);
	}
}
2569 static int rdt_get_tree(struct fs_context *fc)
2571 struct rdt_fs_context *ctx = rdt_fc2context(fc);
2572 unsigned long flags = RFTYPE_CTRL_BASE;
2573 struct rdt_domain *dom;
2574 struct rdt_resource *r;
2578 mutex_lock(&rdtgroup_mutex);
2580 * resctrl file system can only be mounted once.
2582 if (static_branch_unlikely(&rdt_enable_key)) {
2587 ret = rdtgroup_setup_root(ctx);
2591 ret = rdt_enable_ctx(ctx);
2595 ret = schemata_list_create();
2597 schemata_list_destroy();
2603 if (rdt_mon_capable)
2604 flags |= RFTYPE_MON;
2606 ret = rdtgroup_add_files(rdtgroup_default.kn, flags);
2608 goto out_schemata_free;
2610 kernfs_activate(rdtgroup_default.kn);
2612 ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
2614 goto out_schemata_free;
2616 if (rdt_mon_capable) {
2617 ret = mongroup_create_dir(rdtgroup_default.kn,
2618 &rdtgroup_default, "mon_groups",
2623 ret = mkdir_mondata_all(rdtgroup_default.kn,
2624 &rdtgroup_default, &kn_mondata);
2627 rdtgroup_default.mon.mon_data_kn = kn_mondata;
2630 ret = rdt_pseudo_lock_init();
2634 ret = kernfs_get_tree(fc);
2638 if (rdt_alloc_capable)
2639 static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
2640 if (rdt_mon_capable)
2641 static_branch_enable_cpuslocked(&rdt_mon_enable_key);
2643 if (rdt_alloc_capable || rdt_mon_capable)
2644 static_branch_enable_cpuslocked(&rdt_enable_key);
2646 if (is_mbm_enabled()) {
2647 r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
2648 list_for_each_entry(dom, &r->domains, list)
2649 mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
2655 rdt_pseudo_lock_release();
2657 if (rdt_mon_capable)
2658 kernfs_remove(kn_mondata);
2660 if (rdt_mon_capable)
2661 kernfs_remove(kn_mongrp);
2663 kernfs_remove(kn_info);
2665 schemata_list_destroy();
2669 rdtgroup_destroy_root();
2671 rdt_last_cmd_clear();
2672 mutex_unlock(&rdtgroup_mutex);
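/*
 * resctrl mount options, e.g.:
 *	# mount -t resctrl resctrl -o cdp,mba_MBps /sys/fs/resctrl
 */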
2685 static const struct fs_parameter_spec rdt_fs_parameters[] = {
2686 fsparam_flag("cdp", Opt_cdp),
2687 fsparam_flag("cdpl2", Opt_cdpl2),
2688 fsparam_flag("mba_MBps", Opt_mba_mbps),
2689 fsparam_flag("debug", Opt_debug),
2693 static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
2695 struct rdt_fs_context *ctx = rdt_fc2context(fc);
2696 struct fs_parse_result result;
2699 opt = fs_parse(fc, rdt_fs_parameters, param, &result);
2705 ctx->enable_cdpl3 = true;
2708 ctx->enable_cdpl2 = true;
2711 if (!supports_mba_mbps())
2713 ctx->enable_mba_mbps = true;
2716 ctx->enable_debug = true;
2723 static void rdt_fs_context_free(struct fs_context *fc)
2725 struct rdt_fs_context *ctx = rdt_fc2context(fc);
2727 kernfs_free_fs_context(fc);
2731 static const struct fs_context_operations rdt_fs_context_ops = {
2732 .free = rdt_fs_context_free,
2733 .parse_param = rdt_parse_param,
2734 .get_tree = rdt_get_tree,
2737 static int rdt_init_fs_context(struct fs_context *fc)
2739 struct rdt_fs_context *ctx;
2741 ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL);
2745 ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
2746 fc->fs_private = &ctx->kfc;
2747 fc->ops = &rdt_fs_context_ops;
2748 put_user_ns(fc->user_ns);
2749 fc->user_ns = get_user_ns(&init_user_ns);
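/*
 * Reset every CLOSID's control value for @r to the default and push the
 * result to hardware on one CPU per domain.
 */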
2754 static int reset_all_ctrls(struct rdt_resource *r)
2756 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
2757 struct rdt_hw_domain *hw_dom;
2758 struct msr_param msr_param;
2759 cpumask_var_t cpu_mask;
2760 struct rdt_domain *d;
2763 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
2768 msr_param.high = hw_res->num_closid;
2771 * Disable resource control for this resource by setting all
2772 * CBMs in all domains to the maximum mask value. Pick one CPU
2773 * from each domain to update the MSRs below.
2775 list_for_each_entry(d, &r->domains, list) {
2776 hw_dom = resctrl_to_arch_dom(d);
2777 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
2779 for (i = 0; i < hw_res->num_closid; i++)
2780 hw_dom->ctrl_val[i] = r->default_ctrl;
2783 /* Update CBM on all the CPUs in cpu_mask */
2784 on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);
2786 free_cpumask_var(cpu_mask);
2792 * Move tasks from one to the other group. If @from is NULL, then all tasks
2793 * in the system are moved unconditionally (used for teardown).
2795 * If @mask is not NULL the cpus on which moved tasks are running are set
2796 * in that mask so the update smp function call is restricted to affected
 * CPUs.
2799 static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
2800 struct cpumask *mask)
2802 struct task_struct *p, *t;
2804 read_lock(&tasklist_lock);
2805 for_each_process_thread(p, t) {
2806 if (!from || is_closid_match(t, from) ||
2807 is_rmid_match(t, from)) {
2808 WRITE_ONCE(t->closid, to->closid);
2809 WRITE_ONCE(t->rmid, to->mon.rmid);
2812 * Order the closid/rmid stores above before the loads
2813 * in task_curr(). This pairs with the full barrier
2814 * between the rq->curr update and resctrl_sched_in()
2815			 * during context switch.
			 */
			smp_mb();
2820 * If the task is on a CPU, set the CPU in the mask.
2821 * The detection is inaccurate as tasks might move or
2822 * schedule before the smp function call takes place.
2823 * In such a case the function call is pointless, but
2824 * there is no other side effect.
2826 if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
2827 cpumask_set_cpu(task_cpu(t), mask);
2830 read_unlock(&tasklist_lock);
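/*
 * Free the RMIDs of all MON subgroups of @rdtgrp and remove them from
 * the parent's list. Groups with a pending waiter are only flagged
 * RDT_DELETED; the last rdtgroup_kn_put() frees them.
 */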
2833 static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
2835 struct rdtgroup *sentry, *stmp;
2836 struct list_head *head;
2838 head = &rdtgrp->mon.crdtgrp_list;
2839 list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
2840 free_rmid(sentry->mon.rmid);
2841 list_del(&sentry->mon.crdtgrp_list);
2843		if (atomic_read(&sentry->waitcount) != 0)
2844			sentry->flags = RDT_DELETED;
		else
2846			rdtgroup_remove(sentry);
2851 * Forcibly remove all subdirectories under the root.
2853 static void rmdir_all_sub(void)
2855 struct rdtgroup *rdtgrp, *tmp;
2857 /* Move all tasks to the default resource group */
2858 rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
2860 list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
2861 /* Free any child rmids */
2862 free_all_child_rdtgrp(rdtgrp);
2864 /* Remove each rdtgroup other than root */
2865 if (rdtgrp == &rdtgroup_default)
2868 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
2869 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
2870 rdtgroup_pseudo_lock_remove(rdtgrp);
2873 * Give any CPUs back to the default group. We cannot copy
2874 * cpu_online_mask because a CPU might have executed the
2875 * offline callback already, but is still marked online.
2877 cpumask_or(&rdtgroup_default.cpu_mask,
2878 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
2880 free_rmid(rdtgrp->mon.rmid);
2882 kernfs_remove(rdtgrp->kn);
2883 list_del(&rdtgrp->rdtgroup_list);
2885		if (atomic_read(&rdtgrp->waitcount) != 0)
2886			rdtgrp->flags = RDT_DELETED;
		else
2888			rdtgroup_remove(rdtgrp);
2890 /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
2891 update_closid_rmid(cpu_online_mask, &rdtgroup_default);
2893 kernfs_remove(kn_info);
2894 kernfs_remove(kn_mongrp);
2895 kernfs_remove(kn_mondata);
2898 static void rdt_kill_sb(struct super_block *sb)
2900 struct rdt_resource *r;
2903 mutex_lock(&rdtgroup_mutex);
2907	/* Put everything back to default values. */
2908 for_each_alloc_capable_rdt_resource(r)
2911 rdt_pseudo_lock_release();
2912 rdtgroup_default.mode = RDT_MODE_SHAREABLE;
2913 schemata_list_destroy();
2914 rdtgroup_destroy_root();
2915 static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
2916 static_branch_disable_cpuslocked(&rdt_mon_enable_key);
2917 static_branch_disable_cpuslocked(&rdt_enable_key);
2919 mutex_unlock(&rdtgroup_mutex);
2923 static struct file_system_type rdt_fs_type = {
2925 .init_fs_context = rdt_init_fs_context,
2926 .parameters = rdt_fs_parameters,
2927 .kill_sb = rdt_kill_sb,
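/* Create a read-only monitoring file under @parent_kn backed by kf_mondata_ops. */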
2930 static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
2933 struct kernfs_node *kn;
2936 kn = __kernfs_create_file(parent_kn, name, 0444,
2937 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
2938 &kf_mondata_ops, priv, NULL, NULL);
2942 ret = rdtgroup_kn_set_ugid(kn);
2952 * Remove all subdirectories of mon_data of ctrl_mon groups
2953 * and monitor groups with the given domain id.
2955 static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
2956 unsigned int dom_id)
2958 struct rdtgroup *prgrp, *crgrp;
2961 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
2962 sprintf(name, "mon_%s_%02d", r->name, dom_id);
2963 kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);
2965 list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
2966 kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
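/*
 * Create the mon_<resource>_<domain> directory for one domain and
 * populate it with one file per monitoring event on @r.
 */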
2970 static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
2971 struct rdt_domain *d,
2972 struct rdt_resource *r, struct rdtgroup *prgrp)
2974 union mon_data_bits priv;
2975 struct kernfs_node *kn;
2976 struct mon_evt *mevt;
2977 struct rmid_read rr;
2981 sprintf(name, "mon_%s_%02d", r->name, d->id);
2982 /* create the directory */
2983 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
2987 ret = rdtgroup_kn_set_ugid(kn);
2991 if (WARN_ON(list_empty(&r->evt_list))) {
2996 priv.u.rid = r->rid;
2997 priv.u.domid = d->id;
2998 list_for_each_entry(mevt, &r->evt_list, list) {
2999 priv.u.evtid = mevt->evtid;
3000 ret = mon_addfile(kn, mevt->name, priv.priv);
3004 if (is_mbm_event(mevt->evtid))
3005 mon_event_read(&rr, r, d, prgrp, mevt->evtid, true);
3007 kernfs_activate(kn);
3016 * Add all subdirectories of mon_data for "ctrl_mon" groups
3017 * and "monitor" groups with given domain id.
3019 static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
3020 struct rdt_domain *d)
3022 struct kernfs_node *parent_kn;
3023 struct rdtgroup *prgrp, *crgrp;
3024 struct list_head *head;
3026 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
3027 parent_kn = prgrp->mon.mon_data_kn;
3028 mkdir_mondata_subdir(parent_kn, d, r, prgrp);
3030 head = &prgrp->mon.crdtgrp_list;
3031 list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
3032 parent_kn = crgrp->mon.mon_data_kn;
3033 mkdir_mondata_subdir(parent_kn, d, r, crgrp);
3038 static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
3039 struct rdt_resource *r,
3040 struct rdtgroup *prgrp)
3042 struct rdt_domain *dom;
3045 list_for_each_entry(dom, &r->domains, list) {
3046 ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
3055 * This creates a directory mon_data which contains the monitored data.
3057 * mon_data has one directory for each domain, named in the
3058 * format mon_<domain_name>_<domain_id>. For example, a mon_data
3059 * directory with L3 domains looks as below:
 *	mon_data:
 *		mon_L3_00
 *		mon_L3_01
 *		mon_L3_02
 *
3066 * Each domain directory has one file per event:
 *	llc_occupancy
 *	mbm_total_bytes
 *	mbm_local_bytes
3071 static int mkdir_mondata_all(struct kernfs_node *parent_kn,
3072 struct rdtgroup *prgrp,
3073 struct kernfs_node **dest_kn)
3075 struct rdt_resource *r;
3076 struct kernfs_node *kn;
3080 * Create the mon_data directory first.
3082 ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
3090 * Create the subdirectories for each domain. Note that all events
3091 * in a domain like L3 are grouped into a resource whose domain is L3
3093 for_each_mon_capable_rdt_resource(r) {
3094 ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
3107 * cbm_ensure_valid - Enforce validity on provided CBM
3108 * @_val: Candidate CBM
3109 * @r: RDT resource to which the CBM belongs
3111 * The provided CBM represents all cache portions available for use. This
3112 * may be represented by a bitmap that does not consist of contiguous ones
3113 * and thus be an invalid CBM.
3114 * Here the provided CBM is forced to be a valid CBM by only considering
3115 * the first set of contiguous bits as valid and clearing all other bits.
3116 * The intention here is to provide a valid default CBM with which a new
3117 * resource group is initialized. The user can follow this with a
3118 * modification to the CBM if the default does not satisfy the
 * requirements.
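 * For example, with cbm_len = 8 the candidate CBM 0b01101100 keeps only
 * its lowest run of contiguous ones and becomes 0b00001100.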
3121 static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
3123 unsigned int cbm_len = r->cache.cbm_len;
3124 unsigned long first_bit, zero_bit;
3125 unsigned long val = _val;
	/* An all-zero CBM has no contiguous run of ones to keep. */
	if (!val)
		return 0;

3130	first_bit = find_first_bit(&val, cbm_len);
3131 zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
3133 /* Clear any remaining bits to ensure contiguous region */
3134 bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
3139 * Initialize cache resources per RDT domain
3141 * Set the RDT domain up to start off with all usable allocations. That is,
3142 * all shareable and unused bits. All-zero CBM is invalid.
3144 static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s,
3147 enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
3148 enum resctrl_conf_type t = s->conf_type;
3149 struct resctrl_staged_config *cfg;
3150 struct rdt_resource *r = s->res;
3151 u32 used_b = 0, unused_b = 0;
3152 unsigned long tmp_cbm;
3153 enum rdtgrp_mode mode;
3154 u32 peer_ctl, ctrl_val;
3157 cfg = &d->staged_config[t];
3158 cfg->have_new_ctrl = false;
3159 cfg->new_ctrl = r->cache.shareable_bits;
3160 used_b = r->cache.shareable_bits;
3161 for (i = 0; i < closids_supported(); i++) {
3162 if (closid_allocated(i) && i != closid) {
3163 mode = rdtgroup_mode_by_closid(i);
3164 if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
3166 * ctrl values for locksetup aren't relevant
3167 * until the schemata is written, and the mode
3168				 * becomes RDT_MODE_PSEUDO_LOCKED.
				 */
				break;
3172 * If CDP is active include peer domain's
3173 * usage to ensure there is no overlap
3174 * with an exclusive group.
3176 if (resctrl_arch_get_cdp_enabled(r->rid))
3177			peer_ctl = resctrl_arch_get_config(r, d, i,
							   peer_type);
		else
			peer_ctl = 0;
3181		ctrl_val = resctrl_arch_get_config(r, d, i,
						   s->conf_type);
3183 used_b |= ctrl_val | peer_ctl;
3184 if (mode == RDT_MODE_SHAREABLE)
3185 cfg->new_ctrl |= ctrl_val | peer_ctl;
3188 if (d->plr && d->plr->cbm > 0)
3189 used_b |= d->plr->cbm;
3190 unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
3191 unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
3192 cfg->new_ctrl |= unused_b;
3194 * Force the initial CBM to be valid; the user can
3195 * modify the CBM based on system availability.
3197 cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r);
3199 * Assign the u32 CBM to an unsigned long to ensure that
3200 * bitmap_weight() does not access out-of-bounds memory.
3202 tmp_cbm = cfg->new_ctrl;
3203 if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
3204 rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id);
3207 cfg->have_new_ctrl = true;
3213 * Initialize cache resources with default values.
3215 * A new RDT group is being created on an allocation capable (CAT)
3216 * supporting system. Set this group up to start off with all usable
 * allocations.
3219 * If there are no more shareable bits available on any domain then
3220 * the entire allocation will fail.
3222 static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
3224 struct rdt_domain *d;
3227 list_for_each_entry(d, &s->res->domains, list) {
3228 ret = __init_one_rdt_domain(d, s, closid);
3236 /* Initialize MBA resource with default values. */
3237 static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid)
3239 struct resctrl_staged_config *cfg;
3240 struct rdt_domain *d;
3242 list_for_each_entry(d, &r->domains, list) {
		if (is_mba_sc(r)) {
3244			d->mbps_val[closid] = MBA_MAX_MBPS;
			continue;
		}
3248 cfg = &d->staged_config[CDP_NONE];
3249 cfg->new_ctrl = r->default_ctrl;
3250 cfg->have_new_ctrl = true;
3254 /* Initialize the RDT group's allocations. */
3255 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
3257 struct resctrl_schema *s;
3258 struct rdt_resource *r;
3261 rdt_staged_configs_clear();
3263 list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
3265		if (r->rid == RDT_RESOURCE_MBA ||
3266 r->rid == RDT_RESOURCE_SMBA) {
3267			rdtgroup_init_mba(r, rdtgrp->closid);
			if (is_mba_sc(r))
				continue;
		} else {
3271 ret = rdtgroup_init_cat(s, rdtgrp->closid);
3276 ret = resctrl_arch_update_domains(r, rdtgrp->closid);
3278 rdt_last_cmd_puts("Failed to initialize allocations\n");
3284 rdtgrp->mode = RDT_MODE_SHAREABLE;
3287 rdt_staged_configs_clear();
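/*
 * Allocate an RMID for a new group and create its mon_data directory.
 * No-op when the system is not monitor capable.
 */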
3291 static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp)
3295	if (!rdt_mon_capable)
		return 0;

	ret = alloc_rmid();
	if (ret < 0) {
3300		rdt_last_cmd_puts("Out of RMIDs\n");
		return ret;
	}
3303 rdtgrp->mon.rmid = ret;
3305 ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
3307 rdt_last_cmd_puts("kernfs subdir error\n");
3308 free_rmid(rdtgrp->mon.rmid);
3315 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
3316 const char *name, umode_t mode,
3317 enum rdt_group_type rtype, struct rdtgroup **r)
3319 struct rdtgroup *prdtgrp, *rdtgrp;
3320 unsigned long files = 0;
3321 struct kernfs_node *kn;
3324 prdtgrp = rdtgroup_kn_lock_live(parent_kn);
3330 if (rtype == RDTMON_GROUP &&
3331 (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
3332 prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
3334 rdt_last_cmd_puts("Pseudo-locking in progress\n");
3338 /* allocate the rdtgroup. */
3339 rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
3342 rdt_last_cmd_puts("Kernel out of memory\n");
3346 rdtgrp->mon.parent = prdtgrp;
3347 rdtgrp->type = rtype;
3348 INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);
3350 /* kernfs creates the directory for rdtgrp */
3351 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
3354 rdt_last_cmd_puts("kernfs create error\n");
3360 * kernfs_remove() will drop the reference count on "kn" which
3361 * will free it. But we still need it to stick around for the
3362 * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
3363 * which will be dropped by kernfs_put() in rdtgroup_remove().
3367 ret = rdtgroup_kn_set_ugid(kn);
3369 rdt_last_cmd_puts("kernfs perm error\n");
3373 if (rtype == RDTCTRL_GROUP) {
3374 files = RFTYPE_BASE | RFTYPE_CTRL;
3375 if (rdt_mon_capable)
3376 files |= RFTYPE_MON;
3378 files = RFTYPE_BASE | RFTYPE_MON;
3381 ret = rdtgroup_add_files(kn, files);
3383 rdt_last_cmd_puts("kernfs fill error\n");
3387 ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp);
3391 kernfs_activate(kn);
3394 * The caller unlocks the parent_kn upon success.
3399 kernfs_put(rdtgrp->kn);
3400 kernfs_remove(rdtgrp->kn);
3404 rdtgroup_kn_unlock(parent_kn);
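/* Undo mkdir_rdt_prepare() when a later step of group creation fails. */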
3408 static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
3410 kernfs_remove(rgrp->kn);
3411 free_rmid(rgrp->mon.rmid);
3412 rdtgroup_remove(rgrp);
3416 * Create a monitor group under "mon_groups" directory of a control
3417 * and monitor group (ctrl_mon). This is a resource group used
3418 * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
3420 static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
3421 const char *name, umode_t mode)
3423 struct rdtgroup *rdtgrp, *prgrp;
3426 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp);
3430 prgrp = rdtgrp->mon.parent;
3431 rdtgrp->closid = prgrp->closid;
3434 * Add the rdtgrp to the list of rdtgrps the parent
3435 * ctrl_mon group has to track.
3437 list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);
3439 rdtgroup_kn_unlock(parent_kn);
3444 * These are rdtgroups created under the root directory. They can be
3445 * used to allocate and monitor resources.
3447 static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
3448 const char *name, umode_t mode)
3450 struct rdtgroup *rdtgrp;
3451 struct kernfs_node *kn;
3455 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp);
	kn = rdtgrp->kn;
3460	ret = closid_alloc();
	if (ret < 0) {
3462		rdt_last_cmd_puts("Out of CLOSIDs\n");
3463		goto out_common_fail;
	}
	closid = ret;
	ret = 0;

3468	rdtgrp->closid = closid;
3469 ret = rdtgroup_init_alloc(rdtgrp);
3473 list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
3475 if (rdt_mon_capable) {
3477 * Create an empty mon_groups directory to hold the subset
3478 * of tasks and cpus to monitor.
3480 ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
3482 rdt_last_cmd_puts("kernfs subdir error\n");
3490 list_del(&rdtgrp->rdtgroup_list);
3492 closid_free(closid);
3494 mkdir_rdt_prepare_clean(rdtgrp);
3496 rdtgroup_kn_unlock(parent_kn);
3501 * We allow creating mon groups only within a directory called "mon_groups"
3502 * which is present in every ctrl_mon group. Check if this is a valid
3503 * "mon_groups" directory.
3505 * 1. The directory should be named "mon_groups".
3506 * 2. The mon group itself should "not" be named "mon_groups".
3507 *    This makes sure the "mon_groups" directory always has a ctrl_mon group
 *    as its parent.
3510 static bool is_mon_groups(struct kernfs_node *kn, const char *name)
3512 return (!strcmp(kn->name, "mon_groups") &&
3513 strcmp(name, "mon_groups"));
3516 static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
3519	/* Do not accept '\n' to avoid an unparsable situation. */
3520 if (strchr(name, '\n'))
3524 * If the parent directory is the root directory and RDT
3525 * allocation is supported, add a control and monitoring
 * group.
3528 if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
3529 return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);
3532 * If RDT monitoring is supported and the parent directory is a valid
3533 * "mon_groups" directory, add a monitoring subdirectory.
3535 if (rdt_mon_capable && is_mon_groups(parent_kn, name))
3536 return rdtgroup_mkdir_mon(parent_kn, name, mode);
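/*
 * Remove a MON group: return its tasks and CPUs to the parent ctrl_mon
 * group, update the affected CPUs' PQR state and free the RMID.
 */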
3541 static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
3543 struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
3546 /* Give any tasks back to the parent group */
3547 rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);
3549 /* Update per cpu rmid of the moved CPUs first */
3550 for_each_cpu(cpu, &rdtgrp->cpu_mask)
3551 per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
3553 * Update the MSR on moved CPUs and CPUs which have moved
3554 * task running on them.
3556 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
3557 update_closid_rmid(tmpmask, NULL);
3559 rdtgrp->flags = RDT_DELETED;
3560 free_rmid(rdtgrp->mon.rmid);
3563 * Remove the rdtgrp from the parent ctrl_mon group's list
3565 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
3566 list_del(&rdtgrp->mon.crdtgrp_list);
3568 kernfs_remove(rdtgrp->kn);
3573 static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp)
3575 rdtgrp->flags = RDT_DELETED;
3576 list_del(&rdtgrp->rdtgroup_list);
3578 kernfs_remove(rdtgrp->kn);
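/*
 * Remove a ctrl_mon group: move its tasks and CPUs to the default group,
 * free its CLOSID and RMID, then free all of its child MON groups.
 */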
3582 static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
3586 /* Give any tasks back to the default group */
3587 rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
3589 /* Give any CPUs back to the default group */
3590 cpumask_or(&rdtgroup_default.cpu_mask,
3591 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
3593 /* Update per cpu closid and rmid of the moved CPUs first */
3594 for_each_cpu(cpu, &rdtgrp->cpu_mask) {
3595 per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
3596 per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
3600 * Update the MSR on moved CPUs and CPUs which have moved
3601 * task running on them.
3603 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
3604 update_closid_rmid(tmpmask, NULL);
3606 closid_free(rdtgrp->closid);
3607 free_rmid(rdtgrp->mon.rmid);
3609 rdtgroup_ctrl_remove(rdtgrp);
3612 * Free all the child monitor group rmids.
3614 free_all_child_rdtgrp(rdtgrp);
3619 static int rdtgroup_rmdir(struct kernfs_node *kn)
3621 struct kernfs_node *parent_kn = kn->parent;
3622 struct rdtgroup *rdtgrp;
3623 cpumask_var_t tmpmask;
3626 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
3629 rdtgrp = rdtgroup_kn_lock_live(kn);
3636 * If the rdtgroup is a ctrl_mon group and parent directory
3637 * is the root directory, remove the ctrl_mon group.
3639 * If the rdtgroup is a mon group and parent directory
3640 * is a valid "mon_groups" directory, remove the mon group.
3642 if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
3643 rdtgrp != &rdtgroup_default) {
3644 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
3645 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
3646 ret = rdtgroup_ctrl_remove(rdtgrp);
3648 ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask);
3650 } else if (rdtgrp->type == RDTMON_GROUP &&
3651 is_mon_groups(parent_kn, kn->name)) {
3652 ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask);
3658 rdtgroup_kn_unlock(kn);
3659 free_cpumask_var(tmpmask);
3664 * mongrp_reparent() - replace parent CTRL_MON group of a MON group
3665 * @rdtgrp: the MON group whose parent should be replaced
3666 * @new_prdtgrp: replacement parent CTRL_MON group for @rdtgrp
3667 * @cpus: cpumask provided by the caller for use during this call
3669 * Replaces the parent CTRL_MON group for a MON group, resulting in all member
3670 * tasks' CLOSID immediately changing to that of the new parent group.
3671 * Monitoring data for the group is unaffected by this operation.
3673 static void mongrp_reparent(struct rdtgroup *rdtgrp,
3674 struct rdtgroup *new_prdtgrp,
3677 struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
3679 WARN_ON(rdtgrp->type != RDTMON_GROUP);
3680 WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP);
3682 /* Nothing to do when simply renaming a MON group. */
3683 if (prdtgrp == new_prdtgrp)
3686 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
3687 list_move_tail(&rdtgrp->mon.crdtgrp_list,
3688 &new_prdtgrp->mon.crdtgrp_list);
3690 rdtgrp->mon.parent = new_prdtgrp;
3691 rdtgrp->closid = new_prdtgrp->closid;
3693 /* Propagate updated closid to all tasks in this group. */
3694 rdt_move_group_tasks(rdtgrp, rdtgrp, cpus);
3696 update_closid_rmid(cpus, NULL);
3699 static int rdtgroup_rename(struct kernfs_node *kn,
3700 struct kernfs_node *new_parent, const char *new_name)
3702 struct rdtgroup *new_prdtgrp;
3703 struct rdtgroup *rdtgrp;
3704 cpumask_var_t tmpmask;
3707 rdtgrp = kernfs_to_rdtgroup(kn);
3708 new_prdtgrp = kernfs_to_rdtgroup(new_parent);
3709 if (!rdtgrp || !new_prdtgrp)
3712 /* Release both kernfs active_refs before obtaining rdtgroup mutex. */
3713 rdtgroup_kn_get(rdtgrp, kn);
3714 rdtgroup_kn_get(new_prdtgrp, new_parent);
3716 mutex_lock(&rdtgroup_mutex);
3718 rdt_last_cmd_clear();
3721 * Don't allow kernfs_to_rdtgroup() to return a parent rdtgroup if
3722 * either kernfs_node is a file.
3724 if (kernfs_type(kn) != KERNFS_DIR ||
3725 kernfs_type(new_parent) != KERNFS_DIR) {
3726 rdt_last_cmd_puts("Source and destination must be directories");
3731 if ((rdtgrp->flags & RDT_DELETED) || (new_prdtgrp->flags & RDT_DELETED)) {
3736 if (rdtgrp->type != RDTMON_GROUP || !kn->parent ||
3737 !is_mon_groups(kn->parent, kn->name)) {
3738 rdt_last_cmd_puts("Source must be a MON group\n");
3743 if (!is_mon_groups(new_parent, new_name)) {
3744 rdt_last_cmd_puts("Destination must be a mon_groups subdirectory\n");
3750 * If the MON group is monitoring CPUs, the CPUs must be assigned to the
3751 * current parent CTRL_MON group and therefore cannot be assigned to
3752 * the new parent, making the move illegal.
3754 if (!cpumask_empty(&rdtgrp->cpu_mask) &&
3755 rdtgrp->mon.parent != new_prdtgrp) {
3756 rdt_last_cmd_puts("Cannot move a MON group that monitors CPUs\n");
3762 * Allocate the cpumask for use in mongrp_reparent() to avoid the
3763 * possibility of failing to allocate it after kernfs_rename() has
 * succeeded.
3766 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) {
3772 * Perform all input validation and allocations needed to ensure
3773 * mongrp_reparent() will succeed before calling kernfs_rename(),
3774 * otherwise it would be necessary to revert this call if
3775 * mongrp_reparent() failed.
3777 ret = kernfs_rename(kn, new_parent, new_name);
3779 mongrp_reparent(rdtgrp, new_prdtgrp, tmpmask);
3781 free_cpumask_var(tmpmask);
3784 mutex_unlock(&rdtgroup_mutex);
3785 rdtgroup_kn_put(rdtgrp, kn);
3786 rdtgroup_kn_put(new_prdtgrp, new_parent);
3790 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
3792 if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
3793 seq_puts(seq, ",cdp");
3795 if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
3796 seq_puts(seq, ",cdpl2");
3798 if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl))
3799 seq_puts(seq, ",mba_MBps");
3802 seq_puts(seq, ",debug");
3807 static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
3808 .mkdir = rdtgroup_mkdir,
3809 .rmdir = rdtgroup_rmdir,
3810 .rename = rdtgroup_rename,
3811 .show_options = rdtgroup_show_options,
3814 static int rdtgroup_setup_root(struct rdt_fs_context *ctx)
3816 rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
3817 KERNFS_ROOT_CREATE_DEACTIVATED |
3818				       KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
				       &rdtgroup_default);
3820 if (IS_ERR(rdt_root))
3821 return PTR_ERR(rdt_root);
3823 ctx->kfc.root = rdt_root;
3824 rdtgroup_default.kn = kernfs_root_to_node(rdt_root);
3829 static void rdtgroup_destroy_root(void)
3831 kernfs_destroy_root(rdt_root);
3832 rdtgroup_default.kn = NULL;
3835 static void __init rdtgroup_setup_default(void)
3837 mutex_lock(&rdtgroup_mutex);
3839 rdtgroup_default.closid = 0;
3840 rdtgroup_default.mon.rmid = 0;
3841 rdtgroup_default.type = RDTCTRL_GROUP;
3842 INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);
3844 list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);
3846 mutex_unlock(&rdtgroup_mutex);
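/* Free the per-domain monitoring state allocated by domain_setup_mon_state(). */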
3849 static void domain_destroy_mon_state(struct rdt_domain *d)
3851 bitmap_free(d->rmid_busy_llc);
3852 kfree(d->mbm_total);
3853 kfree(d->mbm_local);
3856 void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d)
3858 lockdep_assert_held(&rdtgroup_mutex);
3860 if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
3861 mba_sc_domain_destroy(r, d);
3863 if (!r->mon_capable)
3867	 * If resctrl is mounted, remove all the
3868	 * per-domain monitor data directories.
3870 if (static_branch_unlikely(&rdt_mon_enable_key))
3871 rmdir_mondata_subdir_allrdtgrp(r, d->id);
3873 if (is_mbm_enabled())
3874 cancel_delayed_work(&d->mbm_over);
3875 if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
3877 * When a package is going down, forcefully
3878 * decrement rmid->ebusy. There is no way to know
3879 * that the L3 was flushed and hence may lead to
3880 * incorrect counts in rare scenarios, but leaving
3881 * the RMID as busy creates RMID leaks if the
3882 * package never comes back.
3884 __check_limbo(d, true);
3885 cancel_delayed_work(&d->cqm_limbo);
3888 domain_destroy_mon_state(d);
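/*
 * Allocate the per-domain RMID limbo bitmap and the MBM counter arrays,
 * sized by the number of RMIDs supported by @r.
 */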
3891 static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
3895 if (is_llc_occupancy_enabled()) {
3896 d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
3897		if (!d->rmid_busy_llc)
			return -ENOMEM;
	}
3900 if (is_mbm_total_enabled()) {
3901 tsize = sizeof(*d->mbm_total);
3902 d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
3903		if (!d->mbm_total) {
3904			bitmap_free(d->rmid_busy_llc);
			return -ENOMEM;
		}
	}
3908 if (is_mbm_local_enabled()) {
3909 tsize = sizeof(*d->mbm_local);
3910 d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
3911		if (!d->mbm_local) {
3912			bitmap_free(d->rmid_busy_llc);
3913			kfree(d->mbm_total);
			return -ENOMEM;
		}
	}

	return 0;
3921 int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d)
3925 lockdep_assert_held(&rdtgroup_mutex);
3927 if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
3928 /* RDT_RESOURCE_MBA is never mon_capable */
3929 return mba_sc_domain_allocate(r, d);
3931 if (!r->mon_capable)
3934 err = domain_setup_mon_state(r, d);
3938 if (is_mbm_enabled()) {
3939 INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
3940 mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
3943 if (is_llc_occupancy_enabled())
3944 INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
3946	/* If resctrl is mounted, add per-domain monitor data directories. */
3947 if (static_branch_unlikely(&rdt_mon_enable_key))
3948 mkdir_mondata_subdir_allrdtgrp(r, d);
3954 * rdtgroup_init - rdtgroup initialization
3956 * Set up the resctrl file system: set up the root, create the mount
3957 * point, register the rdtgroup filesystem, and initialize files under
 * the root directory.
3959 * Return: 0 on success or -errno
3961 int __init rdtgroup_init(void)
3965 seq_buf_init(&last_cmd_status, last_cmd_status_buf,
3966 sizeof(last_cmd_status_buf));
3968 rdtgroup_setup_default();
3970 ret = sysfs_create_mount_point(fs_kobj, "resctrl");
3974 ret = register_filesystem(&rdt_fs_type);
	if (ret)
3976		goto cleanup_mountpoint;
3979 * Adding the resctrl debugfs directory here may not be ideal since
3980 * it would let the resctrl debugfs directory appear on the debugfs
3981 * filesystem before the resctrl filesystem is mounted.
3982 * It may also be ok since that would enable debugging of RDT before
3983 * resctrl is mounted.
3984 * The reason why the debugfs directory is created here and not in
3985 * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and
3986 * during the debugfs directory creation also &sb->s_type->i_mutex_key
3987 * (the lockdep class of inode->i_rwsem). Other filesystem
3988 * interactions (eg. SyS_getdents) have the lock ordering:
3989 * &sb->s_type->i_mutex_key --> &mm->mmap_lock
3990 * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex
3991 * is taken, thus creating the dependency
 * &mm->mmap_lock --> rdtgroup_mutex,
 * which together with the other two lock orderings closes a potential
 * deadlock cycle.
 * By creating the debugfs directory here we avoid that dependency
 * (even though file operations cannot occur until the filesystem is
 * mounted, there is no way to tell lockdep that).
3999 debugfs_resctrl = debugfs_create_dir("resctrl", NULL);
4004 sysfs_remove_mount_point(fs_kobj, "resctrl");
4009 void __exit rdtgroup_exit(void)
4011 debugfs_remove_recursive(debugfs_resctrl);
4012 unregister_filesystem(&rdt_fs_type);
4013 sysfs_remove_mount_point(fs_kobj, "resctrl");