Merge tag 'x86-asm-2024-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
[linux-2.6-block.git] arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 69a1de92384ab26a4b7eef9063e857121a7f4f1d..011e17efb1a66e5b003fee07b53c13633f9f0f58 100644 (file)
 DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
 DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
 DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
+
+/* Mutex to protect rdtgroup access. */
+DEFINE_MUTEX(rdtgroup_mutex);
+
 static struct kernfs_root *rdt_root;
 struct rdtgroup rdtgroup_default;
 LIST_HEAD(rdt_all_groups);
@@ -42,6 +46,9 @@ LIST_HEAD(rdt_all_groups);
 /* list of entries for the schemata file */
 LIST_HEAD(resctrl_schema_all);
 
+/* The filesystem can only be mounted once. */
+bool resctrl_mounted;
+
 /* Kernel fs node for "info" directory under root */
 static struct kernfs_node *kn_info;
 
@@ -102,7 +109,7 @@ void rdt_staged_configs_clear(void)
  *
  * Using a global CLOSID across all resources has some advantages and
  * some drawbacks:
- * + We can simply set "current->closid" to assign a task to a resource
+ * + We can simply set current's closid to assign a task to a resource
  *   group.
  * + Context switch code can avoid extra memory references deciding which
  *   CLOSID to load into the PQR_ASSOC MSR
@@ -111,7 +118,7 @@ void rdt_staged_configs_clear(void)
  * - Our choices on how to configure each resource become progressively more
  *   limited as the number of resources grows.
  */
-static int closid_free_map;
+static unsigned long closid_free_map;
 static int closid_free_map_len;
 
 int closids_supported(void)
@@ -130,26 +137,39 @@ static void closid_init(void)
 
        closid_free_map = BIT_MASK(rdt_min_closid) - 1;
 
-       /* CLOSID 0 is always reserved for the default group */
-       closid_free_map &= ~1;
+       /* RESCTRL_RESERVED_CLOSID is always reserved for the default group */
+       __clear_bit(RESCTRL_RESERVED_CLOSID, &closid_free_map);
        closid_free_map_len = rdt_min_closid;
 }
 
 static int closid_alloc(void)
 {
-       u32 closid = ffs(closid_free_map);
+       int cleanest_closid;
+       u32 closid;
 
-       if (closid == 0)
-               return -ENOSPC;
-       closid--;
-       closid_free_map &= ~(1 << closid);
+       lockdep_assert_held(&rdtgroup_mutex);
+
+       if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
+               cleanest_closid = resctrl_find_cleanest_closid();
+               if (cleanest_closid < 0)
+                       return cleanest_closid;
+               closid = cleanest_closid;
+       } else {
+               closid = ffs(closid_free_map);
+               if (closid == 0)
+                       return -ENOSPC;
+               closid--;
+       }
+       __clear_bit(closid, &closid_free_map);
 
        return closid;
 }
 
 void closid_free(int closid)
 {
-       closid_free_map |= 1 << closid;
+       lockdep_assert_held(&rdtgroup_mutex);
+
+       __set_bit(closid, &closid_free_map);
 }
 
 /**
@@ -159,9 +179,11 @@ void closid_free(int closid)
  * Return: true if @closid is currently associated with a resource group,
  * false if @closid is free
  */
-static bool closid_allocated(unsigned int closid)
+bool closid_allocated(unsigned int closid)
 {
-       return (closid_free_map & (1 << closid)) == 0;
+       lockdep_assert_held(&rdtgroup_mutex);
+
+       return !test_bit(closid, &closid_free_map);
 }
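
The allocator above is a straight find-first-set over a free-map, with RESCTRL_RESERVED_CLOSID (bit 0) never handed out. A minimal stand-alone sketch of the same free-map technique (plain C, hypothetical names, not the kernel code):

    #include <strings.h>                    /* ffs() */

    static unsigned int free_map;           /* bit set => CLOSID is free */

    /* Mark CLOSIDs 1..num-1 free; CLOSID 0 stays reserved (assumes num < 32). */
    static void map_init(unsigned int num)
    {
            free_map = ((1u << num) - 1) & ~1u;
    }

    static int map_alloc(void)
    {
            int closid = ffs(free_map);     /* 1-based position, 0 if none set */

            if (!closid)
                    return -1;              /* out of CLOSIDs */
            closid--;
            free_map &= ~(1u << closid);
            return closid;
    }

    static void map_free(int closid)
    {
            free_map |= 1u << closid;
    }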
 
 /**
@@ -559,14 +581,26 @@ static void update_task_closid_rmid(struct task_struct *t)
                _update_task_closid_rmid(t);
 }
 
+static bool task_in_rdtgroup(struct task_struct *tsk, struct rdtgroup *rdtgrp)
+{
+       u32 closid, rmid = rdtgrp->mon.rmid;
+
+       if (rdtgrp->type == RDTCTRL_GROUP)
+               closid = rdtgrp->closid;
+       else if (rdtgrp->type == RDTMON_GROUP)
+               closid = rdtgrp->mon.parent->closid;
+       else
+               return false;
+
+       return resctrl_arch_match_closid(tsk, closid) &&
+              resctrl_arch_match_rmid(tsk, closid, rmid);
+}
+
 static int __rdtgroup_move_task(struct task_struct *tsk,
                                struct rdtgroup *rdtgrp)
 {
        /* If the task is already in rdtgrp, no need to move the task. */
-       if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
-            tsk->rmid == rdtgrp->mon.rmid) ||
-           (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
-            tsk->closid == rdtgrp->mon.parent->closid))
+       if (task_in_rdtgroup(tsk, rdtgrp))
                return 0;
 
        /*
@@ -577,19 +611,19 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
         * For monitor groups, tasks can only be moved from
         * their parent CTRL group.
         */
-
-       if (rdtgrp->type == RDTCTRL_GROUP) {
-               WRITE_ONCE(tsk->closid, rdtgrp->closid);
-               WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
-       } else if (rdtgrp->type == RDTMON_GROUP) {
-               if (rdtgrp->mon.parent->closid == tsk->closid) {
-                       WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
-               } else {
-                       rdt_last_cmd_puts("Can't move task to different control group\n");
-                       return -EINVAL;
-               }
+       if (rdtgrp->type == RDTMON_GROUP &&
+           !resctrl_arch_match_closid(tsk, rdtgrp->mon.parent->closid)) {
+               rdt_last_cmd_puts("Can't move task to different control group\n");
+               return -EINVAL;
        }
 
+       if (rdtgrp->type == RDTMON_GROUP)
+               resctrl_arch_set_closid_rmid(tsk, rdtgrp->mon.parent->closid,
+                                            rdtgrp->mon.rmid);
+       else
+               resctrl_arch_set_closid_rmid(tsk, rdtgrp->closid,
+                                            rdtgrp->mon.rmid);
+
        /*
         * Ensure the task's closid and rmid are written before determining
         * whether the task is current; that decides if it must be interrupted.
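
The pattern the comment describes: publish the task's new IDs, then check whether the task is running to decide if it must be interrupted so its CPU reloads PQR_ASSOC. Roughly (a sketch of the flow, not the verbatim kernel sequence):

    resctrl_arch_set_closid_rmid(tsk, closid, rmid); /* the stores above */
    barrier();      /* order the stores before the task_curr() check */
    if (task_curr(tsk))
            update_task_closid_rmid(tsk);   /* IPI: reload PQR_ASSOC now */
    /* otherwise the MSR is loaded when the task is next scheduled in */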
@@ -611,14 +645,15 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
 
 static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
 {
-       return (rdt_alloc_capable &&
-              (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
+       return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) &&
+               resctrl_arch_match_closid(t, r->closid));
 }
 
 static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
 {
-       return (rdt_mon_capable &&
-              (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
+       return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) &&
+               resctrl_arch_match_rmid(t, r->mon.parent->closid,
+                                       r->mon.rmid));
 }
 
 /**
@@ -853,7 +888,7 @@ int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
        mutex_lock(&rdtgroup_mutex);
 
        /* Return empty if resctrl has not been mounted. */
-       if (!static_branch_unlikely(&rdt_enable_key)) {
+       if (!resctrl_mounted) {
                seq_puts(s, "res:\nmon:\n");
                goto unlock;
        }
@@ -869,7 +904,7 @@ int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
                    rdtg->mode != RDT_MODE_EXCLUSIVE)
                        continue;
 
-               if (rdtg->closid != tsk->closid)
+               if (!resctrl_arch_match_closid(tsk, rdtg->closid))
                        continue;
 
                seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
@@ -877,7 +912,8 @@ int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
                seq_puts(s, "mon:");
                list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
                                    mon.crdtgrp_list) {
-                       if (tsk->rmid != crg->mon.rmid)
+                       if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid,
+                                                    crg->mon.rmid))
                                continue;
                        seq_printf(s, "%s", crg->kn->name);
                        break;
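
The /proc/<pid>/resctrl output assembled here is one res: line and one mon: line, e.g. (illustrative names; the default group prints as res:/):

    res:grp_a
    mon:mon_grp_b

An unmounted resctrl yields the bare headers res: and mon: per the early return above.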
@@ -982,6 +1018,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
        bool sep = false;
        u32 ctrl_val;
 
+       cpus_read_lock();
        mutex_lock(&rdtgroup_mutex);
        hw_shareable = r->cache.shareable_bits;
        list_for_each_entry(dom, &r->domains, list) {
@@ -1042,6 +1079,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
        }
        seq_putc(seq, '\n');
        mutex_unlock(&rdtgroup_mutex);
+       cpus_read_unlock();
        return 0;
 }
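
This pairing is the series' locking rule: any walk of r->domains must be protected against CPU hotplug, with cpus_read_lock() taken outside rdtgroup_mutex. The shape every show/write handler now follows (sketch using names from this file):

    cpus_read_lock();                       /* r->domains can't change */
    mutex_lock(&rdtgroup_mutex);            /* resctrl state can't change */

    list_for_each_entry(dom, &r->domains, list) {
            /* ... read or update per-domain state ... */
    }

    mutex_unlock(&rdtgroup_mutex);
    cpus_read_unlock();                     /* release in reverse order */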
 
@@ -1297,6 +1335,9 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
        struct rdt_domain *d;
        u32 ctrl;
 
+       /* Walking r->domains, ensure it can't race with cpuhp */
+       lockdep_assert_cpus_held();
+
        list_for_each_entry(s, &resctrl_schema_all, list) {
                r = s->res;
                if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)
@@ -1561,6 +1602,7 @@ static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid
        struct rdt_domain *dom;
        bool sep = false;
 
+       cpus_read_lock();
        mutex_lock(&rdtgroup_mutex);
 
        list_for_each_entry(dom, &r->domains, list) {
@@ -1577,6 +1619,7 @@ static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid
        seq_puts(s, "\n");
 
        mutex_unlock(&rdtgroup_mutex);
+       cpus_read_unlock();
 
        return 0;
 }
@@ -1614,17 +1657,10 @@ static void mon_event_config_write(void *info)
        wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0);
 }
 
-static int mbm_config_write_domain(struct rdt_resource *r,
-                                  struct rdt_domain *d, u32 evtid, u32 val)
+static void mbm_config_write_domain(struct rdt_resource *r,
+                                   struct rdt_domain *d, u32 evtid, u32 val)
 {
        struct mon_config_info mon_info = {0};
-       int ret = 0;
-
-       /* mon_config cannot be more than the supported set of events */
-       if (val > MAX_EVT_CONFIG_BITS) {
-               rdt_last_cmd_puts("Invalid event configuration\n");
-               return -EINVAL;
-       }
 
        /*
         * Read the current config value first. If both are the same then
@@ -1633,7 +1669,7 @@ static int mbm_config_write_domain(struct rdt_resource *r,
        mon_info.evtid = evtid;
        mondata_config_read(d, &mon_info);
        if (mon_info.mon_config == val)
-               goto out;
+               return;
 
        mon_info.mon_config = val;
 
@@ -1656,17 +1692,17 @@ static int mbm_config_write_domain(struct rdt_resource *r,
         * mbm_local and mbm_total counts for all the RMIDs.
         */
        resctrl_arch_reset_rmid_all(r, d);
-
-out:
-       return ret;
 }
 
 static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid)
 {
+       struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
        char *dom_str = NULL, *id_str;
        unsigned long dom_id, val;
        struct rdt_domain *d;
-       int ret = 0;
+
+       /* Walking r->domains, ensure it can't race with cpuhp */
+       lockdep_assert_cpus_held();
 
 next:
        if (!tok || tok[0] == '\0')
@@ -1686,11 +1722,16 @@ next:
                return -EINVAL;
        }
 
+       /* Value from user cannot be more than the supported set of events */
+       if ((val & hw_res->mbm_cfg_mask) != val) {
+               rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n",
+                                   hw_res->mbm_cfg_mask);
+               return -EINVAL;
+       }
+
        list_for_each_entry(d, &r->domains, list) {
                if (d->id == dom_id) {
-                       ret = mbm_config_write_domain(r, d, evtid, val);
-                       if (ret)
-                               return -EINVAL;
+                       mbm_config_write_domain(r, d, evtid, val);
                        goto next;
                }
        }
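
A worked example of the new validity test, with a hypothetical mbm_cfg_mask of 0x3f (six implemented event bits):

    /* val = 0x33: 0x33 & 0x3f == 0x33          -> accepted          */
    /* val = 0x73: 0x73 & 0x3f == 0x33 != 0x73  -> -EINVAL, "Invalid */
    /*             event configuration: max valid mask is 0x3f"      */

Hoisting the check into mon_config_write() is what let mbm_config_write_domain() become void in the previous hunk.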
@@ -1709,6 +1750,7 @@ static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of,
        if (nbytes == 0 || buf[nbytes - 1] != '\n')
                return -EINVAL;
 
+       cpus_read_lock();
        mutex_lock(&rdtgroup_mutex);
 
        rdt_last_cmd_clear();
@@ -1718,6 +1760,7 @@ static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of,
        ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID);
 
        mutex_unlock(&rdtgroup_mutex);
+       cpus_read_unlock();
 
        return ret ?: nbytes;
 }
@@ -1733,6 +1776,7 @@ static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of,
        if (nbytes == 0 || buf[nbytes - 1] != '\n')
                return -EINVAL;
 
+       cpus_read_lock();
        mutex_lock(&rdtgroup_mutex);
 
        rdt_last_cmd_clear();
@@ -1742,6 +1786,7 @@ static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of,
        ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID);
 
        mutex_unlock(&rdtgroup_mutex);
+       cpus_read_unlock();
 
        return ret ?: nbytes;
 }
@@ -2218,6 +2263,9 @@ static int set_cache_qos_cfg(int level, bool enable)
        struct rdt_domain *d;
        int cpu;
 
+       /* Walking r->domains, ensure it can't race with cpuhp */
+       lockdep_assert_cpus_held();
+
        if (level == RDT_RESOURCE_L3)
                update = l3_qos_cfg_update;
        else if (level == RDT_RESOURCE_L2)
@@ -2417,6 +2465,7 @@ struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
 
        rdtgroup_kn_get(rdtgrp, kn);
 
+       cpus_read_lock();
        mutex_lock(&rdtgroup_mutex);
 
        /* Was this group deleted while we waited? */
@@ -2434,6 +2483,8 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
                return;
 
        mutex_unlock(&rdtgroup_mutex);
+       cpus_read_unlock();
+
        rdtgroup_kn_put(rdtgrp, kn);
 }
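
Because the hotplug lock now lives inside this helper pair, every handler built on them holds both locks without further changes. The usual call pattern in this file (sketch):

    rdtgrp = rdtgroup_kn_lock_live(of->kn); /* cpus_read_lock + mutex */
    if (!rdtgrp) {
            rdtgroup_kn_unlock(of->kn);     /* group was deleted meanwhile */
            return -ENOENT;
    }

    /* ... operate on rdtgrp with both locks held ... */

    rdtgroup_kn_unlock(of->kn);             /* drops mutex, then cpuhp lock */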
 
@@ -2584,7 +2635,7 @@ static int rdt_get_tree(struct fs_context *fc)
        /*
         * resctrl file system can only be mounted once.
         */
-       if (static_branch_unlikely(&rdt_enable_key)) {
+       if (resctrl_mounted) {
                ret = -EBUSY;
                goto out;
        }
@@ -2605,7 +2656,7 @@ static int rdt_get_tree(struct fs_context *fc)
 
        closid_init();
 
-       if (rdt_mon_capable)
+       if (resctrl_arch_mon_capable())
                flags |= RFTYPE_MON;
 
        ret = rdtgroup_add_files(rdtgroup_default.kn, flags);
@@ -2618,7 +2669,7 @@ static int rdt_get_tree(struct fs_context *fc)
        if (ret < 0)
                goto out_schemata_free;
 
-       if (rdt_mon_capable) {
+       if (resctrl_arch_mon_capable()) {
                ret = mongroup_create_dir(rdtgroup_default.kn,
                                          &rdtgroup_default, "mon_groups",
                                          &kn_mongrp);
@@ -2640,18 +2691,19 @@ static int rdt_get_tree(struct fs_context *fc)
        if (ret < 0)
                goto out_psl;
 
-       if (rdt_alloc_capable)
-               static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
-       if (rdt_mon_capable)
-               static_branch_enable_cpuslocked(&rdt_mon_enable_key);
+       if (resctrl_arch_alloc_capable())
+               resctrl_arch_enable_alloc();
+       if (resctrl_arch_mon_capable())
+               resctrl_arch_enable_mon();
 
-       if (rdt_alloc_capable || rdt_mon_capable)
-               static_branch_enable_cpuslocked(&rdt_enable_key);
+       if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable())
+               resctrl_mounted = true;
 
        if (is_mbm_enabled()) {
                r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
                list_for_each_entry(dom, &r->domains, list)
-                       mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
+                       mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL,
+                                                  RESCTRL_PICK_ANY_CPU);
        }
 
        goto out;
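
resctrl_mounted is an ordinary bool serialized by rdtgroup_mutex; the static keys do not go away, they move behind the arch hooks. On x86 the enable helpers are expected to look roughly like this (paraphrased from the series, not quoted from the tree):

    /* arch/x86/include/asm/resctrl.h (sketch) */
    static inline void resctrl_arch_enable_alloc(void)
    {
            static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
            static_branch_inc_cpuslocked(&rdt_enable_key);
    }

    static inline void resctrl_arch_disable_alloc(void)
    {
            static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
            static_branch_dec_cpuslocked(&rdt_enable_key);
    }

    /* resctrl_arch_{enable,disable}_mon() mirror these for the mon key. */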
@@ -2659,10 +2711,10 @@ static int rdt_get_tree(struct fs_context *fc)
 out_psl:
        rdt_pseudo_lock_release();
 out_mondata:
-       if (rdt_mon_capable)
+       if (resctrl_arch_mon_capable())
                kernfs_remove(kn_mondata);
 out_mongrp:
-       if (rdt_mon_capable)
+       if (resctrl_arch_mon_capable())
                kernfs_remove(kn_mongrp);
 out_info:
        kernfs_remove(kn_info);
@@ -2765,6 +2817,9 @@ static int reset_all_ctrls(struct rdt_resource *r)
        struct rdt_domain *d;
        int i;
 
+       /* Walking r->domains, ensure it can't race with cpuhp */
+       lockdep_assert_cpus_held();
+
        if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
                return -ENOMEM;
 
@@ -2810,8 +2865,8 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
        for_each_process_thread(p, t) {
                if (!from || is_closid_match(t, from) ||
                    is_rmid_match(t, from)) {
-                       WRITE_ONCE(t->closid, to->closid);
-                       WRITE_ONCE(t->rmid, to->mon.rmid);
+                       resctrl_arch_set_closid_rmid(t, to->closid,
+                                                    to->mon.rmid);
 
                        /*
                         * Order the closid/rmid stores above before the loads
@@ -2842,7 +2897,7 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
 
        head = &rdtgrp->mon.crdtgrp_list;
        list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
-               free_rmid(sentry->mon.rmid);
+               free_rmid(sentry->closid, sentry->mon.rmid);
                list_del(&sentry->mon.crdtgrp_list);
 
                if (atomic_read(&sentry->waitcount) != 0)
@@ -2882,7 +2937,7 @@ static void rmdir_all_sub(void)
                cpumask_or(&rdtgroup_default.cpu_mask,
                           &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
 
-               free_rmid(rdtgrp->mon.rmid);
+               free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
 
                kernfs_remove(rdtgrp->kn);
                list_del(&rdtgrp->rdtgroup_list);
@@ -2917,9 +2972,11 @@ static void rdt_kill_sb(struct super_block *sb)
        rdtgroup_default.mode = RDT_MODE_SHAREABLE;
        schemata_list_destroy();
        rdtgroup_destroy_root();
-       static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
-       static_branch_disable_cpuslocked(&rdt_mon_enable_key);
-       static_branch_disable_cpuslocked(&rdt_enable_key);
+       if (resctrl_arch_alloc_capable())
+               resctrl_arch_disable_alloc();
+       if (resctrl_arch_mon_capable())
+               resctrl_arch_disable_mon();
+       resctrl_mounted = false;
        kernfs_kill_sb(sb);
        mutex_unlock(&rdtgroup_mutex);
        cpus_read_unlock();
@@ -3047,6 +3104,9 @@ static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
        struct rdt_domain *dom;
        int ret;
 
+       /* Walking r->domains, ensure it can't race with cpuhp */
+       lockdep_assert_cpus_held();
+
        list_for_each_entry(dom, &r->domains, list) {
                ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
                if (ret)
@@ -3293,6 +3353,36 @@ out:
        return ret;
 }
 
+static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp)
+{
+       int ret;
+
+       if (!resctrl_arch_mon_capable())
+               return 0;
+
+       ret = alloc_rmid(rdtgrp->closid);
+       if (ret < 0) {
+               rdt_last_cmd_puts("Out of RMIDs\n");
+               return ret;
+       }
+       rdtgrp->mon.rmid = ret;
+
+       ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
+       if (ret) {
+               rdt_last_cmd_puts("kernfs subdir error\n");
+               free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp)
+{
+       if (resctrl_arch_mon_capable())
+               free_rmid(rgrp->closid, rgrp->mon.rmid);
+}
+
 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
                             const char *name, umode_t mode,
                             enum rdt_group_type rtype, struct rdtgroup **r)
@@ -3353,7 +3443,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
 
        if (rtype == RDTCTRL_GROUP) {
                files = RFTYPE_BASE | RFTYPE_CTRL;
-               if (rdt_mon_capable)
+               if (resctrl_arch_mon_capable())
                        files |= RFTYPE_MON;
        } else {
                files = RFTYPE_BASE | RFTYPE_MON;
@@ -3365,29 +3455,11 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
                goto out_destroy;
        }
 
-       if (rdt_mon_capable) {
-               ret = alloc_rmid();
-               if (ret < 0) {
-                       rdt_last_cmd_puts("Out of RMIDs\n");
-                       goto out_destroy;
-               }
-               rdtgrp->mon.rmid = ret;
-
-               ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
-               if (ret) {
-                       rdt_last_cmd_puts("kernfs subdir error\n");
-                       goto out_idfree;
-               }
-       }
-       kernfs_activate(kn);
-
        /*
         * The caller unlocks the parent_kn upon success.
         */
        return 0;
 
-out_idfree:
-       free_rmid(rdtgrp->mon.rmid);
 out_destroy:
        kernfs_put(rdtgrp->kn);
        kernfs_remove(rdtgrp->kn);
@@ -3401,7 +3473,6 @@ out_unlock:
 static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
 {
        kernfs_remove(rgrp->kn);
-       free_rmid(rgrp->mon.rmid);
        rdtgroup_remove(rgrp);
 }
 
@@ -3423,12 +3494,21 @@ static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
        prgrp = rdtgrp->mon.parent;
        rdtgrp->closid = prgrp->closid;
 
+       ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp);
+       if (ret) {
+               mkdir_rdt_prepare_clean(rdtgrp);
+               goto out_unlock;
+       }
+
+       kernfs_activate(rdtgrp->kn);
+
        /*
         * Add the rdtgrp to the list of rdtgrps the parent
         * ctrl_mon group has to track.
         */
        list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);
 
+out_unlock:
        rdtgroup_kn_unlock(parent_kn);
        return ret;
 }
@@ -3459,13 +3539,20 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
        ret = 0;
 
        rdtgrp->closid = closid;
+
+       ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp);
+       if (ret)
+               goto out_closid_free;
+
+       kernfs_activate(rdtgrp->kn);
+
        ret = rdtgroup_init_alloc(rdtgrp);
        if (ret < 0)
-               goto out_id_free;
+               goto out_rmid_free;
 
        list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
 
-       if (rdt_mon_capable) {
+       if (resctrl_arch_mon_capable()) {
                /*
                 * Create an empty mon_groups directory to hold the subset
                 * of tasks and cpus to monitor.
@@ -3481,7 +3568,9 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
 
 out_del_list:
        list_del(&rdtgrp->rdtgroup_list);
-out_id_free:
+out_rmid_free:
+       mkdir_rdt_prepare_rmid_free(rdtgrp);
+out_closid_free:
        closid_free(closid);
 out_common_fail:
        mkdir_rdt_prepare_clean(rdtgrp);
@@ -3518,14 +3607,14 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
         * allocation is supported, add a control and monitoring
         * subdirectory
         */
-       if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
+       if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn)
                return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);
 
        /*
         * If RDT monitoring is supported and the parent directory is a valid
         * "mon_groups" directory, add a monitoring subdirectory.
         */
-       if (rdt_mon_capable && is_mon_groups(parent_kn, name))
+       if (resctrl_arch_mon_capable() && is_mon_groups(parent_kn, name))
                return rdtgroup_mkdir_mon(parent_kn, name, mode);
 
        return -EPERM;
@@ -3550,7 +3639,7 @@ static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
        update_closid_rmid(tmpmask, NULL);
 
        rdtgrp->flags = RDT_DELETED;
-       free_rmid(rdtgrp->mon.rmid);
+       free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
 
        /*
         * Remove the rdtgrp from the parent ctrl_mon group's list
@@ -3596,8 +3685,8 @@ static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
        cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
        update_closid_rmid(tmpmask, NULL);
 
+       free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
        closid_free(rdtgrp->closid);
-       free_rmid(rdtgrp->mon.rmid);
 
        rdtgroup_ctrl_remove(rdtgrp);
 
@@ -3829,8 +3918,8 @@ static void __init rdtgroup_setup_default(void)
 {
        mutex_lock(&rdtgroup_mutex);
 
-       rdtgroup_default.closid = 0;
-       rdtgroup_default.mon.rmid = 0;
+       rdtgroup_default.closid = RESCTRL_RESERVED_CLOSID;
+       rdtgroup_default.mon.rmid = RESCTRL_RESERVED_RMID;
        rdtgroup_default.type = RDTCTRL_GROUP;
        INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);
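
The reserved IDs used here are plain constants from <linux/resctrl.h>; both are expected to be zero, so the existing behaviour (CLOSID 0 / RMID 0 for the default group) is unchanged (values as introduced by this series):

    #define RESCTRL_RESERVED_CLOSID         0
    #define RESCTRL_RESERVED_RMID           0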
 
@@ -3848,24 +3937,24 @@ static void domain_destroy_mon_state(struct rdt_domain *d)
 
 void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d)
 {
-       lockdep_assert_held(&rdtgroup_mutex);
+       mutex_lock(&rdtgroup_mutex);
 
        if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
                mba_sc_domain_destroy(r, d);
 
        if (!r->mon_capable)
-               return;
+               goto out_unlock;
 
        /*
         * If resctrl is mounted, remove all the
         * per domain monitor data directories.
         */
-       if (static_branch_unlikely(&rdt_mon_enable_key))
+       if (resctrl_mounted && resctrl_arch_mon_capable())
                rmdir_mondata_subdir_allrdtgrp(r, d->id);
 
        if (is_mbm_enabled())
                cancel_delayed_work(&d->mbm_over);
-       if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
+       if (is_llc_occupancy_enabled() && has_busy_rmid(d)) {
                /*
                 * When a package is going down, forcefully
                 * decrement rmid->ebusy. There is no way to know
@@ -3879,20 +3968,24 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d)
        }
 
        domain_destroy_mon_state(d);
+
+out_unlock:
+       mutex_unlock(&rdtgroup_mutex);
 }
 
 static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
 {
+       u32 idx_limit = resctrl_arch_system_num_rmid_idx();
        size_t tsize;
 
        if (is_llc_occupancy_enabled()) {
-               d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
+               d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL);
                if (!d->rmid_busy_llc)
                        return -ENOMEM;
        }
        if (is_mbm_total_enabled()) {
                tsize = sizeof(*d->mbm_total);
-               d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
+               d->mbm_total = kcalloc(idx_limit, tsize, GFP_KERNEL);
                if (!d->mbm_total) {
                        bitmap_free(d->rmid_busy_llc);
                        return -ENOMEM;
@@ -3900,7 +3993,7 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
        }
        if (is_mbm_local_enabled()) {
                tsize = sizeof(*d->mbm_local);
-               d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
+               d->mbm_local = kcalloc(idx_limit, tsize, GFP_KERNEL);
                if (!d->mbm_local) {
                        bitmap_free(d->rmid_busy_llc);
                        kfree(d->mbm_total);
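
The monitor arrays are now sized by "RMID index" instead of the raw RMID count. On x86 the index is simply the RMID, but an architecture such as Arm's MPAM, where a monitor is named by the (CLOSID, RMID) pair, needs the full product. A sketch of the two encodings (the x86 helper name is from this series; the MPAM variant is illustrative):

    /* x86: RMIDs are an independent number space */
    static inline u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid)
    {
            return rmid;
    }

    /* MPAM-style (illustrative): one counter slot per (closid, rmid) */
    static inline u32 mpam_rmid_idx_encode(u32 closid, u32 rmid)
    {
            return closid * num_rmid_per_closid + rmid;
    }

This is also why free_rmid()/alloc_rmid() now carry the CLOSID and the bitmaps here are sized by idx_limit.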
@@ -3913,34 +4006,97 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
 
 int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d)
 {
-       int err;
+       int err = 0;
 
-       lockdep_assert_held(&rdtgroup_mutex);
+       mutex_lock(&rdtgroup_mutex);
 
-       if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
+       if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) {
                /* RDT_RESOURCE_MBA is never mon_capable */
-               return mba_sc_domain_allocate(r, d);
+               err = mba_sc_domain_allocate(r, d);
+               goto out_unlock;
+       }
 
        if (!r->mon_capable)
-               return 0;
+               goto out_unlock;
 
        err = domain_setup_mon_state(r, d);
        if (err)
-               return err;
+               goto out_unlock;
 
        if (is_mbm_enabled()) {
                INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
-               mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
+               mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL,
+                                          RESCTRL_PICK_ANY_CPU);
        }
 
        if (is_llc_occupancy_enabled())
                INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
 
-       /* If resctrl is mounted, add per domain monitor data directories. */
-       if (static_branch_unlikely(&rdt_mon_enable_key))
+       /*
+        * If the filesystem is not mounted then only the default resource group
+        * exists. Creation of its directories is deferred until mount time
+        * by rdt_get_tree() calling mkdir_mondata_all().
+        * If resctrl is mounted, add per domain monitor data directories.
+        */
+       if (resctrl_mounted && resctrl_arch_mon_capable())
                mkdir_mondata_subdir_allrdtgrp(r, d);
 
-       return 0;
+out_unlock:
+       mutex_unlock(&rdtgroup_mutex);
+
+       return err;
+}
+
+void resctrl_online_cpu(unsigned int cpu)
+{
+       mutex_lock(&rdtgroup_mutex);
+       /* The CPU joins the default rdtgroup when it comes online. */
+       cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
+       mutex_unlock(&rdtgroup_mutex);
+}
+
+static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
+{
+       struct rdtgroup *cr;
+
+       list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
+               if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask))
+                       break;
+       }
+}
+
+void resctrl_offline_cpu(unsigned int cpu)
+{
+       struct rdt_resource *l3 = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+       struct rdtgroup *rdtgrp;
+       struct rdt_domain *d;
+
+       mutex_lock(&rdtgroup_mutex);
+       list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+               if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
+                       clear_childcpus(rdtgrp, cpu);
+                       break;
+               }
+       }
+
+       if (!l3->mon_capable)
+               goto out_unlock;
+
+       d = get_domain_from_cpu(cpu, l3);
+       if (d) {
+               if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
+                       cancel_delayed_work(&d->mbm_over);
+                       mbm_setup_overflow_handler(d, 0, cpu);
+               }
+               if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
+                   has_busy_rmid(d)) {
+                       cancel_delayed_work(&d->cqm_limbo);
+                       cqm_setup_limbo_handler(d, 0, cpu);
+               }
+       }
+
+out_unlock:
+       mutex_unlock(&rdtgroup_mutex);
 }
 
 /*