/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel(R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>

#include <asm/intel-family.h>
#include <asm/intel_rdt_sched.h>
#include "intel_rdt.h"

#define MAX_MBA_BW	100u
#define MBA_IS_LINEAR	0x4
#define MBA_MAX_MBPS	U32_MAX
/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);

/*
 * The cached intel_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Functions which modify the state
 * are called with interrupts disabled and no preemption, which
 * is sufficient for the protection.
 */
DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);

/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format.
 */
int max_name_width, max_data_width;

/*
 * Global boolean for rdt_alloc which is true if any
 * resource allocation is enabled.
 */
bool rdt_alloc_capable;
static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);

#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
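/*
 * All RDT resources known to this driver: the L3/L2 cache allocation
 * resources (with separate code/data entries used when CDP is enabled)
 * and the Memory Bandwidth Allocation resource. Fields not initialized
 * here are filled in from CPUID during boot time probing.
 */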
struct rdt_resource rdt_resources_all[] = {
	[RDT_RESOURCE_L3] =
	{
		.rid = RDT_RESOURCE_L3,
		.domains = domain_init(RDT_RESOURCE_L3),
		.msr_base = IA32_L3_CBM_BASE,
		.msr_update = cat_wrmsr,
		.parse_ctrlval = parse_cbm,
		.format_str = "%d=%0*x",
		.fflags = RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L3DATA] =
	{
		.rid = RDT_RESOURCE_L3DATA,
		.domains = domain_init(RDT_RESOURCE_L3DATA),
		.msr_base = IA32_L3_CBM_BASE,
		.msr_update = cat_wrmsr,
		.parse_ctrlval = parse_cbm,
		.format_str = "%d=%0*x",
		.fflags = RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L3CODE] =
	{
		.rid = RDT_RESOURCE_L3CODE,
		.domains = domain_init(RDT_RESOURCE_L3CODE),
		.msr_base = IA32_L3_CBM_BASE,
		.msr_update = cat_wrmsr,
		.parse_ctrlval = parse_cbm,
		.format_str = "%d=%0*x",
		.fflags = RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2] =
	{
		.rid = RDT_RESOURCE_L2,
		.domains = domain_init(RDT_RESOURCE_L2),
		.msr_base = IA32_L2_CBM_BASE,
		.msr_update = cat_wrmsr,
		.parse_ctrlval = parse_cbm,
		.format_str = "%d=%0*x",
		.fflags = RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2DATA] =
	{
		.rid = RDT_RESOURCE_L2DATA,
		.domains = domain_init(RDT_RESOURCE_L2DATA),
		.msr_base = IA32_L2_CBM_BASE,
		.msr_update = cat_wrmsr,
		.parse_ctrlval = parse_cbm,
		.format_str = "%d=%0*x",
		.fflags = RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2CODE] =
	{
		.rid = RDT_RESOURCE_L2CODE,
		.domains = domain_init(RDT_RESOURCE_L2CODE),
		.msr_base = IA32_L2_CBM_BASE,
		.msr_update = cat_wrmsr,
		.parse_ctrlval = parse_cbm,
		.format_str = "%d=%0*x",
		.fflags = RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_MBA] =
	{
		.rid = RDT_RESOURCE_MBA,
		.domains = domain_init(RDT_RESOURCE_MBA),
		.msr_base = IA32_MBA_THRTL_BASE,
		.msr_update = mba_wrmsr,
		.parse_ctrlval = parse_bw,
		.format_str = "%d=%*d",
		.fflags = RFTYPE_RES_MB,
	},
};
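/*
 * cbm_idx() maps a CLOSID to the index of its cache bit mask MSR relative
 * to msr_base. For plain CAT the mapping is 1:1; with CDP the code and
 * data masks are interleaved, which cbm_idx_mult/cbm_idx_offset encode.
 */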
static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
{
	return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
}
/*
 * cache_alloc_hsw_probe() - Have to probe for Intel Haswell server CPUs
 * as they do not have CPUID enumeration support for Cache allocation.
 * The check for Vendor/Family/Model is not enough to guarantee that
 * the MSRs won't #GP fault because only the following SKUs support
 * CAT:
 *	Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
 *	Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
 *	Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
 *	Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
 * is always 20 on HSW server parts. The minimum cache bitmask length
 * allowed for HSW server is always 2 bits. Hardcode all of them.
 */
static inline void cache_alloc_hsw_probe(void)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
	u32 l, h, max_cbm = BIT_MASK(20) - 1;

	if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
		return;
	rdmsr(IA32_L3_CBM_BASE, l, h);

	/* If all the bits were set in MSR, return success */
	if (l != max_cbm)
		return;

	r->num_closid = 4;
	r->default_ctrl = max_cbm;
	r->cache.cbm_len = 20;
	r->cache.shareable_bits = 0xc0000;
	r->cache.min_cbm_bits = 2;
	r->alloc_capable = true;
	r->alloc_enabled = true;

	rdt_alloc_capable = true;
}
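/*
 * is_mba_sc() - report whether the MBA software controller is in use.
 * When called with a NULL resource the global MBA resource is consulted.
 */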
bool is_mba_sc(struct rdt_resource *r)
{
	if (!r)
		return rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc;

	return r->membw.mba_sc;
}
/*
 * rdt_get_mb_table() - get a mapping of bandwidth (b/w) percentage values
 * exposed to the user interface and the h/w understandable delay values.
 *
 * The non-linear delay values have the granularity of power of two
 * and also the h/w does not guarantee a curve for configured delay
 * values vs. actual b/w enforced.
 * Hence we need a mapping that is pre-calibrated so the user can
 * express the memory b/w as a percentage value.
 */
static inline bool rdt_get_mb_table(struct rdt_resource *r)
{
	/*
	 * There are no Intel SKUs as of now to support non-linear delay.
	 */
	pr_info("MBA b/w map not implemented for cpu:%d, model:%d",
		boot_cpu_data.x86, boot_cpu_data.x86_model);

	return false;
}
static bool rdt_get_mem_config(struct rdt_resource *r)
{
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->membw.max_delay = eax.split.max_delay + 1;
	r->default_ctrl = MAX_MBA_BW;
	if (ecx & MBA_IS_LINEAR) {
		r->membw.delay_linear = true;
		r->membw.min_bw = MAX_MBA_BW - r->membw.max_delay;
		r->membw.bw_gran = MAX_MBA_BW - r->membw.max_delay;
	} else {
		if (!rdt_get_mb_table(r))
			return false;
	}

	r->alloc_capable = true;
	r->alloc_enabled = true;

	return true;
}
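/*
 * rdt_get_cache_alloc_cfg() - probe a cache allocation resource from CPUID
 * leaf 0x10, sub-leaf @idx, filling in the CLOSID count, CBM length and
 * shareable bits for resource @r.
 */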
static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
	union cpuid_0x10_1_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->cache.cbm_len = eax.split.cbm_len + 1;
	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
	r->cache.shareable_bits = ebx & r->default_ctrl;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->alloc_capable = true;
	r->alloc_enabled = true;
}
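/*
 * rdt_get_cdp_config() - set up a code/data resource from its parent cache
 * resource. With CDP each CLOSID uses a pair of mask MSRs, so only half of
 * the parent's CLOSIDs are available.
 */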
static void rdt_get_cdp_config(int level, int type)
{
	struct rdt_resource *r_l = &rdt_resources_all[level];
	struct rdt_resource *r = &rdt_resources_all[type];

	r->num_closid = r_l->num_closid / 2;
	r->cache.cbm_len = r_l->cache.cbm_len;
	r->default_ctrl = r_l->default_ctrl;
	r->cache.shareable_bits = r_l->cache.shareable_bits;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->alloc_capable = true;
	/*
	 * By default, CDP is disabled. CDP can be enabled by mount parameter
	 * "cdp" during resctrl file system mount time.
	 */
	r->alloc_enabled = false;
}
static void rdt_get_cdp_l3_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
	rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
}

static void rdt_get_cdp_l2_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
	rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
}
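/*
 * get_cache_id() - return the id of the cache at @level that is shared by
 * @cpu, or -1 if that cache level is not present. The id is used as the
 * RDT domain id.
 */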
static int get_cache_id(int cpu, int level)
{
	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
	int i;

	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == level)
			return ci->info_list[i].id;
	}

	return -1;
}
/*
 * Map the memory b/w percentage value to delay values
 * that can be written to QOS_MSRs.
 * There are currently no SKUs which support non-linear delay values.
 */
static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{
	if (r->membw.delay_linear)
		return MAX_MBA_BW - bw;

	pr_warn_once("Non Linear delay-bw map not supported but queried\n");
	return r->default_ctrl;
}
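/*
 * mba_wrmsr() and cat_wrmsr() write a range of per-CLOSID control values
 * of a domain into the resource's control MSRs. They must run on a CPU
 * that belongs to the domain, see rdt_ctrl_update().
 */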
static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	/* Write the delay values for mba. */
	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r));
}

static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
}
struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
{
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->cpu_mask))
			return d;
	}

	return NULL;
}
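/*
 * rdt_ctrl_update() - callback run on a CPU of the target domain to apply
 * updated control values by writing that domain's control MSRs via the
 * resource's msr_update() method.
 */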
void rdt_ctrl_update(void *arg)
{
	struct msr_param *m = arg;
	struct rdt_resource *r = m->res;
	int cpu = smp_processor_id();
	struct rdt_domain *d;

	d = get_domain_from_cpu(cpu, r);
	if (d) {
		r->msr_update(d, m, r);
		return;
	}
	pr_warn_once("cpu %d not found in any domain for resource %s\n",
		     cpu, r->name);
}
/*
 * rdt_find_domain - Find a domain in a resource that matches input resource id
 *
 * Search resource r's domain list to find the resource id. If the resource
 * id is found in a domain, return the domain. Otherwise, if requested by
 * the caller, return in @pos the position at which a domain with a bigger
 * id would have to be inserted.
 * The domain list is sorted by id in ascending order.
 */
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
				   struct list_head **pos)
{
	struct rdt_domain *d;
	struct list_head *l;

	if (id < 0)
		return ERR_PTR(-ENODEV);

	list_for_each(l, &r->domains) {
		d = list_entry(l, struct rdt_domain, list);
		/* When id is found, return its domain. */
		if (id == d->id)
			return d;
		/* Stop searching when finding id's position in sorted list. */
		if (id < d->id)
			break;
	}

	if (pos)
		*pos = l;

	return NULL;
}
void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
{
	int i;

	/*
	 * Initialize the Control MSRs to having no control.
	 * For Cache Allocation: Set all bits in cbm
	 * For Memory Allocation: Set b/w requested to 100%
	 * and the bandwidth in MBps to U32_MAX
	 */
	for (i = 0; i < r->num_closid; i++, dc++, dm++) {
		*dc = r->default_ctrl;
		*dm = MBA_MAX_MBPS;
	}
}

static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
{
	struct msr_param m;
	u32 *dc, *dm;

	dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dm = kmalloc_array(r->num_closid, sizeof(*d->mbps_val), GFP_KERNEL);
	if (!dm) {
		kfree(dc);
		return -ENOMEM;
	}

	d->ctrl_val = dc;
	d->mbps_val = dm;
	setup_default_ctrlval(r, dc, dm);

	m.low = 0;
	m.high = r->num_closid;
	r->msr_update(d, &m, r);
	return 0;
}
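/*
 * domain_setup_mon_state() - allocate the per-domain monitoring state
 * (limbo RMID bitmap and MBM counter arrays) and arm the MBM overflow
 * handler for the domain.
 */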
static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
{
	size_t tsize;

	if (is_llc_occupancy_enabled()) {
		d->rmid_busy_llc = kcalloc(BITS_TO_LONGS(r->num_rmid),
					   sizeof(unsigned long),
					   GFP_KERNEL);
		if (!d->rmid_busy_llc)
			return -ENOMEM;
		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
	}
	if (is_mbm_total_enabled()) {
		tsize = sizeof(*d->mbm_total);
		d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_total) {
			kfree(d->rmid_busy_llc);
			return -ENOMEM;
		}
	}
	if (is_mbm_local_enabled()) {
		tsize = sizeof(*d->mbm_local);
		d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_local) {
			kfree(d->rmid_busy_llc);
			kfree(d->mbm_total);
			return -ENOMEM;
		}
	}

	if (is_mbm_enabled()) {
		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
	}

	return 0;
}
/*
 * domain_add_cpu - Add a cpu to a resource's domain list.
 *
 * If an existing domain in the resource r's domain list matches the cpu's
 * resource id, add the cpu in the domain.
 *
 * Otherwise, a new domain is allocated and inserted into the right position
 * in the domain list sorted by id in ascending order.
 *
 * The order in the domain list is visible to users when we print entries
 * in the schemata file and schemata input is validated to have the same order
 * as this list.
 */
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct list_head *add_pos = NULL;
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, &add_pos);
	if (IS_ERR(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	if (d) {
		cpumask_set_cpu(cpu, &d->cpu_mask);
		return;
	}

	d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
	if (!d)
		return;

	d->id = id;
	cpumask_set_cpu(cpu, &d->cpu_mask);

	if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
		kfree(d);
		return;
	}

	if (r->mon_capable && domain_setup_mon_state(r, d)) {
		kfree(d);
		return;
	}

	list_add_tail(&d->list, add_pos);

	/*
	 * If resctrl is mounted, add
	 * per domain monitor data directories.
	 */
	if (static_branch_unlikely(&rdt_mon_enable_key))
		mkdir_mondata_subdir_allrdtgrp(r, d);
}
static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, NULL);
	if (IS_ERR_OR_NULL(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	cpumask_clear_cpu(cpu, &d->cpu_mask);
	if (cpumask_empty(&d->cpu_mask)) {
		/*
		 * If resctrl is mounted, remove all the
		 * per domain monitor data directories.
		 */
		if (static_branch_unlikely(&rdt_mon_enable_key))
			rmdir_mondata_subdir_allrdtgrp(r, d->id);
		list_del(&d->list);
		if (is_mbm_enabled())
			cancel_delayed_work(&d->mbm_over);
		if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
			/*
			 * When a package is going down, forcefully
			 * decrement rmid->ebusy. There is no way to know
			 * that the L3 was flushed and hence may lead to
			 * incorrect counts in rare scenarios, but leaving
			 * the RMID as busy creates RMID leaks if the
			 * package never comes back.
			 */
			__check_limbo(d, true);
			cancel_delayed_work(&d->cqm_limbo);
		}

		kfree(d->ctrl_val);
		kfree(d->mbps_val);
		kfree(d->rmid_busy_llc);
		kfree(d->mbm_total);
		kfree(d->mbm_local);
		kfree(d);
		return;
	}

	if (r == &rdt_resources_all[RDT_RESOURCE_L3]) {
		if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
			cancel_delayed_work(&d->mbm_over);
			mbm_setup_overflow_handler(d, 0);
		}
		if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
		    has_busy_rmid(r, d)) {
			cancel_delayed_work(&d->cqm_limbo);
			cqm_setup_limbo_handler(d, 0);
		}
	}
}
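/*
 * clear_closid_rmid() - reset the CPU's cached PQR state and the
 * IA32_PQR_ASSOC MSR so the CPU runs with CLOSID 0 and RMID 0, i.e. in
 * the default resource group.
 */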
static void clear_closid_rmid(int cpu)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

	state->default_closid = 0;
	state->default_rmid = 0;
	state->cur_closid = 0;
	state->cur_rmid = 0;
	wrmsr(IA32_PQR_ASSOC, 0, 0);
}
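/*
 * CPU hotplug callbacks: a CPU coming online is added to the matching
 * domain of every capable resource and placed in the default resource
 * group; a CPU going offline is removed from its domains and groups.
 */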
static int intel_rdt_online_cpu(unsigned int cpu)
{
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_add_cpu(cpu, r);
	/* The cpu is set in default rdtgroup after online. */
	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}
static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
{
	struct rdtgroup *cr;

	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask))
			break;
	}
}

static int intel_rdt_offline_cpu(unsigned int cpu)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_remove_cpu(cpu, r);
	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
			clear_childcpus(rdtgrp, cpu);
			break;
		}
	}
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}
/*
 * Choose a width for the resource name and resource data based on the
 * resource that has the widest name and cbm.
 */
static __init void rdt_init_padding(void)
{
	struct rdt_resource *r;
	int cl;

	for_each_alloc_capable_rdt_resource(r) {
		cl = strlen(r->name);
		if (cl > max_name_width)
			max_name_width = cl;

		if (r->data_width > max_data_width)
			max_data_width = r->data_width;
	}
}
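/*
 * "rdt=" boot option handling: a comma separated list of feature names,
 * each optionally prefixed with '!' to force the feature off. A name
 * without '!' forces the feature on, which overrides a quirk-based
 * force-off (see rdt_quirks()).
 */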
#define RDT_OPT(idx, n, f)	\
[idx] = {			\
	.name = n,		\
	.flag = f		\
}

struct rdt_options {
	char	*name;
	int	flag;
	bool	force_off, force_on;
};

static struct rdt_options rdt_options[] __initdata = {
	RDT_OPT(RDT_FLAG_CMT, "cmt", X86_FEATURE_CQM_OCCUP_LLC),
	RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
	RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
	RDT_OPT(RDT_FLAG_L3_CAT, "l3cat", X86_FEATURE_CAT_L3),
	RDT_OPT(RDT_FLAG_L3_CDP, "l3cdp", X86_FEATURE_CDP_L3),
	RDT_OPT(RDT_FLAG_L2_CAT, "l2cat", X86_FEATURE_CAT_L2),
	RDT_OPT(RDT_FLAG_L2_CDP, "l2cdp", X86_FEATURE_CDP_L2),
	RDT_OPT(RDT_FLAG_MBA, "mba", X86_FEATURE_MBA),
};

#define NUM_RDT_OPTIONS	ARRAY_SIZE(rdt_options)
static int __init set_rdt_options(char *str)
{
	struct rdt_options *o;
	bool force_off;
	char *tok;

	if (*str == '=')
		str++;
	while ((tok = strsep(&str, ",")) != NULL) {
		force_off = *tok == '!';
		if (force_off)
			tok++;
		for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
			if (strcmp(tok, o->name) == 0) {
				if (force_off)
					o->force_off = true;
				else
					o->force_on = true;
				break;
			}
		}
	}
	return 1;
}
__setup("rdt", set_rdt_options);
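/*
 * rdt_cpu_has() - check a CPUID feature bit and apply any force-off or
 * force-on override that was set via the "rdt=" option or by rdt_quirks().
 */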
static bool __init rdt_cpu_has(int flag)
{
	bool ret = boot_cpu_has(flag);
	struct rdt_options *o;

	if (!ret)
		return ret;

	for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
		if (flag == o->flag) {
			if (o->force_off)
				ret = false;
			if (o->force_on)
				ret = true;
			break;
		}
	}
	return ret;
}
static __init bool get_rdt_alloc_resources(void)
{
	bool ret = false;

	if (rdt_alloc_capable)
		return true;

	if (!boot_cpu_has(X86_FEATURE_RDT_A))
		return false;

	if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
		rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
		if (rdt_cpu_has(X86_FEATURE_CDP_L3))
			rdt_get_cdp_l3_config();
		ret = true;
	}
	if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
		/* CPUID 0x10.2 fields are same format as 0x10.1 */
		rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
		if (rdt_cpu_has(X86_FEATURE_CDP_L2))
			rdt_get_cdp_l2_config();
		ret = true;
	}

	if (rdt_cpu_has(X86_FEATURE_MBA)) {
		if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
			ret = true;
	}

	return ret;
}
static __init bool get_rdt_mon_resources(void)
{
	if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
		rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);

	if (!rdt_mon_features)
		return false;

	return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
}
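/*
 * rdt_quirks() - model specific fixups: probe Haswell servers, which lack
 * CPUID enumeration for CAT, and disable monitoring and L3 CAT on early
 * Skylake server steppings that are affected by errata.
 */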
static __init void rdt_quirks(void)
{
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_HASWELL_X:
		if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
			cache_alloc_hsw_probe();
		break;
	case INTEL_FAM6_SKYLAKE_X:
		if (boot_cpu_data.x86_stepping <= 4)
			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
		break;
	}
}
static __init bool get_rdt_resources(void)
{
	rdt_quirks();
	rdt_alloc_capable = get_rdt_alloc_resources();
	rdt_mon_capable = get_rdt_mon_resources();

	return (rdt_mon_capable || rdt_alloc_capable);
}
static int __init intel_rdt_late_init(void)
{
	struct rdt_resource *r;
	int state, ret;

	if (!get_rdt_resources())
		return -ENODEV;

	rdt_init_padding();

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "x86/rdt/cat:online:",
				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
	if (state < 0)
		return state;

	ret = rdtgroup_init();
	if (ret) {
		cpuhp_remove_state(state);
		return ret;
	}

	for_each_alloc_capable_rdt_resource(r)
		pr_info("Intel RDT %s allocation detected\n", r->name);

	for_each_mon_capable_rdt_resource(r)
		pr_info("Intel RDT %s monitoring detected\n", r->name);

	return 0;
}

late_initcall(intel_rdt_late_init);