Merge tag 'soc-drivers-6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
[linux-block.git] / arch / x86 / kernel / cpu / resctrl / ctrlmondata.c
CommitLineData
2025cf9e 1// SPDX-License-Identifier: GPL-2.0-only
60ec2440
TL
2/*
3 * Resource Director Technology(RDT)
4 * - Cache Allocation code.
5 *
6 * Copyright (C) 2016 Intel Corporation
7 *
8 * Authors:
9 * Fenghua Yu <fenghua.yu@intel.com>
10 * Tony Luck <tony.luck@intel.com>
11 *
60ec2440
TL
12 * More information about RDT be found in the Intel (R) x86 Architecture
13 * Software Developer Manual June 2016, volume 3, section 17.17.
14 */
15
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
80b71c34 18#include <linux/cpu.h>
60ec2440
TL
19#include <linux/kernfs.h>
20#include <linux/seq_file.h>
21#include <linux/slab.h>
09909e09
JM
22#include <linux/tick.h>
23
fa7d9493 24#include "internal.h"
60ec2440 25
64e8ed3d
VS
26/*
27 * Check whether MBA bandwidth percentage value is correct. The value is
28 * checked against the minimum and max bandwidth values specified by the
29 * hardware. The allocated bandwidth percentage is rounded to the next
30 * control step available on the hardware.
31 */
32static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
33{
34 unsigned long bw;
35 int ret;
36
37 /*
38 * Only linear delay values is supported for current Intel SKUs.
39 */
41215b79 40 if (!r->membw.delay_linear && r->membw.arch_needs_linear) {
c377dcfb 41 rdt_last_cmd_puts("No support for non-linear MB domains\n");
64e8ed3d 42 return false;
c377dcfb 43 }
64e8ed3d
VS
44
45 ret = kstrtoul(buf, 10, &bw);
c377dcfb
TL
46 if (ret) {
47 rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
64e8ed3d 48 return false;
c377dcfb 49 }
64e8ed3d 50
8205a078
VS
51 if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
52 !is_mba_sc(r)) {
c377dcfb
TL
53 rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
54 r->membw.min_bw, r->default_ctrl);
64e8ed3d 55 return false;
c377dcfb 56 }
64e8ed3d
VS
57
58 *data = roundup(bw, (unsigned long)r->membw.bw_gran);
59 return true;
60}
61
1c290682 62int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
5df3ca93 63 struct rdt_domain *d)
64e8ed3d 64{
75408e43 65 struct resctrl_staged_config *cfg;
6ce1560d 66 u32 closid = data->rdtgrp->closid;
1c290682 67 struct rdt_resource *r = s->res;
753694a8 68 unsigned long bw_val;
64e8ed3d 69
75408e43 70 cfg = &d->staged_config[s->conf_type];
e8f72825 71 if (cfg->have_new_ctrl) {
723f1a0d 72 rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
64e8ed3d 73 return -EINVAL;
c377dcfb 74 }
64e8ed3d 75
753694a8 76 if (!bw_validate(data->buf, &bw_val, r))
64e8ed3d 77 return -EINVAL;
6ce1560d
JM
78
79 if (is_mba_sc(r)) {
80 d->mbps_val[closid] = bw_val;
81 return 0;
82 }
83
e8f72825
JM
84 cfg->new_ctrl = bw_val;
85 cfg->have_new_ctrl = true;
64e8ed3d
VS
86
87 return 0;
88}
89
60ec2440 90/*
316e7f90 91 * Check whether a cache bit mask is valid.
0e3cd31f
MWR
92 * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID:
93 * - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1
94 * - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1
95 *
96 * Haswell does not support a non-contiguous 1s value and additionally
97 * requires at least two bits set.
316e7f90 98 * AMD allows non-contiguous bitmasks.
60ec2440 99 */
316e7f90 100static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
60ec2440 101{
c6ea67de 102 unsigned long first_bit, zero_bit, val;
d3e11b4d 103 unsigned int cbm_len = r->cache.cbm_len;
c6ea67de
VS
104 int ret;
105
106 ret = kstrtoul(buf, 16, &val);
c377dcfb 107 if (ret) {
723f1a0d 108 rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
c6ea67de 109 return false;
c377dcfb 110 }
60ec2440 111
2d4daa54 112 if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) {
723f1a0d 113 rdt_last_cmd_puts("Mask out of range\n");
60ec2440 114 return false;
c377dcfb 115 }
60ec2440 116
c6ea67de
VS
117 first_bit = find_first_bit(&val, cbm_len);
118 zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
60ec2440 119
39c6eed1
MWR
120 /* Are non-contiguous bitmasks allowed? */
121 if (!r->cache.arch_has_sparse_bitmasks &&
316e7f90 122 (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
723f1a0d 123 rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
60ec2440 124 return false;
c377dcfb 125 }
60ec2440 126
c377dcfb 127 if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
723f1a0d 128 rdt_last_cmd_printf("Need at least %d bits in the mask\n",
c377dcfb 129 r->cache.min_cbm_bits);
60ec2440 130 return false;
c377dcfb 131 }
c6ea67de
VS
132
133 *data = val;
60ec2440
TL
134 return true;
135}
136
/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
	      struct rdt_domain *d)
{
	struct rdtgroup *rdtgrp = data->rdtgrp;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	u32 cbm_val;

	/* Each domain may be configured at most once per schemata write. */
	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	/*
	 * Cannot set up more than one pseudo-locked region in a cache
	 * hierarchy.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    rdtgroup_pseudo_locked_in_hierarchy(d)) {
		rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
		return -EINVAL;
	}

	if (!cbm_validate(data->buf, &cbm_val, r))
		return -EINVAL;

	/* Shareable/exclusive CBMs may not touch a pseudo-locked region. */
	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
	     rdtgrp->mode == RDT_MODE_SHAREABLE) &&
	    rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
		rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
		return -EINVAL;
	}

	/*
	 * The CBM may not overlap with the CBM of another closid if
	 * either is exclusive.
	 */
	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
		rdt_last_cmd_puts("Overlaps with exclusive group\n");
		return -EINVAL;
	}

	/* Any overlap at all is rejected for exclusive/locksetup groups. */
	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
		if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			rdt_last_cmd_puts("Overlaps with other group\n");
			return -EINVAL;
		}
	}

	/* Stage the validated mask; applied later by the arch update path. */
	cfg->new_ctrl = cbm_val;
	cfg->have_new_ctrl = true;

	return 0;
}
197
/*
 * For each domain in this resource we expect to find a series of:
 *	id=mask
 * separated by ";". The "id" is in decimal, and must match one of
 * the "id"s for this resource.
 */
static int parse_line(char *line, struct resctrl_schema *s,
		      struct rdtgroup *rdtgrp)
{
	enum resctrl_conf_type t = s->conf_type;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	struct rdt_parse_data data;
	char *dom = NULL, *id;
	struct rdt_domain *d;
	unsigned long dom_id;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	/* Bandwidth resources cannot host a pseudo-locked region. */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) {
		rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
		return -EINVAL;
	}

	/* Consume one "id=mask" token per iteration until the line is empty. */
next:
	if (!line || line[0] == '\0')
		return 0;
	dom = strsep(&line, ";");
	id = strsep(&dom, "=");
	if (!dom || kstrtoul(id, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
		return -EINVAL;
	}
	dom = strim(dom);
	list_for_each_entry(d, &r->domains, list) {
		if (d->id == dom_id) {
			data.buf = dom;
			data.rdtgrp = rdtgrp;
			/* r->parse_ctrlval is parse_cbm or parse_bw. */
			if (r->parse_ctrlval(&data, s, d))
				return -EINVAL;
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				cfg = &d->staged_config[t];
				/*
				 * In pseudo-locking setup mode and just
				 * parsed a valid CBM that should be
				 * pseudo-locked. Only one locked region per
				 * resource group and domain so just do
				 * the required initialization for single
				 * region and return.
				 */
				rdtgrp->plr->s = s;
				rdtgrp->plr->d = d;
				rdtgrp->plr->cbm = cfg->new_ctrl;
				d->plr = rdtgrp->plr;
				return 0;
			}
			goto next;
		}
	}
	/* Domain id named in the input does not exist for this resource. */
	return -EINVAL;
}
261
2b8dd4ab 262static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
2e7df368 263{
2b8dd4ab
JM
264 switch (type) {
265 default:
266 case CDP_NONE:
2e7df368 267 return closid;
2b8dd4ab
JM
268 case CDP_CODE:
269 return closid * 2 + 1;
270 case CDP_DATA:
271 return closid * 2;
272 }
2e7df368
JM
273}
274
275static bool apply_config(struct rdt_hw_domain *hw_dom,
276 struct resctrl_staged_config *cfg, u32 idx,
6ce1560d 277 cpumask_var_t cpu_mask)
e8f72825
JM
278{
279 struct rdt_domain *dom = &hw_dom->d_resctrl;
e8f72825 280
6ce1560d 281 if (cfg->new_ctrl != hw_dom->ctrl_val[idx]) {
e8f72825 282 cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
6ce1560d 283 hw_dom->ctrl_val[idx] = cfg->new_ctrl;
2e7df368
JM
284
285 return true;
e8f72825 286 }
2e7df368
JM
287
288 return false;
e8f72825
JM
289}
290
/*
 * Write one control value for (closid, type) in domain @d and push it to
 * hardware through the resource's msr_update() callback. Must be called
 * on a CPU belonging to @d (checked below).
 */
int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
			    u32 closid, enum resctrl_conf_type t, u32 cfg_val)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	u32 idx = get_config_index(closid, t);
	struct msr_param msr_param;

	/* The MSR write must happen on a CPU inside the target domain. */
	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
		return -EINVAL;

	/* Keep the cached copy in sync with what is written to hardware. */
	hw_dom->ctrl_val[idx] = cfg_val;

	/* Update exactly one index: the half-open range [idx, idx + 1). */
	msr_param.res = r;
	msr_param.low = idx;
	msr_param.high = idx + 1;
	hw_res->msr_update(d, &msr_param, r);

	return 0;
}
311
/*
 * Apply all staged configuration values for @closid across every domain of
 * resource @r, then issue a single batched IPI to update the affected MSRs.
 */
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
{
	struct resctrl_staged_config *cfg;
	struct rdt_hw_domain *hw_dom;
	struct msr_param msr_param;
	enum resctrl_conf_type t;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	u32 idx;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	/* msr_param.res doubles as the "any update pending?" flag. */
	msr_param.res = NULL;
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		for (t = 0; t < CDP_NUM_TYPES; t++) {
			cfg = &hw_dom->d_resctrl.staged_config[t];
			if (!cfg->have_new_ctrl)
				continue;

			idx = get_config_index(closid, t);
			if (!apply_config(hw_dom, cfg, idx, cpu_mask))
				continue;

			/* Grow [low, high) to cover every changed index. */
			if (!msr_param.res) {
				msr_param.low = idx;
				msr_param.high = msr_param.low + 1;
				msr_param.res = r;
			} else {
				msr_param.low = min(msr_param.low, idx);
				msr_param.high = max(msr_param.high, idx + 1);
			}
		}
	}

	if (cpumask_empty(cpu_mask))
		goto done;

	/* Update resource control msr on all the CPUs. */
	on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);

done:
	free_cpumask_var(cpu_mask);

	return 0;
}
362
9ab9aa15
RC
363static int rdtgroup_parse_resource(char *resname, char *tok,
364 struct rdtgroup *rdtgrp)
4797b7df 365{
331ebe4c 366 struct resctrl_schema *s;
4797b7df 367
331ebe4c 368 list_for_each_entry(s, &resctrl_schema_all, list) {
e198fde3 369 if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
1c290682 370 return parse_line(tok, s, rdtgrp);
4797b7df 371 }
723f1a0d 372 rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
4797b7df
VS
373 return -EINVAL;
374}
375
/*
 * Write handler for the "schemata" file: parse every "resource:id=val;..."
 * line into staged configs, then apply them to hardware for this group's
 * closid. Returns @nbytes on success, a negative errno otherwise.
 */
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct resctrl_schema *s;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	char *tok, *resname;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	/*
	 * No changes to pseudo-locked region allowed. It has to be removed
	 * and re-created instead.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Resource group is pseudo-locked\n");
		goto out;
	}

	/* Start from a clean slate; also cleared again on every exit path. */
	rdt_staged_configs_clear();

	/* Phase 1: parse and stage, one "resource:..." line at a time. */
	while ((tok = strsep(&buf, "\n")) != NULL) {
		resname = strim(strsep(&tok, ":"));
		if (!tok) {
			rdt_last_cmd_puts("Missing ':'\n");
			ret = -EINVAL;
			goto out;
		}
		if (tok[0] == '\0') {
			rdt_last_cmd_printf("Missing '%s' value\n", resname);
			ret = -EINVAL;
			goto out;
		}
		ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
		if (ret)
			goto out;
	}

	/* Phase 2: push the staged values to hardware, per resource. */
	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;

		/*
		 * Writes to mba_sc resources update the software controller,
		 * not the control MSR.
		 */
		if (is_mba_sc(r))
			continue;

		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
		if (ret)
			goto out;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * If pseudo-locking fails we keep the resource group in
		 * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
		 * active and updated for just the domain the pseudo-locked
		 * region was requested for.
		 */
		ret = rdtgroup_pseudo_lock_create(rdtgrp);
	}

out:
	rdt_staged_configs_clear();
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}
456
111136e6
JM
457u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
458 u32 closid, enum resctrl_conf_type type)
f07e9d02
JM
459{
460 struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
2b8dd4ab 461 u32 idx = get_config_index(closid, type);
f07e9d02 462
6ce1560d 463 return hw_dom->ctrl_val[idx];
f07e9d02
JM
464}
465
1c290682 466static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
60ec2440 467{
1c290682 468 struct rdt_resource *r = schema->res;
60ec2440
TL
469 struct rdt_domain *dom;
470 bool sep = false;
8205a078 471 u32 ctrl_val;
60ec2440 472
fb700810
JM
473 /* Walking r->domains, ensure it can't race with cpuhp */
474 lockdep_assert_cpus_held();
475
e198fde3 476 seq_printf(s, "%*s:", max_name_width, schema->name);
60ec2440
TL
477 list_for_each_entry(dom, &r->domains, list) {
478 if (sep)
479 seq_puts(s, ";");
8205a078 480
6ce1560d
JM
481 if (is_mba_sc(r))
482 ctrl_val = dom->mbps_val[closid];
483 else
484 ctrl_val = resctrl_arch_get_config(r, dom, closid,
485 schema->conf_type);
486
c6ea67de 487 seq_printf(s, r->format_str, dom->id, max_data_width,
8205a078 488 ctrl_val);
60ec2440
TL
489 sep = true;
490 }
491 seq_puts(s, "\n");
492}
493
/*
 * Show handler for the "schemata" file. Output depends on the group mode:
 * locksetup groups print "uninitialized", locked groups print the single
 * pseudo-locked CBM, everything else prints one line per valid schema.
 */
int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	struct rdtgroup *rdtgrp;
	int ret = 0;
	u32 closid;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			/* No CBM committed yet in locksetup mode. */
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				seq_printf(s, "%s:uninitialized\n", schema->name);
			}
		} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			/* The region's domain may have gone offline. */
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				seq_printf(s, "%s:%d=%x\n",
					   rdtgrp->plr->s->res->name,
					   rdtgrp->plr->d->id,
					   rdtgrp->plr->cbm);
			}
		} else {
			closid = rdtgrp->closid;
			/* Only schemas for which this closid is valid. */
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				if (closid < schema->num_closid)
					show_doms(s, schema, closid);
			}
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);
	return ret;
}
d89b7379 532
/* Adapter: smp_call_on_cpu() requires an int-returning callback. */
static int smp_mon_event_count(void *info)
{
	mon_event_count(info);
	return 0;
}
539
/*
 * Read the @evtid monitoring counter for @rdtgrp in domain @d on a CPU of
 * that domain. The result is reported back through @rr (rr->val / rr->err
 * are consumed by the caller).
 */
void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
		    struct rdt_domain *d, struct rdtgroup *rdtgrp,
		    int evtid, int first)
{
	int cpu;

	/* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	/*
	 * Setup the parameters to pass to mon_event_count() to read the data.
	 */
	rr->rgrp = rdtgrp;
	rr->evtid = evtid;
	rr->r = r;
	rr->d = d;
	rr->val = 0;
	rr->first = first;
	rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
	if (IS_ERR(rr->arch_mon_ctx)) {
		rr->err = -EINVAL;
		return;
	}

	cpu = cpumask_any_housekeeping(&d->cpu_mask, RESCTRL_PICK_ANY_CPU);

	/*
	 * cpumask_any_housekeeping() prefers housekeeping CPUs, but
	 * are all the CPUs nohz_full? If yes, pick a CPU to IPI.
	 * MPAM's resctrl_arch_rmid_read() is unable to read the
	 * counters on some platforms if its called in IRQ context.
	 */
	if (tick_nohz_full_cpu(cpu))
		smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
	else
		smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);

	resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx);
}
579
/*
 * Show handler for a mon_data event file: decode the resource/domain/event
 * ids packed into the kernfs node's private data, read the counter and
 * print its value (or an error string on read failure).
 */
int rdtgroup_mondata_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	u32 resid, evtid, domid;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	union mon_data_bits md;
	struct rdt_domain *d;
	struct rmid_read rr;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto out;
	}

	/* The ids were packed into kn->priv when the file was created. */
	md.priv = of->kn->priv;
	resid = md.u.rid;
	domid = md.u.domid;
	evtid = md.u.evtid;

	r = &rdt_resources_all[resid].r_resctrl;
	d = rdt_find_domain(r, domid, NULL);
	if (IS_ERR_OR_NULL(d)) {
		ret = -ENOENT;
		goto out;
	}

	mon_event_read(&rr, r, d, rdtgrp, evtid, false);

	/* Map hardware read errors to user-visible strings. */
	if (rr.err == -EIO)
		seq_puts(m, "Error\n");
	else if (rr.err == -EINVAL)
		seq_puts(m, "Unavailable\n");
	else
		seq_printf(m, "%llu\n", rr.val);

out:
	rdtgroup_kn_unlock(of->kn);
	return ret;
}