// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual, June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"

/*
 * Check whether the MBA bandwidth percentage value is correct. The value is
 * checked against the minimum and maximum bandwidth values specified by the
 * hardware. The allocated bandwidth percentage is rounded up to the next
 * control step available on the hardware.
 */
static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
{
	unsigned long bw;
	int ret;

	/* Only linear delay values are supported for current Intel SKUs. */
	if (!r->membw.delay_linear && r->membw.arch_needs_linear) {
		rdt_last_cmd_puts("No support for non-linear MB domains\n");
		return false;
	}

	ret = kstrtoul(buf, 10, &bw);
	if (ret) {
		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
		return false;
	}

	/* The range check is skipped when the software controller is in use. */
	if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
	    !is_mba_sc(r)) {
		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
				    r->membw.min_bw, r->default_ctrl);
		return false;
	}

	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
	return true;
}
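
/*
 * Worked example for bw_validate() with illustrative values (not taken
 * from any specific SKU): with membw.min_bw = 10 and membw.bw_gran = 10,
 * a write of "23" passes the range check and roundup(23, 10) stages 30,
 * while "7" is rejected as below the minimum.
 */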

int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
	     struct rdt_domain *d)
{
	struct resctrl_staged_config *cfg;
	u32 closid = data->rdtgrp->closid;
	struct rdt_resource *r = s->res;
	unsigned long bw_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	if (!bw_validate(data->buf, &bw_val, r))
		return -EINVAL;

	/* The software controller consumes MBps values directly. */
	if (is_mba_sc(r)) {
		d->mbps_val[closid] = bw_val;
		return 0;
	}

	cfg->new_ctrl = bw_val;
	cfg->have_new_ctrl = true;

	return 0;
}

/*
 * Check whether a cache bit mask is valid.
 * For Intel the SDM says:
 *	Please note that all (and only) contiguous '1' combinations
 *	are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
 * Additionally Haswell requires at least two bits set.
 * AMD allows non-contiguous bitmasks.
 */
static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
{
	unsigned long first_bit, zero_bit, val;
	unsigned int cbm_len = r->cache.cbm_len;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret) {
		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
		return false;
	}

	if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) {
		rdt_last_cmd_puts("Mask out of range\n");
		return false;
	}

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	/* Are non-contiguous bitmasks allowed? */
	if (!r->cache.arch_has_sparse_bitmaps &&
	    (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
		rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
		return false;
	}

	if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("Need at least %d bits in the mask\n",
				    r->cache.min_cbm_bits);
		return false;
	}

	*data = val;
	return true;
}
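
/*
 * Sketch of the contiguity check above, with assumed values: for
 * val = 0xf0f and cbm_len = 12, first_bit = 0 and zero_bit = 4, and
 * find_next_bit() finds the '1' at bit 8, so the mask is rejected
 * unless the architecture allows sparse bitmasks (e.g. AMD).
 */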

/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
	      struct rdt_domain *d)
{
	struct rdtgroup *rdtgrp = data->rdtgrp;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	u32 cbm_val;

	cfg = &d->staged_config[s->conf_type];
	if (cfg->have_new_ctrl) {
		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
		return -EINVAL;
	}

	/*
	 * Cannot set up more than one pseudo-locked region in a cache
	 * hierarchy.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    rdtgroup_pseudo_locked_in_hierarchy(d)) {
		rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
		return -EINVAL;
	}

	if (!cbm_validate(data->buf, &cbm_val, r))
		return -EINVAL;

	if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
	     rdtgrp->mode == RDT_MODE_SHAREABLE) &&
	    rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
		rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
		return -EINVAL;
	}

	/*
	 * The CBM may not overlap with the CBM of another closid if
	 * either is exclusive.
	 */
	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
		rdt_last_cmd_puts("Overlaps with exclusive group\n");
		return -EINVAL;
	}

	if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
		if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			rdt_last_cmd_puts("Overlaps with other group\n");
			return -EINVAL;
		}
	}

	cfg->new_ctrl = cbm_val;
	cfg->have_new_ctrl = true;

	return 0;
}

/*
 * For each domain in this resource we expect to find a series of:
 *	id=mask
 * separated by ";". The "id" is in decimal, and must match one of
 * the "id"s for this resource.
 */
static int parse_line(char *line, struct resctrl_schema *s,
		      struct rdtgroup *rdtgrp)
{
	enum resctrl_conf_type t = s->conf_type;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	struct rdt_parse_data data;
	char *dom = NULL, *id;
	struct rdt_domain *d;
	unsigned long dom_id;

	/* Pseudo-locking only applies to cache resources, never to MBA. */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
	    r->rid == RDT_RESOURCE_MBA) {
		rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
		return -EINVAL;
	}

next:
	if (!line || line[0] == '\0')
		return 0;
	dom = strsep(&line, ";");
	id = strsep(&dom, "=");
	if (!dom || kstrtoul(id, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
		return -EINVAL;
	}
	dom = strim(dom);
	list_for_each_entry(d, &r->domains, list) {
		if (d->id == dom_id) {
			data.buf = dom;
			data.rdtgrp = rdtgrp;
			if (r->parse_ctrlval(&data, s, d))
				return -EINVAL;
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				cfg = &d->staged_config[t];
				/*
				 * We are in pseudo-locking setup mode and
				 * have just parsed a valid CBM that should
				 * be pseudo-locked. Only one locked region
				 * is allowed per resource group and domain,
				 * so do the required initialization for a
				 * single region and return.
				 */
				rdtgrp->plr->s = s;
				rdtgrp->plr->d = d;
				rdtgrp->plr->cbm = cfg->new_ctrl;
				d->plr = rdtgrp->plr;
				return 0;
			}
			goto next;
		}
	}
	return -EINVAL;
}
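
/*
 * Example of input handled by parse_line() (domain ids are
 * system-specific). For the schemata line:
 *	L3:0=ffff;1=00ff
 * the caller strips "L3:", so parse_line() receives "0=ffff;1=00ff"
 * and stages one CBM for each listed domain.
 */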

static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
{
	switch (type) {
	default:
	case CDP_NONE:
		return closid;
	case CDP_CODE:
		return closid * 2 + 1;
	case CDP_DATA:
		return closid * 2;
	}
}
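
/*
 * For example, with CDP enabled, closid 2 maps to ctrl_val[] index 4 for
 * its data mask (CDP_DATA) and index 5 for its code mask (CDP_CODE);
 * with CDP disabled (CDP_NONE), the closid is used as the index directly.
 */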

static bool apply_config(struct rdt_hw_domain *hw_dom,
			 struct resctrl_staged_config *cfg, u32 idx,
			 cpumask_var_t cpu_mask)
{
	struct rdt_domain *dom = &hw_dom->d_resctrl;

	/* Only touch the hardware if the staged value differs. */
	if (cfg->new_ctrl != hw_dom->ctrl_val[idx]) {
		cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
		hw_dom->ctrl_val[idx] = cfg->new_ctrl;
		return true;
	}

	return false;
}

int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
			    u32 closid, enum resctrl_conf_type t, u32 cfg_val)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	u32 idx = get_config_index(closid, t);
	struct msr_param msr_param;

	/* The MSR write must be issued from a CPU in the target domain. */
	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
		return -EINVAL;

	hw_dom->ctrl_val[idx] = cfg_val;

	msr_param.res = r;
	msr_param.low = idx;
	msr_param.high = idx + 1;
	hw_res->msr_update(d, &msr_param, r);

	return 0;
}

int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
{
	struct resctrl_staged_config *cfg;
	struct rdt_hw_domain *hw_dom;
	struct msr_param msr_param;
	enum resctrl_conf_type t;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	u32 idx;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = NULL;
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		for (t = 0; t < CDP_NUM_TYPES; t++) {
			cfg = &hw_dom->d_resctrl.staged_config[t];
			if (!cfg->have_new_ctrl)
				continue;

			idx = get_config_index(closid, t);
			if (!apply_config(hw_dom, cfg, idx, cpu_mask))
				continue;

			/* Grow the MSR range to cover every changed index. */
			if (!msr_param.res) {
				msr_param.low = idx;
				msr_param.high = msr_param.low + 1;
				msr_param.res = r;
			} else {
				msr_param.low = min(msr_param.low, idx);
				msr_param.high = max(msr_param.high, idx + 1);
			}
		}
	}

	if (cpumask_empty(cpu_mask))
		goto done;

	/* Update the control MSRs on one CPU in each changed domain. */
	on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);

done:
	free_cpumask_var(cpu_mask);

	return 0;
}
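
/*
 * Example of the range accumulation above, assuming CDP is enabled: if
 * closid 1 stages both CDP_DATA (idx 2) and CDP_CODE (idx 3), msr_param
 * ends up with low = 2 and high = 4, and rdt_ctrl_update() then writes
 * both control MSRs from a single IPI per affected domain.
 */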

static int rdtgroup_parse_resource(char *resname, char *tok,
				   struct rdtgroup *rdtgrp)
{
	struct resctrl_schema *s;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
			return parse_line(tok, s, rdtgrp);
	}
	rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
	return -EINVAL;
}

ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct resctrl_schema *s;
	struct rdtgroup *rdtgrp;
	struct rdt_domain *dom;
	struct rdt_resource *r;
	char *tok, *resname;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	/*
	 * No changes to pseudo-locked region allowed. It has to be removed
	 * and re-created instead.
	 */
	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Resource group is pseudo-locked\n");
		goto out;
	}

	/* Discard any configuration staged by an earlier failed write. */
	list_for_each_entry(s, &resctrl_schema_all, list) {
		list_for_each_entry(dom, &s->res->domains, list)
			memset(dom->staged_config, 0, sizeof(dom->staged_config));
	}

	while ((tok = strsep(&buf, "\n")) != NULL) {
		resname = strim(strsep(&tok, ":"));
		if (!tok) {
			rdt_last_cmd_puts("Missing ':'\n");
			ret = -EINVAL;
			goto out;
		}
		if (tok[0] == '\0') {
			rdt_last_cmd_printf("Missing '%s' value\n", resname);
			ret = -EINVAL;
			goto out;
		}
		ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
		if (ret)
			goto out;
	}

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;

		/*
		 * Writes to mba_sc resources update the software controller,
		 * not the control MSR.
		 */
		if (is_mba_sc(r))
			continue;

		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
		if (ret)
			goto out;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		/*
		 * If pseudo-locking fails we keep the resource group in
		 * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
		 * active and updated for just the domain the pseudo-locked
		 * region was requested for.
		 */
		ret = rdtgroup_pseudo_lock_create(rdtgrp);
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}
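
/*
 * Example interaction, assuming a mounted resctrl filesystem and a
 * resource group named "p0" (names and masks are illustrative):
 *	# echo "L3:0=ffff;1=00ff" > /sys/fs/resctrl/p0/schemata
 * A rejected write leaves an explanation in
 * /sys/fs/resctrl/info/last_cmd_status.
 */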

u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
			    u32 closid, enum resctrl_conf_type type)
{
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	u32 idx = get_config_index(closid, type);

	return hw_dom->ctrl_val[idx];
}

static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
{
	struct rdt_resource *r = schema->res;
	struct rdt_domain *dom;
	bool sep = false;
	u32 ctrl_val;

	seq_printf(s, "%*s:", max_name_width, schema->name);
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_puts(s, ";");

		if (is_mba_sc(r))
			ctrl_val = dom->mbps_val[closid];
		else
			ctrl_val = resctrl_arch_get_config(r, dom, closid,
							   schema->conf_type);

		seq_printf(s, r->format_str, dom->id, max_data_width,
			   ctrl_val);
		sep = true;
	}
	seq_puts(s, "\n");
}

int rdtgroup_schemata_show(struct kernfs_open_file *of,
			   struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	struct rdtgroup *rdtgrp;
	int ret = 0;
	u32 closid;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				seq_printf(s, "%s:uninitialized\n", schema->name);
			}
		} else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				seq_printf(s, "%s:%d=%x\n",
					   rdtgrp->plr->s->res->name,
					   rdtgrp->plr->d->id,
					   rdtgrp->plr->cbm);
			}
		} else {
			closid = rdtgrp->closid;
			list_for_each_entry(schema, &resctrl_schema_all, list) {
				if (closid < schema->num_closid)
					show_doms(s, schema, closid);
			}
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
		    struct rdt_domain *d, struct rdtgroup *rdtgrp,
		    int evtid, int first)
{
	/*
	 * Set up the parameters to send to the IPI that reads the data.
	 */
	rr->rgrp = rdtgrp;
	rr->evtid = evtid;
	rr->r = r;
	rr->d = d;
	rr->val = 0;
	rr->first = first;

	/* Read the event count on any one CPU of the domain. */
	smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
}

int rdtgroup_mondata_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	u32 resid, evtid, domid;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	union mon_data_bits md;
	struct rdt_domain *d;
	struct rmid_read rr;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto out;
	}

	md.priv = of->kn->priv;
	resid = md.u.rid;
	domid = md.u.domid;
	evtid = md.u.evtid;

	r = &rdt_resources_all[resid].r_resctrl;
	d = rdt_find_domain(r, domid, NULL);
	if (IS_ERR_OR_NULL(d)) {
		ret = -ENOENT;
		goto out;
	}

	mon_event_read(&rr, r, d, rdtgrp, evtid, false);

	if (rr.err == -EIO)
		seq_puts(m, "Error\n");
	else if (rr.err == -EINVAL)
		seq_puts(m, "Unavailable\n");
	else
		seq_printf(m, "%llu\n", rr.val);

out:
	rdtgroup_kn_unlock(of->kn);
	return ret;
}
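
/*
 * Example read, assuming monitoring is supported (paths are illustrative):
 *	# cat /sys/fs/resctrl/p0/mon_data/mon_L3_00/llc_occupancy
 * prints the occupancy count in bytes, "Unavailable" if hardware has no
 * data for the RMID yet, or "Error" on an I/O failure.
 */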