x86/intel_rdt/mba: Memory bandwidth allocation feature detect
[linux-2.6-block.git] / arch/x86/include/asm/intel_rdt.h
#ifndef _ASM_X86_INTEL_RDT_H
#define _ASM_X86_INTEL_RDT_H

#ifdef CONFIG_INTEL_RDT_A

#include <linux/sched.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>

#include <asm/intel_rdt_common.h>

#define IA32_L3_QOS_CFG         0xc81
#define IA32_L3_CBM_BASE        0xc90
#define IA32_L2_CBM_BASE        0xd10

#define L3_QOS_CDP_ENABLE       0x01ULL
/**
 * struct rdtgroup - store rdtgroup's data in resctrl file system.
 * @kn:                 kernfs node
 * @rdtgroup_list:      linked list for all rdtgroups
 * @closid:             closid for this rdtgroup
 * @cpu_mask:           CPUs assigned to this rdtgroup
 * @flags:              status bits
 * @waitcount:          how many cpus expect to find this
 *                      group when they acquire rdtgroup_mutex
 */
struct rdtgroup {
        struct kernfs_node      *kn;
        struct list_head        rdtgroup_list;
        int                     closid;
        struct cpumask          cpu_mask;
        int                     flags;
        atomic_t                waitcount;
};

/* rdtgroup.flags */
#define RDT_DELETED             1

/* rftype.flags */
#define RFTYPE_FLAGS_CPUS_LIST  1

/* List of all resource groups */
extern struct list_head rdt_all_groups;

extern int max_name_width, max_data_width;

int __init rdtgroup_init(void);

/**
 * struct rftype - describe each file in the resctrl file system
 * @name:       File name
 * @mode:       Access mode
 * @kf_ops:     File operations
 * @flags:      File specific RFTYPE_FLAGS_* flags
 * @seq_show:   Show content of the file
 * @write:      Write to the file
 */
struct rftype {
        char                    *name;
        umode_t                 mode;
        struct kernfs_ops       *kf_ops;
        unsigned long           flags;

        int (*seq_show)(struct kernfs_open_file *of,
                        struct seq_file *sf, void *v);
        /*
         * write() is the generic write callback which maps directly to
         * the kernfs write operation and overrides all other operations.
         * Maximum write size is determined by ->max_write_len.
         */
        ssize_t (*write)(struct kernfs_open_file *of,
                         char *buf, size_t nbytes, loff_t off);
};
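
/*
 * Illustrative sketch, not part of this header: how a resctrl info file
 * could be described with struct rftype. "example_info", example_show()
 * and example_kf_ops are hypothetical names; the real kernfs_ops used by
 * resctrl live in rdtgroup.c.
 */
static struct kernfs_ops example_kf_ops;        /* hypothetical ops instance */

static int example_show(struct kernfs_open_file *of,
                        struct seq_file *sf, void *v)
{
        seq_puts(sf, "example\n");
        return 0;
}

static struct rftype example_rftype = {
        .name           = "example_info",
        .mode           = 0444,
        .kf_ops         = &example_kf_ops,
        .seq_show       = example_show,
};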

/**
 * struct rdt_domain - group of cpus sharing an RDT resource
 * @list:       all instances of this resource
 * @id:         unique id for this instance
 * @cpu_mask:   which cpus share this resource
 * @ctrl_val:   array of cache or mem ctrl values (indexed by CLOSID)
 * @new_ctrl:   new ctrl value to be loaded
 * @have_new_ctrl: did user provide new_ctrl for this domain
 */
struct rdt_domain {
        struct list_head        list;
        int                     id;
        struct cpumask          cpu_mask;
        u32                     *ctrl_val;
        u32                     new_ctrl;
        bool                    have_new_ctrl;
};

/**
 * struct msr_param - set a range of MSRs from a domain
 * @res:        The resource to use
 * @low:        Beginning index from base MSR
 * @high:       End index
 */
struct msr_param {
        struct rdt_resource     *res;
        int                     low;
        int                     high;
};

/**
 * struct rdt_cache - Cache allocation related data
 * @cbm_len:            Length of the cache bit mask
 * @min_cbm_bits:       Minimum number of consecutive bits to be set
 *                      in a cache bit mask
 * @cbm_idx_mult:       Multiplier of CBM index
 * @cbm_idx_offset:     Offset of CBM index. CBM index is computed by:
 *                      closid * cbm_idx_mult + cbm_idx_offset
 */
struct rdt_cache {
        unsigned int    cbm_len;
        unsigned int    min_cbm_bits;
        unsigned int    cbm_idx_mult;
        unsigned int    cbm_idx_offset;
};
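
/*
 * Illustrative helper, not part of this header: the CBM MSR index for a
 * given CLOSID follows the formula documented above; the resctrl core
 * keeps an equivalent helper in intel_rdt.c.
 */
static inline unsigned int example_cbm_idx(struct rdt_cache *c,
                                           unsigned int closid)
{
        return closid * c->cbm_idx_mult + c->cbm_idx_offset;
}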

/**
 * struct rdt_resource - attributes of an RDT resource
 * @enabled:            Is this feature enabled on this machine
 * @capable:            Is this feature available on this machine
 * @name:               Name to use in "schemata" file
 * @num_closid:         Number of CLOSIDs available
 * @cache_level:        Which cache level defines scope of this resource
 * @default_ctrl:       Specifies default cache cbm or memory B/W percent.
 * @msr_base:           Base MSR address for CBMs
 * @msr_update:         Function pointer to update QOS MSRs
 * @data_width:         Character width of data when displaying
 * @domains:            All domains for this resource
 * @cache:              Cache allocation related data
 */
struct rdt_resource {
        bool                    enabled;
        bool                    capable;
        char                    *name;
        int                     num_closid;
        int                     cache_level;
        u32                     default_ctrl;
        unsigned int            msr_base;
        void (*msr_update)      (struct rdt_domain *d, struct msr_param *m,
                                 struct rdt_resource *r);
        int                     data_width;
        struct list_head        domains;
        struct rdt_cache        cache;
};
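
/*
 * Illustrative sketch, not part of this header: a cache-type ->msr_update
 * callback typically walks the [low, high) index range described by the
 * msr_param and writes the per-CLOSID control values into the CBM MSRs,
 * using the index helper sketched after struct rdt_cache above.
 */
static void example_cat_msr_update(struct rdt_domain *d, struct msr_param *m,
                                   struct rdt_resource *r)
{
        unsigned int i;

        for (i = m->low; i < m->high; i++)
                wrmsrl(r->msr_base + example_cbm_idx(&r->cache, i),
                       d->ctrl_val[i]);
}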

extern struct mutex rdtgroup_mutex;

extern struct rdt_resource rdt_resources_all[];
extern struct rdtgroup rdtgroup_default;
DECLARE_STATIC_KEY_FALSE(rdt_enable_key);

enum {
        RDT_RESOURCE_L3,
        RDT_RESOURCE_L3DATA,
        RDT_RESOURCE_L3CODE,
        RDT_RESOURCE_L2,

        /* Must be the last */
        RDT_NUM_RESOURCES,
};

#define for_each_capable_rdt_resource(r)                                      \
        for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
             r++)                                                             \
                if (r->capable)

#define for_each_enabled_rdt_resource(r)                                      \
        for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
             r++)                                                             \
                if (r->enabled)
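
/*
 * Illustrative usage, not part of this header: walking all capable
 * resources, as the resctrl setup code does. Note the trailing "if" in
 * the macros above, which filters the iteration.
 */
static void example_print_resources(void)
{
        struct rdt_resource *r;

        for_each_capable_rdt_resource(r)
                pr_info("RDT resource %s: %d CLOSIDs\n",
                        r->name, r->num_closid);
}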

/* CPUID.(EAX=10H, ECX=ResID=1).EAX */
union cpuid_0x10_1_eax {
        struct {
                unsigned int cbm_len:5;
        } split;
        unsigned int full;
};

/* CPUID.(EAX=10H, ECX=ResID=3).EAX */
union cpuid_0x10_3_eax {
        struct {
                unsigned int max_delay:12;
        } split;
        unsigned int full;
};

/* CPUID.(EAX=10H, ECX=ResID).EDX */
union cpuid_0x10_x_edx {
        struct {
                unsigned int cos_max:16;
        } split;
        unsigned int full;
};
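
/*
 * Illustrative sketch, not part of this header: feature detection fills
 * these unions from CPUID leaf 0x10. ResID 1 describes L3 cache
 * allocation (cbm_len is reported minus one), ResID 3 describes memory
 * bandwidth allocation (max_delay), and EDX.cos_max is the highest
 * supported CLOSID for the resource.
 */
static void example_probe_l3(void)
{
        union cpuid_0x10_1_eax eax;
        union cpuid_0x10_x_edx edx;
        unsigned int ebx, ecx;

        cpuid_count(0x00000010, 1, &eax.full, &ebx, &ecx, &edx.full);
        pr_info("L3 CBM length %u, %u CLOSIDs\n",
                eax.split.cbm_len + 1, edx.split.cos_max + 1);
}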

DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);

void rdt_ctrl_update(void *arg);
struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
void rdtgroup_kn_unlock(struct kernfs_node *kn);
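
/*
 * Illustrative usage, not part of this header: rdtgroup_kn_lock_live()
 * returns the rdtgroup behind a kernfs node with rdtgroup_mutex held, or
 * NULL if the group is being torn down; it must always be paired with
 * rdtgroup_kn_unlock() on the same node.
 */
static int example_rdtgroup_op(struct kernfs_node *kn)
{
        struct rdtgroup *rdtgrp;
        int ret = 0;

        rdtgrp = rdtgroup_kn_lock_live(kn);
        if (rdtgrp)
                ret = rdtgrp->closid;   /* ... operate under the mutex ... */
        else
                ret = -ENOENT;
        rdtgroup_kn_unlock(kn);
        return ret;
}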
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                                char *buf, size_t nbytes, loff_t off);
int rdtgroup_schemata_show(struct kernfs_open_file *of,
                           struct seq_file *s, void *v);

/*
 * intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
 *
 * The following considerations are made to keep the impact on the
 * scheduler hot path minimal:
 * - This stays a no-op unless we are running on an Intel SKU which
 *   supports resource control and we enable it by mounting the
 *   resctrl file system.
 * - Caches the per cpu CLOSid values and does the MSR write only
 *   when a task with a different CLOSid is scheduled in.
 *
 * Must be called with preemption disabled.
 */
static inline void intel_rdt_sched_in(void)
{
        if (static_branch_likely(&rdt_enable_key)) {
                struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
                int closid;

                /*
                 * If this task has a closid assigned, use it.
                 * Else use the closid assigned to this cpu.
                 */
                closid = current->closid;
                if (closid == 0)
                        closid = this_cpu_read(cpu_closid);

                if (closid != state->closid) {
                        state->closid = closid;
                        wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, closid);
                }
        }
}
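
/*
 * Illustrative call site, not part of this header: the context-switch
 * path calls intel_rdt_sched_in() with preemption already disabled, e.g.
 * from __switch_to() on x86.
 */
static inline void example_switch_to_hook(void)
{
        intel_rdt_sched_in();   /* preemption is disabled during a switch */
}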

#else

static inline void intel_rdt_sched_in(void) {}

#endif /* CONFIG_INTEL_RDT_A */
#endif /* _ASM_X86_INTEL_RDT_H */