Merge tag 'bcachefs-2024-10-05' of git://evilpiepirate.org/bcachefs
[linux-block.git] / arch / x86 / events / intel / cstate.c
CommitLineData
7ce1346a 1/*
940b2f2f 2 * Support cstate residency counters
7ce1346a
KL
3 *
4 * Copyright (C) 2015, Intel Corp.
5 * Author: Kan Liang (kan.liang@intel.com)
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Library General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Library General Public License for more details.
16 *
17 */
18
19/*
20 * This file export cstate related free running (read-only) counters
21 * for perf. These counters may be use simultaneously by other tools,
22 * such as turbostat. However, it still make sense to implement them
23 * in perf. Because we can conveniently collect them together with
24 * other events, and allow to use them from tools without special MSR
25 * access code.
26 *
27 * The events only support system-wide mode counting. There is no
28 * sampling support because it is not supported by the hardware.
29 *
30 * According to counters' scope and category, two PMUs are registered
31 * with the perf_event core subsystem.
32 * - 'cstate_core': The counter is available for each physical core.
33 * The counters include CORE_C*_RESIDENCY.
34 * - 'cstate_pkg': The counter is available for each physical package.
35 * The counters include PKG_C*_RESIDENCY.
36 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
39 *
40 * Model specific counters:
41 * MSR_CORE_C1_RES: CORE C1 Residency Counter
42 * perf code: 0x00
2da202aa 43 * Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL
26579860 44 * MTL,SRF,GRR,ARL,LNL
7ce1346a
KL
45 * Scope: Core (each processor core has a MSR)
46 * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
47 * perf code: 0x01
1159e094 48 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
ecf71fbc 49 * CNL,KBL,CML,TNT
7ce1346a
KL
50 * Scope: Core
51 * MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
52 * perf code: 0x02
1159e094 53 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
87bf399f 54 * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
bbb96869 55 * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF,
26579860 56 * GRR,ARL,LNL
7ce1346a
KL
57 * Scope: Core
58 * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
59 * perf code: 0x03
f1857a24 60 * Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
26579860 61 * ICL,TGL,RKL,ADL,RPL,MTL,ARL,LNL
7ce1346a
KL
62 * Scope: Core
63 * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
64 * perf code: 0x00
1ffa6c04 65 * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
2da202aa 66 * KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
b1d0e15c 67 * RPL,SPR,MTL,ARL,LNL,SRF
7ce1346a
KL
68 * Scope: Package (physical package)
69 * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
70 * perf code: 0x01
1159e094 71 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
d0ca946b 72 * GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
26579860 73 * ADL,RPL,MTL,ARL,LNL
7ce1346a
KL
74 * Scope: Package (physical package)
75 * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
76 * perf code: 0x02
ecf71fbc 77 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
87bf399f 78 * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
a3100075 79 * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF,
26579860 80 * ARL,LNL
7ce1346a
KL
81 * Scope: Package (physical package)
82 * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
83 * perf code: 0x03
1ffa6c04 84 * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
2c3aedd9 85 * KBL,CML,ICL,TGL,RKL
7ce1346a
KL
86 * Scope: Package (physical package)
87 * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
88 * perf code: 0x04
d0ca946b 89 * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
a3100075 90 * ADL,RPL,MTL,ARL
7ce1346a
KL
91 * Scope: Package (physical package)
92 * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
93 * perf code: 0x05
2c3aedd9 94 * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL
7ce1346a
KL
95 * Scope: Package (physical package)
96 * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
97 * perf code: 0x06
ecf71fbc 98 * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
26579860 99 * TNT,RKL,ADL,RPL,MTL,ARL,LNL
7ce1346a 100 * Scope: Package (physical package)
3877d55a
KL
101 * MSR_MODULE_C6_RES_MS: Module C6 Residency Counter.
102 * perf code: 0x00
bbb96869 103 * Available model: SRF,GRR
3877d55a 104 * Scope: A cluster of cores shared L2 cache
7ce1346a
KL
105 *
106 */
107
108#include <linux/module.h>
109#include <linux/slab.h>
110#include <linux/perf_event.h>
a5f81290 111#include <linux/nospec.h>
7ce1346a 112#include <asm/cpu_device_id.h>
bf4ad541 113#include <asm/intel-family.h>
27f6d22b 114#include "../perf_event.h"
8f2a28c5 115#include "../probe.h"
7ce1346a 116
dc8e5dfb 117MODULE_DESCRIPTION("Support for Intel cstate performance events");
c7afba32
TG
118MODULE_LICENSE("GPL");
119
/*
 * Define a read-only sysfs "format" attribute whose show callback
 * prints a fixed format string (e.g. "config:0-63").
 */
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __cstate_##_var##_show(struct device *dev,	\
				struct device_attribute *attr,	\
				char *page)			\
{								\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
	return sprintf(page, _format "\n");			\
}								\
static struct device_attribute format_attr_##_var =		\
	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
130
/* Model -> events mapping */
struct cstate_model {
	unsigned long		core_events;	/* BIT() mask of PERF_CSTATE_CORE_* events */
	unsigned long		pkg_events;	/* BIT() mask of PERF_CSTATE_PKG_* events */
	unsigned long		module_events;	/* BIT() mask of PERF_CSTATE_MODULE_* events */
	unsigned long		quirks;		/* model-specific quirk flags, see below */
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR	(1UL << 0)	/* Silvermont reports PKG C6 via the C7 MSR */
#define KNL_CORE_C6_MSR		(1UL << 1)	/* Knights Landing has a private CORE C6 MSR */
424646ee 142
7ce1346a 143/* cstate_core PMU */
7ce1346a
KL
144static struct pmu cstate_core_pmu;
145static bool has_cstate_core;
146
424646ee 147enum perf_cstate_core_events {
7ce1346a
KL
148 PERF_CSTATE_CORE_C1_RES = 0,
149 PERF_CSTATE_CORE_C3_RES,
150 PERF_CSTATE_CORE_C6_RES,
151 PERF_CSTATE_CORE_C7_RES,
152
153 PERF_CSTATE_CORE_EVENT_MAX,
154};
155
8f2a28c5
JO
156PMU_EVENT_ATTR_STRING(c1-residency, attr_cstate_core_c1, "event=0x00");
157PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_core_c3, "event=0x01");
158PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_core_c6, "event=0x02");
159PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_core_c7, "event=0x03");
7ce1346a 160
8f2a28c5
JO
161static unsigned long core_msr_mask;
162
163PMU_EVENT_GROUP(events, cstate_core_c1);
164PMU_EVENT_GROUP(events, cstate_core_c3);
165PMU_EVENT_GROUP(events, cstate_core_c6);
166PMU_EVENT_GROUP(events, cstate_core_c7);
167
168static bool test_msr(int idx, void *data)
169{
170 return test_bit(idx, (unsigned long *) data);
171}
172
173static struct perf_msr core_msr[] = {
174 [PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES, &group_cstate_core_c1, test_msr },
175 [PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY, &group_cstate_core_c3, test_msr },
176 [PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY, &group_cstate_core_c6, test_msr },
177 [PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY, &group_cstate_core_c7, test_msr },
7ce1346a
KL
178};
179
8f2a28c5 180static struct attribute *attrs_empty[] = {
7ce1346a
KL
181 NULL,
182};
183
8f2a28c5
JO
184/*
185 * There are no default events, but we need to create
186 * "events" group (with empty attrs) before updating
187 * it with detected events.
188 */
243218ca 189static struct attribute_group cstate_events_attr_group = {
7ce1346a 190 .name = "events",
8f2a28c5 191 .attrs = attrs_empty,
7ce1346a
KL
192};
193
243218ca
KL
194DEFINE_CSTATE_FORMAT_ATTR(cstate_event, event, "config:0-63");
195static struct attribute *cstate_format_attrs[] = {
196 &format_attr_cstate_event.attr,
7ce1346a
KL
197 NULL,
198};
199
243218ca 200static struct attribute_group cstate_format_attr_group = {
7ce1346a 201 .name = "format",
243218ca 202 .attrs = cstate_format_attrs,
7ce1346a
KL
203};
204
243218ca
KL
205static const struct attribute_group *cstate_attr_groups[] = {
206 &cstate_events_attr_group,
207 &cstate_format_attr_group,
7ce1346a
KL
208 NULL,
209};
210
7ce1346a 211/* cstate_pkg PMU */
7ce1346a
KL
212static struct pmu cstate_pkg_pmu;
213static bool has_cstate_pkg;
214
424646ee 215enum perf_cstate_pkg_events {
7ce1346a
KL
216 PERF_CSTATE_PKG_C2_RES = 0,
217 PERF_CSTATE_PKG_C3_RES,
218 PERF_CSTATE_PKG_C6_RES,
219 PERF_CSTATE_PKG_C7_RES,
220 PERF_CSTATE_PKG_C8_RES,
221 PERF_CSTATE_PKG_C9_RES,
222 PERF_CSTATE_PKG_C10_RES,
223
224 PERF_CSTATE_PKG_EVENT_MAX,
225};
226
8f2a28c5
JO
227PMU_EVENT_ATTR_STRING(c2-residency, attr_cstate_pkg_c2, "event=0x00");
228PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_pkg_c3, "event=0x01");
229PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_pkg_c6, "event=0x02");
230PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_pkg_c7, "event=0x03");
231PMU_EVENT_ATTR_STRING(c8-residency, attr_cstate_pkg_c8, "event=0x04");
232PMU_EVENT_ATTR_STRING(c9-residency, attr_cstate_pkg_c9, "event=0x05");
233PMU_EVENT_ATTR_STRING(c10-residency, attr_cstate_pkg_c10, "event=0x06");
234
235static unsigned long pkg_msr_mask;
236
237PMU_EVENT_GROUP(events, cstate_pkg_c2);
238PMU_EVENT_GROUP(events, cstate_pkg_c3);
239PMU_EVENT_GROUP(events, cstate_pkg_c6);
240PMU_EVENT_GROUP(events, cstate_pkg_c7);
241PMU_EVENT_GROUP(events, cstate_pkg_c8);
242PMU_EVENT_GROUP(events, cstate_pkg_c9);
243PMU_EVENT_GROUP(events, cstate_pkg_c10);
244
245static struct perf_msr pkg_msr[] = {
246 [PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY, &group_cstate_pkg_c2, test_msr },
247 [PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY, &group_cstate_pkg_c3, test_msr },
248 [PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY, &group_cstate_pkg_c6, test_msr },
249 [PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY, &group_cstate_pkg_c7, test_msr },
250 [PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY, &group_cstate_pkg_c8, test_msr },
251 [PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY, &group_cstate_pkg_c9, test_msr },
252 [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &group_cstate_pkg_c10, test_msr },
7ce1346a
KL
253};
254
3877d55a
KL
255/* cstate_module PMU */
256static struct pmu cstate_module_pmu;
257static bool has_cstate_module;
258
259enum perf_cstate_module_events {
260 PERF_CSTATE_MODULE_C6_RES = 0,
261
262 PERF_CSTATE_MODULE_EVENT_MAX,
263};
264
265PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_module_c6, "event=0x00");
266
267static unsigned long module_msr_mask;
268
269PMU_EVENT_GROUP(events, cstate_module_c6);
270
271static struct perf_msr module_msr[] = {
272 [PERF_CSTATE_MODULE_C6_RES] = { MSR_MODULE_C6_RES_MS, &group_cstate_module_c6, test_msr },
273};
274
7ce1346a
KL
275static int cstate_pmu_event_init(struct perf_event *event)
276{
277 u64 cfg = event->attr.config;
7ce1346a
KL
278
279 if (event->attr.type != event->pmu->type)
280 return -ENOENT;
281
282 /* unsupported modes and filters */
2ff40250 283 if (event->attr.sample_period) /* no sampling */
7ce1346a
KL
284 return -EINVAL;
285
49de0493
TG
286 if (event->cpu < 0)
287 return -EINVAL;
288
7ce1346a
KL
289 if (event->pmu == &cstate_core_pmu) {
290 if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
291 return -EINVAL;
8f2a28c5
JO
292 cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_CORE_EVENT_MAX);
293 if (!(core_msr_mask & (1 << cfg)))
7ce1346a
KL
294 return -EINVAL;
295 event->hw.event_base = core_msr[cfg].msr;
296 } else if (event->pmu == &cstate_pkg_pmu) {
297 if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
298 return -EINVAL;
a5f81290 299 cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
8f2a28c5 300 if (!(pkg_msr_mask & (1 << cfg)))
7ce1346a
KL
301 return -EINVAL;
302 event->hw.event_base = pkg_msr[cfg].msr;
3877d55a
KL
303 } else if (event->pmu == &cstate_module_pmu) {
304 if (cfg >= PERF_CSTATE_MODULE_EVENT_MAX)
305 return -EINVAL;
306 cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_MODULE_EVENT_MAX);
307 if (!(module_msr_mask & (1 << cfg)))
308 return -EINVAL;
309 event->hw.event_base = module_msr[cfg].msr;
49de0493 310 } else {
7ce1346a 311 return -ENOENT;
49de0493
TG
312 }
313
7ce1346a
KL
314 event->hw.config = cfg;
315 event->hw.idx = -1;
49de0493 316 return 0;
7ce1346a
KL
317}
318
319static inline u64 cstate_pmu_read_counter(struct perf_event *event)
320{
321 u64 val;
322
323 rdmsrl(event->hw.event_base, val);
324 return val;
325}
326
327static void cstate_pmu_event_update(struct perf_event *event)
328{
329 struct hw_perf_event *hwc = &event->hw;
330 u64 prev_raw_count, new_raw_count;
331
7ce1346a 332 prev_raw_count = local64_read(&hwc->prev_count);
4c1c9dea
UB
333 do {
334 new_raw_count = cstate_pmu_read_counter(event);
335 } while (!local64_try_cmpxchg(&hwc->prev_count,
336 &prev_raw_count, new_raw_count));
7ce1346a
KL
337
338 local64_add(new_raw_count - prev_raw_count, &event->count);
339}
340
341static void cstate_pmu_event_start(struct perf_event *event, int mode)
342{
343 local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
344}
345
346static void cstate_pmu_event_stop(struct perf_event *event, int mode)
347{
348 cstate_pmu_event_update(event);
349}
350
351static void cstate_pmu_event_del(struct perf_event *event, int mode)
352{
353 cstate_pmu_event_stop(event, PERF_EF_UPDATE);
354}
355
356static int cstate_pmu_event_add(struct perf_event *event, int mode)
357{
358 if (mode & PERF_EF_START)
359 cstate_pmu_event_start(event, mode);
360
361 return 0;
362}
363
d9f3b450 364static const struct attribute_group *core_attr_update[] = {
8f2a28c5
JO
365 &group_cstate_core_c1,
366 &group_cstate_core_c3,
367 &group_cstate_core_c6,
368 &group_cstate_core_c7,
369 NULL,
370};
371
d9f3b450 372static const struct attribute_group *pkg_attr_update[] = {
8f2a28c5
JO
373 &group_cstate_pkg_c2,
374 &group_cstate_pkg_c3,
375 &group_cstate_pkg_c6,
376 &group_cstate_pkg_c7,
377 &group_cstate_pkg_c8,
378 &group_cstate_pkg_c9,
379 &group_cstate_pkg_c10,
380 NULL,
381};
382
3877d55a
KL
383static const struct attribute_group *module_attr_update[] = {
384 &group_cstate_module_c6,
385 NULL
386};
387
424646ee 388static struct pmu cstate_core_pmu = {
243218ca 389 .attr_groups = cstate_attr_groups,
8f2a28c5 390 .attr_update = core_attr_update,
424646ee
TG
391 .name = "cstate_core",
392 .task_ctx_nr = perf_invalid_context,
393 .event_init = cstate_pmu_event_init,
394 .add = cstate_pmu_event_add,
395 .del = cstate_pmu_event_del,
396 .start = cstate_pmu_event_start,
397 .stop = cstate_pmu_event_stop,
398 .read = cstate_pmu_event_update,
2ff40250 399 .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
08155c7f 400 .scope = PERF_PMU_SCOPE_CORE,
74545f63 401 .module = THIS_MODULE,
424646ee
TG
402};
403
404static struct pmu cstate_pkg_pmu = {
243218ca 405 .attr_groups = cstate_attr_groups,
8f2a28c5 406 .attr_update = pkg_attr_update,
424646ee
TG
407 .name = "cstate_pkg",
408 .task_ctx_nr = perf_invalid_context,
409 .event_init = cstate_pmu_event_init,
410 .add = cstate_pmu_event_add,
411 .del = cstate_pmu_event_del,
412 .start = cstate_pmu_event_start,
413 .stop = cstate_pmu_event_stop,
414 .read = cstate_pmu_event_update,
2ff40250 415 .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
08155c7f 416 .scope = PERF_PMU_SCOPE_PKG,
74545f63 417 .module = THIS_MODULE,
424646ee
TG
418};
419
3877d55a
KL
420static struct pmu cstate_module_pmu = {
421 .attr_groups = cstate_attr_groups,
422 .attr_update = module_attr_update,
423 .name = "cstate_module",
424 .task_ctx_nr = perf_invalid_context,
425 .event_init = cstate_pmu_event_init,
426 .add = cstate_pmu_event_add,
427 .del = cstate_pmu_event_del,
428 .start = cstate_pmu_event_start,
429 .stop = cstate_pmu_event_stop,
430 .read = cstate_pmu_event_update,
431 .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
08155c7f 432 .scope = PERF_PMU_SCOPE_CLUSTER,
3877d55a
KL
433 .module = THIS_MODULE,
434};
435
424646ee
TG
436static const struct cstate_model nhm_cstates __initconst = {
437 .core_events = BIT(PERF_CSTATE_CORE_C3_RES) |
438 BIT(PERF_CSTATE_CORE_C6_RES),
439
440 .pkg_events = BIT(PERF_CSTATE_PKG_C3_RES) |
441 BIT(PERF_CSTATE_PKG_C6_RES) |
442 BIT(PERF_CSTATE_PKG_C7_RES),
443};
444
445static const struct cstate_model snb_cstates __initconst = {
446 .core_events = BIT(PERF_CSTATE_CORE_C3_RES) |
447 BIT(PERF_CSTATE_CORE_C6_RES) |
448 BIT(PERF_CSTATE_CORE_C7_RES),
449
450 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
451 BIT(PERF_CSTATE_PKG_C3_RES) |
452 BIT(PERF_CSTATE_PKG_C6_RES) |
453 BIT(PERF_CSTATE_PKG_C7_RES),
454};
455
456static const struct cstate_model hswult_cstates __initconst = {
457 .core_events = BIT(PERF_CSTATE_CORE_C3_RES) |
458 BIT(PERF_CSTATE_CORE_C6_RES) |
459 BIT(PERF_CSTATE_CORE_C7_RES),
460
461 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
462 BIT(PERF_CSTATE_PKG_C3_RES) |
463 BIT(PERF_CSTATE_PKG_C6_RES) |
464 BIT(PERF_CSTATE_PKG_C7_RES) |
465 BIT(PERF_CSTATE_PKG_C8_RES) |
466 BIT(PERF_CSTATE_PKG_C9_RES) |
467 BIT(PERF_CSTATE_PKG_C10_RES),
468};
469
1159e094
HP
470static const struct cstate_model cnl_cstates __initconst = {
471 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
472 BIT(PERF_CSTATE_CORE_C3_RES) |
473 BIT(PERF_CSTATE_CORE_C6_RES) |
474 BIT(PERF_CSTATE_CORE_C7_RES),
475
476 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
477 BIT(PERF_CSTATE_PKG_C3_RES) |
478 BIT(PERF_CSTATE_PKG_C6_RES) |
479 BIT(PERF_CSTATE_PKG_C7_RES) |
480 BIT(PERF_CSTATE_PKG_C8_RES) |
481 BIT(PERF_CSTATE_PKG_C9_RES) |
482 BIT(PERF_CSTATE_PKG_C10_RES),
483};
484
f1857a24
KL
485static const struct cstate_model icl_cstates __initconst = {
486 .core_events = BIT(PERF_CSTATE_CORE_C6_RES) |
487 BIT(PERF_CSTATE_CORE_C7_RES),
488
489 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
490 BIT(PERF_CSTATE_PKG_C3_RES) |
491 BIT(PERF_CSTATE_PKG_C6_RES) |
492 BIT(PERF_CSTATE_PKG_C7_RES) |
493 BIT(PERF_CSTATE_PKG_C8_RES) |
494 BIT(PERF_CSTATE_PKG_C9_RES) |
495 BIT(PERF_CSTATE_PKG_C10_RES),
496};
497
87bf399f
ZR
498static const struct cstate_model icx_cstates __initconst = {
499 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
500 BIT(PERF_CSTATE_CORE_C6_RES),
501
502 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
503 BIT(PERF_CSTATE_PKG_C6_RES),
504};
505
d0ca946b
KL
506static const struct cstate_model adl_cstates __initconst = {
507 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
508 BIT(PERF_CSTATE_CORE_C6_RES) |
509 BIT(PERF_CSTATE_CORE_C7_RES),
510
511 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
512 BIT(PERF_CSTATE_PKG_C3_RES) |
513 BIT(PERF_CSTATE_PKG_C6_RES) |
d0ca946b 514 BIT(PERF_CSTATE_PKG_C8_RES) |
d0ca946b
KL
515 BIT(PERF_CSTATE_PKG_C10_RES),
516};
517
26579860
ZR
518static const struct cstate_model lnl_cstates __initconst = {
519 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
520 BIT(PERF_CSTATE_CORE_C6_RES) |
521 BIT(PERF_CSTATE_CORE_C7_RES),
522
523 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
524 BIT(PERF_CSTATE_PKG_C3_RES) |
525 BIT(PERF_CSTATE_PKG_C6_RES) |
d0ca946b
KL
526 BIT(PERF_CSTATE_PKG_C10_RES),
527};
528
424646ee
TG
529static const struct cstate_model slm_cstates __initconst = {
530 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
531 BIT(PERF_CSTATE_CORE_C6_RES),
532
533 .pkg_events = BIT(PERF_CSTATE_PKG_C6_RES),
534 .quirks = SLM_PKG_C6_USE_C7_MSR,
535};
536
889882bc
LO
537
538static const struct cstate_model knl_cstates __initconst = {
539 .core_events = BIT(PERF_CSTATE_CORE_C6_RES),
540
541 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
542 BIT(PERF_CSTATE_PKG_C3_RES) |
543 BIT(PERF_CSTATE_PKG_C6_RES),
544 .quirks = KNL_CORE_C6_MSR,
545};
546
547
5c10b048
HP
548static const struct cstate_model glm_cstates __initconst = {
549 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
550 BIT(PERF_CSTATE_CORE_C3_RES) |
551 BIT(PERF_CSTATE_CORE_C6_RES),
552
553 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
554 BIT(PERF_CSTATE_PKG_C3_RES) |
555 BIT(PERF_CSTATE_PKG_C6_RES) |
556 BIT(PERF_CSTATE_PKG_C10_RES),
557};
558
bbb96869
KL
559static const struct cstate_model grr_cstates __initconst = {
560 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
561 BIT(PERF_CSTATE_CORE_C6_RES),
562
563 .module_events = BIT(PERF_CSTATE_MODULE_C6_RES),
564};
565
3877d55a
KL
566static const struct cstate_model srf_cstates __initconst = {
567 .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
568 BIT(PERF_CSTATE_CORE_C6_RES),
569
b1d0e15c
ZW
570 .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
571 BIT(PERF_CSTATE_PKG_C6_RES),
3877d55a
KL
572
573 .module_events = BIT(PERF_CSTATE_MODULE_C6_RES),
574};
575
889882bc 576
424646ee 577static const struct x86_cpu_id intel_cstates_match[] __initconst = {
5ee80094
TL
578 X86_MATCH_VFM(INTEL_NEHALEM, &nhm_cstates),
579 X86_MATCH_VFM(INTEL_NEHALEM_EP, &nhm_cstates),
580 X86_MATCH_VFM(INTEL_NEHALEM_EX, &nhm_cstates),
581
582 X86_MATCH_VFM(INTEL_WESTMERE, &nhm_cstates),
583 X86_MATCH_VFM(INTEL_WESTMERE_EP, &nhm_cstates),
584 X86_MATCH_VFM(INTEL_WESTMERE_EX, &nhm_cstates),
585
586 X86_MATCH_VFM(INTEL_SANDYBRIDGE, &snb_cstates),
587 X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &snb_cstates),
588
589 X86_MATCH_VFM(INTEL_IVYBRIDGE, &snb_cstates),
590 X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &snb_cstates),
591
592 X86_MATCH_VFM(INTEL_HASWELL, &snb_cstates),
593 X86_MATCH_VFM(INTEL_HASWELL_X, &snb_cstates),
594 X86_MATCH_VFM(INTEL_HASWELL_G, &snb_cstates),
595
596 X86_MATCH_VFM(INTEL_HASWELL_L, &hswult_cstates),
597
598 X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &slm_cstates),
599 X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &slm_cstates),
600 X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &slm_cstates),
601
602 X86_MATCH_VFM(INTEL_BROADWELL, &snb_cstates),
603 X86_MATCH_VFM(INTEL_BROADWELL_D, &snb_cstates),
604 X86_MATCH_VFM(INTEL_BROADWELL_G, &snb_cstates),
605 X86_MATCH_VFM(INTEL_BROADWELL_X, &snb_cstates),
606
607 X86_MATCH_VFM(INTEL_SKYLAKE_L, &snb_cstates),
608 X86_MATCH_VFM(INTEL_SKYLAKE, &snb_cstates),
609 X86_MATCH_VFM(INTEL_SKYLAKE_X, &snb_cstates),
610
611 X86_MATCH_VFM(INTEL_KABYLAKE_L, &hswult_cstates),
612 X86_MATCH_VFM(INTEL_KABYLAKE, &hswult_cstates),
613 X86_MATCH_VFM(INTEL_COMETLAKE_L, &hswult_cstates),
614 X86_MATCH_VFM(INTEL_COMETLAKE, &hswult_cstates),
615
616 X86_MATCH_VFM(INTEL_CANNONLAKE_L, &cnl_cstates),
617
618 X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &knl_cstates),
619 X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &knl_cstates),
620
621 X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &glm_cstates),
622 X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &glm_cstates),
623 X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &glm_cstates),
624 X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &glm_cstates),
625 X86_MATCH_VFM(INTEL_ATOM_TREMONT, &glm_cstates),
626 X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &glm_cstates),
627 X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &adl_cstates),
628 X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &srf_cstates),
629 X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &grr_cstates),
630
631 X86_MATCH_VFM(INTEL_ICELAKE_L, &icl_cstates),
632 X86_MATCH_VFM(INTEL_ICELAKE, &icl_cstates),
633 X86_MATCH_VFM(INTEL_ICELAKE_X, &icx_cstates),
634 X86_MATCH_VFM(INTEL_ICELAKE_D, &icx_cstates),
635 X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &icx_cstates),
636 X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &icx_cstates),
637 X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &icx_cstates),
638 X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, &icx_cstates),
639
640 X86_MATCH_VFM(INTEL_TIGERLAKE_L, &icl_cstates),
641 X86_MATCH_VFM(INTEL_TIGERLAKE, &icl_cstates),
642 X86_MATCH_VFM(INTEL_ROCKETLAKE, &icl_cstates),
643 X86_MATCH_VFM(INTEL_ALDERLAKE, &adl_cstates),
644 X86_MATCH_VFM(INTEL_ALDERLAKE_L, &adl_cstates),
645 X86_MATCH_VFM(INTEL_RAPTORLAKE, &adl_cstates),
646 X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &adl_cstates),
647 X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_cstates),
648 X86_MATCH_VFM(INTEL_METEORLAKE, &adl_cstates),
649 X86_MATCH_VFM(INTEL_METEORLAKE_L, &adl_cstates),
a3100075
ZR
650 X86_MATCH_VFM(INTEL_ARROWLAKE, &adl_cstates),
651 X86_MATCH_VFM(INTEL_ARROWLAKE_H, &adl_cstates),
652 X86_MATCH_VFM(INTEL_ARROWLAKE_U, &adl_cstates),
26579860 653 X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_cstates),
424646ee
TG
654 { },
655};
656MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
657
424646ee 658static int __init cstate_probe(const struct cstate_model *cm)
7ce1346a
KL
659{
660 /* SLM has different MSR for PKG C6 */
424646ee 661 if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
7ce1346a 662 pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
7ce1346a 663
889882bc
LO
664 /* KNL has different MSR for CORE C6 */
665 if (cm->quirks & KNL_CORE_C6_MSR)
666 pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;
667
668
8f2a28c5
JO
669 core_msr_mask = perf_msr_probe(core_msr, PERF_CSTATE_CORE_EVENT_MAX,
670 true, (void *) &cm->core_events);
671
672 pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX,
673 true, (void *) &cm->pkg_events);
7ce1346a 674
3877d55a
KL
675 module_msr_mask = perf_msr_probe(module_msr, PERF_CSTATE_MODULE_EVENT_MAX,
676 true, (void *) &cm->module_events);
677
8f2a28c5
JO
678 has_cstate_core = !!core_msr_mask;
679 has_cstate_pkg = !!pkg_msr_mask;
3877d55a 680 has_cstate_module = !!module_msr_mask;
7ce1346a 681
3877d55a 682 return (has_cstate_core || has_cstate_pkg || has_cstate_module) ? 0 : -ENODEV;
7ce1346a
KL
683}
684
c7afba32 685static inline void cstate_cleanup(void)
7ce1346a 686{
d29859e7
TG
687 if (has_cstate_core)
688 perf_pmu_unregister(&cstate_core_pmu);
7ce1346a 689
d29859e7
TG
690 if (has_cstate_pkg)
691 perf_pmu_unregister(&cstate_pkg_pmu);
3877d55a
KL
692
693 if (has_cstate_module)
694 perf_pmu_unregister(&cstate_module_pmu);
7ce1346a
KL
695}
696
d29859e7 697static int __init cstate_init(void)
7ce1346a 698{
77c34ef1 699 int err;
d29859e7 700
7ce1346a
KL
701 if (has_cstate_core) {
702 err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
d29859e7
TG
703 if (err) {
704 has_cstate_core = false;
705 pr_info("Failed to register cstate core pmu\n");
834fcd29 706 cstate_cleanup();
77c34ef1 707 return err;
d29859e7 708 }
7ce1346a
KL
709 }
710
711 if (has_cstate_pkg) {
bd745d1c 712 if (topology_max_dies_per_package() > 1) {
08155c7f
KL
713 /* CLX-AP is multi-die and the cstate is die-scope */
714 cstate_pkg_pmu.scope = PERF_PMU_SCOPE_DIE;
cb63ba0f
KL
715 err = perf_pmu_register(&cstate_pkg_pmu,
716 "cstate_die", -1);
717 } else {
718 err = perf_pmu_register(&cstate_pkg_pmu,
719 cstate_pkg_pmu.name, -1);
720 }
d29859e7
TG
721 if (err) {
722 has_cstate_pkg = false;
723 pr_info("Failed to register cstate pkg pmu\n");
724 cstate_cleanup();
77c34ef1 725 return err;
d29859e7 726 }
7ce1346a 727 }
3877d55a
KL
728
729 if (has_cstate_module) {
730 err = perf_pmu_register(&cstate_module_pmu, cstate_module_pmu.name, -1);
731 if (err) {
732 has_cstate_module = false;
733 pr_info("Failed to register cstate cluster pmu\n");
734 cstate_cleanup();
735 return err;
736 }
737 }
834fcd29 738 return 0;
7ce1346a
KL
739}
740
741static int __init cstate_pmu_init(void)
742{
424646ee 743 const struct x86_cpu_id *id;
7ce1346a
KL
744 int err;
745
424646ee 746 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
7ce1346a
KL
747 return -ENODEV;
748
424646ee
TG
749 id = x86_match_cpu(intel_cstates_match);
750 if (!id)
751 return -ENODEV;
752
753 err = cstate_probe((const struct cstate_model *) id->driver_data);
7ce1346a
KL
754 if (err)
755 return err;
756
d29859e7 757 return cstate_init();
7ce1346a 758}
c7afba32
TG
759module_init(cstate_pmu_init);
760
761static void __exit cstate_pmu_exit(void)
762{
c7afba32 763 cstate_cleanup();
c7afba32
TG
764}
765module_exit(cstate_pmu_exit);