// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support Intel RAPL energy consumption counters
 * Copyright (C) 2013 Google, Inc., Stephane Eranian
 *
 * Intel RAPL interface is specified in the IA-32 Manual Vol3b
 * section 14.7.1 (September 2013)
 *
 * RAPL provides more controls than just reporting energy consumption,
 * however here we only expose the energy consumption free running
 * counters (pp0, pkg, dram, pp1/gpu, psys).
 *
 * Each of those counters increments in a power unit defined by the
 * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules
 * but it can vary.
 *
 * Counter to rapl events mappings:
 *
 * pp0 counter: consumption of all physical cores (power plane 0)
 *   event: rapl_energy_cores
 *   perf code: 0x1
 *
 * pkg counter: consumption of the whole processor package
 *   event: rapl_energy_pkg
 *   perf code: 0x2
 *
 * dram counter: consumption of the dram domain (servers only)
 *   event: rapl_energy_dram
 *   perf code: 0x3
 *
 * gpu counter: consumption of the builtin-gpu domain (client only)
 *   event: rapl_energy_gpu
 *   perf code: 0x4
 *
 * psys counter: consumption of the builtin-psys domain (client only)
 *   event: rapl_energy_psys
 *   perf code: 0x5
 *
 * We manage those counters as free running (read-only). They may be
 * used simultaneously by other tools, such as turbostat.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it does not make sense and is not
 * supported by the RAPL hardware.
 *
 * Because we want to avoid floating-point operations in the kernel,
 * the events are all reported in fixed point arithmetic (32.32).
 * Tools must adjust the counts to convert them to Watts using
 * the duration of the measurement. Tools may use a function such as
 * ldexp(raw_count, -32);
 */
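
/*
 * Editor's note, illustrative usage (not part of the original file):
 * the counters below show up under the "power" PMU registered in
 * rapl_pmu_init(), so a system-wide reading can be taken with e.g.
 *
 *   perf stat -a -e power/energy-pkg/ -- sleep 1
 *
 * perf applies the .scale (2^-32) and .unit ("Joules") sysfs attributes,
 * so the reported value is already in Joules; dividing by the elapsed
 * time gives average package power in Watts.
 */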

#define pr_fmt(fmt) "RAPL PMU: " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"

MODULE_LICENSE("GPL");

/*
 * RAPL energy status counters
 */
#define RAPL_IDX_PP0_NRG_STAT   0       /* all cores */
#define INTEL_RAPL_PP0          0x1     /* pseudo-encoding */
#define RAPL_IDX_PKG_NRG_STAT   1       /* entire package */
#define INTEL_RAPL_PKG          0x2     /* pseudo-encoding */
#define RAPL_IDX_RAM_NRG_STAT   2       /* DRAM */
#define INTEL_RAPL_RAM          0x3     /* pseudo-encoding */
#define RAPL_IDX_PP1_NRG_STAT   3       /* gpu */
#define INTEL_RAPL_PP1          0x4     /* pseudo-encoding */
#define RAPL_IDX_PSYS_NRG_STAT  4       /* psys */
#define INTEL_RAPL_PSYS         0x5     /* pseudo-encoding */

#define NR_RAPL_DOMAINS         0x5
static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
        "pp0-core",
        "package",
        "dram",
        "pp1-gpu",
        "psys",
};

/* Clients have PP0, PKG, PP1 */
#define RAPL_IDX_CLN    (1<<RAPL_IDX_PP0_NRG_STAT|\
                         1<<RAPL_IDX_PKG_NRG_STAT|\
                         1<<RAPL_IDX_PP1_NRG_STAT)

/* Servers have PP0, PKG, RAM */
#define RAPL_IDX_SRV    (1<<RAPL_IDX_PP0_NRG_STAT|\
                         1<<RAPL_IDX_PKG_NRG_STAT|\
                         1<<RAPL_IDX_RAM_NRG_STAT)

/* Haswell clients have PP0, PKG, RAM, PP1 */
#define RAPL_IDX_HSW    (1<<RAPL_IDX_PP0_NRG_STAT|\
                         1<<RAPL_IDX_PKG_NRG_STAT|\
                         1<<RAPL_IDX_RAM_NRG_STAT|\
                         1<<RAPL_IDX_PP1_NRG_STAT)

/* SKL clients have PP0, PKG, RAM, PP1, PSYS */
#define RAPL_IDX_SKL_CLN (1<<RAPL_IDX_PP0_NRG_STAT|\
                          1<<RAPL_IDX_PKG_NRG_STAT|\
                          1<<RAPL_IDX_RAM_NRG_STAT|\
                          1<<RAPL_IDX_PP1_NRG_STAT|\
                          1<<RAPL_IDX_PSYS_NRG_STAT)

/* Knights Landing has PKG, RAM */
#define RAPL_IDX_KNL    (1<<RAPL_IDX_PKG_NRG_STAT|\
                         1<<RAPL_IDX_RAM_NRG_STAT)

/*
 * event code: LSB 8 bits, passed in attr->config
 * any other bit is reserved
 */
#define RAPL_EVENT_MASK 0xFFULL

#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format)            \
static ssize_t __rapl_##_var##_show(struct kobject *kobj,        \
                                    struct kobj_attribute *attr, \
                                    char *page)                  \
{                                                                \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);              \
        return sprintf(page, _format "\n");                      \
}                                                                \
static struct kobj_attribute format_attr_##_var =                \
        __ATTR(_name, 0444, __rapl_##_var##_show, NULL)
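
/*
 * Editor's note (illustrative): DEFINE_RAPL_FORMAT_ATTR(event, event,
 * "config:0-7") below generates a read-only sysfs attribute that, once
 * the PMU is registered as "power" in rapl_pmu_init(), appears as
 * /sys/bus/event_source/devices/power/format/event and tells tools
 * which attr->config bits carry the event code.
 */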

#define RAPL_CNTR_WIDTH 32

#define RAPL_EVENT_ATTR_STR(_name, v, str)                                   \
static struct perf_pmu_events_attr event_attr_##v = {                       \
        .attr           = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
        .id             = 0,                                                 \
        .event_str      = str,                                               \
};
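
/*
 * Editor's note (illustrative): each RAPL_EVENT_ATTR_STR() use further
 * down, e.g. RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01"),
 * ends up as /sys/bus/event_source/devices/power/events/energy-cores
 * containing "event=0x01"; this is what lets tools translate the
 * power/energy-cores/ event name into attr->config = 0x01.
 */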

struct rapl_pmu {
        raw_spinlock_t          lock;
        int                     n_active;
        int                     cpu;
        struct list_head        active_list;
        struct pmu              *pmu;
        ktime_t                 timer_interval;
        struct hrtimer          hrtimer;
};

struct rapl_pmus {
        struct pmu              pmu;
        unsigned int            maxpkg;
        struct rapl_pmu         *pmus[];
};

/* 1/2^hw_unit Joule */
static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
static struct rapl_pmus *rapl_pmus;
static cpumask_t rapl_cpu_mask;
static unsigned int rapl_cntr_mask;
static u64 rapl_timer_ms;

static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
        unsigned int pkgid = topology_logical_package_id(cpu);

        /*
         * The unsigned check also catches the '-1' return value for non
         * existent mappings in the topology map.
         */
        return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
}

static inline u64 rapl_read_counter(struct perf_event *event)
{
        u64 raw;

        rdmsrl(event->hw.event_base, raw);
        return raw;
}

static inline u64 rapl_scale(u64 v, int cfg)
{
        if (cfg > NR_RAPL_DOMAINS) {
                pr_warn("Invalid domain %d, failed to scale data\n", cfg);
                return v;
        }
        /*
         * scale delta to smallest unit (1/2^32)
         * users must then scale back: count * 1/2^32 to get Joules
         * or use ldexp(count, -32).
         * Watts = Joules/Time delta
         */
        return v << (32 - rapl_hw_unit[cfg - 1]);
}
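
/*
 * Editor's note, a worked example (not in the original source): with
 * rapl_hw_unit[cfg - 1] == 16, i.e. the MSR counts in 1/2^16 Joule
 * increments (the SandyBridge default mentioned at the top of this
 * file), a raw delta of 1 is scaled by 1 << (32 - 16) = 65536, which
 * is exactly 65536 * 2^-32 = 1/2^16 Joules in the 32.32 fixed point
 * representation exported to userspace.
 */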

static u64 rapl_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev_raw_count, new_raw_count;
        s64 delta, sdelta;
        int shift = RAPL_CNTR_WIDTH;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        rdmsrl(event->hw.event_base, new_raw_count);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count) {
                cpu_relax();
                goto again;
        }

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        sdelta = rapl_scale(delta, event->hw.config);

        local64_add(sdelta, &event->count);

        return new_raw_count;
}
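
/*
 * Editor's note (clarification added, not in the original source): the
 * shift left/right by RAPL_CNTR_WIDTH above keeps only the low 32 bits
 * of each MSR read, so a counter that wrapped between two reads still
 * produces the correct positive delta thanks to the modular arithmetic
 * of the 64-bit subtraction.
 */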
230
65661f96
SE
231static void rapl_start_hrtimer(struct rapl_pmu *pmu)
232{
514c2304
TG
233 hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
234 HRTIMER_MODE_REL_PINNED);
65661f96
SE
235}
236
65661f96
SE
237static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
238{
8a6d2f8f 239 struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
65661f96
SE
240 struct perf_event *event;
241 unsigned long flags;
242
243 if (!pmu->n_active)
244 return HRTIMER_NORESTART;
245
a208749c 246 raw_spin_lock_irqsave(&pmu->lock, flags);
65661f96 247
7162b8fe 248 list_for_each_entry(event, &pmu->active_list, active_entry)
65661f96 249 rapl_event_update(event);
65661f96 250
a208749c 251 raw_spin_unlock_irqrestore(&pmu->lock, flags);
65661f96
SE
252
253 hrtimer_forward_now(hrtimer, pmu->timer_interval);
254
255 return HRTIMER_RESTART;
256}
257
258static void rapl_hrtimer_init(struct rapl_pmu *pmu)
259{
260 struct hrtimer *hr = &pmu->hrtimer;
261
262 hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
263 hr->function = rapl_hrtimer_handle;
264}

static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
                                   struct perf_event *event)
{
        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        event->hw.state = 0;

        list_add_tail(&event->active_entry, &pmu->active_list);

        local64_set(&event->hw.prev_count, rapl_read_counter(event));

        pmu->n_active++;
        if (pmu->n_active == 1)
                rapl_start_hrtimer(pmu);
}

static void rapl_pmu_event_start(struct perf_event *event, int mode)
{
        struct rapl_pmu *pmu = event->pmu_private;
        unsigned long flags;

        raw_spin_lock_irqsave(&pmu->lock, flags);
        __rapl_pmu_event_start(pmu, event);
        raw_spin_unlock_irqrestore(&pmu->lock, flags);
}

static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
        struct rapl_pmu *pmu = event->pmu_private;
        struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;

        raw_spin_lock_irqsave(&pmu->lock, flags);

        /* mark event as deactivated and stopped */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                WARN_ON_ONCE(pmu->n_active <= 0);
                pmu->n_active--;
                if (pmu->n_active == 0)
                        hrtimer_cancel(&pmu->hrtimer);

                list_del(&event->active_entry);

                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
        }

        /* check if update of sw counter is necessary */
        if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                rapl_event_update(event);
                hwc->state |= PERF_HES_UPTODATE;
        }

        raw_spin_unlock_irqrestore(&pmu->lock, flags);
}

static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
        struct rapl_pmu *pmu = event->pmu_private;
        struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;

        raw_spin_lock_irqsave(&pmu->lock, flags);

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (mode & PERF_EF_START)
                __rapl_pmu_event_start(pmu, event);

        raw_spin_unlock_irqrestore(&pmu->lock, flags);

        return 0;
}

static void rapl_pmu_event_del(struct perf_event *event, int flags)
{
        rapl_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int rapl_pmu_event_init(struct perf_event *event)
{
        u64 cfg = event->attr.config & RAPL_EVENT_MASK;
        int bit, msr, ret = 0;
        struct rapl_pmu *pmu;

        /* only look at RAPL events */
        if (event->attr.type != rapl_pmus->pmu.type)
                return -ENOENT;

        /* check only supported bits are set */
        if (event->attr.config & ~RAPL_EVENT_MASK)
                return -EINVAL;

        if (event->cpu < 0)
                return -EINVAL;

        event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

        /*
         * check event is known (determines counter)
         */
        switch (cfg) {
        case INTEL_RAPL_PP0:
                bit = RAPL_IDX_PP0_NRG_STAT;
                msr = MSR_PP0_ENERGY_STATUS;
                break;
        case INTEL_RAPL_PKG:
                bit = RAPL_IDX_PKG_NRG_STAT;
                msr = MSR_PKG_ENERGY_STATUS;
                break;
        case INTEL_RAPL_RAM:
                bit = RAPL_IDX_RAM_NRG_STAT;
                msr = MSR_DRAM_ENERGY_STATUS;
                break;
        case INTEL_RAPL_PP1:
                bit = RAPL_IDX_PP1_NRG_STAT;
                msr = MSR_PP1_ENERGY_STATUS;
                break;
        case INTEL_RAPL_PSYS:
                bit = RAPL_IDX_PSYS_NRG_STAT;
                msr = MSR_PLATFORM_ENERGY_STATUS;
                break;
        default:
                return -EINVAL;
        }
        /* check event supported */
        if (!(rapl_cntr_mask & (1 << bit)))
                return -EINVAL;

        /* unsupported modes and filters */
        if (event->attr.sample_period) /* no sampling */
                return -EINVAL;

        /* must be done before validate_group */
        pmu = cpu_to_rapl_pmu(event->cpu);
        if (!pmu)
                return -EINVAL;
        event->cpu = pmu->cpu;
        event->pmu_private = pmu;
        event->hw.event_base = msr;
        event->hw.config = cfg;
        event->hw.idx = bit;

        return ret;
}
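
/*
 * Editor's note, an illustrative userspace sketch (not part of this
 * file): opening energy-pkg directly through the syscall interface
 * mirrors the checks in rapl_pmu_event_init() above: attr.type must be
 * the dynamic type read from /sys/bus/event_source/devices/power/type,
 * attr.config = 0x2 (INTEL_RAPL_PKG), sample_period must stay 0, and
 * the event must be opened system-wide (pid = -1, cpu >= 0), since
 * RAPL only supports system-wide counting.
 */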

static void rapl_pmu_event_read(struct perf_event *event)
{
        rapl_event_update(event);
}

static ssize_t rapl_get_attr_cpumask(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);

static struct attribute *rapl_pmu_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group rapl_pmu_attr_group = {
        .attrs = rapl_pmu_attrs,
};

RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
RAPL_EVENT_ATTR_STR(energy-pkg  , rapl_pkg, "event=0x02");
RAPL_EVENT_ATTR_STR(energy-ram  , rapl_ram, "event=0x03");
RAPL_EVENT_ATTR_STR(energy-gpu  , rapl_gpu, "event=0x04");
RAPL_EVENT_ATTR_STR(energy-psys, rapl_psys, "event=0x05");

RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-pkg.unit  , rapl_pkg_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-ram.unit  , rapl_ram_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-gpu.unit  , rapl_gpu_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-psys.unit, rapl_psys_unit, "Joules");

/*
 * we compute in 0.23 nJ increments regardless of MSR, i.e. the scale
 * exported below is exactly 2^-32 Joules per reported unit
 */
RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-psys.scale, rapl_psys_scale, "2.3283064365386962890625e-10");

static struct attribute *rapl_events_srv_attr[] = {
        EVENT_PTR(rapl_cores),
        EVENT_PTR(rapl_pkg),
        EVENT_PTR(rapl_ram),

        EVENT_PTR(rapl_cores_unit),
        EVENT_PTR(rapl_pkg_unit),
        EVENT_PTR(rapl_ram_unit),

        EVENT_PTR(rapl_cores_scale),
        EVENT_PTR(rapl_pkg_scale),
        EVENT_PTR(rapl_ram_scale),
        NULL,
};

static struct attribute *rapl_events_cln_attr[] = {
        EVENT_PTR(rapl_cores),
        EVENT_PTR(rapl_pkg),
        EVENT_PTR(rapl_gpu),

        EVENT_PTR(rapl_cores_unit),
        EVENT_PTR(rapl_pkg_unit),
        EVENT_PTR(rapl_gpu_unit),

        EVENT_PTR(rapl_cores_scale),
        EVENT_PTR(rapl_pkg_scale),
        EVENT_PTR(rapl_gpu_scale),
        NULL,
};

static struct attribute *rapl_events_hsw_attr[] = {
        EVENT_PTR(rapl_cores),
        EVENT_PTR(rapl_pkg),
        EVENT_PTR(rapl_gpu),
        EVENT_PTR(rapl_ram),

        EVENT_PTR(rapl_cores_unit),
        EVENT_PTR(rapl_pkg_unit),
        EVENT_PTR(rapl_gpu_unit),
        EVENT_PTR(rapl_ram_unit),

        EVENT_PTR(rapl_cores_scale),
        EVENT_PTR(rapl_pkg_scale),
        EVENT_PTR(rapl_gpu_scale),
        EVENT_PTR(rapl_ram_scale),
        NULL,
};

static struct attribute *rapl_events_skl_attr[] = {
        EVENT_PTR(rapl_cores),
        EVENT_PTR(rapl_pkg),
        EVENT_PTR(rapl_gpu),
        EVENT_PTR(rapl_ram),
        EVENT_PTR(rapl_psys),

        EVENT_PTR(rapl_cores_unit),
        EVENT_PTR(rapl_pkg_unit),
        EVENT_PTR(rapl_gpu_unit),
        EVENT_PTR(rapl_ram_unit),
        EVENT_PTR(rapl_psys_unit),

        EVENT_PTR(rapl_cores_scale),
        EVENT_PTR(rapl_pkg_scale),
        EVENT_PTR(rapl_gpu_scale),
        EVENT_PTR(rapl_ram_scale),
        EVENT_PTR(rapl_psys_scale),
        NULL,
};

static struct attribute *rapl_events_knl_attr[] = {
        EVENT_PTR(rapl_pkg),
        EVENT_PTR(rapl_ram),

        EVENT_PTR(rapl_pkg_unit),
        EVENT_PTR(rapl_ram_unit),

        EVENT_PTR(rapl_pkg_scale),
        EVENT_PTR(rapl_ram_scale),
        NULL,
};

static struct attribute_group rapl_pmu_events_group = {
        .name = "events",
        .attrs = NULL, /* patched at runtime */
};

DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
static struct attribute *rapl_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group rapl_pmu_format_group = {
        .name = "format",
        .attrs = rapl_formats_attr,
};

static const struct attribute_group *rapl_attr_groups[] = {
        &rapl_pmu_attr_group,
        &rapl_pmu_format_group,
        &rapl_pmu_events_group,
        NULL,
};

static int rapl_cpu_offline(unsigned int cpu)
{
        struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
        int target;

        /* Check if exiting cpu is used for collecting rapl events */
        if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
                return 0;

        pmu->cpu = -1;
        /* Find a new cpu to collect rapl events */
        target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

        /* Migrate rapl events to the new target */
        if (target < nr_cpu_ids) {
                cpumask_set_cpu(target, &rapl_cpu_mask);
                pmu->cpu = target;
                perf_pmu_migrate_context(pmu->pmu, cpu, target);
        }
        return 0;
}

static int rapl_cpu_online(unsigned int cpu)
{
        struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
        int target;

        if (!pmu) {
                pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
                if (!pmu)
                        return -ENOMEM;

                raw_spin_lock_init(&pmu->lock);
                INIT_LIST_HEAD(&pmu->active_list);
                pmu->pmu = &rapl_pmus->pmu;
                pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
                rapl_hrtimer_init(pmu);

                rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
        }

        /*
         * Check if there is an online cpu in the package which collects rapl
         * events already.
         */
        target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
        if (target < nr_cpu_ids)
                return 0;

        cpumask_set_cpu(cpu, &rapl_cpu_mask);
        pmu->cpu = cpu;
        return 0;
}

static int rapl_check_hw_unit(bool apply_quirk)
{
        u64 msr_rapl_power_unit_bits;
        int i;

        /* protect rdmsrl() to handle virtualization */
        if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
                return -1;
        for (i = 0; i < NR_RAPL_DOMAINS; i++)
                rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;

        /*
         * DRAM domain on HSW server and KNL has fixed energy unit which can be
         * different than the unit from power unit MSR. See
         * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
         * of 2. Datasheet, September 2014, Reference Number: 330784-001"
         */
        if (apply_quirk)
                rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;

        /*
         * Calculate the timer rate:
         * Use reference of 200W for scaling the timeout to avoid counter
         * overflows. 200W = 200 Joules/sec
         * Divide interval by 2 to avoid lockstep (2 * 100)
         * if hw unit is 32, then we use 2 ms 1/200/2
         */
        rapl_timer_ms = 2;
        if (rapl_hw_unit[0] < 32) {
                rapl_timer_ms = (1000 / (2 * 100));
                rapl_timer_ms *= (1ULL << (32 - rapl_hw_unit[0] - 1));
        }
        return 0;
}
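
/*
 * Editor's note, a worked example of the timer formula above (not in
 * the original source): with the common rapl_hw_unit[0] == 16, a 32-bit
 * counter ticking in 1/2^16 J steps overflows after
 * 2^32 / 2^16 / 200 ~= 328 seconds at the 200 W reference load, and the
 * formula picks half of that: 5 ms * 2^(32 - 16 - 1) = 163840 ms.
 */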

static void __init rapl_advertise(void)
{
        int i;

        pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n",
                hweight32(rapl_cntr_mask), rapl_timer_ms);

        for (i = 0; i < NR_RAPL_DOMAINS; i++) {
                if (rapl_cntr_mask & (1 << i)) {
                        pr_info("hw unit of domain %s 2^-%d Joules\n",
                                rapl_domain_names[i], rapl_hw_unit[i]);
                }
        }
}

static void cleanup_rapl_pmus(void)
{
        int i;

        for (i = 0; i < rapl_pmus->maxpkg; i++)
                kfree(rapl_pmus->pmus[i]);
        kfree(rapl_pmus);
}

static int __init init_rapl_pmus(void)
{
        int maxpkg = topology_max_packages();
        size_t size;

        size = sizeof(*rapl_pmus) + maxpkg * sizeof(struct rapl_pmu *);
        rapl_pmus = kzalloc(size, GFP_KERNEL);
        if (!rapl_pmus)
                return -ENOMEM;

        rapl_pmus->maxpkg               = maxpkg;
        rapl_pmus->pmu.attr_groups      = rapl_attr_groups;
        rapl_pmus->pmu.task_ctx_nr      = perf_invalid_context;
        rapl_pmus->pmu.event_init       = rapl_pmu_event_init;
        rapl_pmus->pmu.add              = rapl_pmu_event_add;
        rapl_pmus->pmu.del              = rapl_pmu_event_del;
        rapl_pmus->pmu.start            = rapl_pmu_event_start;
        rapl_pmus->pmu.stop             = rapl_pmu_event_stop;
        rapl_pmus->pmu.read             = rapl_pmu_event_read;
        rapl_pmus->pmu.module           = THIS_MODULE;
        rapl_pmus->pmu.capabilities     = PERF_PMU_CAP_NO_EXCLUDE;
        return 0;
}

#define X86_RAPL_MODEL_MATCH(model, init)       \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_rapl_init_fun {
        bool apply_quirk;
        int cntr_mask;
        struct attribute **attrs;
};

static const struct intel_rapl_init_fun snb_rapl_init __initconst = {
        .apply_quirk = false,
        .cntr_mask = RAPL_IDX_CLN,
        .attrs = rapl_events_cln_attr,
};

static const struct intel_rapl_init_fun hsx_rapl_init __initconst = {
        .apply_quirk = true,
        .cntr_mask = RAPL_IDX_SRV,
        .attrs = rapl_events_srv_attr,
};

static const struct intel_rapl_init_fun hsw_rapl_init __initconst = {
        .apply_quirk = false,
        .cntr_mask = RAPL_IDX_HSW,
        .attrs = rapl_events_hsw_attr,
};

static const struct intel_rapl_init_fun snbep_rapl_init __initconst = {
        .apply_quirk = false,
        .cntr_mask = RAPL_IDX_SRV,
        .attrs = rapl_events_srv_attr,
};

static const struct intel_rapl_init_fun knl_rapl_init __initconst = {
        .apply_quirk = true,
        .cntr_mask = RAPL_IDX_KNL,
        .attrs = rapl_events_knl_attr,
};

static const struct intel_rapl_init_fun skl_rapl_init __initconst = {
        .apply_quirk = false,
        .cntr_mask = RAPL_IDX_SKL_CLN,
        .attrs = rapl_events_skl_attr,
};

static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,   snb_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,   snb_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X,    hsx_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,  hsw_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,   hsw_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,   hsw_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,      hsx_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsx_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,  skl_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,       hsx_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE,  skl_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE, skl_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT,   hsw_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, skl_rapl_init),
        {},
};

MODULE_DEVICE_TABLE(x86cpu, rapl_cpu_match);

static int __init rapl_pmu_init(void)
{
        const struct x86_cpu_id *id;
        struct intel_rapl_init_fun *rapl_init;
        bool apply_quirk;
        int ret;

        id = x86_match_cpu(rapl_cpu_match);
        if (!id)
                return -ENODEV;

        rapl_init = (struct intel_rapl_init_fun *)id->driver_data;
        apply_quirk = rapl_init->apply_quirk;
        rapl_cntr_mask = rapl_init->cntr_mask;
        rapl_pmu_events_group.attrs = rapl_init->attrs;

        ret = rapl_check_hw_unit(apply_quirk);
        if (ret)
                return ret;

        ret = init_rapl_pmus();
        if (ret)
                return ret;

        /*
         * Install callbacks. Core will call them for each online cpu.
         */
        ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
                                "perf/x86/rapl:online",
                                rapl_cpu_online, rapl_cpu_offline);
        if (ret)
                goto out;

        ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
        if (ret)
                goto out1;

        rapl_advertise();
        return 0;

out1:
        cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out:
        pr_warn("Initialization failed (%d), disabled\n", ret);
        cleanup_rapl_pmus();
        return ret;
}
module_init(rapl_pmu_init);

static void __exit intel_rapl_exit(void)
{
        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
        perf_pmu_unregister(&rapl_pmus->pmu);
        cleanup_rapl_pmus();
}
module_exit(intel_rapl_exit);