License cleanup: add SPDX GPL-2.0 license identifier to files with no license
[linux-block.git] / arch / x86 / events / intel / uncore_snbep.c
CommitLineData
b2441318 1// SPDX-License-Identifier: GPL-2.0
8268fdfc 2/* SandyBridge-EP/IvyTown uncore support */
ed367e6c 3#include "uncore.h"
8268fdfc 4
/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
/* Initial box-control value: reset control + counters, enable freeze */
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
/* Bits a user-supplied raw event config may set for generic boxes */
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control (narrower 5-bit threshold field) */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

/* Fields of the Cbox filter register (SNBEP_C0_MSR_PMON_BOX_FILTER) */
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

/* Extra-reg table entry mapping an event code to its filter-field index */
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control */
/* NOTE(review): unlike SNB-EP, the IVB init value does not set FRZ_EN */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* IVBEP Cbox filter fields (64-bit filter register layout) */
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* Extract field @i of width @n bits from packed value @x */
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
				(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
				 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10


/* Haswell-EP Cbox filter fields */
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

213
/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
						SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

e735b9db 271
/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
/* Extra config bits (config:32+) valid for SKX IIO raw events */
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258

326
/*
 * sysfs "format" attributes: each maps a human-readable event field name
 * to a bit range in the perf_event_attr config/config1/config2 words.
 * Numbered variants (thresh5/thresh8, filter_tid2...) encode the differing
 * field widths and positions across uarch generations.
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
399
400static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
401{
402 struct pci_dev *pdev = box->pci_dev;
403 int box_ctl = uncore_pci_box_ctl(box);
404 u32 config = 0;
405
406 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
407 config |= SNBEP_PMON_BOX_CTL_FRZ;
408 pci_write_config_dword(pdev, box_ctl, config);
409 }
410}
411
412static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
413{
414 struct pci_dev *pdev = box->pci_dev;
415 int box_ctl = uncore_pci_box_ctl(box);
416 u32 config = 0;
417
418 if (!pci_read_config_dword(pdev, box_ctl, &config)) {
419 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
420 pci_write_config_dword(pdev, box_ctl, config);
421 }
422}
423
424static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
425{
426 struct pci_dev *pdev = box->pci_dev;
427 struct hw_perf_event *hwc = &event->hw;
428
429 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
430}
431
432static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
433{
434 struct pci_dev *pdev = box->pci_dev;
435 struct hw_perf_event *hwc = &event->hw;
436
437 pci_write_config_dword(pdev, hwc->config_base, hwc->config);
438}
439
440static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
441{
442 struct pci_dev *pdev = box->pci_dev;
443 struct hw_perf_event *hwc = &event->hw;
444 u64 count = 0;
445
446 pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
447 pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
448
449 return count;
450}
451
452static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
453{
454 struct pci_dev *pdev = box->pci_dev;
dae25530 455 int box_ctl = uncore_pci_box_ctl(box);
8268fdfc 456
dae25530 457 pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
8268fdfc
YZ
458}
459
460static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
461{
462 u64 config;
463 unsigned msr;
464
465 msr = uncore_msr_box_ctl(box);
466 if (msr) {
467 rdmsrl(msr, config);
468 config |= SNBEP_PMON_BOX_CTL_FRZ;
469 wrmsrl(msr, config);
470 }
471}
472
473static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
474{
475 u64 config;
476 unsigned msr;
477
478 msr = uncore_msr_box_ctl(box);
479 if (msr) {
480 rdmsrl(msr, config);
481 config &= ~SNBEP_PMON_BOX_CTL_FRZ;
482 wrmsrl(msr, config);
483 }
484}
485
486static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
487{
488 struct hw_perf_event *hwc = &event->hw;
489 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
490
491 if (reg1->idx != EXTRA_REG_NONE)
492 wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
493
494 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
495}
496
497static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
498 struct perf_event *event)
499{
500 struct hw_perf_event *hwc = &event->hw;
501
502 wrmsrl(hwc->config_base, hwc->config);
503}
504
505static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
506{
507 unsigned msr = uncore_msr_box_ctl(box);
508
509 if (msr)
510 wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
511}
512
/* Generic box format attributes (event/umask/edge/inv + 8-bit threshold). */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox uses the narrower 5-bit threshold field. */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* Cbox adds tid_en plus the tid/nid/state/opc filter fields. */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

/* PCU exposes occupancy-counter controls and four band filters. */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI adds packet match/mask fields in config1/config2. */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
586
/* Named IMC events; .scale converts a 64-byte CAS count to MiB. */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

/* Named QPI events. */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
605
/* sysfs "format" groups binding each attribute table to its box type. */
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
630
68055915 631#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
8268fdfc
YZ
632 .disable_box = snbep_uncore_msr_disable_box, \
633 .enable_box = snbep_uncore_msr_enable_box, \
634 .disable_event = snbep_uncore_msr_disable_event, \
635 .enable_event = snbep_uncore_msr_enable_event, \
636 .read_counter = uncore_msr_read_counter
637
68055915
AK
638#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
639 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), \
640 .init_box = snbep_uncore_msr_init_box \
641
8268fdfc
YZ
642static struct intel_uncore_ops snbep_uncore_msr_ops = {
643 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
644};
645
646#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
647 .init_box = snbep_uncore_pci_init_box, \
648 .disable_box = snbep_uncore_pci_disable_box, \
649 .enable_box = snbep_uncore_pci_enable_box, \
650 .disable_event = snbep_uncore_pci_disable_event, \
651 .read_counter = snbep_uncore_pci_read_counter
652
653static struct intel_uncore_ops snbep_uncore_pci_ops = {
654 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
655 .enable_event = snbep_uncore_pci_enable_event, \
656};
657
/*
 * Per-event counter constraints: each entry maps an event code to the
 * bitmask of counters it is allowed to run on within the box.
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
733
/* SNB-EP Ubox: one box, two 44-bit generic counters plus a 48-bit fixed one. */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
748
/*
 * Cbox extra-reg table: maps event/umask codes to the filter-field index
 * bits consumed in snbep_cbox_hw_config()/__snbep_cbox_get_constraint().
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
777
778static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
779{
780 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
781 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
782 int i;
783
784 if (uncore_box_is_fake(box))
785 return;
786
787 for (i = 0; i < 5; i++) {
788 if (reg1->alloc & (0x1 << i))
789 atomic_sub(1 << (i * 6), &er->ref);
790 }
791 reg1->alloc = 0;
792}
793
/*
 * Try to allocate the shared Cbox filter-register fields this event needs.
 * Each of the five fields carries a 6-bit reference count packed into
 * er->ref; a field already in use can only be shared when the requested
 * filter value matches what is currently programmed.  Returns NULL on
 * success, or the empty constraint when a needed field is unavailable.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* Real boxes must not take a second reference for a field
		 * this event already allocated on an earlier attempt. */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* Field is free, or already holds an identical filter value. */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;	/* conflict: bail out and roll back */
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* Undo only the references taken in this call. */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
840
841static u64 snbep_cbox_filter_mask(int fields)
842{
843 u64 mask = 0;
844
845 if (fields & 0x1)
846 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
847 if (fields & 0x2)
848 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
849 if (fields & 0x4)
850 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
851 if (fields & 0x8)
852 mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
853
854 return mask;
855}
856
857static struct event_constraint *
858snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
859{
860 return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
861}
862
863static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
864{
865 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
866 struct extra_reg *er;
867 int idx = 0;
868
869 for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
870 if (er->event != (event->hw.config & er->config_mask))
871 continue;
872 idx |= er->idx;
873 }
874
875 if (idx) {
876 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
877 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
878 reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
879 reg1->idx = idx;
880 }
881 return 0;
882}
883
884static struct intel_uncore_ops snbep_uncore_cbox_ops = {
885 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
886 .hw_config = snbep_cbox_hw_config,
887 .get_constraint = snbep_cbox_get_constraint,
888 .put_constraint = snbep_cbox_put_constraint,
889};
890
891static struct intel_uncore_type snbep_uncore_cbox = {
892 .name = "cbox",
893 .num_counters = 4,
894 .num_boxes = 8,
895 .perf_ctr_bits = 44,
896 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
897 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
898 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
899 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
900 .msr_offset = SNBEP_CBO_MSR_OFFSET,
901 .num_shared_regs = 1,
902 .constraints = snbep_uncore_cbox_constraints,
903 .ops = &snbep_uncore_cbox_ops,
904 .format_group = &snbep_uncore_cbox_format_group,
905};
906
907static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
908{
909 struct hw_perf_event *hwc = &event->hw;
910 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
911 u64 config = reg1->config;
912
913 if (new_idx > reg1->idx)
914 config <<= 8 * (new_idx - reg1->idx);
915 else
916 config >>= 8 * (reg1->idx - new_idx);
917
918 if (modify) {
919 hwc->config += new_idx - reg1->idx;
920 reg1->config = config;
921 reg1->idx = new_idx;
922 }
923 return config;
924}
925
926static struct event_constraint *
927snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
928{
929 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
930 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
931 unsigned long flags;
932 int idx = reg1->idx;
933 u64 mask, config1 = reg1->config;
934 bool ok = false;
935
936 if (reg1->idx == EXTRA_REG_NONE ||
937 (!uncore_box_is_fake(box) && reg1->alloc))
938 return NULL;
939again:
940 mask = 0xffULL << (idx * 8);
941 raw_spin_lock_irqsave(&er->lock, flags);
942 if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
943 !((config1 ^ er->config) & mask)) {
944 atomic_add(1 << (idx * 8), &er->ref);
945 er->config &= ~mask;
946 er->config |= config1 & mask;
947 ok = true;
948 }
949 raw_spin_unlock_irqrestore(&er->lock, flags);
950
951 if (!ok) {
952 idx = (idx + 1) % 4;
953 if (idx != reg1->idx) {
954 config1 = snbep_pcu_alter_er(event, idx, false);
955 goto again;
956 }
957 return &uncore_constraint_empty;
958 }
959
960 if (!uncore_box_is_fake(box)) {
961 if (idx != reg1->idx)
962 snbep_pcu_alter_er(event, idx, true);
963 reg1->alloc = 1;
964 }
965 return NULL;
966}
967
968static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
969{
970 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
971 struct intel_uncore_extra_reg *er = &box->shared_regs[0];
972
973 if (uncore_box_is_fake(box) || !reg1->alloc)
974 return;
975
976 atomic_sub(1 << (reg1->idx * 8), &er->ref);
977 reg1->alloc = 0;
978}
979
980static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
981{
982 struct hw_perf_event *hwc = &event->hw;
983 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
984 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
985
986 if (ev_sel >= 0xb && ev_sel <= 0xe) {
987 reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
988 reg1->idx = ev_sel - 0xb;
b10fc1c3 989 reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
8268fdfc
YZ
990 }
991 return 0;
992}
993
994static struct intel_uncore_ops snbep_uncore_pcu_ops = {
995 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
996 .hw_config = snbep_pcu_hw_config,
997 .get_constraint = snbep_pcu_get_constraint,
998 .put_constraint = snbep_pcu_put_constraint,
999};
1000
1001static struct intel_uncore_type snbep_uncore_pcu = {
1002 .name = "pcu",
1003 .num_counters = 4,
1004 .num_boxes = 1,
1005 .perf_ctr_bits = 48,
1006 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1007 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
1008 .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1009 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1010 .num_shared_regs = 1,
1011 .ops = &snbep_uncore_pcu_ops,
1012 .format_group = &snbep_uncore_pcu_format_group,
1013};
1014
1015static struct intel_uncore_type *snbep_msr_uncores[] = {
1016 &snbep_uncore_ubox,
1017 &snbep_uncore_cbox,
1018 &snbep_uncore_pcu,
1019 NULL,
1020};
1021
1022void snbep_uncore_cpu_init(void)
1023{
1024 if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1025 snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1026 uncore_msr_uncores = snbep_msr_uncores;
1027}
1028
/* Slots in uncore_extra_pci_dev[].dev[] for auxiliary PCI devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	HSWEP_PCI_PCU_3,
};
1034
1035static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1036{
1037 struct hw_perf_event *hwc = &event->hw;
1038 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1039 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1040
1041 if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1042 reg1->idx = 0;
1043 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1044 reg1->config = event->attr.config1;
1045 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1046 reg2->config = event->attr.config2;
1047 }
1048 return 0;
1049}
1050
1051static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1052{
1053 struct pci_dev *pdev = box->pci_dev;
1054 struct hw_perf_event *hwc = &event->hw;
1055 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1056 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1057
1058 if (reg1->idx != EXTRA_REG_NONE) {
1059 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
cf6d445f
TG
1060 int pkg = topology_phys_to_logical_pkg(box->pci_phys_id);
1061 struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];
1062
8268fdfc
YZ
1063 if (filter_pdev) {
1064 pci_write_config_dword(filter_pdev, reg1->reg,
1065 (u32)reg1->config);
1066 pci_write_config_dword(filter_pdev, reg1->reg + 4,
1067 (u32)(reg1->config >> 32));
1068 pci_write_config_dword(filter_pdev, reg2->reg,
1069 (u32)reg2->config);
1070 pci_write_config_dword(filter_pdev, reg2->reg + 4,
1071 (u32)(reg2->config >> 32));
1072 }
1073 }
1074
1075 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1076}
1077
1078static struct intel_uncore_ops snbep_uncore_qpi_ops = {
1079 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
1080 .enable_event = snbep_qpi_enable_event,
1081 .hw_config = snbep_qpi_hw_config,
1082 .get_constraint = uncore_get_constraint,
1083 .put_constraint = uncore_put_constraint,
1084};
1085
/* Shared initializer for SNB-EP PCI-based uncore PMU types. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1093
1094static struct intel_uncore_type snbep_uncore_ha = {
1095 .name = "ha",
1096 .num_counters = 4,
1097 .num_boxes = 1,
1098 .perf_ctr_bits = 48,
1099 SNBEP_UNCORE_PCI_COMMON_INIT(),
1100};
1101
1102static struct intel_uncore_type snbep_uncore_imc = {
1103 .name = "imc",
1104 .num_counters = 4,
1105 .num_boxes = 4,
1106 .perf_ctr_bits = 48,
1107 .fixed_ctr_bits = 48,
1108 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1109 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1110 .event_descs = snbep_uncore_imc_events,
1111 SNBEP_UNCORE_PCI_COMMON_INIT(),
1112};
1113
1114static struct intel_uncore_type snbep_uncore_qpi = {
1115 .name = "qpi",
1116 .num_counters = 4,
1117 .num_boxes = 2,
1118 .perf_ctr_bits = 48,
1119 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1120 .event_ctl = SNBEP_PCI_PMON_CTL0,
1121 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1122 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1123 .num_shared_regs = 1,
1124 .ops = &snbep_uncore_qpi_ops,
1125 .event_descs = snbep_uncore_qpi_events,
1126 .format_group = &snbep_uncore_qpi_format_group,
1127};
1128
1129
1130static struct intel_uncore_type snbep_uncore_r2pcie = {
1131 .name = "r2pcie",
1132 .num_counters = 4,
1133 .num_boxes = 1,
1134 .perf_ctr_bits = 44,
1135 .constraints = snbep_uncore_r2pcie_constraints,
1136 SNBEP_UNCORE_PCI_COMMON_INIT(),
1137};
1138
1139static struct intel_uncore_type snbep_uncore_r3qpi = {
1140 .name = "r3qpi",
1141 .num_counters = 3,
1142 .num_boxes = 2,
1143 .perf_ctr_bits = 44,
1144 .constraints = snbep_uncore_r3qpi_constraints,
1145 SNBEP_UNCORE_PCI_COMMON_INIT(),
1146};
1147
1148enum {
1149 SNBEP_PCI_UNCORE_HA,
1150 SNBEP_PCI_UNCORE_IMC,
1151 SNBEP_PCI_UNCORE_QPI,
1152 SNBEP_PCI_UNCORE_R2PCIE,
1153 SNBEP_PCI_UNCORE_R3QPI,
1154};
1155
1156static struct intel_uncore_type *snbep_pci_uncores[] = {
1157 [SNBEP_PCI_UNCORE_HA] = &snbep_uncore_ha,
1158 [SNBEP_PCI_UNCORE_IMC] = &snbep_uncore_imc,
1159 [SNBEP_PCI_UNCORE_QPI] = &snbep_uncore_qpi,
1160 [SNBEP_PCI_UNCORE_R2PCIE] = &snbep_uncore_r2pcie,
1161 [SNBEP_PCI_UNCORE_R3QPI] = &snbep_uncore_r3qpi,
1162 NULL,
1163};
1164
83bc90e1 1165static const struct pci_device_id snbep_uncore_pci_ids[] = {
8268fdfc
YZ
1166 { /* Home Agent */
1167 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
1168 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
1169 },
1170 { /* MC Channel 0 */
1171 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
1172 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
1173 },
1174 { /* MC Channel 1 */
1175 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
1176 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
1177 },
1178 { /* MC Channel 2 */
1179 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
1180 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
1181 },
1182 { /* MC Channel 3 */
1183 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
1184 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
1185 },
1186 { /* QPI Port 0 */
1187 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
1188 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
1189 },
1190 { /* QPI Port 1 */
1191 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
1192 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
1193 },
1194 { /* R2PCIe */
1195 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
1196 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
1197 },
1198 { /* R3QPI Link 0 */
1199 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
1200 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
1201 },
1202 { /* R3QPI Link 1 */
1203 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
1204 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
1205 },
1206 { /* QPI Port 0 filter */
1207 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
1208 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1209 SNBEP_PCI_QPI_PORT0_FILTER),
1210 },
1211 { /* QPI Port 0 filter */
1212 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
1213 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1214 SNBEP_PCI_QPI_PORT1_FILTER),
1215 },
1216 { /* end: all zeroes */ }
1217};
1218
1219static struct pci_driver snbep_uncore_pci_driver = {
1220 .name = "snbep_uncore",
1221 .id_table = snbep_uncore_pci_ids,
1222};
1223
1224/*
1225 * build pci bus to socket mapping
1226 */
68ce4a0d 1227static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
8268fdfc
YZ
1228{
1229 struct pci_dev *ubox_dev = NULL;
712df65c
TI
1230 int i, bus, nodeid, segment;
1231 struct pci2phy_map *map;
8268fdfc
YZ
1232 int err = 0;
1233 u32 config = 0;
1234
1235 while (1) {
1236 /* find the UBOX device */
1237 ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1238 if (!ubox_dev)
1239 break;
1240 bus = ubox_dev->bus->number;
1241 /* get the Node ID of the local register */
68ce4a0d 1242 err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
8268fdfc
YZ
1243 if (err)
1244 break;
1245 nodeid = config;
1246 /* get the Node ID mapping */
68ce4a0d 1247 err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
8268fdfc
YZ
1248 if (err)
1249 break;
712df65c
TI
1250
1251 segment = pci_domain_nr(ubox_dev->bus);
1252 raw_spin_lock(&pci2phy_map_lock);
1253 map = __find_pci2phy_map(segment);
1254 if (!map) {
1255 raw_spin_unlock(&pci2phy_map_lock);
1256 err = -ENOMEM;
1257 break;
1258 }
1259
8268fdfc
YZ
1260 /*
1261 * every three bits in the Node ID mapping register maps
1262 * to a particular node.
1263 */
1264 for (i = 0; i < 8; i++) {
1265 if (nodeid == ((config >> (3 * i)) & 0x7)) {
712df65c 1266 map->pbus_to_physid[bus] = i;
8268fdfc
YZ
1267 break;
1268 }
1269 }
712df65c 1270 raw_spin_unlock(&pci2phy_map_lock);
8268fdfc
YZ
1271 }
1272
1273 if (!err) {
1274 /*
1275 * For PCI bus with no UBOX device, find the next bus
1276 * that has UBOX device and use its mapping.
1277 */
712df65c
TI
1278 raw_spin_lock(&pci2phy_map_lock);
1279 list_for_each_entry(map, &pci2phy_map_head, list) {
1280 i = -1;
68ce4a0d
KL
1281 if (reverse) {
1282 for (bus = 255; bus >= 0; bus--) {
1283 if (map->pbus_to_physid[bus] >= 0)
1284 i = map->pbus_to_physid[bus];
1285 else
1286 map->pbus_to_physid[bus] = i;
1287 }
1288 } else {
1289 for (bus = 0; bus <= 255; bus++) {
1290 if (map->pbus_to_physid[bus] >= 0)
1291 i = map->pbus_to_physid[bus];
1292 else
1293 map->pbus_to_physid[bus] = i;
1294 }
712df65c 1295 }
8268fdfc 1296 }
712df65c 1297 raw_spin_unlock(&pci2phy_map_lock);
8268fdfc
YZ
1298 }
1299
8e57c586 1300 pci_dev_put(ubox_dev);
8268fdfc
YZ
1301
1302 return err ? pcibios_err_to_errno(err) : 0;
1303}
1304
1305int snbep_uncore_pci_init(void)
1306{
68ce4a0d 1307 int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
8268fdfc
YZ
1308 if (ret)
1309 return ret;
1310 uncore_pci_uncores = snbep_pci_uncores;
1311 uncore_pci_driver = &snbep_uncore_pci_driver;
1312 return 0;
1313}
/* end of Sandy Bridge-EP uncore support */

/* IvyTown uncore support */
ddcd0973 1317static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
8268fdfc
YZ
1318{
1319 unsigned msr = uncore_msr_box_ctl(box);
1320 if (msr)
ddcd0973 1321 wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
8268fdfc
YZ
1322}
1323
ddcd0973 1324static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
8268fdfc
YZ
1325{
1326 struct pci_dev *pdev = box->pci_dev;
1327
ddcd0973 1328 pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
8268fdfc
YZ
1329}
1330
/* Common MSR ops for IVB-EP boxes: IVB init, SNB-EP enable/disable/read. */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter
1338
ddcd0973
PZ
1339static struct intel_uncore_ops ivbep_uncore_msr_ops = {
1340 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
8268fdfc
YZ
1341};
1342
ddcd0973
PZ
1343static struct intel_uncore_ops ivbep_uncore_pci_ops = {
1344 .init_box = ivbep_uncore_pci_init_box,
8268fdfc
YZ
1345 .disable_box = snbep_uncore_pci_disable_box,
1346 .enable_box = snbep_uncore_pci_enable_box,
1347 .disable_event = snbep_uncore_pci_disable_event,
1348 .enable_event = snbep_uncore_pci_enable_event,
1349 .read_counter = snbep_uncore_pci_read_counter,
1350};
1351
ddcd0973 1352#define IVBEP_UNCORE_PCI_COMMON_INIT() \
8268fdfc
YZ
1353 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
1354 .event_ctl = SNBEP_PCI_PMON_CTL0, \
ddcd0973 1355 .event_mask = IVBEP_PMON_RAW_EVENT_MASK, \
8268fdfc 1356 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
ddcd0973
PZ
1357 .ops = &ivbep_uncore_pci_ops, \
1358 .format_group = &ivbep_uncore_format_group
8268fdfc 1359
ddcd0973 1360static struct attribute *ivbep_uncore_formats_attr[] = {
8268fdfc
YZ
1361 &format_attr_event.attr,
1362 &format_attr_umask.attr,
1363 &format_attr_edge.attr,
1364 &format_attr_inv.attr,
1365 &format_attr_thresh8.attr,
1366 NULL,
1367};
1368
ddcd0973 1369static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
8268fdfc
YZ
1370 &format_attr_event.attr,
1371 &format_attr_umask.attr,
1372 &format_attr_edge.attr,
1373 &format_attr_inv.attr,
1374 &format_attr_thresh5.attr,
1375 NULL,
1376};
1377
ddcd0973 1378static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
8268fdfc
YZ
1379 &format_attr_event.attr,
1380 &format_attr_umask.attr,
1381 &format_attr_edge.attr,
1382 &format_attr_tid_en.attr,
1383 &format_attr_thresh8.attr,
1384 &format_attr_filter_tid.attr,
1385 &format_attr_filter_link.attr,
1386 &format_attr_filter_state2.attr,
1387 &format_attr_filter_nid2.attr,
1388 &format_attr_filter_opc2.attr,
7e96ae1a
AK
1389 &format_attr_filter_nc.attr,
1390 &format_attr_filter_c6.attr,
1391 &format_attr_filter_isoc.attr,
8268fdfc
YZ
1392 NULL,
1393};
1394
ddcd0973 1395static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
cb225252 1396 &format_attr_event.attr,
8268fdfc
YZ
1397 &format_attr_occ_sel.attr,
1398 &format_attr_edge.attr,
1399 &format_attr_thresh5.attr,
1400 &format_attr_occ_invert.attr,
1401 &format_attr_occ_edge.attr,
1402 &format_attr_filter_band0.attr,
1403 &format_attr_filter_band1.attr,
1404 &format_attr_filter_band2.attr,
1405 &format_attr_filter_band3.attr,
1406 NULL,
1407};
1408
ddcd0973 1409static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
8268fdfc
YZ
1410 &format_attr_event_ext.attr,
1411 &format_attr_umask.attr,
1412 &format_attr_edge.attr,
1413 &format_attr_thresh8.attr,
1414 &format_attr_match_rds.attr,
1415 &format_attr_match_rnid30.attr,
1416 &format_attr_match_rnid4.attr,
1417 &format_attr_match_dnid.attr,
1418 &format_attr_match_mc.attr,
1419 &format_attr_match_opc.attr,
1420 &format_attr_match_vnw.attr,
1421 &format_attr_match0.attr,
1422 &format_attr_match1.attr,
1423 &format_attr_mask_rds.attr,
1424 &format_attr_mask_rnid30.attr,
1425 &format_attr_mask_rnid4.attr,
1426 &format_attr_mask_dnid.attr,
1427 &format_attr_mask_mc.attr,
1428 &format_attr_mask_opc.attr,
1429 &format_attr_mask_vnw.attr,
1430 &format_attr_mask0.attr,
1431 &format_attr_mask1.attr,
1432 NULL,
1433};
1434
45bd07ad 1435static const struct attribute_group ivbep_uncore_format_group = {
8268fdfc 1436 .name = "format",
ddcd0973 1437 .attrs = ivbep_uncore_formats_attr,
8268fdfc
YZ
1438};
1439
45bd07ad 1440static const struct attribute_group ivbep_uncore_ubox_format_group = {
8268fdfc 1441 .name = "format",
ddcd0973 1442 .attrs = ivbep_uncore_ubox_formats_attr,
8268fdfc
YZ
1443};
1444
45bd07ad 1445static const struct attribute_group ivbep_uncore_cbox_format_group = {
8268fdfc 1446 .name = "format",
ddcd0973 1447 .attrs = ivbep_uncore_cbox_formats_attr,
8268fdfc
YZ
1448};
1449
45bd07ad 1450static const struct attribute_group ivbep_uncore_pcu_format_group = {
8268fdfc 1451 .name = "format",
ddcd0973 1452 .attrs = ivbep_uncore_pcu_formats_attr,
8268fdfc
YZ
1453};
1454
45bd07ad 1455static const struct attribute_group ivbep_uncore_qpi_format_group = {
8268fdfc 1456 .name = "format",
ddcd0973 1457 .attrs = ivbep_uncore_qpi_formats_attr,
8268fdfc
YZ
1458};
1459
ddcd0973 1460static struct intel_uncore_type ivbep_uncore_ubox = {
8268fdfc
YZ
1461 .name = "ubox",
1462 .num_counters = 2,
1463 .num_boxes = 1,
1464 .perf_ctr_bits = 44,
1465 .fixed_ctr_bits = 48,
1466 .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
1467 .event_ctl = SNBEP_U_MSR_PMON_CTL0,
ddcd0973 1468 .event_mask = IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
8268fdfc
YZ
1469 .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1470 .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
ddcd0973
PZ
1471 .ops = &ivbep_uncore_msr_ops,
1472 .format_group = &ivbep_uncore_ubox_format_group,
8268fdfc
YZ
1473};
1474
ddcd0973 1475static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
8268fdfc
YZ
1476 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1477 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1478 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1479 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1480 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1481 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1482 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1483 SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1484 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1485 SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1486 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1487 SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1488 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1489 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1490 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1491 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1492 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1493 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1494 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1495 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1496 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1497 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1498 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1499 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1500 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1501 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1502 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1503 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1504 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1505 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1506 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1507 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1508 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1509 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1510 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1511 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1512 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1513 EVENT_EXTRA_END
1514};
1515
ddcd0973 1516static u64 ivbep_cbox_filter_mask(int fields)
8268fdfc
YZ
1517{
1518 u64 mask = 0;
1519
1520 if (fields & 0x1)
ddcd0973 1521 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
8268fdfc 1522 if (fields & 0x2)
ddcd0973 1523 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
8268fdfc 1524 if (fields & 0x4)
ddcd0973 1525 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
8268fdfc 1526 if (fields & 0x8)
ddcd0973 1527 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
7e96ae1a 1528 if (fields & 0x10) {
ddcd0973 1529 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
7e96ae1a
AK
1530 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1531 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1532 mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1533 }
8268fdfc
YZ
1534
1535 return mask;
1536}
1537
1538static struct event_constraint *
ddcd0973 1539ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
8268fdfc 1540{
ddcd0973 1541 return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
8268fdfc
YZ
1542}
1543
ddcd0973 1544static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
8268fdfc
YZ
1545{
1546 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1547 struct extra_reg *er;
1548 int idx = 0;
1549
ddcd0973 1550 for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
8268fdfc
YZ
1551 if (er->event != (event->hw.config & er->config_mask))
1552 continue;
1553 idx |= er->idx;
1554 }
1555
1556 if (idx) {
1557 reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1558 SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
ddcd0973 1559 reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
8268fdfc
YZ
1560 reg1->idx = idx;
1561 }
1562 return 0;
1563}
1564
ddcd0973 1565static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
8268fdfc
YZ
1566{
1567 struct hw_perf_event *hwc = &event->hw;
1568 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1569
1570 if (reg1->idx != EXTRA_REG_NONE) {
1571 u64 filter = uncore_shared_reg_config(box, 0);
1572 wrmsrl(reg1->reg, filter & 0xffffffff);
1573 wrmsrl(reg1->reg + 6, filter >> 32);
1574 }
1575
1576 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1577}
1578
ddcd0973
PZ
1579static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1580 .init_box = ivbep_uncore_msr_init_box,
8268fdfc
YZ
1581 .disable_box = snbep_uncore_msr_disable_box,
1582 .enable_box = snbep_uncore_msr_enable_box,
1583 .disable_event = snbep_uncore_msr_disable_event,
ddcd0973 1584 .enable_event = ivbep_cbox_enable_event,
8268fdfc 1585 .read_counter = uncore_msr_read_counter,
ddcd0973
PZ
1586 .hw_config = ivbep_cbox_hw_config,
1587 .get_constraint = ivbep_cbox_get_constraint,
8268fdfc
YZ
1588 .put_constraint = snbep_cbox_put_constraint,
1589};
1590
ddcd0973 1591static struct intel_uncore_type ivbep_uncore_cbox = {
8268fdfc
YZ
1592 .name = "cbox",
1593 .num_counters = 4,
1594 .num_boxes = 15,
1595 .perf_ctr_bits = 44,
1596 .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
1597 .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
ddcd0973 1598 .event_mask = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
8268fdfc
YZ
1599 .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
1600 .msr_offset = SNBEP_CBO_MSR_OFFSET,
1601 .num_shared_regs = 1,
1602 .constraints = snbep_uncore_cbox_constraints,
ddcd0973
PZ
1603 .ops = &ivbep_uncore_cbox_ops,
1604 .format_group = &ivbep_uncore_cbox_format_group,
8268fdfc
YZ
1605};
1606
ddcd0973
PZ
1607static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1608 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
8268fdfc
YZ
1609 .hw_config = snbep_pcu_hw_config,
1610 .get_constraint = snbep_pcu_get_constraint,
1611 .put_constraint = snbep_pcu_put_constraint,
1612};
1613
ddcd0973 1614static struct intel_uncore_type ivbep_uncore_pcu = {
8268fdfc
YZ
1615 .name = "pcu",
1616 .num_counters = 4,
1617 .num_boxes = 1,
1618 .perf_ctr_bits = 48,
1619 .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
1620 .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
ddcd0973 1621 .event_mask = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
8268fdfc
YZ
1622 .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
1623 .num_shared_regs = 1,
ddcd0973
PZ
1624 .ops = &ivbep_uncore_pcu_ops,
1625 .format_group = &ivbep_uncore_pcu_format_group,
8268fdfc
YZ
1626};
1627
ddcd0973
PZ
1628static struct intel_uncore_type *ivbep_msr_uncores[] = {
1629 &ivbep_uncore_ubox,
1630 &ivbep_uncore_cbox,
1631 &ivbep_uncore_pcu,
8268fdfc
YZ
1632 NULL,
1633};
1634
ddcd0973 1635void ivbep_uncore_cpu_init(void)
8268fdfc 1636{
ddcd0973
PZ
1637 if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1638 ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1639 uncore_msr_uncores = ivbep_msr_uncores;
8268fdfc
YZ
1640}
1641
ddcd0973 1642static struct intel_uncore_type ivbep_uncore_ha = {
8268fdfc
YZ
1643 .name = "ha",
1644 .num_counters = 4,
1645 .num_boxes = 2,
1646 .perf_ctr_bits = 48,
ddcd0973 1647 IVBEP_UNCORE_PCI_COMMON_INIT(),
8268fdfc
YZ
1648};
1649
ddcd0973 1650static struct intel_uncore_type ivbep_uncore_imc = {
8268fdfc
YZ
1651 .name = "imc",
1652 .num_counters = 4,
1653 .num_boxes = 8,
1654 .perf_ctr_bits = 48,
1655 .fixed_ctr_bits = 48,
1656 .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1657 .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
85a16ef6 1658 .event_descs = snbep_uncore_imc_events,
ddcd0973 1659 IVBEP_UNCORE_PCI_COMMON_INIT(),
8268fdfc
YZ
1660};
1661
/* registers in IRP boxes are not properly aligned */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
ddcd0973 1666static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
8268fdfc
YZ
1667{
1668 struct pci_dev *pdev = box->pci_dev;
1669 struct hw_perf_event *hwc = &event->hw;
1670
ddcd0973 1671 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
8268fdfc
YZ
1672 hwc->config | SNBEP_PMON_CTL_EN);
1673}
1674
ddcd0973 1675static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
8268fdfc
YZ
1676{
1677 struct pci_dev *pdev = box->pci_dev;
1678 struct hw_perf_event *hwc = &event->hw;
1679
ddcd0973 1680 pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
8268fdfc
YZ
1681}
1682
ddcd0973 1683static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
8268fdfc
YZ
1684{
1685 struct pci_dev *pdev = box->pci_dev;
1686 struct hw_perf_event *hwc = &event->hw;
1687 u64 count = 0;
1688
ddcd0973
PZ
1689 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1690 pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
8268fdfc
YZ
1691
1692 return count;
1693}
1694
ddcd0973
PZ
1695static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1696 .init_box = ivbep_uncore_pci_init_box,
8268fdfc
YZ
1697 .disable_box = snbep_uncore_pci_disable_box,
1698 .enable_box = snbep_uncore_pci_enable_box,
ddcd0973
PZ
1699 .disable_event = ivbep_uncore_irp_disable_event,
1700 .enable_event = ivbep_uncore_irp_enable_event,
1701 .read_counter = ivbep_uncore_irp_read_counter,
8268fdfc
YZ
1702};
1703
ddcd0973 1704static struct intel_uncore_type ivbep_uncore_irp = {
8268fdfc
YZ
1705 .name = "irp",
1706 .num_counters = 4,
1707 .num_boxes = 1,
1708 .perf_ctr_bits = 48,
ddcd0973 1709 .event_mask = IVBEP_PMON_RAW_EVENT_MASK,
8268fdfc 1710 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
ddcd0973
PZ
1711 .ops = &ivbep_uncore_irp_ops,
1712 .format_group = &ivbep_uncore_format_group,
8268fdfc
YZ
1713};
1714
ddcd0973
PZ
1715static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1716 .init_box = ivbep_uncore_pci_init_box,
8268fdfc
YZ
1717 .disable_box = snbep_uncore_pci_disable_box,
1718 .enable_box = snbep_uncore_pci_enable_box,
1719 .disable_event = snbep_uncore_pci_disable_event,
1720 .enable_event = snbep_qpi_enable_event,
1721 .read_counter = snbep_uncore_pci_read_counter,
1722 .hw_config = snbep_qpi_hw_config,
1723 .get_constraint = uncore_get_constraint,
1724 .put_constraint = uncore_put_constraint,
1725};
1726
ddcd0973 1727static struct intel_uncore_type ivbep_uncore_qpi = {
8268fdfc
YZ
1728 .name = "qpi",
1729 .num_counters = 4,
1730 .num_boxes = 3,
1731 .perf_ctr_bits = 48,
1732 .perf_ctr = SNBEP_PCI_PMON_CTR0,
1733 .event_ctl = SNBEP_PCI_PMON_CTL0,
ddcd0973 1734 .event_mask = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
8268fdfc
YZ
1735 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
1736 .num_shared_regs = 1,
ddcd0973
PZ
1737 .ops = &ivbep_uncore_qpi_ops,
1738 .format_group = &ivbep_uncore_qpi_format_group,
8268fdfc
YZ
1739};
1740
ddcd0973 1741static struct intel_uncore_type ivbep_uncore_r2pcie = {
8268fdfc
YZ
1742 .name = "r2pcie",
1743 .num_counters = 4,
1744 .num_boxes = 1,
1745 .perf_ctr_bits = 44,
1746 .constraints = snbep_uncore_r2pcie_constraints,
ddcd0973 1747 IVBEP_UNCORE_PCI_COMMON_INIT(),
8268fdfc
YZ
1748};
1749
ddcd0973 1750static struct intel_uncore_type ivbep_uncore_r3qpi = {
8268fdfc
YZ
1751 .name = "r3qpi",
1752 .num_counters = 3,
1753 .num_boxes = 2,
1754 .perf_ctr_bits = 44,
1755 .constraints = snbep_uncore_r3qpi_constraints,
ddcd0973 1756 IVBEP_UNCORE_PCI_COMMON_INIT(),
8268fdfc
YZ
1757};
1758
/* Index of each PCI-based IvyTown uncore type in ivbep_pci_uncores[]. */
enum {
        IVBEP_PCI_UNCORE_HA,
        IVBEP_PCI_UNCORE_IMC,
        IVBEP_PCI_UNCORE_IRP,
        IVBEP_PCI_UNCORE_QPI,
        IVBEP_PCI_UNCORE_R2PCIE,
        IVBEP_PCI_UNCORE_R3QPI,
};
1767
1768static struct intel_uncore_type *ivbep_pci_uncores[] = {
1769 [IVBEP_PCI_UNCORE_HA] = &ivbep_uncore_ha,
1770 [IVBEP_PCI_UNCORE_IMC] = &ivbep_uncore_imc,
1771 [IVBEP_PCI_UNCORE_IRP] = &ivbep_uncore_irp,
1772 [IVBEP_PCI_UNCORE_QPI] = &ivbep_uncore_qpi,
1773 [IVBEP_PCI_UNCORE_R2PCIE] = &ivbep_uncore_r2pcie,
1774 [IVBEP_PCI_UNCORE_R3QPI] = &ivbep_uncore_r3qpi,
8268fdfc
YZ
1775 NULL,
1776};
1777
83bc90e1 1778static const struct pci_device_id ivbep_uncore_pci_ids[] = {
8268fdfc
YZ
1779 { /* Home Agent 0 */
1780 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
ddcd0973 1781 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
8268fdfc
YZ
1782 },
1783 { /* Home Agent 1 */
1784 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
ddcd0973 1785 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
8268fdfc
YZ
1786 },
1787 { /* MC0 Channel 0 */
1788 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
ddcd0973 1789 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
8268fdfc
YZ
1790 },
1791 { /* MC0 Channel 1 */
1792 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
ddcd0973 1793 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
8268fdfc
YZ
1794 },
1795 { /* MC0 Channel 3 */
1796 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
ddcd0973 1797 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
8268fdfc
YZ
1798 },
1799 { /* MC0 Channel 4 */
1800 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
ddcd0973 1801 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
8268fdfc
YZ
1802 },
1803 { /* MC1 Channel 0 */
1804 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
ddcd0973 1805 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
8268fdfc
YZ
1806 },
1807 { /* MC1 Channel 1 */
1808 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
ddcd0973 1809 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
8268fdfc
YZ
1810 },
1811 { /* MC1 Channel 3 */
1812 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
ddcd0973 1813 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
8268fdfc
YZ
1814 },
1815 { /* MC1 Channel 4 */
1816 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
ddcd0973 1817 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
8268fdfc
YZ
1818 },
1819 { /* IRP */
1820 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
ddcd0973 1821 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
8268fdfc
YZ
1822 },
1823 { /* QPI0 Port 0 */
1824 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
ddcd0973 1825 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
8268fdfc
YZ
1826 },
1827 { /* QPI0 Port 1 */
1828 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
ddcd0973 1829 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
8268fdfc
YZ
1830 },
1831 { /* QPI1 Port 2 */
1832 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
ddcd0973 1833 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
8268fdfc
YZ
1834 },
1835 { /* R2PCIe */
1836 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
ddcd0973 1837 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
8268fdfc
YZ
1838 },
1839 { /* R3QPI0 Link 0 */
1840 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
ddcd0973 1841 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
8268fdfc
YZ
1842 },
1843 { /* R3QPI0 Link 1 */
1844 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
ddcd0973 1845 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
8268fdfc
YZ
1846 },
1847 { /* R3QPI1 Link 2 */
1848 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
ddcd0973 1849 .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
8268fdfc
YZ
1850 },
1851 { /* QPI Port 0 filter */
1852 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
1853 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1854 SNBEP_PCI_QPI_PORT0_FILTER),
1855 },
1856 { /* QPI Port 0 filter */
1857 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
1858 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1859 SNBEP_PCI_QPI_PORT1_FILTER),
1860 },
1861 { /* end: all zeroes */ }
1862};
1863
ddcd0973
PZ
1864static struct pci_driver ivbep_uncore_pci_driver = {
1865 .name = "ivbep_uncore",
1866 .id_table = ivbep_uncore_pci_ids,
8268fdfc
YZ
1867};
1868
ddcd0973 1869int ivbep_uncore_pci_init(void)
8268fdfc 1870{
68ce4a0d 1871 int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
8268fdfc
YZ
1872 if (ret)
1873 return ret;
ddcd0973
PZ
1874 uncore_pci_uncores = ivbep_pci_uncores;
1875 uncore_pci_driver = &ivbep_uncore_pci_driver;
8268fdfc
YZ
1876 return 0;
1877}
1878/* end of IvyTown uncore support */
e735b9db 1879
77af0037
HC
1880/* KNL uncore support */
1881static struct attribute *knl_uncore_ubox_formats_attr[] = {
1882 &format_attr_event.attr,
1883 &format_attr_umask.attr,
1884 &format_attr_edge.attr,
1885 &format_attr_tid_en.attr,
1886 &format_attr_inv.attr,
1887 &format_attr_thresh5.attr,
1888 NULL,
1889};
1890
45bd07ad 1891static const struct attribute_group knl_uncore_ubox_format_group = {
77af0037
HC
1892 .name = "format",
1893 .attrs = knl_uncore_ubox_formats_attr,
1894};
1895
1896static struct intel_uncore_type knl_uncore_ubox = {
1897 .name = "ubox",
1898 .num_counters = 2,
1899 .num_boxes = 1,
1900 .perf_ctr_bits = 48,
1901 .fixed_ctr_bits = 48,
1902 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
1903 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
1904 .event_mask = KNL_U_MSR_PMON_RAW_EVENT_MASK,
1905 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
1906 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
1907 .ops = &snbep_uncore_msr_ops,
1908 .format_group = &knl_uncore_ubox_format_group,
1909};
1910
1911static struct attribute *knl_uncore_cha_formats_attr[] = {
1912 &format_attr_event.attr,
1913 &format_attr_umask.attr,
1914 &format_attr_qor.attr,
1915 &format_attr_edge.attr,
1916 &format_attr_tid_en.attr,
1917 &format_attr_inv.attr,
1918 &format_attr_thresh8.attr,
1919 &format_attr_filter_tid4.attr,
1920 &format_attr_filter_link3.attr,
1921 &format_attr_filter_state4.attr,
1922 &format_attr_filter_local.attr,
1923 &format_attr_filter_all_op.attr,
1924 &format_attr_filter_nnm.attr,
1925 &format_attr_filter_opc3.attr,
1926 &format_attr_filter_nc.attr,
1927 &format_attr_filter_isoc.attr,
1928 NULL,
1929};
1930
45bd07ad 1931static const struct attribute_group knl_uncore_cha_format_group = {
77af0037
HC
1932 .name = "format",
1933 .attrs = knl_uncore_cha_formats_attr,
1934};
1935
1936static struct event_constraint knl_uncore_cha_constraints[] = {
1937 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
1938 UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
1939 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
1940 EVENT_CONSTRAINT_END
1941};
1942
1943static struct extra_reg knl_uncore_cha_extra_regs[] = {
1944 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1945 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1946 SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
1947 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
1948 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
1949 EVENT_EXTRA_END
1950};
1951
1952static u64 knl_cha_filter_mask(int fields)
1953{
1954 u64 mask = 0;
1955
1956 if (fields & 0x1)
1957 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
1958 if (fields & 0x2)
1959 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
1960 if (fields & 0x4)
1961 mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
1962 return mask;
1963}
1964
1965static struct event_constraint *
1966knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1967{
1968 return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
1969}
1970
1971static int knl_cha_hw_config(struct intel_uncore_box *box,
1972 struct perf_event *event)
1973{
1974 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1975 struct extra_reg *er;
1976 int idx = 0;
1977
1978 for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
1979 if (er->event != (event->hw.config & er->config_mask))
1980 continue;
1981 idx |= er->idx;
1982 }
1983
1984 if (idx) {
1985 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
1986 KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
1987 reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
ec336c87 1988
1989 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
1990 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
1991 reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
77af0037
HC
1992 reg1->idx = idx;
1993 }
1994 return 0;
1995}
1996
1997static void hswep_cbox_enable_event(struct intel_uncore_box *box,
1998 struct perf_event *event);
1999
2000static struct intel_uncore_ops knl_uncore_cha_ops = {
2001 .init_box = snbep_uncore_msr_init_box,
2002 .disable_box = snbep_uncore_msr_disable_box,
2003 .enable_box = snbep_uncore_msr_enable_box,
2004 .disable_event = snbep_uncore_msr_disable_event,
2005 .enable_event = hswep_cbox_enable_event,
2006 .read_counter = uncore_msr_read_counter,
2007 .hw_config = knl_cha_hw_config,
2008 .get_constraint = knl_cha_get_constraint,
2009 .put_constraint = snbep_cbox_put_constraint,
2010};
2011
2012static struct intel_uncore_type knl_uncore_cha = {
2013 .name = "cha",
2014 .num_counters = 4,
2015 .num_boxes = 38,
2016 .perf_ctr_bits = 48,
2017 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2018 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2019 .event_mask = KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2020 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2021 .msr_offset = KNL_CHA_MSR_OFFSET,
2022 .num_shared_regs = 1,
2023 .constraints = knl_uncore_cha_constraints,
2024 .ops = &knl_uncore_cha_ops,
2025 .format_group = &knl_uncore_cha_format_group,
2026};
2027
2028static struct attribute *knl_uncore_pcu_formats_attr[] = {
2029 &format_attr_event2.attr,
2030 &format_attr_use_occ_ctr.attr,
2031 &format_attr_occ_sel.attr,
2032 &format_attr_edge.attr,
2033 &format_attr_tid_en.attr,
2034 &format_attr_inv.attr,
2035 &format_attr_thresh6.attr,
2036 &format_attr_occ_invert.attr,
2037 &format_attr_occ_edge_det.attr,
2038 NULL,
2039};
2040
45bd07ad 2041static const struct attribute_group knl_uncore_pcu_format_group = {
77af0037
HC
2042 .name = "format",
2043 .attrs = knl_uncore_pcu_formats_attr,
2044};
2045
2046static struct intel_uncore_type knl_uncore_pcu = {
2047 .name = "pcu",
2048 .num_counters = 4,
2049 .num_boxes = 1,
2050 .perf_ctr_bits = 48,
2051 .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0,
2052 .event_ctl = HSWEP_PCU_MSR_PMON_CTL0,
2053 .event_mask = KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2054 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
2055 .ops = &snbep_uncore_msr_ops,
2056 .format_group = &knl_uncore_pcu_format_group,
2057};
2058
2059static struct intel_uncore_type *knl_msr_uncores[] = {
2060 &knl_uncore_ubox,
2061 &knl_uncore_cha,
2062 &knl_uncore_pcu,
2063 NULL,
2064};
2065
2066void knl_uncore_cpu_init(void)
2067{
2068 uncore_msr_uncores = knl_msr_uncores;
2069}
2070
2071static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2072{
2073 struct pci_dev *pdev = box->pci_dev;
2074 int box_ctl = uncore_pci_box_ctl(box);
2075
2076 pci_write_config_dword(pdev, box_ctl, 0);
2077}
2078
2079static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2080 struct perf_event *event)
2081{
2082 struct pci_dev *pdev = box->pci_dev;
2083 struct hw_perf_event *hwc = &event->hw;
2084
2085 if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2086 == UNCORE_FIXED_EVENT)
2087 pci_write_config_dword(pdev, hwc->config_base,
2088 hwc->config | KNL_PMON_FIXED_CTL_EN);
2089 else
2090 pci_write_config_dword(pdev, hwc->config_base,
2091 hwc->config | SNBEP_PMON_CTL_EN);
2092}
2093
2094static struct intel_uncore_ops knl_uncore_imc_ops = {
2095 .init_box = snbep_uncore_pci_init_box,
2096 .disable_box = snbep_uncore_pci_disable_box,
2097 .enable_box = knl_uncore_imc_enable_box,
2098 .read_counter = snbep_uncore_pci_read_counter,
2099 .enable_event = knl_uncore_imc_enable_event,
2100 .disable_event = snbep_uncore_pci_disable_event,
2101};
2102
2103static struct intel_uncore_type knl_uncore_imc_uclk = {
2104 .name = "imc_uclk",
2105 .num_counters = 4,
2106 .num_boxes = 2,
2107 .perf_ctr_bits = 48,
2108 .fixed_ctr_bits = 48,
2109 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2110 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2111 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2112 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2113 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2114 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2115 .ops = &knl_uncore_imc_ops,
2116 .format_group = &snbep_uncore_format_group,
2117};
2118
2119static struct intel_uncore_type knl_uncore_imc_dclk = {
2120 .name = "imc",
2121 .num_counters = 4,
2122 .num_boxes = 6,
2123 .perf_ctr_bits = 48,
2124 .fixed_ctr_bits = 48,
2125 .perf_ctr = KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2126 .event_ctl = KNL_MC0_CH0_MSR_PMON_CTL0,
2127 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2128 .fixed_ctr = KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2129 .fixed_ctl = KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2130 .box_ctl = KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2131 .ops = &knl_uncore_imc_ops,
2132 .format_group = &snbep_uncore_format_group,
2133};
2134
2135static struct intel_uncore_type knl_uncore_edc_uclk = {
2136 .name = "edc_uclk",
2137 .num_counters = 4,
2138 .num_boxes = 8,
2139 .perf_ctr_bits = 48,
2140 .fixed_ctr_bits = 48,
2141 .perf_ctr = KNL_UCLK_MSR_PMON_CTR0_LOW,
2142 .event_ctl = KNL_UCLK_MSR_PMON_CTL0,
2143 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2144 .fixed_ctr = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2145 .fixed_ctl = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2146 .box_ctl = KNL_UCLK_MSR_PMON_BOX_CTL,
2147 .ops = &knl_uncore_imc_ops,
2148 .format_group = &snbep_uncore_format_group,
2149};
2150
2151static struct intel_uncore_type knl_uncore_edc_eclk = {
2152 .name = "edc_eclk",
2153 .num_counters = 4,
2154 .num_boxes = 8,
2155 .perf_ctr_bits = 48,
2156 .fixed_ctr_bits = 48,
2157 .perf_ctr = KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2158 .event_ctl = KNL_EDC0_ECLK_MSR_PMON_CTL0,
2159 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
2160 .fixed_ctr = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2161 .fixed_ctl = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2162 .box_ctl = KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2163 .ops = &knl_uncore_imc_ops,
2164 .format_group = &snbep_uncore_format_group,
2165};
2166
2167static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2168 UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2169 EVENT_CONSTRAINT_END
2170};
2171
2172static struct intel_uncore_type knl_uncore_m2pcie = {
2173 .name = "m2pcie",
2174 .num_counters = 4,
2175 .num_boxes = 1,
2176 .perf_ctr_bits = 48,
2177 .constraints = knl_uncore_m2pcie_constraints,
2178 SNBEP_UNCORE_PCI_COMMON_INIT(),
2179};
2180
2181static struct attribute *knl_uncore_irp_formats_attr[] = {
2182 &format_attr_event.attr,
2183 &format_attr_umask.attr,
2184 &format_attr_qor.attr,
2185 &format_attr_edge.attr,
2186 &format_attr_inv.attr,
2187 &format_attr_thresh8.attr,
2188 NULL,
2189};
2190
45bd07ad 2191static const struct attribute_group knl_uncore_irp_format_group = {
77af0037
HC
2192 .name = "format",
2193 .attrs = knl_uncore_irp_formats_attr,
2194};
2195
2196static struct intel_uncore_type knl_uncore_irp = {
2197 .name = "irp",
2198 .num_counters = 2,
2199 .num_boxes = 1,
2200 .perf_ctr_bits = 48,
2201 .perf_ctr = SNBEP_PCI_PMON_CTR0,
2202 .event_ctl = SNBEP_PCI_PMON_CTL0,
2203 .event_mask = KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2204 .box_ctl = KNL_IRP_PCI_PMON_BOX_CTL,
2205 .ops = &snbep_uncore_pci_ops,
2206 .format_group = &knl_uncore_irp_format_group,
2207};
2208
/* Index of each PCI-based KNL uncore type in knl_pci_uncores[]. */
enum {
        KNL_PCI_UNCORE_MC_UCLK,
        KNL_PCI_UNCORE_MC_DCLK,
        KNL_PCI_UNCORE_EDC_UCLK,
        KNL_PCI_UNCORE_EDC_ECLK,
        KNL_PCI_UNCORE_M2PCIE,
        KNL_PCI_UNCORE_IRP,
};
2217
2218static struct intel_uncore_type *knl_pci_uncores[] = {
2219 [KNL_PCI_UNCORE_MC_UCLK] = &knl_uncore_imc_uclk,
2220 [KNL_PCI_UNCORE_MC_DCLK] = &knl_uncore_imc_dclk,
2221 [KNL_PCI_UNCORE_EDC_UCLK] = &knl_uncore_edc_uclk,
2222 [KNL_PCI_UNCORE_EDC_ECLK] = &knl_uncore_edc_eclk,
2223 [KNL_PCI_UNCORE_M2PCIE] = &knl_uncore_m2pcie,
2224 [KNL_PCI_UNCORE_IRP] = &knl_uncore_irp,
2225 NULL,
2226};
2227
/*
 * KNL uses a common PCI device ID for multiple instances of an uncore PMU
 * device type. Prior to KNL, each instance of a PMU device type had a unique
 * device ID.
 *
 *	PCI Device ID	Uncore PMU Devices
 *	----------------------------------
 *	0x7841		MC0 UClk, MC1 UClk
 *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
 *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
 *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
 *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
 *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
 *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
 *	0x7817		M2PCIe
 *	0x7814		IRP
 */
2245
2246static const struct pci_device_id knl_uncore_pci_ids[] = {
a54fa079 2247 { /* MC0 UClk */
77af0037 2248 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
a54fa079 2249 .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
77af0037 2250 },
a54fa079
KL
2251 { /* MC1 UClk */
2252 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2253 .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2254 },
2255 { /* MC0 DClk CH 0 */
2256 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2257 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2258 },
2259 { /* MC0 DClk CH 1 */
2260 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2261 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2262 },
2263 { /* MC0 DClk CH 2 */
2264 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2265 .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2266 },
2267 { /* MC1 DClk CH 0 */
2268 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2269 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2270 },
2271 { /* MC1 DClk CH 1 */
77af0037 2272 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
a54fa079
KL
2273 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2274 },
2275 { /* MC1 DClk CH 2 */
2276 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2277 .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2278 },
2279 { /* EDC0 UClk */
2280 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2281 .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2282 },
2283 { /* EDC1 UClk */
2284 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2285 .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2286 },
2287 { /* EDC2 UClk */
2288 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2289 .driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2290 },
2291 { /* EDC3 UClk */
2292 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2293 .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
77af0037 2294 },
a54fa079 2295 { /* EDC4 UClk */
77af0037 2296 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
a54fa079
KL
2297 .driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2298 },
2299 { /* EDC5 UClk */
2300 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2301 .driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2302 },
2303 { /* EDC6 UClk */
2304 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2305 .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2306 },
2307 { /* EDC7 UClk */
2308 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2309 .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2310 },
2311 { /* EDC0 EClk */
2312 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2313 .driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2314 },
2315 { /* EDC1 EClk */
2316 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2317 .driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2318 },
2319 { /* EDC2 EClk */
2320 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2321 .driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2322 },
2323 { /* EDC3 EClk */
2324 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2325 .driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2326 },
2327 { /* EDC4 EClk */
2328 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2329 .driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2330 },
2331 { /* EDC5 EClk */
2332 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2333 .driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2334 },
2335 { /* EDC6 EClk */
2336 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2337 .driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
77af0037 2338 },
a54fa079 2339 { /* EDC7 EClk */
77af0037 2340 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
a54fa079 2341 .driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
77af0037
HC
2342 },
2343 { /* M2PCIe */
2344 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2345 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2346 },
2347 { /* IRP */
2348 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2349 .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2350 },
2351 { /* end: all zeroes */ }
2352};
2353
2354static struct pci_driver knl_uncore_pci_driver = {
2355 .name = "knl_uncore",
2356 .id_table = knl_uncore_pci_ids,
2357};
2358
2359int knl_uncore_pci_init(void)
2360{
2361 int ret;
2362
2363 /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2364 ret = snb_pci2phy_map_init(0x7814); /* IRP */
2365 if (ret)
2366 return ret;
2367 ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2368 if (ret)
2369 return ret;
2370 uncore_pci_uncores = knl_pci_uncores;
2371 uncore_pci_driver = &knl_uncore_pci_driver;
2372 return 0;
2373}
2374
2375/* end of KNL uncore support */
2376
e735b9db
YZ
2377/* Haswell-EP uncore support */
2378static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2379 &format_attr_event.attr,
2380 &format_attr_umask.attr,
2381 &format_attr_edge.attr,
2382 &format_attr_inv.attr,
2383 &format_attr_thresh5.attr,
2384 &format_attr_filter_tid2.attr,
2385 &format_attr_filter_cid.attr,
2386 NULL,
2387};
2388
45bd07ad 2389static const struct attribute_group hswep_uncore_ubox_format_group = {
e735b9db
YZ
2390 .name = "format",
2391 .attrs = hswep_uncore_ubox_formats_attr,
2392};
2393
2394static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2395{
2396 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2397 reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2398 reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2399 reg1->idx = 0;
2400 return 0;
2401}
2402
2403static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2404 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2405 .hw_config = hswep_ubox_hw_config,
2406 .get_constraint = uncore_get_constraint,
2407 .put_constraint = uncore_put_constraint,
2408};
2409
2410static struct intel_uncore_type hswep_uncore_ubox = {
2411 .name = "ubox",
2412 .num_counters = 2,
2413 .num_boxes = 1,
2414 .perf_ctr_bits = 44,
2415 .fixed_ctr_bits = 48,
2416 .perf_ctr = HSWEP_U_MSR_PMON_CTR0,
2417 .event_ctl = HSWEP_U_MSR_PMON_CTL0,
2418 .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2419 .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2420 .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2421 .num_shared_regs = 1,
2422 .ops = &hswep_uncore_ubox_ops,
2423 .format_group = &hswep_uncore_ubox_format_group,
2424};
2425
2426static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2427 &format_attr_event.attr,
2428 &format_attr_umask.attr,
2429 &format_attr_edge.attr,
2430 &format_attr_tid_en.attr,
2431 &format_attr_thresh8.attr,
2432 &format_attr_filter_tid3.attr,
2433 &format_attr_filter_link2.attr,
2434 &format_attr_filter_state3.attr,
2435 &format_attr_filter_nid2.attr,
2436 &format_attr_filter_opc2.attr,
2437 &format_attr_filter_nc.attr,
2438 &format_attr_filter_c6.attr,
2439 &format_attr_filter_isoc.attr,
2440 NULL,
2441};
2442
45bd07ad 2443static const struct attribute_group hswep_uncore_cbox_format_group = {
e735b9db
YZ
2444 .name = "format",
2445 .attrs = hswep_uncore_cbox_formats_attr,
2446};
2447
2448static struct event_constraint hswep_uncore_cbox_constraints[] = {
2449 UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2450 UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2451 UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2452 UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2453 UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2454 UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2455 UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2456 EVENT_CONSTRAINT_END
2457};
2458
2459static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2460 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2461 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2462 SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2463 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2464 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2465 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2466 SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2467 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2468 SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2469 SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2470 SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2471 SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2472 SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2473 SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2474 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2475 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2476 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2477 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2478 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2479 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2480 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2481 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2482 SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2483 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2484 SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2485 SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2486 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2487 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2488 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2489 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2490 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2491 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2492 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2493 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2494 SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2495 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2496 SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2497 SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
2498 EVENT_EXTRA_END
2499};
2500
2501static u64 hswep_cbox_filter_mask(int fields)
2502{
2503 u64 mask = 0;
2504 if (fields & 0x1)
2505 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2506 if (fields & 0x2)
2507 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2508 if (fields & 0x4)
2509 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2510 if (fields & 0x8)
2511 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2512 if (fields & 0x10) {
2513 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2514 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2515 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2516 mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2517 }
2518 return mask;
2519}
2520
2521static struct event_constraint *
2522hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2523{
2524 return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2525}
2526
2527static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2528{
2529 struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2530 struct extra_reg *er;
2531 int idx = 0;
2532
2533 for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2534 if (er->event != (event->hw.config & er->config_mask))
2535 continue;
2536 idx |= er->idx;
2537 }
2538
2539 if (idx) {
2540 reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2541 HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2542 reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2543 reg1->idx = idx;
2544 }
2545 return 0;
2546}
2547
2548static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2549 struct perf_event *event)
2550{
2551 struct hw_perf_event *hwc = &event->hw;
2552 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2553
2554 if (reg1->idx != EXTRA_REG_NONE) {
2555 u64 filter = uncore_shared_reg_config(box, 0);
2556 wrmsrl(reg1->reg, filter & 0xffffffff);
2557 wrmsrl(reg1->reg + 1, filter >> 32);
2558 }
2559
2560 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2561}
2562
2563static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2564 .init_box = snbep_uncore_msr_init_box,
2565 .disable_box = snbep_uncore_msr_disable_box,
2566 .enable_box = snbep_uncore_msr_enable_box,
2567 .disable_event = snbep_uncore_msr_disable_event,
2568 .enable_event = hswep_cbox_enable_event,
2569 .read_counter = uncore_msr_read_counter,
2570 .hw_config = hswep_cbox_hw_config,
2571 .get_constraint = hswep_cbox_get_constraint,
2572 .put_constraint = snbep_cbox_put_constraint,
2573};
2574
2575static struct intel_uncore_type hswep_uncore_cbox = {
2576 .name = "cbox",
2577 .num_counters = 4,
2578 .num_boxes = 18,
8cf1a3de 2579 .perf_ctr_bits = 48,
e735b9db
YZ
2580 .event_ctl = HSWEP_C0_MSR_PMON_CTL0,
2581 .perf_ctr = HSWEP_C0_MSR_PMON_CTR0,
2582 .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2583 .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL,
2584 .msr_offset = HSWEP_CBO_MSR_OFFSET,
2585 .num_shared_regs = 1,
2586 .constraints = hswep_uncore_cbox_constraints,
2587 .ops = &hswep_uncore_cbox_ops,
2588 .format_group = &hswep_uncore_cbox_format_group,
2589};
2590
68055915
AK
2591/*
2592 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2593 */
2594static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2595{
2596 unsigned msr = uncore_msr_box_ctl(box);
2597
2598 if (msr) {
2599 u64 init = SNBEP_PMON_BOX_CTL_INT;
2600 u64 flags = 0;
2601 int i;
2602
2603 for_each_set_bit(i, (unsigned long *)&init, 64) {
2604 flags |= (1ULL << i);
2605 wrmsrl(msr, flags);
2606 }
2607 }
2608}
2609
2610static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2611 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2612 .init_box = hswep_uncore_sbox_msr_init_box
2613};
2614
e735b9db
YZ
2615static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2616 &format_attr_event.attr,
2617 &format_attr_umask.attr,
2618 &format_attr_edge.attr,
2619 &format_attr_tid_en.attr,
2620 &format_attr_inv.attr,
2621 &format_attr_thresh8.attr,
2622 NULL,
2623};
2624
45bd07ad 2625static const struct attribute_group hswep_uncore_sbox_format_group = {
e735b9db
YZ
2626 .name = "format",
2627 .attrs = hswep_uncore_sbox_formats_attr,
2628};
2629
2630static struct intel_uncore_type hswep_uncore_sbox = {
2631 .name = "sbox",
2632 .num_counters = 4,
2633 .num_boxes = 4,
2634 .perf_ctr_bits = 44,
2635 .event_ctl = HSWEP_S0_MSR_PMON_CTL0,
2636 .perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
2637 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2638 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
2639 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
68055915 2640 .ops = &hswep_uncore_sbox_msr_ops,
e735b9db
YZ
2641 .format_group = &hswep_uncore_sbox_format_group,
2642};
2643
2644static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2645{
2646 struct hw_perf_event *hwc = &event->hw;
2647 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2648 int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2649
2650 if (ev_sel >= 0xb && ev_sel <= 0xe) {
2651 reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2652 reg1->idx = ev_sel - 0xb;
2653 reg1->config = event->attr.config1 & (0xff << reg1->idx);
2654 }
2655 return 0;
2656}
2657
/* PCU ops: common accessors plus band-filter config and shared-reg juggling. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* HSW-EP power control unit PMU; one shared reg for the band filter. */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2678
/* NULL-terminated list of HSW-EP MSR-based uncore PMUs. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2686
/* Register the HSW-EP MSR uncores, trimming box counts to the actual part. */
void hswep_uncore_cpu_init(void)
{
	int pkg = boot_cpu_data.logical_proc_id;

	/* One CBOX per core at most. */
	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		u32 capid4;

		/*
		 * Read capability register CAPID4 (offset 0x94) from the
		 * PCU.3 PCI function captured by the PCI probe; bits 7:6
		 * value 0 indicates a part with only two SBOXes.
		 * NOTE(review): field meaning taken from the existing
		 * comment above -- confirm against the uncore spec.
		 */
		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
				      0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}
2706
/* HSW-EP home agent PMU (PCI-based); common SNB-EP PCI counter layout. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2714
/*
 * Pre-defined HSW-EP IMC events.  CAS counts are scaled by 64/2^20
 * (6.103515625e-5) so perf reports MiB of memory traffic per CAS.
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2725
/* HSW-EP memory controller PMU: 8 channels, plus a fixed DCLK counter. */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2737
/* PCI config offsets of the four IRP counters (64 bits each). */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};

/*
 * Read one 64-bit IRP counter as two 32-bit PCI config reads.
 * NOTE(review): the two halves are not read atomically, so a carry
 * between the reads can momentarily skew the value -- presumably
 * tolerated like other uncore PCI counters.
 */
static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
2751
/* IRP ops: SNB-EP box control, IVB-EP event control, HSW-EP counter read. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};

/* HSW-EP IRP (IIO ring port) PMU; counters read via the table above. */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2771
/* HSW-EP QPI link-layer PMU; shared reg holds the match/mask filter. */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2785
/* R2PCIe events with restricted counters: (event code, allowed counter mask). */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};

/* HSW-EP ring-to-PCIe PMU. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2816
/* R3QPI events with restricted counters: (event code, allowed counter mask). */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

/* HSW-EP ring-to-QPI PMU; only 44-bit counters on this box. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2862
/* Indices into hswep_pci_uncores[], referenced from the PCI ID table. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	  = &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	  = &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	  = &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	  = &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE] = &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]  = &hswep_uncore_r3qpi,
	NULL,
};
2881
/*
 * HSW-EP uncore PCI devices.  driver_data packs (uncore type index,
 * box index); UNCORE_EXTRA_PCI_DEV entries are captured for later
 * config-space reads (QPI filters, PCU.3 capability register) rather
 * than exposed as PMUs.
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
2977
2978int hswep_uncore_pci_init(void)
2979{
68ce4a0d 2980 int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
e735b9db
YZ
2981 if (ret)
2982 return ret;
2983 uncore_pci_uncores = hswep_pci_uncores;
2984 uncore_pci_driver = &hswep_uncore_pci_driver;
2985 return 0;
2986}
2987/* end of Haswell-EP uncore support */
070e9887 2988
d6980ef3 2989/* BDX uncore support */
070e9887
KL
2990
/* BDX ubox PMU: 2 general counters plus a fixed UCLK counter. */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3006
/* CBOX events with restricted counters: (event code, allowed counter mask). */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};

/*
 * BDX CBOX (LLC slice) PMU; num_boxes is clamped to the actual core
 * count in bdx_uncore_cpu_init().
 */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
3030
/* NULL-terminated list of BDX MSR-based uncore PMUs (PCU reused from HSW-EP). */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	NULL,
};
3037
3038void bdx_uncore_cpu_init(void)
3039{
3040 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3041 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3042 uncore_msr_uncores = bdx_msr_uncores;
3043}
3044
/* BDX home agent PMU (PCI-based). */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3052
/* BDX memory controller PMU; reuses the HSW-EP pre-defined IMC events. */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3064
/* BDX IRP PMU; shares the HSW-EP IRP ops (config-space counter reads). */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
3075
/* BDX QPI link-layer PMU; shared reg holds the match/mask filter. */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
070e9887
KL
3089
3090static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3091 UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3092 UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3093 UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3094 UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3095 UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3096 UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
d6980ef3
KL
3097 UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3098 UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
070e9887
KL
3099 UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3100 EVENT_CONSTRAINT_END
3101};
3102
3103static struct intel_uncore_type bdx_uncore_r2pcie = {
3104 .name = "r2pcie",
3105 .num_counters = 4,
3106 .num_boxes = 1,
3107 .perf_ctr_bits = 48,
3108 .constraints = bdx_uncore_r2pcie_constraints,
3109 SNBEP_UNCORE_PCI_COMMON_INIT(),
3110};
3111
/* R3QPI events with restricted counters: (event code, allowed counter mask). */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

/* BDX ring-to-QPI PMU. */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3154
/* Indices into bdx_pci_uncores[], referenced from the PCI ID table. */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3173
/*
 * BDX uncore PCI devices.  driver_data packs (uncore type index, box
 * index); the QPI filter entries are UNCORE_EXTRA_PCI_DEV devices kept
 * only for config-space access.
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
	},
	{ /* QPI Port 1 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
	},
	{ /* QPI Port 2 filter */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3266
3267int bdx_uncore_pci_init(void)
3268{
68ce4a0d 3269 int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
070e9887
KL
3270
3271 if (ret)
3272 return ret;
3273 uncore_pci_uncores = bdx_pci_uncores;
3274 uncore_pci_driver = &bdx_uncore_pci_driver;
3275 return 0;
3276}
3277
d6980ef3 3278/* end of BDX uncore support */
cd34cd97
KL
3279
3280/* SKX uncore support */
3281
/* SKX ubox PMU: 2 general counters plus a fixed UCLK counter. */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3296
/* Event and filter-register format fields exposed in sysfs for the SKX CHA. */
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	/* Fields below program the CHA filter registers via config1. */
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};
3322
/* CHA events with restricted counters: (event code, allowed counter mask). */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
3328
/*
 * Events that need a CHA filter register.  idx encodes which filter
 * fields apply (see skx_cha_filter_mask): 0x4 = STATE, 0x8 = the
 * opcode/locality group.
 */
static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	EVENT_EXTRA_END
};
3340
3341static u64 skx_cha_filter_mask(int fields)
3342{
3343 u64 mask = 0;
3344
3345 if (fields & 0x1)
3346 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3347 if (fields & 0x2)
3348 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3349 if (fields & 0x4)
3350 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
8aa7b7b4
SE
3351 if (fields & 0x8) {
3352 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3353 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3354 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3355 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3356 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3357 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3358 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3359 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3360 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3361 }
cd34cd97
KL
3362 return mask;
3363}
3364
/* Acquire the shared CHA filter register using the SKX field mask. */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3370
/*
 * Decide whether the event needs the CHA filter register and, if so,
 * record the per-box filter MSR plus the config1 bits that apply.
 * Always returns 0.
 */
static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* Collect the filter-field bits of every extra_reg this event matches. */
	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
3391
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * SKX CHA (caching/home agent) PMU.  num_boxes is left unset here and
 * filled in by skx_uncore_cpu_init() from the CHA PCI device count.
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3419
/* Event format fields for the SKX IIO PMU (channel and FC masks are IIO-only). */
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};

/* IIO events with restricted counters: (event code, allowed counter mask). */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};
3445
3446static void skx_iio_enable_event(struct intel_uncore_box *box,
3447 struct perf_event *event)
3448{
3449 struct hw_perf_event *hwc = &event->hw;
3450
3451 wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3452}
3453
/* IIO ops: IVB-EP/SNB-EP box handling with the SKX-specific event enable. */
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};

/* SKX IIO PMU: one box per IIO stack; wide event mask needs the ext field. */
static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
};
3478
/* Generic SKX event format fields (used by IRP and IMC). */
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};
3492
/* SKX IRP PMU: MSR-based on SKX (unlike HSW/BDX), one box per IIO stack. */
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};
3506
/* SKX PCU event format fields, including the four occupancy band filters. */
static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
3521
3522static struct attribute_group skx_uncore_pcu_format_group = {
3523 .name = "format",
3524 .attrs = skx_uncore_pcu_formats_attr,
3525};
3526
cd34cd97
KL
/* PCU ops: IVB-EP accessors, HSW-EP band-filter config and constraints. */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SKX power control unit PMU; one shared reg for the band filter. */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
3547
/* NULL-terminated list of SKX MSR-based uncore PMUs. */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
3556
/*
 * Count the CHAs on this part by enumerating CHA PCI devices
 * (device id 0x208d) and stopping at the first device on a different
 * bus, so only one socket's worth is counted.  pci_get_device() drops
 * the previous device's reference on each call; the final put releases
 * the device that terminated the loop (pci_dev_put(NULL) is a no-op).
 */
static int skx_count_chabox(void)
{
	struct pci_dev *chabox_dev = NULL;
	int bus, count = 0;

	while (1) {
		chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev);
		if (!chabox_dev)
			break;
		if (count == 0)
			bus = chabox_dev->bus->number;	/* remember first socket's bus */
		if (bus != chabox_dev->bus->number)
			break;
		count++;
	}

	pci_dev_put(chabox_dev);
	return count;
}
3576
/* Register the SKX MSR uncores; CHA box count is probed from PCI. */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
3582
/* SKX memory controller PMU: 6 channels; reuses the HSW-EP IMC events. */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3599
/*
 * sysfs format attributes for the UPI PMON: UPI uses extended event and
 * umask fields (wider than the common SNB-EP encoding).
 */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
3608
/* "format" sysfs group exposing the UPI-specific event encoding fields. */
static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
3613
/*
 * Initialize a UPI PMON box: flag that its per-counter control registers
 * are spaced 8 bytes apart (UNCORE_BOX_FLAG_CTL_OFFS8), then write the
 * init value (counter/control reset) to the box control register.
 */
static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
3621
/* UPI box ops: SKX-specific init, SNB-EP PCI accessors for the rest. */
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
3630
/*
 * SKX UPI (Ultra Path Interconnect) uncore PMU: 3 link boxes with 4
 * generic 48-bit counters each, accessed via PCI.  The event mask is
 * extended with SKX_UPI_CTL_UMASK_EXT for the wider UPI umask field.
 */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
3644
/*
 * Initialize an M2M PMON box: same scheme as UPI — mark the 8-byte
 * control register stride, then reset the box via its control register.
 */
static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
3652
/* M2M box ops: SKX-specific init, SNB-EP PCI accessors for the rest. */
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
3661
/*
 * SKX M2M (mesh-to-memory) uncore PMU: 2 boxes with 4 generic 48-bit
 * counters each, accessed via PCI with SKX M2M register offsets.
 */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3674
/* M2PCIe scheduling constraints: event 0x23 may only use counters 0-1. */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
3679
/*
 * SKX M2PCIe uncore PMU: 4 boxes with 4 generic 48-bit counters each,
 * accessed via PCI using the common SNB-EP register offsets, with
 * per-event counter constraints.
 */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3693
/*
 * M3UPI scheduling constraints: events 0x1d/0x1e are restricted to
 * counter 0; the 0x40..0x52 events may use counters 0-2.
 */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};
3705
/*
 * SKX M3UPI uncore PMU: 3 boxes with 3 generic 48-bit counters each,
 * accessed via PCI with the common SNB-EP offsets and per-event
 * counter constraints.
 */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3719
/* Indices into skx_pci_uncores[], referenced by the PCI ID table below. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};
3727
/* All PCI-based SKX uncore PMU types, indexed by SKX_PCI_UNCORE_*. */
static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
3736
/*
 * PCI device ID table for the SKX uncore boxes.  Each entry's
 * driver_data packs (device, function, uncore type index, box index)
 * via UNCORE_PCI_DEV_FULL_DATA; the device/function numbers are the
 * fixed locations of the PMON devices on the socket's uncore bus.
 */
static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
3812
3813
/* PCI driver shell; probe/remove are supplied by the common uncore core. */
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
3818
/*
 * Register the PCI-based SKX uncore PMUs.
 *
 * First builds the PCI-bus -> physical-socket mapping from device
 * 0x2014 using the SKX node-id config registers, then installs the
 * SKX PCI uncore type table and driver.
 *
 * Returns 0 on success or the error from snbep_pci2phy_map_init().
 */
int skx_uncore_pci_init(void)
{
	/* need to double check pci address */
	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);

	if (ret)
		return ret;

	uncore_pci_uncores = skx_pci_uncores;
	uncore_pci_driver = &skx_uncore_pci_driver;
	return 0;
}
3831
3832/* end of SKX uncore support */