/* Performance event support for sparc64.
 *
 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/cpudata.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>

/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */

#define MAX_HWEVENTS		2
#define MAX_PERIOD		((1UL << 32) - 1)

#define PIC_UPPER_INDEX		0
#define PIC_LOWER_INDEX		1

struct cpu_hw_events {
	struct perf_event	*events[MAX_HWEVENTS];
	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	int			enabled;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

struct perf_event_map {
	u16	encoding;
	u8	pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

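/* Per-chip PMU description.  event_map and cache_map translate
 * generic perf event types into chip-specific encodings.  upper_shift,
 * lower_shift, and event_mask give the location of each counter's
 * event field within the control register.  hv_bit and irq_bit are
 * the hypervisor-trace and overflow-interrupt enable bits (left zero
 * on chips that lack them), and upper_nop/lower_nop are the
 * "sw_count" encodings used to idle a counter, per the hack described
 * above.
 */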
struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	const cache_map_t		*cache_map;
	int				max_events;
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				hv_bit;
	int				irq_bit;
	int				upper_nop;
	int				lower_nop;
};

static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}

static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu ultra3_pmu = {
	.event_map	= ultra3_event_map,
	.cache_map	= &ultra3_cache_map,
	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
	.upper_shift	= 11,
	.lower_shift	= 4,
	.event_mask	= 0x3f,
	.upper_nop	= 0x1c,
	.lower_nop	= 0x14,
};

/* Niagara1 is very limited.  The upper PIC is hard-locked to count
 * only instructions, so it is free running which creates all kinds of
 * problems.  Some hardware designs make one wonder if the creators
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}

static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara1_pmu = {
	.event_map	= niagara1_event_map,
	.cache_map	= &niagara1_cache_map,
	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
	.upper_shift	= 0,
	.lower_shift	= 4,
	.event_mask	= 0x7,
	.upper_nop	= 0x0,
	.lower_nop	= 0x0,
};

static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}

static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b04, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara2_pmu = {
	.event_map	= niagara2_event_map,
	.cache_map	= &niagara2_cache_map,
	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
	.upper_shift	= 19,
	.lower_shift	= 6,
	.event_mask	= 0xfff,
	.hv_bit		= 0x8,
	.irq_bit	= 0x30,
	.upper_nop	= 0x220,
	.lower_nop	= 0x220,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}

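/* A counter cannot be stopped on its own (see the comment at the top
 * of this file), so "enabling" an event writes its encoding into the
 * counter's event field in the PCR, and "disabling" writes the
 * per-counter NOP encoding from nop_for_index() in its place.
 */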
static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc,
					  int idx)
{
	u64 val, mask = mask_for_index(idx);

	val = pcr_ops->read();
	pcr_ops->write((val & ~mask) | hwc->config);
}

static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc,
					   int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val = pcr_ops->read();

	pcr_ops->write((val & ~mask) | nop);
}

void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;
	int i;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	val = pcr_ops->read();

	for (i = 0; i < MAX_HWEVENTS; i++) {
		struct perf_event *cp = cpuc->events[i];
		struct hw_perf_event *hwc;

		if (!cp)
			continue;
		hwc = &cp->hw;
		val |= hwc->config_base;
	}

	pcr_ops->write(val);
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;

	val = pcr_ops->read();
	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
	pcr_ops->write(val);
}

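/* Both 32-bit counters live in the one 64-bit %pic register: the
 * upper counter occupies bits 63:32 and the lower counter bits 31:0.
 * read_pmc and write_pmc extract and update one half at a time.
 */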
static u32 read_pmc(int idx)
{
	u64 val;

	read_pic(val);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	read_pic(pic);
	pic &= ~mask;
	pic |= val;
	write_pic(pic);
}

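/* Program the hardware counter to (u64)-left, truncated to 32 bits,
 * so that it overflows, and raises its interrupt, after "left" more
 * events.  "left" is clamped to MAX_PERIOD since the counters are
 * only 32 bits wide.
 */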
static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	atomic64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

static int sparc_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (test_and_set_bit(idx, cpuc->used_mask))
		return -EAGAIN;

	sparc_pmu_disable_event(hwc, idx);

	cpuc->events[idx] = event;
	set_bit(idx, cpuc->active_mask);

	sparc_perf_event_set_period(event, hwc, idx);
	sparc_pmu_enable_event(hwc, idx);
	perf_event_update_userpage(event);
	return 0;
}

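/* Fold the delta since the last read into the generic event count.
 * The cmpxchg loop guards against the overflow NMI updating
 * prev_count underneath us.  Shifting both 32-bit raw counts to the
 * top of the 64-bit delta before subtracting computes the difference
 * modulo 2^32, so a counter wrap between reads is handled correctly.
 */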
static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void sparc_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	clear_bit(idx, cpuc->active_mask);
	sparc_pmu_disable_event(hwc, idx);

	barrier();

	sparc_perf_event_update(event, hwc, idx);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void sparc_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	sparc_perf_event_update(event, hwc, hwc->idx);
}

static void sparc_pmu_unthrottle(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	sparc_pmu_enable_event(hwc, hwc->idx);
}

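/* The perf counters are shared with the NMI watchdog, so the two
 * facilities are made mutually exclusive: the first active event
 * stops the watchdog on all cpus, and the watchdog is only restarted
 * once the last event has been released.
 */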
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}

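/* Decode a PERF_TYPE_HW_CACHE config value: the cache type is in
 * bits 7:0, the operation in bits 15:8, and the access result in
 * bits 23:16.  The three fields together index the chip's cache_map.
 */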
static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}

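/* Because one PCR controls both counters, the user, kernel, and
 * hypervisor exclude bits apply to everything that is counting at
 * once.  Events may therefore only be scheduled together when their
 * exclude settings all agree.
 */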
static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;
	int i, n, first;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; i++) {
		event = evts[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	return 0;
}

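/* Gather the group leader and its siblings into evts[], recording
 * each event's hardware config in events[].  Pure software events are
 * skipped since they never occupy a counter.  Returns the number of
 * hardware events collected, or -1 if max_count would be exceeded.
 */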
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *evts[], u64 *events)
{
	struct perf_event *event;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		evts[n] = group;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			evts[n] = event;
			events[n++] = event->hw.config;
		}
	}
	return n;
}

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct perf_event *evts[MAX_HWEVENTS];
	struct hw_perf_event *hwc = &event->hw;
	const struct perf_event_map *pmap;
	u64 enc, events[MAX_HWEVENTS];
	int n;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= sparc_pmu->max_events)
			return -EINVAL;
		pmap = sparc_pmu->event_map(attr->config);
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		pmap = sparc_map_cache_event(attr->config);
		if (IS_ERR(pmap))
			return PTR_ERR(pmap);
	} else
		return -EOPNOTSUPP;

	/* We save the enable bits in the config_base.  So to
	 * turn off sampling just write 'config', and to enable
	 * things write 'config | config_base'.
	 */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;

	enc = pmap->encoding;

	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   perf_max_events - 1,
				   evts, events);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = enc;
	evts[n] = event;

	if (check_excludes(evts, n, 1))
		return -EINVAL;

	/* Try to do all error checking before this point, as unwinding
	 * state after grabbing the PMC is difficult.
	 */
	perf_event_grab_pmc();
	event->destroy = hw_perf_event_destroy;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	if (pmap->pic_mask & PIC_UPPER) {
		hwc->idx = PIC_UPPER_INDEX;
		enc <<= sparc_pmu->upper_shift;
	} else {
		hwc->idx = PIC_LOWER_INDEX;
		enc <<= sparc_pmu->lower_shift;
	}

	hwc->config |= enc;
	return 0;
}

static const struct pmu pmu = {
	.enable		= sparc_pmu_enable,
	.disable	= sparc_pmu_disable,
	.read		= sparc_pmu_read,
	.unthrottle	= sparc_pmu_unthrottle,
};

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err = __hw_perf_event_init(event);

	if (err)
		return ERR_PTR(err);
	return &pmu;
}

void perf_event_print_debug(void)
{
	unsigned long flags;
	u64 pcr, pic;
	int cpu;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = pcr_ops->read();
	read_pic(pic);

	pr_info("\n");
	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
		cpu, pcr, pic);

	local_irq_restore(flags);
}

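/* Overflow interrupts can fire for counters that are idling on a NOP
 * event (see the comment at the top of this file), so only events in
 * active_mask are processed.  An overflowed counter has wrapped back
 * near zero, so a count with bit 31 still set has not overflowed yet
 * and is skipped.
 */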
static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
					    unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	data.addr = 0;

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < MAX_HWEVENTS; idx++) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		hwc = &event->hw;
		val = sparc_perf_event_update(event, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		data.period = event->hw.last_period;
		if (!sparc_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			sparc_pmu_disable_event(hwc, idx);
	}

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
};

static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3") ||
	    !strcmp(sparc_pmu_type, "ultra3+") ||
	    !strcmp(sparc_pmu_type, "ultra3i") ||
	    !strcmp(sparc_pmu_type, "ultra4+")) {
		sparc_pmu = &ultra3_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara")) {
		sparc_pmu = &niagara1_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara2")) {
		sparc_pmu = &niagara2_pmu;
		return true;
	}
	return false;
}

void __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	/* All sparc64 PMUs currently have 2 events.  But this simple
	 * driver only supports one active event at a time.
	 */
	perf_max_events = 1;

	register_die_notifier(&perf_event_nmi_notifier);
}