// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * In-Memory Collection (IMC) Performance Monitor counter support.
 *
 * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation.
 *           (C) 2017 Anju T Sudhakar, IBM Corporation.
 *           (C) 2017 Hemant K Shaw, IBM Corporation.
 */
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/opal.h>
#include <asm/imc-pmu.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>
#include <linux/string.h>

/* Nest IMC data structures and variables */

/*
 * Used to avoid races in counting the nest-pmu units during hotplug
 * register and unregister
 */
static DEFINE_MUTEX(nest_init_lock);
static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
static struct imc_pmu **per_nest_pmu_arr;
static cpumask_t nest_imc_cpumask;
static struct imc_pmu_ref *nest_imc_refc;
static int nest_pmus;

/* Core IMC data structures and variables */

static cpumask_t core_imc_cpumask;
static struct imc_pmu_ref *core_imc_refc;
static struct imc_pmu *core_imc_pmu;

/* Thread IMC data structures and variables */

static DEFINE_PER_CPU(u64 *, thread_imc_mem);
static struct imc_pmu *thread_imc_pmu;
static int thread_imc_mem_size;

/* Trace IMC data structures */
static DEFINE_PER_CPU(u64 *, trace_imc_mem);
static struct imc_pmu_ref *trace_imc_refc;
static int trace_imc_mem_size;

/*
 * Global data structure used to avoid races between thread,
 * core and trace-imc
 */
static struct imc_pmu_ref imc_global_refc = {
	.lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
	.id = 0,
	.refc = 0,
};

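/*
 * Only one of the thread, core and trace IMC modes can be active at a
 * time: each event_init below checks imc_global_refc.id under
 * imc_global_refc.lock and returns -EBUSY when a different domain
 * already owns the counters.
 */
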
static struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct imc_pmu, pmu);
}

PMU_FORMAT_ATTR(event, "config:0-61");
PMU_FORMAT_ATTR(offset, "config:0-31");
PMU_FORMAT_ATTR(rvalue, "config:32");
PMU_FORMAT_ATTR(mode, "config:33-40");
static struct attribute *imc_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_offset.attr,
	&format_attr_rvalue.attr,
	&format_attr_mode.attr,
	NULL,
};

static const struct attribute_group imc_format_group = {
	.name = "format",
	.attrs = imc_format_attrs,
};

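/*
 * perf core exports these format attributes under
 * /sys/bus/event_source/devices/<pmu>/format/, which is what lets the
 * perf tool decode an event string such as (PMU name and event
 * encoding illustrative):
 *
 *	perf stat -e 'nest_mcs01_imc/event=0x98/' -a sleep 1
 */
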
/* Format attributes for imc trace-mode */
PMU_FORMAT_ATTR(cpmc_reserved, "config:0-19");
PMU_FORMAT_ATTR(cpmc_event, "config:20-27");
PMU_FORMAT_ATTR(cpmc_samplesel, "config:28-29");
PMU_FORMAT_ATTR(cpmc_load, "config:30-61");
static struct attribute *trace_imc_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_cpmc_reserved.attr,
	&format_attr_cpmc_event.attr,
	&format_attr_cpmc_samplesel.attr,
	&format_attr_cpmc_load.attr,
	NULL,
};

static const struct attribute_group trace_imc_format_group = {
	.name = "format",
	.attrs = trace_imc_format_attrs,
};

/* Get the cpumask printed to a buffer "buf" */
static ssize_t imc_pmu_cpumask_get_attr(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu);
	cpumask_t *active_mask;

	switch (imc_pmu->domain) {
	case IMC_DOMAIN_NEST:
		active_mask = &nest_imc_cpumask;
		break;
	case IMC_DOMAIN_CORE:
		active_mask = &core_imc_cpumask;
		break;
	default:
		return 0;
	}

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, imc_pmu_cpumask_get_attr, NULL);

static struct attribute *imc_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group imc_pmu_cpumask_attr_group = {
	.attrs = imc_pmu_cpumask_attrs,
};

/* device_str_attr_create : Populate event "name" and string "str" in attribute */
static struct attribute *device_str_attr_create(const char *name, const char *str)
{
	struct perf_pmu_events_attr *attr;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return NULL;
	sysfs_attr_init(&attr->attr.attr);

	attr->event_str = str;
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;

	return &attr->attr.attr;
}

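/*
 * Note: the created attribute stores "name" and "str" by reference, so
 * both strings must stay allocated for the attribute's lifetime; here
 * they are the kasprintf()/kstrdup() buffers built in imc_parse_event()
 * and update_events_in_group().
 */
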
static int imc_parse_event(struct device_node *np, const char *scale,
			   const char *unit, const char *prefix,
			   u32 base, struct imc_events *event)
{
	const char *s;
	u32 reg;

	if (of_property_read_u32(np, "reg", &reg))
		goto error;
	/* Add the base_reg value to the "reg" */
	event->value = base + reg;

	if (of_property_read_string(np, "event-name", &s))
		goto error;

	event->name = kasprintf(GFP_KERNEL, "%s%s", prefix, s);
	if (!event->name)
		goto error;

	if (of_property_read_string(np, "scale", &s))
		s = scale;

	if (s) {
		event->scale = kstrdup(s, GFP_KERNEL);
		if (!event->scale)
			goto error;
	}

	if (of_property_read_string(np, "unit", &s))
		s = unit;

	if (s) {
		event->unit = kstrdup(s, GFP_KERNEL);
		if (!event->unit)
			goto error;
	}

	return 0;
error:
	kfree(event->unit);
	kfree(event->scale);
	kfree(event->name);
	return -EINVAL;
}

/*
 * imc_free_events: Function to clean up the events list, having
 *                  "nr_entries".
 */
static void imc_free_events(struct imc_events *events, int nr_entries)
{
	int i;

	/* Nothing to clean, return */
	if (!events)
		return;
	for (i = 0; i < nr_entries; i++) {
		kfree(events[i].unit);
		kfree(events[i].scale);
		kfree(events[i].name);
	}

	kfree(events);
}

/*
 * update_events_in_group: Update the "events" information in an attr_group
 *                         and assign the attr_group to the pmu "pmu".
 */
static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
{
	struct attribute_group *attr_group;
	struct attribute **attrs, *dev_str;
	struct device_node *np, *pmu_events;
	u32 handle, base_reg;
	int i = 0, j = 0, ct, ret;
	const char *prefix, *g_scale, *g_unit;
	const char *ev_val_str, *ev_scale_str, *ev_unit_str;

	if (!of_property_read_u32(node, "events", &handle))
		pmu_events = of_find_node_by_phandle(handle);
	else
		return 0;

	/* Did not find any node with the given phandle */
	if (!pmu_events)
		return 0;

	/* Get the number of child nodes */
	ct = of_get_child_count(pmu_events);

	/* Get the event prefix */
	if (of_property_read_string(node, "events-prefix", &prefix)) {
		of_node_put(pmu_events);
		return 0;
	}

	/* Get the global unit and scale data, if available */
	if (of_property_read_string(node, "scale", &g_scale))
		g_scale = NULL;

	if (of_property_read_string(node, "unit", &g_unit))
		g_unit = NULL;

	/* The "reg" property gives the base offset of the counters' data */
	of_property_read_u32(node, "reg", &base_reg);

	/* Allocate memory for the events */
	pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL);
	if (!pmu->events) {
		of_node_put(pmu_events);
		return -ENOMEM;
	}

	ct = 0;
	/* Parse the events and update the struct */
	for_each_child_of_node(pmu_events, np) {
		ret = imc_parse_event(np, g_scale, g_unit, prefix, base_reg, &pmu->events[ct]);
		if (!ret)
			ct++;
	}

	of_node_put(pmu_events);

	/* Allocate memory for the attribute group */
	attr_group = kzalloc(sizeof(*attr_group), GFP_KERNEL);
	if (!attr_group) {
		imc_free_events(pmu->events, ct);
		return -ENOMEM;
	}

	/*
	 * Allocate memory for attributes.
	 * Since we have the count of events for this pmu, we also allocate
	 * memory for the scale and unit attributes for now.
	 * "ct" has the total event structs added from the events-parent node.
	 * So allocate three times the "ct" (this includes event, event_scale and
	 * event_unit).
	 */
	attrs = kcalloc(((ct * 3) + 1), sizeof(struct attribute *), GFP_KERNEL);
	if (!attrs) {
		kfree(attr_group);
		imc_free_events(pmu->events, ct);
		return -ENOMEM;
	}

	attr_group->name = "events";
	attr_group->attrs = attrs;
	do {
		ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
		dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
		if (!dev_str)
			continue;

		attrs[j++] = dev_str;
		if (pmu->events[i].scale) {
			ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
			dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
			if (!dev_str)
				continue;

			attrs[j++] = dev_str;
		}

		if (pmu->events[i].unit) {
			ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
			dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
			if (!dev_str)
				continue;

			attrs[j++] = dev_str;
		}
	} while (++i < ct);

	/* Save the event attribute */
	pmu->attr_groups[IMC_EVENT_ATTR] = attr_group;

	return 0;
}

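/*
 * A minimal sketch of the device-tree shape consumed by
 * update_events_in_group()/imc_parse_event(); node names and values
 * here are illustrative, only the property names come from the code:
 *
 *	imc-pmu {
 *		events-prefix = "PM_";
 *		reg = <0x0>;
 *		events = <&nest_events>;
 *		scale = "...";			// optional global scale
 *		unit = "...";			// optional global unit
 *	};
 *
 *	nest_events: events {
 *		event@8 {
 *			event-name = "EVENT_A";
 *			reg = <0x8>;		// offset added to the base "reg"
 *			scale = "...";		// optional per-event override
 *			unit = "...";		// optional per-event override
 *		};
 *	};
 */
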
/* get_nest_pmu_ref: Return the imc_pmu_ref struct of the node that "cpu" belongs to */
static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
{
	return per_cpu(local_nest_imc_refc, cpu);
}

static void nest_change_cpu_context(int old_cpu, int new_cpu)
{
	struct imc_pmu **pn = per_nest_pmu_arr;

	if (old_cpu < 0 || new_cpu < 0)
		return;

	while (*pn) {
		perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
		pn++;
	}
}

static int ppc_nest_imc_cpu_offline(unsigned int cpu)
{
	int nid, target = -1;
	const struct cpumask *l_cpumask;
	struct imc_pmu_ref *ref;

	/*
	 * Check in the designated list for this cpu. Don't bother
	 * if it is not one of them.
	 */
	if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask))
		return 0;

	/*
	 * Check whether nest_imc is registered. We could end up here if the
	 * cpuhotplug callback registration fails, i.e. the callback invokes
	 * the offline path for all successfully registered nodes. At this
	 * stage, the nest_imc pmu will not be registered and we should
	 * return here.
	 *
	 * We return with a zero since this is not an offline failure. And
	 * cpuhp_setup_state() returns the actual failure reason to the caller,
	 * which in turn will call the cleanup routine.
	 */
	if (!nest_pmus)
		return 0;

	/*
	 * Now that this cpu is one of the designated ones,
	 * find another cpu that is (a) online and (b) in the same chip.
	 */
	nid = cpu_to_node(cpu);
	l_cpumask = cpumask_of_node(nid);
	target = cpumask_last(l_cpumask);

	/*
	 * If this (target) is the last cpu in the cpumask for this chip,
	 * check for any other possible online cpu in the chip.
	 */
	if (unlikely(target == cpu))
		target = cpumask_any_but(l_cpumask, cpu);

	/*
	 * Update the cpumask with the target cpu and
	 * migrate the context if needed
	 */
	if (target >= 0 && target < nr_cpu_ids) {
		cpumask_set_cpu(target, &nest_imc_cpumask);
		nest_change_cpu_context(cpu, target);
	} else {
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				       get_hard_smp_processor_id(cpu));
		/*
		 * If this is the last cpu in this chip, skip the reference
		 * count mutex lock and make the reference count on this
		 * chip zero.
		 */
		ref = get_nest_pmu_ref(cpu);
		if (!ref)
			return -EINVAL;

		ref->refc = 0;
	}
	return 0;
}

static int ppc_nest_imc_cpu_online(unsigned int cpu)
{
	const struct cpumask *l_cpumask;
	static struct cpumask tmp_mask;
	int res;

	/* Get the cpumask of this node */
	l_cpumask = cpumask_of_node(cpu_to_node(cpu));

	/*
	 * If this is not the first online CPU on this node, then
	 * just return.
	 */
	if (cpumask_and(&tmp_mask, l_cpumask, &nest_imc_cpumask))
		return 0;

	/*
	 * If this is the first online cpu on this node,
	 * disable the nest counters by making an OPAL call.
	 */
	res = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				     get_hard_smp_processor_id(cpu));
	if (res)
		return res;

	/* Make this CPU the designated target for counter collection */
	cpumask_set_cpu(cpu, &nest_imc_cpumask);
	return 0;
}

static int nest_pmu_cpumask_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
				 "perf/powerpc/imc:online",
				 ppc_nest_imc_cpu_online,
				 ppc_nest_imc_cpu_offline);
}

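/*
 * Note: cpuhp_setup_state() also runs the online callback on every cpu
 * that is already online at registration time, so the designated-cpu
 * cpumasks above get populated at driver init and not only on later
 * hotplug transitions.
 */
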
static void nest_imc_counters_release(struct perf_event *event)
{
	int rc, node_id;
	struct imc_pmu_ref *ref;

	if (event->cpu < 0)
		return;

	node_id = cpu_to_node(event->cpu);

	/*
	 * See if we need to disable the nest PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * enable or disable of the nest counters.
	 */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return;

	/* Take the mutex lock for this node and then decrement the reference count */
	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		/*
		 * The scenario where this is true is when a perf session is
		 * started, followed by offlining of all cpus in a given node.
		 *
		 * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
		 * sets ref->refc to zero if the cpu which is about to go
		 * offline is the last cpu in a given node, and makes an OPAL
		 * call to disable the engine in that node.
		 */
		mutex_unlock(&ref->lock);
		return;
	}
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("nest-imc: Unable to stop the counters for node %d\n", node_id);
			return;
		}
	} else if (ref->refc < 0) {
		WARN(1, "nest-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);
}

static int nest_imc_event_init(struct perf_event *event)
{
	int chip_id, rc, node_id;
	u32 l_config, config = event->attr.config;
	struct imc_mem_info *pcni;
	struct imc_pmu *pmu;
	struct imc_pmu_ref *ref;
	bool flag = false;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	pmu = imc_event_to_pmu(event);

	/* Sanity check for config (event offset) */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	/*
	 * Nest HW counter memory resides in a per-chip reserved memory
	 * region (HOMER). Get the base memory address for this cpu.
	 */
	chip_id = cpu_to_chip_id(event->cpu);

	/* Return, if chip_id is not valid */
	if (chip_id < 0)
		return -ENODEV;

	pcni = pmu->mem_info;
	do {
		if (pcni->id == chip_id) {
			flag = true;
			break;
		}
		pcni++;
	} while (pcni->vbase != 0);

	if (!flag)
		return -ENODEV;

	/*
	 * Add the event offset to the base address.
	 */
	l_config = config & IMC_EVENT_OFFSET_MASK;
	event->hw.event_base = (u64)pcni->vbase + l_config;
	node_id = cpu_to_node(event->cpu);

	/*
	 * Get the imc_pmu_ref struct for this node.
	 * Take the mutex lock and then increment the count of nest pmu
	 * events initialized.
	 */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return -EINVAL;

	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("nest-imc: Unable to start the counters for node %d\n",
			       node_id);
			return rc;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);

	event->destroy = nest_imc_counters_release;
	return 0;
}

/*
 * core_imc_mem_init : Initializes memory for the current core.
 *
 * Uses alloc_pages_node() to allocate a page, and passes the returned
 * address as an argument to an opal call to configure the pdbar. The
 * address sent as an argument is converted to a physical address before
 * the opal call is made. This is the base address at which the core imc
 * counters are populated.
 */
static int core_imc_mem_init(int cpu, int size)
{
	int nid, rc = 0, core_id = (cpu / threads_per_core);
	struct imc_mem_info *mem_info;
	struct page *page;

	/*
	 * alloc_pages_node() will allocate memory for the core in the
	 * local node only.
	 */
	nid = cpu_to_node(cpu);
	mem_info = &core_imc_pmu->mem_info[core_id];
	mem_info->id = core_id;

	/* We need only vbase for core counters */
	page = alloc_pages_node(nid,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
				__GFP_NOWARN, get_order(size));
	if (!page)
		return -ENOMEM;
	mem_info->vbase = page_address(page);

	/* Init the mutex */
	core_imc_refc[core_id].id = core_id;
	mutex_init(&core_imc_refc[core_id].lock);

	rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
				    __pa((void *)mem_info->vbase),
				    get_hard_smp_processor_id(cpu));
	if (rc) {
		free_pages((u64)mem_info->vbase, get_order(size));
		mem_info->vbase = NULL;
	}

	return rc;
}

static bool is_core_imc_mem_inited(int cpu)
{
	struct imc_mem_info *mem_info;
	int core_id = (cpu / threads_per_core);

	mem_info = &core_imc_pmu->mem_info[core_id];
	if (!mem_info->vbase)
		return false;

	return true;
}

static int ppc_core_imc_cpu_online(unsigned int cpu)
{
	const struct cpumask *l_cpumask;
	static struct cpumask tmp_mask;
	int ret = 0;

	/* Get the cpumask for this core */
	l_cpumask = cpu_sibling_mask(cpu);

	/* If a cpu for this core is already set, then don't do anything */
	if (cpumask_and(&tmp_mask, l_cpumask, &core_imc_cpumask))
		return 0;

	if (!is_core_imc_mem_inited(cpu)) {
		ret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size);
		if (ret) {
			pr_info("core_imc memory allocation for cpu %d failed\n", cpu);
			return ret;
		}
	}

	/* Set the cpu in the mask */
	cpumask_set_cpu(cpu, &core_imc_cpumask);
	return 0;
}

static int ppc_core_imc_cpu_offline(unsigned int cpu)
{
	unsigned int core_id;
	int ncpu;
	struct imc_pmu_ref *ref;

	/*
	 * Clear this cpu out of the mask; if it is not present in the mask,
	 * don't bother doing anything.
	 */
	if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask))
		return 0;

	/*
	 * Check whether core_imc is registered. We could end up here
	 * if the cpuhotplug callback registration fails, i.e. the callback
	 * invokes the offline path for all successfully registered cpus.
	 * At this stage, the core_imc pmu will not be registered and we
	 * should return here.
	 *
	 * We return with a zero since this is not an offline failure.
	 * And cpuhp_setup_state() returns the actual failure reason
	 * to the caller, which in turn will call the cleanup routine.
	 */
	if (!core_imc_pmu->pmu.event_init)
		return 0;

	/* Find any online cpu in that core except the current "cpu" */
	ncpu = cpumask_last(cpu_sibling_mask(cpu));

	if (unlikely(ncpu == cpu))
		ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);

	if (ncpu >= 0 && ncpu < nr_cpu_ids) {
		cpumask_set_cpu(ncpu, &core_imc_cpumask);
		perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
	} else {
		/*
		 * If this is the last cpu in this core, skip taking the
		 * reference count mutex lock for this core and directly
		 * zero "refc" for this core.
		 */
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
				       get_hard_smp_processor_id(cpu));
		core_id = cpu / threads_per_core;
		ref = &core_imc_refc[core_id];
		if (!ref)
			return -EINVAL;

		ref->refc = 0;
		/*
		 * Reduce the global reference count, if this is the
		 * last cpu in this core and a core-imc event was running
		 * on this cpu.
		 */
		mutex_lock(&imc_global_refc.lock);
		if (imc_global_refc.id == IMC_DOMAIN_CORE)
			imc_global_refc.refc--;

		mutex_unlock(&imc_global_refc.lock);
	}
	return 0;
}

static int core_imc_pmu_cpumask_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
				 "perf/powerpc/imc_core:online",
				 ppc_core_imc_cpu_online,
				 ppc_core_imc_cpu_offline);
}

static void reset_global_refc(struct perf_event *event)
{
	mutex_lock(&imc_global_refc.lock);
	imc_global_refc.refc--;

	/*
	 * If no other thread is running any
	 * event for this domain (thread/core/trace),
	 * set the global id to zero.
	 */
	if (imc_global_refc.refc <= 0) {
		imc_global_refc.refc = 0;
		imc_global_refc.id = 0;
	}
	mutex_unlock(&imc_global_refc.lock);
}

static void core_imc_counters_release(struct perf_event *event)
{
	int rc, core_id;
	struct imc_pmu_ref *ref;

	if (event->cpu < 0)
		return;
	/*
	 * See if we need to disable the IMC PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * enable or disable of the core counters.
	 */
	core_id = event->cpu / threads_per_core;

	/* Take the mutex lock and decrement the reference count for this core */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return;

	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		/*
		 * The scenario where this is true is when a perf session is
		 * started, followed by offlining of all cpus in a given core.
		 *
		 * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
		 * sets ref->refc to zero if the cpu which is about to go
		 * offline is the last cpu in a given core, and makes an OPAL
		 * call to disable the engine in that core.
		 */
		mutex_unlock(&ref->lock);
		return;
	}
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
			return;
		}
	} else if (ref->refc < 0) {
		WARN(1, "core-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);

	reset_global_refc(event);
}

static int core_imc_event_init(struct perf_event *event)
{
	int core_id, rc;
	u64 config = event->attr.config;
	struct imc_mem_info *pcmi;
	struct imc_pmu *pmu;
	struct imc_pmu_ref *ref;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	event->hw.idx = -1;
	pmu = imc_event_to_pmu(event);

	/* Sanity check for config (event offset) */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	if (!is_core_imc_mem_inited(event->cpu))
		return -ENODEV;

	core_id = event->cpu / threads_per_core;
	pcmi = &core_imc_pmu->mem_info[core_id];
	if (!pcmi->vbase)
		return -ENODEV;

	/* Get the core_imc mutex for this core */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;

	/*
	 * Core pmu units are enabled only when they are used.
	 * See if this is triggered for the first time.
	 * If yes, take the mutex lock and enable the core counters.
	 * If not, just increment the count in the core_imc_refc struct.
	 */
	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("core-imc: Unable to start the counters for core %d\n",
			       core_id);
			return rc;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);

	/*
	 * Since the system can run either in accumulation or trace-mode
	 * of IMC at a time, core-imc events are allowed only if no other
	 * trace/thread imc events are enabled/monitored.
	 *
	 * Take the global lock, and check the refc.id
	 * to know whether any other trace/thread imc
	 * events are running.
	 */
	mutex_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
		/*
		 * No other trace/thread imc events are running in
		 * the system, so set the refc.id to core-imc.
		 */
		imc_global_refc.id = IMC_DOMAIN_CORE;
		imc_global_refc.refc++;
	} else {
		mutex_unlock(&imc_global_refc.lock);
		return -EBUSY;
	}
	mutex_unlock(&imc_global_refc.lock);

	event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
	event->destroy = core_imc_counters_release;
	return 0;
}

/*
 * Allocates a page of memory for each of the online cpus, and loads
 * LDBAR with 0.
 * The physical base address of the page allocated for a cpu will be
 * written to the LDBAR for that cpu, when the thread-imc event
 * is added.
 *
 * LDBAR Register Layout:
 *
 *  0          4         8         12        16        20        24        28
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | |       [   ]    [  Counter Address [8:50]
 *   | * Mode    |
 *   |           * PB Scope
 *   * Enable/Disable
 *
 *  32        36        40        44        48        52        56        60
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *            Counter Address [8:50]        ]
 *
 */
static int thread_imc_mem_alloc(int cpu_id, int size)
{
	u64 *local_mem = per_cpu(thread_imc_mem, cpu_id);
	int nid = cpu_to_node(cpu_id);

	if (!local_mem) {
		struct page *page;
		/*
		 * This case could happen only once at start, since we don't
		 * free the memory in the cpu offline path.
		 */
		page = alloc_pages_node(nid,
					GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
					__GFP_NOWARN, get_order(size));
		if (!page)
			return -ENOMEM;
		local_mem = page_address(page);

		per_cpu(thread_imc_mem, cpu_id) = local_mem;
	}

	mtspr(SPRN_LDBAR, 0);
	return 0;
}

static int ppc_thread_imc_cpu_online(unsigned int cpu)
{
	return thread_imc_mem_alloc(cpu, thread_imc_mem_size);
}

static int ppc_thread_imc_cpu_offline(unsigned int cpu)
{
	/*
	 * Set bit 0 of LDBAR to zero.
	 *
	 * If bit 0 of LDBAR is unset, it will stop posting
	 * the counter data to memory.
	 * For thread-imc, bit 0 of LDBAR will be set to 1 in the
	 * event_add function. So reset this bit here, to stop the updates
	 * to memory in the cpu_offline path.
	 */
	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));

	/* Reduce the refc if a thread-imc event was running on this cpu */
	mutex_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == IMC_DOMAIN_THREAD)
		imc_global_refc.refc--;
	mutex_unlock(&imc_global_refc.lock);

	return 0;
}

static int thread_imc_cpu_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
				 "perf/powerpc/imc_thread:online",
				 ppc_thread_imc_cpu_online,
				 ppc_thread_imc_cpu_offline);
}

static int thread_imc_event_init(struct perf_event *event)
{
	u32 config = event->attr.config;
	struct task_struct *target;
	struct imc_pmu *pmu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (!perfmon_capable())
		return -EACCES;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	event->hw.idx = -1;
	pmu = imc_event_to_pmu(event);

	/* Sanity check for config offset */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	target = event->hw.target;
	if (!target)
		return -EINVAL;

	mutex_lock(&imc_global_refc.lock);
	/*
	 * Check if any other trace/core imc events are running in the
	 * system; if not, set the global id to thread-imc.
	 */
	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_THREAD) {
		imc_global_refc.id = IMC_DOMAIN_THREAD;
		imc_global_refc.refc++;
	} else {
		mutex_unlock(&imc_global_refc.lock);
		return -EBUSY;
	}
	mutex_unlock(&imc_global_refc.lock);

	event->pmu->task_ctx_nr = perf_sw_context;
	event->destroy = reset_global_refc;
	return 0;
}

static bool is_thread_imc_pmu(struct perf_event *event)
{
	if (!strncmp(event->pmu->name, "thread_imc", strlen("thread_imc")))
		return true;

	return false;
}

static u64 *get_event_base_addr(struct perf_event *event)
{
	u64 addr;

	if (is_thread_imc_pmu(event)) {
		addr = (u64)per_cpu(thread_imc_mem, smp_processor_id());
		return (u64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
	}

	return (u64 *)event->hw.event_base;
}

static void thread_imc_pmu_start_txn(struct pmu *pmu,
				     unsigned int txn_flags)
{
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;
	perf_pmu_disable(pmu);
}

static void thread_imc_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}

static int thread_imc_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}

static u64 imc_read_counter(struct perf_event *event)
{
	u64 *addr, data;

	/*
	 * In-Memory Collection (IMC) counters are free flowing counters.
	 * So we take a snapshot of the counter value on enable and save it
	 * to calculate the delta at a later stage to present the event
	 * counter value.
	 */
	addr = get_event_base_addr(event);
	data = be64_to_cpu(READ_ONCE(*addr));
	local64_set(&event->hw.prev_count, data);

	return data;
}

static void imc_event_update(struct perf_event *event)
{
	u64 counter_prev, counter_new, final_count;

	counter_prev = local64_read(&event->hw.prev_count);
	counter_new = imc_read_counter(event);
	final_count = counter_new - counter_prev;

	/* Update the delta to the event count */
	local64_add(final_count, &event->count);
}

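/*
 * The u64 subtraction above stays correct across a counter wrap.
 * Worked example: prev_count = 0xFFFFFFFFFFFFFFFE and a new snapshot
 * of 0x1 give 0x1 - 0xFFFFFFFFFFFFFFFE == 3 in modulo-2^64 arithmetic,
 * which is the true number of increments.
 */
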
static void imc_event_start(struct perf_event *event, int flags)
{
	/*
	 * In-Memory Counters are free flowing counters. HW or the microcode
	 * keeps adding to the counter offset in memory. To get an event
	 * counter value, we snapshot the value here and calculate the
	 * delta at a later point.
	 */
	imc_read_counter(event);
}

static void imc_event_stop(struct perf_event *event, int flags)
{
	/*
	 * Take a snapshot, calculate the delta and update
	 * the event counter values.
	 */
	imc_event_update(event);
}

static int imc_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		imc_event_start(event, flags);

	return 0;
}

static int thread_imc_event_add(struct perf_event *event, int flags)
{
	int core_id;
	struct imc_pmu_ref *ref;
	u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, smp_processor_id());

	if (flags & PERF_EF_START)
		imc_event_start(event, flags);

	if (!is_core_imc_mem_inited(smp_processor_id()))
		return -EINVAL;

	core_id = smp_processor_id() / threads_per_core;
	ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | THREAD_IMC_ENABLE;
	mtspr(SPRN_LDBAR, ldbar_value);

	/*
	 * imc pmus are enabled only when they are used.
	 * See if this is triggered for the first time.
	 * If yes, take the mutex lock and enable the counters.
	 * If not, just increment the count in the ref count struct.
	 */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;

	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			pr_err("thread-imc: Unable to start the counter for core %d\n",
			       core_id);
			return -EINVAL;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);
	return 0;
}

static void thread_imc_event_del(struct perf_event *event, int flags)
{
	int core_id;
	struct imc_pmu_ref *ref;

	core_id = smp_processor_id() / threads_per_core;
	ref = &core_imc_refc[core_id];
	if (!ref) {
		pr_debug("imc: Failed to get event reference count\n");
		return;
	}

	mutex_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			pr_err("thread-imc: Unable to stop the counters for core %d\n",
			       core_id);
			return;
		}
	} else if (ref->refc < 0) {
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);

	/* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));

	/*
	 * Take a snapshot, calculate the delta and update
	 * the event counter values.
	 */
	imc_event_update(event);
}

/*
 * Allocate a page of memory for each cpu, and load LDBAR with 0.
 */
static int trace_imc_mem_alloc(int cpu_id, int size)
{
	u64 *local_mem = per_cpu(trace_imc_mem, cpu_id);
	int phys_id = cpu_to_node(cpu_id), rc = 0;
	int core_id = (cpu_id / threads_per_core);

	if (!local_mem) {
		struct page *page;

		page = alloc_pages_node(phys_id,
					GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
					__GFP_NOWARN, get_order(size));
		if (!page)
			return -ENOMEM;
		local_mem = page_address(page);
		per_cpu(trace_imc_mem, cpu_id) = local_mem;

		/* Initialise the counters for trace mode */
		rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_TRACE, __pa((void *)local_mem),
					    get_hard_smp_processor_id(cpu_id));
		if (rc) {
			pr_info("IMC: opal init failed for trace imc\n");
			return rc;
		}
	}

	/* Init the mutex, if not already done */
	trace_imc_refc[core_id].id = core_id;
	mutex_init(&trace_imc_refc[core_id].lock);

	mtspr(SPRN_LDBAR, 0);
	return 0;
}

static int ppc_trace_imc_cpu_online(unsigned int cpu)
{
	return trace_imc_mem_alloc(cpu, trace_imc_mem_size);
}

static int ppc_trace_imc_cpu_offline(unsigned int cpu)
{
	/*
	 * No need to set bit 0 of LDBAR to zero, as
	 * it is set to zero for imc trace-mode.
	 *
	 * Reduce the refc if any trace-imc event was running
	 * on this cpu.
	 */
	mutex_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == IMC_DOMAIN_TRACE)
		imc_global_refc.refc--;
	mutex_unlock(&imc_global_refc.lock);

	return 0;
}

static int trace_imc_cpu_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
				 "perf/powerpc/imc_trace:online",
				 ppc_trace_imc_cpu_online,
				 ppc_trace_imc_cpu_offline);
}

static u64 get_trace_imc_event_base_addr(void)
{
	return (u64)per_cpu(trace_imc_mem, smp_processor_id());
}

/*
 * Function to parse the trace-imc data obtained
 * and to prepare the perf sample.
 */
static int trace_imc_prepare_sample(struct trace_imc_data *mem,
				    struct perf_sample_data *data,
				    u64 *prev_tb,
				    struct perf_event_header *header,
				    struct perf_event *event)
{
	/* Sanity checks for a valid record */
	if (be64_to_cpu(READ_ONCE(mem->tb1)) > *prev_tb)
		*prev_tb = be64_to_cpu(READ_ONCE(mem->tb1));
	else
		return -EINVAL;

	if ((be64_to_cpu(READ_ONCE(mem->tb1)) & IMC_TRACE_RECORD_TB1_MASK) !=
	    be64_to_cpu(READ_ONCE(mem->tb2)))
		return -EINVAL;

	/* Prepare perf sample */
	data->ip = be64_to_cpu(READ_ONCE(mem->ip));
	data->period = event->hw.last_period;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header) + event->header_size;
	header->misc = 0;

	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) {
		case 0:	/* MSR HV and PR are not set in the trace-record */
			header->misc |= PERF_RECORD_MISC_GUEST_KERNEL;
			break;
		case 1:	/* MSR HV is 0 and PR is 1 */
			header->misc |= PERF_RECORD_MISC_GUEST_USER;
			break;
		case 2:	/* MSR HV is 1 and PR is 0 */
			header->misc |= PERF_RECORD_MISC_KERNEL;
			break;
		case 3:	/* MSR HV is 1 and PR is 1 */
			header->misc |= PERF_RECORD_MISC_USER;
			break;
		default:
			pr_info("IMC: Unable to set the flag based on MSR bits\n");
			break;
		}
	} else {
		if (is_kernel_addr(data->ip))
			header->misc |= PERF_RECORD_MISC_KERNEL;
		else
			header->misc |= PERF_RECORD_MISC_USER;
	}
	perf_event_header__init_id(header, data, event);

	return 0;
}

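/*
 * Record-validity protocol used above: a record is consumed only if its
 * tb1 timebase is newer than that of the last record consumed (*prev_tb),
 * and the low bits of tb1 match tb2, which indicates the record was
 * completely written out by the hardware/microcode.
 */
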
static void dump_trace_imc_data(struct perf_event *event)
{
	struct trace_imc_data *mem;
	int i, ret;
	u64 prev_tb = 0;

	mem = (struct trace_imc_data *)get_trace_imc_event_base_addr();
	for (i = 0; i < (trace_imc_mem_size / sizeof(struct trace_imc_data));
		i++, mem++) {
		struct perf_sample_data data;
		struct perf_event_header header;

		ret = trace_imc_prepare_sample(mem, &data, &prev_tb, &header, event);
		if (ret) /* Exit, if not a valid record */
			break;
		else {
			/* If this is a valid record, create the sample */
			struct perf_output_handle handle;

			if (perf_output_begin(&handle, &data, event, header.size))
				return;

			perf_output_sample(&handle, &header, &data, event);
			perf_output_end(&handle);
		}
	}
}

static int trace_imc_event_add(struct perf_event *event, int flags)
{
	int core_id = smp_processor_id() / threads_per_core;
	struct imc_pmu_ref *ref = NULL;
	u64 local_mem, ldbar_value;

	/* Set the trace-imc bit in ldbar and load ldbar with the per-thread memory address */
	local_mem = get_trace_imc_event_base_addr();
	ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | TRACE_IMC_ENABLE;

	/* trace-imc reference count */
	if (trace_imc_refc)
		ref = &trace_imc_refc[core_id];
	if (!ref) {
		pr_debug("imc: Failed to get the event reference count\n");
		return -EINVAL;
	}

	mtspr(SPRN_LDBAR, ldbar_value);
	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
			return -EINVAL;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);
	return 0;
}

static void trace_imc_event_read(struct perf_event *event)
{
	return;
}

static void trace_imc_event_stop(struct perf_event *event, int flags)
{
	u64 local_mem = get_trace_imc_event_base_addr();

	dump_trace_imc_data(event);
	memset((void *)local_mem, 0, sizeof(u64));
}

static void trace_imc_event_start(struct perf_event *event, int flags)
{
	return;
}

static void trace_imc_event_del(struct perf_event *event, int flags)
{
	int core_id = smp_processor_id() / threads_per_core;
	struct imc_pmu_ref *ref = NULL;

	if (trace_imc_refc)
		ref = &trace_imc_refc[core_id];
	if (!ref) {
		pr_debug("imc: Failed to get event reference count\n");
		return;
	}

	mutex_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
			return;
		}
	} else if (ref->refc < 0) {
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);

	trace_imc_event_stop(event, flags);
}

static int trace_imc_event_init(struct perf_event *event)
{
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (!perfmon_capable())
		return -EACCES;

	/* Return if this is a counting event */
	if (event->attr.sample_period == 0)
		return -ENOENT;

	/*
	 * Take the global lock, and make sure
	 * no other thread is running any core/thread imc
	 * events.
	 */
	mutex_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
		/*
		 * No core/thread imc events are running in the
		 * system, so set the refc.id to trace-imc.
		 */
		imc_global_refc.id = IMC_DOMAIN_TRACE;
		imc_global_refc.refc++;
	} else {
		mutex_unlock(&imc_global_refc.lock);
		return -EBUSY;
	}
	mutex_unlock(&imc_global_refc.lock);

	event->hw.idx = -1;

	/*
	 * There can only be a single PMU for perf_hw_context events, which is
	 * assigned to the core PMU. Hence use "perf_sw_context" for trace-imc.
	 */
	event->pmu->task_ctx_nr = perf_sw_context;
	event->destroy = reset_global_refc;
	return 0;
}

/* update_pmu_ops : Populate the appropriate operations for "pmu" */
static int update_pmu_ops(struct imc_pmu *pmu)
{
	pmu->pmu.task_ctx_nr = perf_invalid_context;
	pmu->pmu.add = imc_event_add;
	pmu->pmu.del = imc_event_stop;
	pmu->pmu.start = imc_event_start;
	pmu->pmu.stop = imc_event_stop;
	pmu->pmu.read = imc_event_update;
	pmu->pmu.attr_groups = pmu->attr_groups;
	pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
	pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group;

	switch (pmu->domain) {
	case IMC_DOMAIN_NEST:
		pmu->pmu.event_init = nest_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_CORE:
		pmu->pmu.event_init = core_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_THREAD:
		pmu->pmu.event_init = thread_imc_event_init;
		pmu->pmu.add = thread_imc_event_add;
		pmu->pmu.del = thread_imc_event_del;
		pmu->pmu.start_txn = thread_imc_pmu_start_txn;
		pmu->pmu.cancel_txn = thread_imc_pmu_cancel_txn;
		pmu->pmu.commit_txn = thread_imc_pmu_commit_txn;
		break;
	case IMC_DOMAIN_TRACE:
		pmu->pmu.event_init = trace_imc_event_init;
		pmu->pmu.add = trace_imc_event_add;
		pmu->pmu.del = trace_imc_event_del;
		pmu->pmu.start = trace_imc_event_start;
		pmu->pmu.stop = trace_imc_event_stop;
		pmu->pmu.read = trace_imc_event_read;
		pmu->attr_groups[IMC_FORMAT_ATTR] = &trace_imc_format_group;
		break;
	default:
		break;
	}

	return 0;
}

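/*
 * Note on contexts: nest and core events keep the default
 * perf_invalid_context set above (they are always cpu-bound and never
 * follow a task), while the thread and trace event_init callbacks
 * switch the pmu to perf_sw_context so that per-task monitoring works.
 */
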
/* init_nest_pmu_ref: Initialize the imc_pmu_ref struct for all the nodes */
static int init_nest_pmu_ref(void)
{
	int nid, i, cpu;

	nest_imc_refc = kcalloc(num_possible_nodes(), sizeof(*nest_imc_refc),
				GFP_KERNEL);

	if (!nest_imc_refc)
		return -ENOMEM;

	i = 0;
	for_each_node(nid) {
		/*
		 * Mutex lock to avoid races while tracking the number of
		 * sessions using the chip's nest pmu units.
		 */
		mutex_init(&nest_imc_refc[i].lock);

		/*
		 * Loop to init the "id" with the node_id. Variable "i" is
		 * initialized to 0 and is used as the index into the array.
		 * "i" will not go off the end of the array since
		 * "for_each_node" loops over the possible nodes only.
		 */
		nest_imc_refc[i++].id = nid;
	}

	/*
	 * Loop to init the per_cpu "local_nest_imc_refc" with the proper
	 * "nest_imc_refc" index. This makes get_nest_pmu_ref() a lot simpler.
	 */
	for_each_possible_cpu(cpu) {
		nid = cpu_to_node(cpu);
		for (i = 0; i < num_possible_nodes(); i++) {
			if (nest_imc_refc[i].id == nid) {
				per_cpu(local_nest_imc_refc, cpu) = &nest_imc_refc[i];
				break;
			}
		}
	}
	return 0;
}

static void cleanup_all_core_imc_memory(void)
{
	int i, nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
	struct imc_mem_info *ptr = core_imc_pmu->mem_info;
	int size = core_imc_pmu->counter_mem_size;

	/* mem_info will never be NULL */
	for (i = 0; i < nr_cores; i++) {
		if (ptr[i].vbase)
			free_pages((u64)ptr[i].vbase, get_order(size));
	}

	kfree(ptr);
	kfree(core_imc_refc);
}

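/*
 * Example of the nr_cores computation above: on a system with
 * num_possible_cpus() == 128 and threads_per_core == 8,
 * DIV_ROUND_UP(128, 8) == 16, so sixteen per-core counter buffers are
 * checked and freed.
 */
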
static void thread_imc_ldbar_disable(void *dummy)
{
	/*
	 * Clear the enable bit of LDBAR (bit 0 in PowerPC bit numbering,
	 * i.e. the most significant bit, 1UL << 63) to disable thread-imc
	 * updates to memory.
	 */
	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
}

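/*
 * Example of the masking above: if LDBAR held 0x8000000012340000
 * (enable bit set plus a buffer address), then
 * 0x8000000012340000 & ~(1UL << 63) == 0x0000000012340000; only the
 * enable bit changes, the rest of the register is preserved.
 */
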
void thread_imc_disable(void)
{
	/* Clear the LDBAR enable bit on every online cpu and wait. */
	on_each_cpu(thread_imc_ldbar_disable, NULL, 1);
}

static void cleanup_all_thread_imc_memory(void)
{
	int i, order = get_order(thread_imc_mem_size);

	for_each_online_cpu(i) {
		if (per_cpu(thread_imc_mem, i))
			free_pages((u64)per_cpu(thread_imc_mem, i), order);
	}
}

static void cleanup_all_trace_imc_memory(void)
{
	int i, order = get_order(trace_imc_mem_size);

	for_each_online_cpu(i) {
		if (per_cpu(trace_imc_mem, i))
			free_pages((u64)per_cpu(trace_imc_mem, i), order);
	}
	kfree(trace_imc_refc);
}

/* Function to free the attr_groups which are dynamically allocated */
static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
{
	if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
		kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
}

/*
 * Common function to unregister the cpu hotplug callback and
 * free the memory.
 * TODO: Need to handle pmu unregistering, which will be
 * done in a follow-up series.
 */
static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
{
	if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
		mutex_lock(&nest_init_lock);
		if (nest_pmus == 1) {
			cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
			kfree(nest_imc_refc);
			kfree(per_nest_pmu_arr);
			per_nest_pmu_arr = NULL;
		}

		if (nest_pmus > 0)
			nest_pmus--;
		mutex_unlock(&nest_init_lock);
	}

	/* Free core_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_CORE) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE);
		cleanup_all_core_imc_memory();
	}

	/* Free thread_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_THREAD) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE);
		cleanup_all_thread_imc_memory();
	}

	/* Free trace_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_TRACE) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE);
		cleanup_all_trace_imc_memory();
	}
}

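/*
 * Example of the nest teardown above: with three nest pmus registered
 * (nest_pmus == 3), the first two calls only decrement the count
 * (3 -> 2 -> 1); the third call sees nest_pmus == 1, removes the
 * hotplug state and frees the shared nest_imc_refc and
 * per_nest_pmu_arr structures exactly once.
 */
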
/*
 * Function to unregister thread-imc if core-imc
 * is not registered.
 */
void unregister_thread_imc(void)
{
	imc_common_cpuhp_mem_free(thread_imc_pmu);
	imc_common_mem_free(thread_imc_pmu);
	perf_pmu_unregister(&thread_imc_pmu->pmu);
}

/*
 * imc_mem_init : Function to support memory allocation for the imc pmus
 * (nest, core, thread and trace).
 */
static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
								int pmu_index)
{
	const char *s;
	int nr_cores, cpu, res = -ENOMEM;

	if (of_property_read_string(parent, "name", &s))
		return -ENODEV;

	switch (pmu_ptr->domain) {
	case IMC_DOMAIN_NEST:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s);
		if (!pmu_ptr->pmu.name)
			goto err;

		/* Needed for hotplug/migration */
		if (!per_nest_pmu_arr) {
			per_nest_pmu_arr = kcalloc(get_max_nest_dev() + 1,
						sizeof(struct imc_pmu *),
						GFP_KERNEL);
			if (!per_nest_pmu_arr)
				goto err;
		}
		per_nest_pmu_arr[pmu_index] = pmu_ptr;
		break;
	case IMC_DOMAIN_CORE:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			goto err;

		nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
		pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
								GFP_KERNEL);

		if (!pmu_ptr->mem_info)
			goto err;

		core_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
								GFP_KERNEL);

		if (!core_imc_refc) {
			kfree(pmu_ptr->mem_info);
			goto err;
		}

		core_imc_pmu = pmu_ptr;
		break;
	case IMC_DOMAIN_THREAD:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			goto err;

		thread_imc_mem_size = pmu_ptr->counter_mem_size;
		for_each_online_cpu(cpu) {
			res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size);
			if (res) {
				cleanup_all_thread_imc_memory();
				goto err;
			}
		}

		thread_imc_pmu = pmu_ptr;
		break;
	case IMC_DOMAIN_TRACE:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			goto err;

		nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
		trace_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
								GFP_KERNEL);
		if (!trace_imc_refc)
			goto err;

		trace_imc_mem_size = pmu_ptr->counter_mem_size;
		for_each_online_cpu(cpu) {
			res = trace_imc_mem_alloc(cpu, trace_imc_mem_size);
			if (res) {
				cleanup_all_trace_imc_memory();
				goto err;
			}
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
err:
	return res;
}

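/*
 * Example of the name construction above (unit names illustrative):
 * a nest unit node whose "name" property is "mcs01" registers as
 * "nest_mcs01_imc", while for the other domains a node named "core"
 * would register as "core_imc".
 */
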
/*
 * init_imc_pmu : Setup and register the IMC pmu device.
 *
 * @parent:	Device tree unit node
 * @pmu_ptr:	memory allocated for this pmu
 * @pmu_idx:	index of this pmu among the registered nest pmus
 *
 * init_imc_pmu() sets up the pmu cpumask and registers a cpu hotplug
 * callback. It handles the failure cases and frees memory accordingly.
 */
int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_idx)
{
	int ret;

	ret = imc_mem_init(pmu_ptr, parent, pmu_idx);
	if (ret)
		goto err_free_mem;

	switch (pmu_ptr->domain) {
	case IMC_DOMAIN_NEST:
		/*
		 * Nest imc pmus need only one cpu per chip. We initialize
		 * the cpumask for the first nest imc pmu and use the same
		 * for the rest. To handle the cpuhotplug callback
		 * unregister, we track the number of nest pmus in
		 * "nest_pmus".
		 */
		mutex_lock(&nest_init_lock);
		if (nest_pmus == 0) {
			ret = init_nest_pmu_ref();
			if (ret) {
				mutex_unlock(&nest_init_lock);
				kfree(per_nest_pmu_arr);
				per_nest_pmu_arr = NULL;
				goto err_free_mem;
			}
			/* Register for cpu hotplug notification. */
			ret = nest_pmu_cpumask_init();
			if (ret) {
				mutex_unlock(&nest_init_lock);
				kfree(nest_imc_refc);
				kfree(per_nest_pmu_arr);
				per_nest_pmu_arr = NULL;
				goto err_free_mem;
			}
		}
		nest_pmus++;
		mutex_unlock(&nest_init_lock);
		break;
	case IMC_DOMAIN_CORE:
		ret = core_imc_pmu_cpumask_init();
		if (ret) {
			cleanup_all_core_imc_memory();
			goto err_free_mem;
		}

		break;
	case IMC_DOMAIN_THREAD:
		ret = thread_imc_cpu_init();
		if (ret) {
			cleanup_all_thread_imc_memory();
			goto err_free_mem;
		}

		break;
	case IMC_DOMAIN_TRACE:
		ret = trace_imc_cpu_init();
		if (ret) {
			cleanup_all_trace_imc_memory();
			goto err_free_mem;
		}

		break;
	default:
		return -EINVAL;	/* Unknown domain */
	}

	ret = update_events_in_group(parent, pmu_ptr);
	if (ret)
		goto err_free_cpuhp_mem;

	ret = update_pmu_ops(pmu_ptr);
	if (ret)
		goto err_free_cpuhp_mem;

	ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1);
	if (ret)
		goto err_free_cpuhp_mem;

	pr_debug("%s performance monitor hardware support registered\n",
							pmu_ptr->pmu.name);

	return 0;

err_free_cpuhp_mem:
	/* Falls through to err_free_mem: both cleanups run on this path. */
	imc_common_cpuhp_mem_free(pmu_ptr);
err_free_mem:
	imc_common_mem_free(pmu_ptr);
	return ret;
}
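
/*
 * Sketch of a caller (illustrative only; the real probe code lives in
 * the platform layer and fills in pmu_ptr from the device tree; "np",
 * "pmu_idx" and the SZ_64K size are hypothetical here):
 *
 *	struct imc_pmu *pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
 *
 *	if (!pmu)
 *		return -ENOMEM;
 *	pmu->domain = IMC_DOMAIN_NEST;
 *	pmu->counter_mem_size = SZ_64K;	// size read from the device tree
 *	if (init_imc_pmu(np, pmu, pmu_idx))
 *		pr_err("IMC PMU registration failed\n");
 */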