/*
 * Hypervisor supplied "24x7" performance counter support
 *
 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
 * Copyright 2014 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "hv-24x7: " fmt

#include <linux/perf_event.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/firmware.h>
#include <asm/hvcall.h>

#include <linux/byteorder/generic.h>

#include "hv-24x7-catalog.h"
#include "hv-common.h"
static const char *event_domain_suffix(unsigned domain)
{
	switch (domain) {
#define DOMAIN(n, v, x, c)		\
	case HV_PERF_DOMAIN_##n:	\
		return "__" #n;
#include "hv-24x7-domains.h"
#undef DOMAIN
	default:
		WARN(1, "unknown domain %d\n", domain);
		return "__UNKNOWN_DOMAIN_SUFFIX";
	}
}
static bool domain_is_valid(unsigned domain)
{
	switch (domain) {
#define DOMAIN(n, v, x, c)		\
	case HV_PERF_DOMAIN_##n:	\
		/* fall through */
#include "hv-24x7-domains.h"
#undef DOMAIN
		return true;
	default:
		return false;
	}
}
static bool is_physical_domain(unsigned domain)
{
	switch (domain) {
#define DOMAIN(n, v, x, c)		\
	case HV_PERF_DOMAIN_##n:	\
		return c;
#include "hv-24x7-domains.h"
#undef DOMAIN
	default:
		return false;
	}
}
static bool catalog_entry_domain_is_valid(unsigned domain)
{
	return is_physical_domain(domain);
}
/*
 * TODO: Merging events:
 * - Think of the hcall as an interface to a 4d array of counters:
 *   - x = domains
 *   - y = indexes in the domain (core, chip, vcpu, node, etc)
 *   - z = offset into the counter space
 *   - w = lpars (guest vms, "logical partitions")
 * - A single request is: x,y,y_last,z,z_last,w,w_last
 *   - this means we can retrieve a rectangle of counters in y,z for a single x.
 *
 * - Things to consider (ignoring w):
 *   - input  cost_per_request = 16
 *   - output cost_per_result(ys, zs) = 8 + 8 * ys + ys * zs
 *   - limited number of requests per hcall (must fit into 4K bytes)
 *     - 4k = 16 [buffer header] - 16 [request size] * request_count
 *     - 255 requests per hcall
 *   - sometimes it will be more efficient to read extra data and discard
 */
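/*
 * A worked example of the cost model above: one merged request covering
 * ys = 4 indexes and zs = 2 offsets costs 16 bytes of input and
 * 8 + 8 * 4 + 4 * 2 = 48 of output, while the same counters read as
 * eight single-counter requests (ys = zs = 1) cost 8 * 16 = 128 bytes
 * of input and 8 * (8 + 8 + 1) = 136 of output.
 */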
/*
 * Example usage:
 *	perf stat -e 'hv_24x7/domain=2,offset=8,vcpu=0,lpar=0xffffffff/'
 */

/* u3 0-6, one of HV_24X7_PERF_DOMAIN */
EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(core, config, 16, 31);
EVENT_DEFINE_RANGE_FORMAT(vcpu, config, 16, 31);
/* u32, see "data_offset" */
EVENT_DEFINE_RANGE_FORMAT(offset, config, 32, 63);
/* u16 */
EVENT_DEFINE_RANGE_FORMAT(lpar, config1, 0, 15);

EVENT_DEFINE_RANGE(reserved1, config, 4, 15);
EVENT_DEFINE_RANGE(reserved2, config1, 16, 63);
EVENT_DEFINE_RANGE(reserved3, config2, 0, 63);
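/*
 * For illustration (a sketch, not from the catalog): with the ranges
 * above, the example event packs domain=2 into config bits 0-3 and
 * offset=8 into config bits 32-63, i.e.
 * event->attr.config == 0x0000000800000002, while the lpar value travels
 * separately in config1 bits 0-15. All reserved ranges must stay zero;
 * h_24x7_event_init() rejects anything else.
 */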
static struct attribute *format_attrs[] = {
	&format_attr_domain.attr,
	&format_attr_offset.attr,
	&format_attr_core.attr,
	&format_attr_vcpu.attr,
	&format_attr_lpar.attr,
	NULL,
};

static struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};

static struct attribute_group event_group = {
	.name = "events",
	/* .attrs is set in init */
};

static struct attribute_group event_desc_group = {
	.name = "event_descs",
	/* .attrs is set in init */
};

static struct attribute_group event_long_desc_group = {
	.name = "event_long_descs",
	/* .attrs is set in init */
};

static struct kmem_cache *hv_page_cache;
DEFINE_PER_CPU(int, hv_24x7_txn_flags);
DEFINE_PER_CPU(int, hv_24x7_txn_err);

struct hv_24x7_hw {
	struct perf_event *events[255];
};

DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw);

/*
 * request_buffer and result_buffer are not required to be 4k aligned,
 * but are not allowed to cross any 4k boundary. Aligning them to 4k is
 * the simplest way to ensure that.
 */
#define H24x7_DATA_BUFFER_SIZE	4096
DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
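/*
 * To illustrate the constraint above: any 4096-byte buffer that does not
 * start on a 4k boundary necessarily crosses one, so page-aligning the
 * per-cpu buffers is both necessary and sufficient here.
 */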
static char *event_name(struct hv_24x7_event_data *ev, int *len)
{
	*len = be16_to_cpu(ev->event_name_len) - 2;
	return (char *)ev->remainder;
}

static char *event_desc(struct hv_24x7_event_data *ev, int *len)
{
	unsigned nl = be16_to_cpu(ev->event_name_len);
	__be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);

	*len = be16_to_cpu(*desc_len) - 2;
	return (char *)ev->remainder + nl;
}

static char *event_long_desc(struct hv_24x7_event_data *ev, int *len)
{
	unsigned nl = be16_to_cpu(ev->event_name_len);
	__be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
	unsigned desc_len = be16_to_cpu(*desc_len_);
	__be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);

	*len = be16_to_cpu(*long_desc_len) - 2;
	return (char *)ev->remainder + nl + desc_len;
}
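/*
 * Sketch of the variable-length portion walked by the three helpers above
 * (each 16-bit length counts its own string plus the 2-byte length field
 * that follows it):
 *
 *	remainder: [name (nl-2)][dl:2][desc (dl-2)][ldl:2][long desc (ldl-2)]
 *	           ^event_name()      ^event_desc()       ^event_long_desc()
 */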
static bool event_fixed_portion_is_within(struct hv_24x7_event_data *ev,
					  void *end)
{
	void *start = ev;

	return (start + offsetof(struct hv_24x7_event_data, remainder)) < end;
}
/*
 * Things we don't check:
 *  - padding for desc, name, and long/detailed desc is required to be '\0'
 *    bytes
 *
 * Return NULL if we pass end,
 * Otherwise return the address of the byte just following the event.
 */
static void *event_end(struct hv_24x7_event_data *ev, void *end)
{
	void *start = ev;
	__be16 *dl_, *ldl_;
	unsigned dl, ldl;
	unsigned nl = be16_to_cpu(ev->event_name_len);

	if (nl < 2) {
		pr_debug("%s: name length too short: %d", __func__, nl);
		return NULL;
	}

	if (start + nl > end) {
		pr_debug("%s: start=%p + nl=%u > end=%p",
			 __func__, start, nl, end);
		return NULL;
	}

	dl_ = (__be16 *)(ev->remainder + nl - 2);
	if (!IS_ALIGNED((uintptr_t)dl_, 2))
		pr_warn("desc len not aligned %p", dl_);
	dl = be16_to_cpu(*dl_);
	if (dl < 2) {
		pr_debug("%s: desc len too short: %d", __func__, dl);
		return NULL;
	}

	if (start + nl + dl > end) {
		pr_debug("%s: (start=%p + nl=%u + dl=%u)=%p > end=%p",
			 __func__, start, nl, dl, start + nl + dl, end);
		return NULL;
	}

	ldl_ = (__be16 *)(ev->remainder + nl + dl - 2);
	if (!IS_ALIGNED((uintptr_t)ldl_, 2))
		pr_warn("long desc len not aligned %p", ldl_);
	ldl = be16_to_cpu(*ldl_);
	if (ldl < 2) {
		pr_debug("%s: long desc len too short (ldl=%u)",
			 __func__, ldl);
		return NULL;
	}

	if (start + nl + dl + ldl > end) {
		pr_debug("%s: start=%p + nl=%u + dl=%u + ldl=%u > end=%p",
			 __func__, start, nl, dl, ldl, end);
		return NULL;
	}

	return start + nl + dl + ldl;
}
static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
					      unsigned long version,
					      unsigned long index)
{
	pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
		 phys_4096, version, index);

	WARN_ON(!IS_ALIGNED(phys_4096, 4096));

	return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
				  phys_4096, version, index);
}
static unsigned long h_get_24x7_catalog_page(char page[],
					     u64 version, u32 index)
{
	return h_get_24x7_catalog_page_(virt_to_phys(page),
					version, index);
}
static unsigned core_domains[] = {
	HV_PERF_DOMAIN_PHYS_CORE,
	HV_PERF_DOMAIN_VCPU_HOME_CORE,
	HV_PERF_DOMAIN_VCPU_HOME_CHIP,
	HV_PERF_DOMAIN_VCPU_HOME_NODE,
	HV_PERF_DOMAIN_VCPU_REMOTE_NODE,
};

/* chip event data always yields a single event, core yields multiple */
#define MAX_EVENTS_PER_EVENT_DATA ARRAY_SIZE(core_domains)
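/*
 * For example (assuming the "__<DOMAIN>" suffixes generated from
 * hv-24x7-domains.h): a physical-core catalog entry is exposed once per
 * entry of core_domains[] above, as <event>__PHYS_CORE,
 * <event>__VCPU_HOME_CORE, etc., while a chip entry yields one attribute.
 */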
static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain)
{
	const char *sindex;
	const char *lpar;

	if (is_physical_domain(domain)) {
		lpar = "0x0";
		sindex = "core";
	} else {
		lpar = "?";
		sindex = "vcpu";
	}

	return kasprintf(GFP_KERNEL,
			"domain=0x%x,offset=0x%x,%s=?,lpar=%s",
			domain,
			be16_to_cpu(event->event_counter_offs) +
				be16_to_cpu(event->event_group_record_offs),
			sindex,
			lpar);
}
/* Avoid trusting fw to NUL terminate strings */
static char *memdup_to_str(char *maybe_str, int max_len, gfp_t gfp)
{
	return kasprintf(gfp, "%.*s", max_len, maybe_str);
}
static ssize_t device_show_string(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *d;

	d = container_of(attr, struct dev_ext_attribute, attr);

	return sprintf(buf, "%s\n", (char *)d->var);
}
static struct attribute *device_str_attr_create_(char *name, char *str)
{
	struct dev_ext_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL);

	if (!attr)
		return NULL;

	sysfs_attr_init(&attr->attr.attr);

	attr->var = str;
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = device_show_string;

	return &attr->attr.attr;
}
static struct attribute *device_str_attr_create(char *name, int name_max,
						int name_nonce,
						char *str, size_t str_max)
{
	char *n;
	char *s = memdup_to_str(str, str_max, GFP_KERNEL);
	struct attribute *a;

	if (!s)
		return NULL;

	if (!name_nonce)
		n = kasprintf(GFP_KERNEL, "%.*s", name_max, name);
	else
		n = kasprintf(GFP_KERNEL, "%.*s__%d", name_max, name,
			      name_nonce);
	if (!n)
		goto out_s;

	a = device_str_attr_create_(n, s);
	if (!a)
		goto out_n;

	return a;

out_n:
	kfree(n);
out_s:
	kfree(s);
	return NULL;
}
static void device_str_attr_destroy(struct attribute *attr)
{
	struct dev_ext_attribute *d;

	d = container_of(attr, struct dev_ext_attribute, attr.attr);
	kfree(d->var);
	kfree(d->attr.attr.name);
	kfree(d);
}
static struct attribute *event_to_attr(unsigned ix,
				       struct hv_24x7_event_data *event,
				       unsigned domain,
				       int nonce)
{
	int event_name_len;
	char *ev_name, *a_ev_name, *val;
	const char *ev_suffix;
	struct attribute *attr;

	if (!domain_is_valid(domain)) {
		pr_warn("catalog event %u has invalid domain %u\n",
				ix, domain);
		return NULL;
	}

	val = event_fmt(event, domain);
	if (!val)
		return NULL;

	ev_suffix = event_domain_suffix(domain);
	ev_name = event_name(event, &event_name_len);
	if (!nonce)
		a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s",
				(int)event_name_len, ev_name, ev_suffix);
	else
		a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s__%d",
				(int)event_name_len, ev_name, ev_suffix, nonce);

	if (!a_ev_name)
		goto out_val;

	attr = device_str_attr_create_(a_ev_name, val);
	if (!attr)
		goto out_name;

	return attr;

out_name:
	kfree(a_ev_name);
out_val:
	kfree(val);
	return NULL;
}
static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event,
					    int nonce)
{
	int nl, dl;
	char *name = event_name(event, &nl);
	char *desc = event_desc(event, &dl);

	/* If there isn't a description, don't create the sysfs file */
	if (!dl)
		return NULL;

	return device_str_attr_create(name, nl, nonce, desc, dl);
}
static struct attribute *
event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce)
{
	int nl, dl;
	char *name = event_name(event, &nl);
	char *desc = event_long_desc(event, &dl);

	/* If there isn't a description, don't create the sysfs file */
	if (!dl)
		return NULL;

	return device_str_attr_create(name, nl, nonce, desc, dl);
}
static ssize_t event_data_to_attrs(unsigned ix, struct attribute **attrs,
				   struct hv_24x7_event_data *event, int nonce)
{
	unsigned i;

	switch (event->domain) {
	case HV_PERF_DOMAIN_PHYS_CHIP:
		*attrs = event_to_attr(ix, event, event->domain, nonce);
		return 1;
	case HV_PERF_DOMAIN_PHYS_CORE:
		for (i = 0; i < ARRAY_SIZE(core_domains); i++) {
			attrs[i] = event_to_attr(ix, event, core_domains[i],
						 nonce);
			if (!attrs[i]) {
				pr_warn("catalog event %u: individual attr %u creation failure\n",
					ix, i);
				for (; i; i--)
					device_str_attr_destroy(attrs[i - 1]);
				return -1;
			}
		}
		return i;
	default:
		pr_warn("catalog event %u: domain %u is not allowed in the catalog\n",
			ix, event->domain);
		return -1;
	}
}
static size_t event_to_attr_ct(struct hv_24x7_event_data *event)
{
	switch (event->domain) {
	case HV_PERF_DOMAIN_PHYS_CHIP:
		return 1;
	case HV_PERF_DOMAIN_PHYS_CORE:
		return ARRAY_SIZE(core_domains);
	default:
		return 0;
	}
}
struct event_uniq {
	struct rb_node node;
	const char *name;
	int nl;
	unsigned domain;
	int ct;
};

static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
{
	if (s1 < s2)
		return 1;
	if (s1 > s2)
		return -1;

	return memcmp(d1, d2, s1);
}
static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
		       size_t s2, unsigned d2)
{
	int r = memord(v1, s1, v2, s2);

	if (r)
		return r;
	if (d1 > d2)
		return 1;
	if (d2 > d1)
		return -1;
	return 0;
}
static int event_uniq_add(struct rb_root *root, const char *name, int nl,
			  unsigned domain)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct event_uniq *data;

	/* Figure out where to put new node */
	while (*new) {
		struct event_uniq *it;
		int result;

		it = container_of(*new, struct event_uniq, node);
		result = ev_uniq_ord(name, nl, domain, it->name, it->nl,
				     it->domain);

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else if (result > 0)
			new = &((*new)->rb_right);
		else {
			it->ct++;
			pr_info("found a duplicate event %.*s, ct=%u\n", nl,
				name, it->ct);
			return it->ct;
		}
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	*data = (struct event_uniq) {
		.name = name,
		.nl = nl,
		.ct = 0,
		.domain = domain,
	};

	/* Add new node and rebalance tree. */
	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return 0;
}
static void event_uniq_destroy(struct rb_root *root)
{
	/*
	 * the strings we point to are in the giant block of memory filled by
	 * the catalog, and are freed separately.
	 */
	struct event_uniq *pos, *n;

	rbtree_postorder_for_each_entry_safe(pos, n, root, node)
		kfree(pos);
}
/*
 * Ensure the event structure's sizes are self-consistent and don't cause
 * us to read outside of the event.
 *
 * On success, return the event length in bytes.
 * Otherwise, return -1 (and print as appropriate).
 */
static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
					  size_t event_idx,
					  size_t event_data_bytes,
					  size_t event_entry_count,
					  size_t offset, void *end)
{
	ssize_t ev_len;
	void *ev_end, *calc_ev_end;

	if (offset >= event_data_bytes)
		return -1;

	if (event_idx >= event_entry_count) {
		pr_devel("catalog event data has %zu bytes of padding after last event\n",
			 event_data_bytes - offset);
		return -1;
	}

	if (!event_fixed_portion_is_within(event, end)) {
		pr_warn("event %zu fixed portion is not within range\n",
			event_idx);
		return -1;
	}

	ev_len = be16_to_cpu(event->length);

	if (ev_len % 16)
		pr_info("event %zu has length %zu not divisible by 16: event=%pK\n",
			event_idx, ev_len, event);

	ev_end = (__u8 *)event + ev_len;
	if (ev_end > end) {
		pr_warn("event %zu has .length=%zu, ends after buffer end: ev_end=%pK > end=%pK, offset=%zu\n",
			event_idx, ev_len, ev_end, end,
			offset);
		return -1;
	}

	calc_ev_end = event_end(event, end);
	if (!calc_ev_end) {
		pr_warn("event %zu has a calculated length which exceeds buffer length %zu: event=%pK end=%pK, offset=%zu\n",
			event_idx, event_data_bytes, event, end,
			offset);
		return -1;
	}

	if (calc_ev_end > ev_end) {
		pr_warn("event %zu exceeds its own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n",
			event_idx, event, ev_end, offset, calc_ev_end);
		return -1;
	}

	return ev_len;
}
#define MAX_4K (SIZE_MAX / 4096)
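/*
 * Catalog sizes are counted in 4096-byte pages; any page count above
 * SIZE_MAX / 4096 would overflow size_t when converted to bytes, so
 * MAX_4K is the largest page count we can safely handle.
 */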
static int create_events_from_catalog(struct attribute ***events_,
				      struct attribute ***event_descs_,
				      struct attribute ***event_long_descs_)
{
	unsigned long hret;
	size_t catalog_len, catalog_page_len, event_entry_count,
	       event_data_len, event_data_offs,
	       event_data_bytes, junk_events, event_idx, event_attr_ct, i,
	       attr_max, event_idx_last, desc_ct, long_desc_ct;
	ssize_t ct, ev_len;
	uint32_t catalog_version_num;
	struct attribute **events, **event_descs, **event_long_descs;
	struct hv_24x7_catalog_page_0 *page_0 =
		kmem_cache_alloc(hv_page_cache, GFP_KERNEL);
	void *page = page_0;
	void *event_data, *end;
	struct hv_24x7_event_data *event;
	struct rb_root ev_uniq = RB_ROOT;
	int ret = 0;

	if (!page) {
		ret = -ENOMEM;
		goto e_out;
	}

	hret = h_get_24x7_catalog_page(page, 0, 0);
	if (hret) {
		ret = -EIO;
		goto e_free;
	}

	catalog_version_num = be64_to_cpu(page_0->version);
	catalog_page_len = be32_to_cpu(page_0->length);

	if (MAX_4K < catalog_page_len) {
		pr_err("invalid page count: %zu\n", catalog_page_len);
		ret = -EIO;
		goto e_free;
	}

	catalog_len = catalog_page_len * 4096;

	event_entry_count = be16_to_cpu(page_0->event_entry_count);
	event_data_offs   = be16_to_cpu(page_0->event_data_offs);
	event_data_len    = be16_to_cpu(page_0->event_data_len);

	pr_devel("cv %zu cl %zu eec %zu edo %zu edl %zu\n",
		 (size_t)catalog_version_num, catalog_len,
		 event_entry_count, event_data_offs, event_data_len);

	if ((MAX_4K < event_data_len)
			|| (MAX_4K < event_data_offs)
			|| (MAX_4K - event_data_offs < event_data_len)) {
		pr_err("invalid event data offs %zu and/or len %zu\n",
		       event_data_offs, event_data_len);
		ret = -EIO;
		goto e_free;
	}

	if ((event_data_offs + event_data_len) > catalog_page_len) {
		pr_err("event data %zu-%zu does not fit inside catalog 0-%zu\n",
		       event_data_offs,
		       event_data_offs + event_data_len,
		       catalog_page_len);
		ret = -EIO;
		goto e_free;
	}

	if (SIZE_MAX / MAX_EVENTS_PER_EVENT_DATA - 1 < event_entry_count) {
		pr_err("event_entry_count %zu is invalid\n",
		       event_entry_count);
		ret = -EIO;
		goto e_free;
	}

	event_data_bytes = event_data_len * 4096;

	/*
	 * event data can span several pages, events can cross between these
	 * pages. Use vmalloc to make this easier.
	 */
	event_data = vmalloc(event_data_bytes);
	if (!event_data) {
		pr_err("could not allocate event data\n");
		ret = -ENOMEM;
		goto e_free;
	}

	end = event_data + event_data_bytes;

	/*
	 * using vmalloc_to_phys() like this only works if PAGE_SIZE is
	 * divisible by 4096
	 */
	BUILD_BUG_ON(PAGE_SIZE % 4096);

	for (i = 0; i < event_data_len; i++) {
		hret = h_get_24x7_catalog_page_(
				vmalloc_to_phys(event_data + i * 4096),
				catalog_version_num,
				i + event_data_offs);
		if (hret) {
			pr_err("failed to get event data in page %zu\n",
			       i + event_data_offs);
			ret = -EIO;
			goto e_event_data;
		}
	}

	/*
	 * scan the catalog to determine the number of attributes we need, and
	 * verify it at the same time.
	 */
	for (junk_events = 0, event = event_data, event_idx = 0, attr_max = 0;
	     ;
	     event_idx++, event = (void *)event + ev_len) {
		size_t offset = (void *)event - (void *)event_data;
		char *name;
		int nl;

		ev_len = catalog_event_len_validate(event, event_idx,
						    event_data_bytes,
						    event_entry_count,
						    offset, end);
		if (ev_len < 0)
			break;

		name = event_name(event, &nl);

		if (event->event_group_record_len == 0) {
			pr_devel("invalid event %zu (%.*s): group_record_len == 0, skipping\n",
				 event_idx, nl, name);
			junk_events++;
			continue;
		}

		if (!catalog_entry_domain_is_valid(event->domain)) {
			pr_info("event %zu (%.*s) has invalid domain %d\n",
				event_idx, nl, name, event->domain);
			junk_events++;
			continue;
		}

		attr_max += event_to_attr_ct(event);
	}

	event_idx_last = event_idx;
	if (event_idx_last != event_entry_count)
		pr_warn("event buffer ended before listed # of events were parsed (got %zu, wanted %zu, junk %zu)\n",
			event_idx_last, event_entry_count, junk_events);

	events = kmalloc_array(attr_max + 1, sizeof(*events), GFP_KERNEL);
	if (!events) {
		ret = -ENOMEM;
		goto e_event_data;
	}

	event_descs = kmalloc_array(event_idx + 1, sizeof(*event_descs),
				    GFP_KERNEL);
	if (!event_descs) {
		ret = -ENOMEM;
		goto e_event_attrs;
	}

	event_long_descs = kmalloc_array(event_idx + 1,
					 sizeof(*event_long_descs), GFP_KERNEL);
	if (!event_long_descs) {
		ret = -ENOMEM;
		goto e_event_descs;
	}

	/* Iterate over the catalog filling in the attribute vector */
	for (junk_events = 0, event_attr_ct = 0, desc_ct = 0, long_desc_ct = 0,
				event = event_data, event_idx = 0;
			event_idx < event_idx_last;
			event_idx++, ev_len = be16_to_cpu(event->length),
				event = (void *)event + ev_len) {
		char *name;
		int nl;
		int nonce;

		/*
		 * these are the only "bad" events that are intermixed and that
		 * we can ignore without issue. make sure to skip them here
		 */
		if (event->event_group_record_len == 0)
			continue;
		if (!catalog_entry_domain_is_valid(event->domain))
			continue;

		name  = event_name(event, &nl);
		nonce = event_uniq_add(&ev_uniq, name, nl, event->domain);
		ct    = event_data_to_attrs(event_idx, events + event_attr_ct,
					    event, nonce);
		if (ct <= 0) {
			pr_warn("event %zu (%.*s) creation failure, skipping\n",
				event_idx, nl, name);
			junk_events++;
		} else {
			event_attr_ct += ct;
			event_descs[desc_ct] = event_to_desc_attr(event, nonce);
			if (event_descs[desc_ct])
				desc_ct++;
			event_long_descs[long_desc_ct] =
					event_to_long_desc_attr(event, nonce);
			if (event_long_descs[long_desc_ct])
				long_desc_ct++;
		}
	}

	pr_info("read %zu catalog entries, created %zu event attrs (%zu failures), %zu descs\n",
		event_idx, event_attr_ct, junk_events, desc_ct);

	events[event_attr_ct] = NULL;
	event_descs[desc_ct] = NULL;
	event_long_descs[long_desc_ct] = NULL;

	event_uniq_destroy(&ev_uniq);
	vfree(event_data);
	kmem_cache_free(hv_page_cache, page);

	*events_ = events;
	*event_descs_ = event_descs;
	*event_long_descs_ = event_long_descs;
	return 0;

e_event_descs:
	kfree(event_descs);
e_event_attrs:
	kfree(events);
e_event_data:
	vfree(event_data);
e_free:
	kmem_cache_free(hv_page_cache, page);
e_out:
	*events_ = NULL;
	*event_descs_ = NULL;
	*event_long_descs_ = NULL;
	return ret;
}
static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *bin_attr, char *buf,
			    loff_t offset, size_t count)
{
	unsigned long hret;
	ssize_t ret = 0;
	size_t catalog_len = 0, catalog_page_len = 0;
	loff_t page_offset = 0;
	loff_t offset_in_page;
	size_t copy_len;
	uint64_t catalog_version_num = 0;
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
	struct hv_24x7_catalog_page_0 *page_0 = page;

	if (!page)
		return -ENOMEM;

	hret = h_get_24x7_catalog_page(page, 0, 0);
	if (hret) {
		ret = -EIO;
		goto e_free;
	}

	catalog_version_num = be64_to_cpu(page_0->version);
	catalog_page_len = be32_to_cpu(page_0->length);
	catalog_len = catalog_page_len * 4096;

	page_offset = offset / 4096;
	offset_in_page = offset % 4096;

	if (page_offset >= catalog_page_len)
		goto e_free;

	if (page_offset != 0) {
		hret = h_get_24x7_catalog_page(page, catalog_version_num,
					       page_offset);
		if (hret) {
			ret = -EIO;
			goto e_free;
		}
	}

	copy_len = 4096 - offset_in_page;
	if (copy_len > count)
		copy_len = count;

	memcpy(buf, page+offset_in_page, copy_len);
	ret = copy_len;

e_free:
	if (hret)
		pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed: rc=%ld\n",
		       catalog_version_num, page_offset, hret);
	kmem_cache_free(hv_page_cache, page);

	pr_devel("catalog_read: offset=%lld(%lld) count=%zu catalog_len=%zu(%zu) => %zd\n",
		 offset, page_offset, count, catalog_len, catalog_page_len,
		 ret);

	return ret;
}
#define PAGE_0_ATTR(_name, _fmt, _expr)				\
static ssize_t _name##_show(struct device *dev,			\
			    struct device_attribute *dev_attr,	\
			    char *buf)				\
{								\
	unsigned long hret;					\
	ssize_t ret = 0;					\
	void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);	\
	struct hv_24x7_catalog_page_0 *page_0 = page;		\
	if (!page)						\
		return -ENOMEM;					\
	hret = h_get_24x7_catalog_page(page, 0, 0);		\
	if (hret) {						\
		ret = -EIO;					\
		goto e_free;					\
	}							\
	ret = sprintf(buf, _fmt, _expr);			\
e_free:								\
	kmem_cache_free(hv_page_cache, page);			\
	return ret;						\
}								\
static DEVICE_ATTR_RO(_name)

PAGE_0_ATTR(catalog_version, "%lld\n",
	    (unsigned long long)be64_to_cpu(page_0->version));
PAGE_0_ATTR(catalog_len, "%lld\n",
	    (unsigned long long)be32_to_cpu(page_0->length) * 4096);
static BIN_ATTR_RO(catalog, 0/* real length varies */);
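/*
 * For illustration: each PAGE_0_ATTR() use above expands to a
 * <name>_show() routine plus DEVICE_ATTR_RO(<name>), i.e. a read-only
 * sysfs file that fetches page 0 of the catalog and formats one field
 * from it.
 */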
static struct bin_attribute *if_bin_attrs[] = {
	&bin_attr_catalog,
	NULL,
};

static struct attribute *if_attrs[] = {
	&dev_attr_catalog_len.attr,
	&dev_attr_catalog_version.attr,
	NULL,
};

static struct attribute_group if_group = {
	.name = "interface",
	.attrs = if_attrs,
	.bin_attrs = if_bin_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&format_group,
	&event_group,
	&event_desc_group,
	&event_long_desc_group,
	&if_group,
	NULL,
};
static void log_24x7_hcall(struct hv_24x7_request_buffer *request_buffer,
			   struct hv_24x7_data_result_buffer *result_buffer,
			   unsigned long ret)
{
	struct hv_24x7_request *req;

	req = &request_buffer->requests[0];
	pr_notice_ratelimited("hcall failed: [%d %#x %#x %d] => ret 0x%lx (%ld) detail=0x%x failing ix=%x\n",
			req->performance_domain, req->data_offset,
			req->starting_ix, req->starting_lpar_ix, ret, ret,
			result_buffer->detailed_rc,
			result_buffer->failing_request_ix);
}
/*
 * Start the process for a new H_GET_24x7_DATA hcall.
 */
static void init_24x7_request(struct hv_24x7_request_buffer *request_buffer,
			      struct hv_24x7_data_result_buffer *result_buffer)
{
	memset(request_buffer, 0, 4096);
	memset(result_buffer, 0, 4096);

	request_buffer->interface_version = HV_24X7_IF_VERSION_CURRENT;
	/* memset above set request_buffer->num_requests to 0 */
}
/*
 * Commit (i.e. perform) the H_GET_24x7_DATA hcall using the data collected
 * by 'init_24x7_request()' and 'add_event_to_24x7_request()'.
 */
static int make_24x7_request(struct hv_24x7_request_buffer *request_buffer,
			     struct hv_24x7_data_result_buffer *result_buffer)
{
	unsigned long ret;

	/*
	 * NOTE: Due to variable number of array elements in request and
	 *	 result buffer(s), sizeof() is not reliable. Use the actual
	 *	 allocated buffer size, H24x7_DATA_BUFFER_SIZE.
	 */
	ret = plpar_hcall_norets(H_GET_24X7_DATA,
			virt_to_phys(request_buffer), H24x7_DATA_BUFFER_SIZE,
			virt_to_phys(result_buffer), H24x7_DATA_BUFFER_SIZE);

	if (ret)
		log_24x7_hcall(request_buffer, result_buffer, ret);

	return ret;
}
/*
 * Add the given @event to the next slot in the 24x7 request_buffer.
 *
 * Note that H_GET_24X7_DATA hcall allows reading several counters'
 * values in a single HCALL. We expect the caller to add events to the
 * request buffer one by one, make the HCALL and process the results.
 */
static int add_event_to_24x7_request(struct perf_event *event,
				     struct hv_24x7_request_buffer *request_buffer)
{
	u16 idx;
	int i;
	struct hv_24x7_request *req;

	if (request_buffer->num_requests > 254) {
		pr_devel("Too many requests for 24x7 HCALL %d\n",
			 request_buffer->num_requests);
		return -EINVAL;
	}

	if (is_physical_domain(event_get_domain(event)))
		idx = event_get_core(event);
	else
		idx = event_get_vcpu(event);

	i = request_buffer->num_requests++;
	req = &request_buffer->requests[i];

	req->performance_domain = event_get_domain(event);
	req->data_size = cpu_to_be16(8);
	req->data_offset = cpu_to_be32(event_get_offset(event));
	req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event));
	req->max_num_lpars = cpu_to_be16(1);
	req->starting_ix = cpu_to_be16(idx);
	req->max_ix = cpu_to_be16(1);

	return 0;
}
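/*
 * Sketch of the expected calling sequence (this is what
 * single_24x7_request() below and h_24x7_event_commit_txn() do):
 *
 *	init_24x7_request(req_buf, res_buf);
 *	add_event_to_24x7_request(ev1, req_buf);
 *	add_event_to_24x7_request(ev2, req_buf);
 *	ret = make_24x7_request(req_buf, res_buf);
 *	if (!ret)
 *		count = be64_to_cpu(res_buf->results[i]
 *					.elements[0].element_data[0]);
 */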
static unsigned long single_24x7_request(struct perf_event *event, u64 *count)
{
	unsigned long ret;
	struct hv_24x7_request_buffer *request_buffer;
	struct hv_24x7_data_result_buffer *result_buffer;

	BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
	BUILD_BUG_ON(sizeof(*result_buffer) > 4096);

	request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
	result_buffer = (void *)get_cpu_var(hv_24x7_resb);

	init_24x7_request(request_buffer, result_buffer);

	ret = add_event_to_24x7_request(event, request_buffer);
	if (ret)
		goto out;

	ret = make_24x7_request(request_buffer, result_buffer);
	if (ret) {
		log_24x7_hcall(request_buffer, result_buffer, ret);
		goto out;
	}

	/* process result from hcall */
	*count = be64_to_cpu(result_buffer->results[0].elements[0].element_data[0]);

out:
	put_cpu_var(hv_24x7_reqb);
	put_cpu_var(hv_24x7_resb);
	return ret;
}
static int h_24x7_event_init(struct perf_event *event)
{
	struct hv_perf_caps caps;
	unsigned domain;
	unsigned long hret;
	u64 ct;

	/* Not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Unused areas must be 0 */
	if (event_get_reserved1(event) ||
	    event_get_reserved2(event) ||
	    event_get_reserved3(event)) {
		pr_devel("reserved set when forbidden 0x%llx(0x%llx) 0x%llx(0x%llx) 0x%llx(0x%llx)\n",
			 event->attr.config,
			 event_get_reserved1(event),
			 event->attr.config1,
			 event_get_reserved2(event),
			 event->attr.config2,
			 event_get_reserved3(event));
		return -EINVAL;
	}

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest)
		return -EINVAL;

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	/* offset must be 8 byte aligned */
	if (event_get_offset(event) % 8) {
		pr_devel("bad alignment\n");
		return -EINVAL;
	}

	/* Domains above 6 are invalid */
	domain = event_get_domain(event);
	if (domain > 6) {
		pr_devel("invalid domain %d\n", domain);
		return -EINVAL;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_devel("could not get capabilities: rc=%ld\n", hret);
		return -EIO;
	}

	/* Physical domains & other lpars require extra capabilities */
	if (!caps.collect_privileged && (is_physical_domain(domain) ||
		(event_get_lpar(event) != event_get_lpar_max()))) {
		pr_devel("hv permissions disallow: is_physical_domain:%d, lpar=0x%llx\n",
			 is_physical_domain(domain),
			 event_get_lpar(event));
		return -EACCES;
	}

	/* see if the event complains */
	if (single_24x7_request(event, &ct)) {
		pr_devel("test hcall failed\n");
		return -EIO;
	}

	return 0;
}
static u64 h_24x7_get_value(struct perf_event *event)
{
	unsigned long ret;
	u64 ct;

	ret = single_24x7_request(event, &ct);
	if (ret)
		/* We checked this in event init, shouldn't fail here... */
		return 0;

	return ct;
}
static void update_event_count(struct perf_event *event, u64 now)
{
	s64 prev;

	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}
static void h_24x7_event_read(struct perf_event *event)
{
	u64 now;
	struct hv_24x7_request_buffer *request_buffer;
	struct hv_24x7_hw *h24x7hw;
	int txn_flags;

	txn_flags = __this_cpu_read(hv_24x7_txn_flags);

	/*
	 * If in a READ transaction, add this counter to the list of
	 * counters to read during the next HCALL (i.e. commit_txn()).
	 * If not in a READ transaction, go ahead and make the HCALL
	 * to read this counter by itself.
	 */
	if (txn_flags & PERF_PMU_TXN_READ) {
		int i;
		int ret;

		if (__this_cpu_read(hv_24x7_txn_err))
			return;

		request_buffer = (void *)get_cpu_var(hv_24x7_reqb);

		ret = add_event_to_24x7_request(event, request_buffer);
		if (ret) {
			__this_cpu_write(hv_24x7_txn_err, ret);
		} else {
			/*
			 * Associate the event with the HCALL request index,
			 * so ->commit_txn() can quickly find/update count.
			 */
			i = request_buffer->num_requests - 1;

			h24x7hw = &get_cpu_var(hv_24x7_hw);
			h24x7hw->events[i] = event;
			put_cpu_var(h24x7hw);
		}

		put_cpu_var(hv_24x7_reqb);
	} else {
		now = h_24x7_get_value(event);
		update_event_count(event, now);
	}
}
static void h_24x7_event_start(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_RELOAD)
		local64_set(&event->hw.prev_count, h_24x7_get_value(event));
}

static void h_24x7_event_stop(struct perf_event *event, int flags)
{
	h_24x7_event_read(event);
}

static int h_24x7_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		h_24x7_event_start(event, flags);

	return 0;
}
/*
 * 24x7 counters only support READ transactions. They are
 * always counting and don't need/support ADD transactions.
 * Cache the flags, but otherwise ignore transactions that
 * are not PERF_PMU_TXN_READ.
 */
static void h_24x7_event_start_txn(struct pmu *pmu, unsigned int flags)
{
	struct hv_24x7_request_buffer *request_buffer;
	struct hv_24x7_data_result_buffer *result_buffer;

	/* We should not be called if we are already in a txn */
	WARN_ON_ONCE(__this_cpu_read(hv_24x7_txn_flags));

	__this_cpu_write(hv_24x7_txn_flags, flags);
	if (flags & ~PERF_PMU_TXN_READ)
		return;

	request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
	result_buffer = (void *)get_cpu_var(hv_24x7_resb);

	init_24x7_request(request_buffer, result_buffer);

	put_cpu_var(hv_24x7_resb);
	put_cpu_var(hv_24x7_reqb);
}
/*
 * Clean up transaction state.
 *
 * NOTE: Ignore state of request and result buffers for now.
 *	 We will initialize them during the next read/txn.
 */
static void reset_txn(void)
{
	__this_cpu_write(hv_24x7_txn_flags, 0);
	__this_cpu_write(hv_24x7_txn_err, 0);
}
/*
 * 24x7 counters only support READ transactions. They are always counting
 * and don't need/support ADD transactions. Clear ->txn_flags but otherwise
 * ignore transactions that are not of type PERF_PMU_TXN_READ.
 *
 * For READ transactions, submit all pending 24x7 requests (i.e. requests
 * that were queued by h_24x7_event_read()) to the hypervisor and update
 * the event counts.
 */
static int h_24x7_event_commit_txn(struct pmu *pmu)
{
	struct hv_24x7_request_buffer *request_buffer;
	struct hv_24x7_data_result_buffer *result_buffer;
	struct hv_24x7_result *resb;
	struct perf_event *event;
	u64 count;
	int i, ret, txn_flags;
	struct hv_24x7_hw *h24x7hw;

	txn_flags = __this_cpu_read(hv_24x7_txn_flags);
	WARN_ON_ONCE(!txn_flags);

	ret = 0;
	if (txn_flags & ~PERF_PMU_TXN_READ)
		goto out;

	ret = __this_cpu_read(hv_24x7_txn_err);
	if (ret)
		goto out;

	request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
	result_buffer = (void *)get_cpu_var(hv_24x7_resb);

	ret = make_24x7_request(request_buffer, result_buffer);
	if (ret) {
		log_24x7_hcall(request_buffer, result_buffer, ret);
		goto put_reqb;
	}

	h24x7hw = &get_cpu_var(hv_24x7_hw);

	/* Update event counts from hcall */
	for (i = 0; i < request_buffer->num_requests; i++) {
		resb = &result_buffer->results[i];
		count = be64_to_cpu(resb->elements[0].element_data[0]);
		event = h24x7hw->events[i];
		h24x7hw->events[i] = NULL;
		update_event_count(event, count);
	}

	put_cpu_var(hv_24x7_hw);

put_reqb:
	put_cpu_var(hv_24x7_resb);
	put_cpu_var(hv_24x7_reqb);
out:
	reset_txn();
	return ret;
}
/*
 * 24x7 counters only support READ transactions. They are always counting
 * and don't need/support ADD transactions. However, regardless of type
 * of transaction, all we need to do is cleanup, so we don't have to check
 * the type of transaction.
 */
static void h_24x7_event_cancel_txn(struct pmu *pmu)
{
	WARN_ON_ONCE(!__this_cpu_read(hv_24x7_txn_flags));
	reset_txn();
}
static struct pmu h_24x7_pmu = {
	.task_ctx_nr = perf_invalid_context,

	.name = "hv_24x7",
	.attr_groups = attr_groups,
	.event_init = h_24x7_event_init,
	.add = h_24x7_event_add,
	.del = h_24x7_event_stop,
	.start = h_24x7_event_start,
	.stop = h_24x7_event_stop,
	.read = h_24x7_event_read,
	.start_txn = h_24x7_event_start_txn,
	.commit_txn = h_24x7_event_commit_txn,
	.cancel_txn = h_24x7_event_cancel_txn,
};
static int hv_24x7_init(void)
{
	int r;
	unsigned long hret;
	struct hv_perf_caps caps;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		pr_debug("not a virtualized system, not enabling\n");
		return -ENODEV;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
			 hret);
		return -ENODEV;
	}

	hv_page_cache = kmem_cache_create("hv-page-4096", 4096, 4096, 0, NULL);
	if (!hv_page_cache)
		return -ENOMEM;

	/* sampling not supported */
	h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	r = create_events_from_catalog(&event_group.attrs,
				       &event_desc_group.attrs,
				       &event_long_desc_group.attrs);
	if (r)
		return r;

	r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
	if (r)
		return r;

	return 0;
}

device_initcall(hv_24x7_init);