1 // SPDX-License-Identifier: GPL-2.0
6 #include <linux/bitops.h>
7 #include <linux/kernel.h>
8 #include <linux/types.h>
10 #include "map_symbol.h"
/*
 * COMP(m): compare scalar member 'm' of the two samples (s1/s2 taken
 * from the enclosing scope) with '!='; on mismatch, log the member name.
 * NOTE(review): the macro tail (failure return / while(0)) is not
 * visible in this chunk.
 */
19 #define COMP(m) do { \
20 if (s1->m != s2->m) { \
21 pr_debug("Samples differ at '"#m"'\n"); \
/*
 * MCOMP(m): like COMP(), but compares member 'm' bytewise with memcmp()
 * — used for aggregate (struct/array-element) members.
 * NOTE(review): the macro tail (failure return / while(0)) is not
 * visible in this chunk.
 */
26 #define MCOMP(m) do { \
27 if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) { \
28 pr_debug("Samples differ at '"#m"'\n"); \
/*
 * samples_same - field-by-field comparison of two perf_sample structs.
 *
 * Only the fields selected by the sample 'type' bitmask (and, for
 * PERF_SAMPLE_READ, by 'read_format') are compared; a mismatch is
 * reported via pr_debug() naming the differing member.  Presumably
 * returns true when all selected fields match — the return statements
 * are outside this view, TODO confirm.
 */
33 static bool samples_same(const struct perf_sample *s1,
34 const struct perf_sample *s2,
35 u64 type, u64 read_format)
39 if (type & PERF_SAMPLE_IDENTIFIER)
42 if (type & PERF_SAMPLE_IP)
45 if (type & PERF_SAMPLE_TID) {
50 if (type & PERF_SAMPLE_TIME)
53 if (type & PERF_SAMPLE_ADDR)
56 if (type & PERF_SAMPLE_ID)
59 if (type & PERF_SAMPLE_STREAM_ID)
62 if (type & PERF_SAMPLE_CPU)
65 if (type & PERF_SAMPLE_PERIOD)
/* The layout of the read area depends on PERF_FORMAT_GROUP. */
68 if (type & PERF_SAMPLE_READ) {
69 if (read_format & PERF_FORMAT_GROUP)
73 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
74 COMP(read.time_enabled);
75 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
76 COMP(read.time_running);
77 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
78 if (read_format & PERF_FORMAT_GROUP) {
/* Group read: compare every {value, id} entry bytewise. */
79 for (i = 0; i < s1->read.group.nr; i++)
80 MCOMP(read.group.values[i]);
86 if (type & PERF_SAMPLE_CALLCHAIN) {
88 for (i = 0; i < s1->callchain->nr; i++)
89 COMP(callchain->ips[i]);
/* Raw data is compared bytewise using s1's size. */
92 if (type & PERF_SAMPLE_RAW) {
94 if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
95 pr_debug("Samples differ at 'raw_data'\n");
100 if (type & PERF_SAMPLE_BRANCH_STACK) {
101 COMP(branch_stack->nr);
102 for (i = 0; i < s1->branch_stack->nr; i++)
103 MCOMP(branch_stack->entries[i]);
106 if (type & PERF_SAMPLE_REGS_USER) {
/* One u64 per set bit in the register mask. */
107 size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);
109 COMP(user_regs.mask);
/*
 * The register payload is only compared when an ABI was
 * recorded (abi != 0); a missing regs pointer on either side
 * then also counts as a mismatch.
 */
111 if (s1->user_regs.abi &&
112 (!s1->user_regs.regs || !s2->user_regs.regs ||
113 memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
114 pr_debug("Samples differ at 'user_regs'\n");
119 if (type & PERF_SAMPLE_STACK_USER) {
120 COMP(user_stack.size);
121 if (memcmp(s1->user_stack.data, s2->user_stack.data,
122 s1->user_stack.size)) {
123 pr_debug("Samples differ at 'user_stack'\n");
128 if (type & PERF_SAMPLE_WEIGHT)
131 if (type & PERF_SAMPLE_DATA_SRC)
134 if (type & PERF_SAMPLE_TRANSACTION)
137 if (type & PERF_SAMPLE_REGS_INTR) {
/* Same scheme as user regs: mask bit-count sizes the payload. */
138 size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);
140 COMP(intr_regs.mask);
142 if (s1->intr_regs.abi &&
143 (!s1->intr_regs.regs || !s2->intr_regs.regs ||
144 memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
145 pr_debug("Samples differ at 'intr_regs'\n");
150 if (type & PERF_SAMPLE_PHYS_ADDR)
/*
 * do_test - round-trip one sample configuration.
 *
 * Builds a perf_sample filled with fixed test data for the given
 * sample_type/read_format, synthesizes a PERF_RECORD_SAMPLE event from
 * it, parses the event back, and checks (via samples_same()) that the
 * parsed sample matches the original.  Also verifies the synthesized
 * event size by scanning an 0xff-filled overrun area.  Returns 0 on
 * success, non-zero on failure (error paths are elided in this view).
 */
156 static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
158 struct evsel evsel = {
162 .sample_type = sample_type,
163 .read_format = read_format,
167 union perf_event *event;
169 struct ip_callchain callchain;
173 .data = {3, 201, 202, 203},
176 struct branch_stack branch_stack;
180 .data = {1, 211, 212, 213},
183 const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
184 const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
185 struct perf_sample sample = {
196 .raw_size = sizeof(raw_data),
199 .raw_data = (void *)raw_data,
200 .callchain = &callchain.callchain,
201 .branch_stack = &branch_stack.branch_stack,
203 .abi = PERF_SAMPLE_REGS_ABI_64,
208 .size = sizeof(data),
209 .data = (void *)data,
212 .time_enabled = 0x030a59d664fca7deULL,
213 .time_running = 0x011b6ae553eb98edULL,
216 .abi = PERF_SAMPLE_REGS_ABI_64,
222 struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
223 struct perf_sample sample_out;
/* sample_regs applies to whichever register sample types were asked for. */
227 if (sample_type & PERF_SAMPLE_REGS_USER)
228 evsel.core.attr.sample_regs_user = sample_regs;
230 if (sample_type & PERF_SAMPLE_REGS_INTR)
231 evsel.core.attr.sample_regs_intr = sample_regs;
/*
 * Fill the regs buffer with a deterministic byte pattern; the '& 0xfe'
 * keeps 0xff out of the data so the size check below stays valid.
 */
233 for (i = 0; i < sizeof(regs); i++)
234 *(i + (u8 *)regs) = i & 0xfe;
/* Populate the read area in the layout matching read_format. */
236 if (read_format & PERF_FORMAT_GROUP) {
237 sample.read.group.nr = 4;
238 sample.read.group.values = values;
240 sample.read.one.value = 0x08789faeb786aa87ULL;
241 sample.read.one.id = 99;
244 sz = perf_event__sample_event_size(&sample, sample_type, read_format);
245 bufsz = sz + 4096; /* Add a bit for overrun checking */
246 event = malloc(bufsz);
248 pr_debug("malloc failed\n");
/* 0xff-fill so any byte the synthesizer did not write stays 0xff. */
252 memset(event, 0xff, bufsz);
253 event->header.type = PERF_RECORD_SAMPLE;
254 event->header.misc = 0;
255 event->header.size = sz;
257 err = perf_event__synthesize_sample(event, sample_type, read_format,
260 pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
261 "perf_event__synthesize_sample", sample_type, err);
265 /* The data does not contain 0xff so we use that to check the size */
266 for (i = bufsz; i > 0; i--) {
267 if (*(i - 1 + (u8 *)event) != 0xff)
271 pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
/* Now parse the synthesized event back and compare with the original. */
276 evsel.sample_size = __perf_evsel__sample_size(sample_type);
278 err = perf_evsel__parse_sample(&evsel, event, &sample_out);
280 pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
281 "perf_evsel__parse_sample", sample_type, err);
285 if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
286 pr_debug("parsing failed for sample_type %#"PRIx64"\n",
294 if (ret && read_format)
295 pr_debug("read_format %#"PRIx64"\n", read_format);
300 * test__sample_parsing - test sample parsing.
302 * This function implements a test that synthesizes a sample event, parses it
303 * and then checks that the parsed sample matches the original sample. The test
304 * checks sample format bits separately and together. If the test passes %0 is
305 * returned, otherwise %-1 is returned.
307 int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_unused)
/*
 * read_format combinations exercised for PERF_SAMPLE_READ — presumably
 * each includes PERF_FORMAT_ID with/without the TIME_* and GROUP bits;
 * TODO confirm against the PERF_FORMAT_* bit values.
 */
309 const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
316 * Fail the test if it has not been updated when new sample format bits
317 * were added. Please actually update the test rather than just change
318 * the condition below.
/* Guard: trips when a PERF_SAMPLE_ bit beyond PHYS_ADDR exists. */
320 if (PERF_SAMPLE_MAX > PERF_SAMPLE_PHYS_ADDR << 1) {
321 pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
325 /* Test each sample format bit separately */
326 for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
328 /* Test read_format variations */
329 if (sample_type == PERF_SAMPLE_READ) {
330 for (i = 0; i < ARRAY_SIZE(rf); i++) {
331 err = do_test(sample_type, 0, rf[i]);
/* Register sample types need a non-zero register mask. */
339 if (sample_type == PERF_SAMPLE_REGS_USER)
340 sample_regs = 0x3fff;
342 if (sample_type == PERF_SAMPLE_REGS_INTR)
343 sample_regs = 0xff0fff;
345 err = do_test(sample_type, sample_regs, 0);
350 /* Test all sample format bits together */
351 sample_type = PERF_SAMPLE_MAX - 1;
352 sample_regs = 0x3fff; /* shared by intr and user regs */
353 for (i = 0; i < ARRAY_SIZE(rf); i++) {
354 err = do_test(sample_type, sample_regs, rf[i]);