Merge tag '6.9-rc-smb3-client-fixes-part2' of git://git.samba.org/sfrench/cifs-2.6
[linux-2.6-block.git] / tools / perf / util / mem-events.c
CommitLineData
b2441318 1// SPDX-License-Identifier: GPL-2.0
ce1e22b0
JO
2#include <stddef.h>
3#include <stdlib.h>
4#include <string.h>
5#include <errno.h>
54fbad54
JO
6#include <sys/types.h>
7#include <sys/stat.h>
8#include <unistd.h>
9#include <api/fs/fs.h>
877a7a11 10#include <linux/kernel.h>
d3300a3c 11#include "map_symbol.h"
acbe613e 12#include "mem-events.h"
ce1e22b0 13#include "debug.h"
0c877d75 14#include "symbol.h"
e7ce8d11 15#include "pmu.h"
1eaf496e 16#include "pmus.h"
acbe613e 17
/* Default load-latency threshold (cycles) for the ldlat event modifier. */
unsigned int perf_mem_events__loads_ldlat = 30;

#define E(t, n, s, l, a) { .tag = t, .name = n, .event_name = s, .ldlat = l, .aux_event = a }

/*
 * Generic mem event table: tag used on the command line, a format
 * template for the event string (filled in with the PMU name and,
 * for loads, the ldlat threshold), the sysfs event name used for the
 * support probe, whether ldlat applies, and an optional aux event.
 * The list is terminated by an all-NULL sentinel entry.
 */
struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
	E("ldlat-loads", "%s/mem-loads,ldlat=%u/P", "mem-loads", true, 0),
	E("ldlat-stores", "%s/mem-stores/P", "mem-stores", false, 0),
	E(NULL, NULL, NULL, false, 0),
};
#undef E

/* Static scratch buffers for the formatted event strings (not reentrant). */
static char mem_loads_name[100];
static char mem_stores_name[100];
a30450e6 32struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i)
eaf6aaee 33{
a30450e6 34 if (i >= PERF_MEM_EVENTS__MAX || !pmu)
eaf6aaee
LY
35 return NULL;
36
a30450e6
KL
37 return &pmu->mem_events[i];
38}
39
40static struct perf_pmu *perf_pmus__scan_mem(struct perf_pmu *pmu)
41{
42 while ((pmu = perf_pmus__scan(pmu)) != NULL) {
43 if (pmu->mem_events)
44 return pmu;
45 }
46 return NULL;
47}
48
49struct perf_pmu *perf_mem_events_find_pmu(void)
50{
51 /*
52 * The current perf mem doesn't support per-PMU configuration.
53 * The exact same configuration is applied to all the
54 * mem_events supported PMUs.
55 * Return the first mem_events supported PMU.
56 *
57 * Notes: The only case which may support multiple mem_events
58 * supported PMUs is Intel hybrid. The exact same mem_events
59 * is shared among the PMUs. Only configure the first PMU
60 * is good enough as well.
61 */
62 return perf_pmus__scan_mem(NULL);
eaf6aaee
LY
63}
64
/**
 * perf_pmu__mem_events_num_mem_pmus - Get the number of mem PMUs since the given pmu
 * @pmu: Start pmu. If it's NULL, search the entire PMU list.
 */
int perf_pmu__mem_events_num_mem_pmus(struct perf_pmu *pmu)
{
	int cnt = 0;

	for (pmu = perf_pmus__scan_mem(pmu); pmu; pmu = perf_pmus__scan_mem(pmu))
		cnt++;

	return cnt;
}
78
abbdd79b 79static const char *perf_pmu__mem_events_name(int i, struct perf_pmu *pmu)
2ba7ac58 80{
abbdd79b 81 struct perf_mem_event *e;
eaf6aaee 82
abbdd79b
KL
83 if (i >= PERF_MEM_EVENTS__MAX || !pmu)
84 return NULL;
85
86 e = &pmu->mem_events[i];
eaf6aaee
LY
87 if (!e)
88 return NULL;
89
abbdd79b
KL
90 if (i == PERF_MEM_EVENTS__LOAD || i == PERF_MEM_EVENTS__LOAD_STORE) {
91 if (e->ldlat) {
92 if (!e->aux_event) {
93 /* ARM and Most of Intel */
94 scnprintf(mem_loads_name, sizeof(mem_loads_name),
95 e->name, pmu->name,
96 perf_mem_events__loads_ldlat);
97 } else {
98 /* Intel with mem-loads-aux event */
99 scnprintf(mem_loads_name, sizeof(mem_loads_name),
100 e->name, pmu->name, pmu->name,
101 perf_mem_events__loads_ldlat);
102 }
103 } else {
104 if (!e->aux_event) {
105 /* AMD and POWER */
106 scnprintf(mem_loads_name, sizeof(mem_loads_name),
107 e->name, pmu->name);
108 } else
109 return NULL;
b0d745b3 110 }
abbdd79b 111
b0d745b3
JO
112 return mem_loads_name;
113 }
114
abbdd79b
KL
115 if (i == PERF_MEM_EVENTS__STORE) {
116 scnprintf(mem_stores_name, sizeof(mem_stores_name),
117 e->name, pmu->name);
118 return mem_stores_name;
119 }
120
121 return NULL;
2ba7ac58
JO
122}
123
8ea9dfb9 124bool is_mem_loads_aux_event(struct evsel *leader)
2a57d408 125{
8ea9dfb9
KL
126 struct perf_pmu *pmu = leader->pmu;
127 struct perf_mem_event *e;
128
129 if (!pmu || !pmu->mem_events)
130 return false;
131
132 e = &pmu->mem_events[PERF_MEM_EVENTS__LOAD];
133 if (!e->aux_event)
134 return false;
135
136 return leader->core.attr.config == e->aux_event;
2a57d408
KL
137}
138
a30450e6 139int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str)
ce1e22b0
JO
140{
141 char *tok, *saveptr = NULL;
142 bool found = false;
143 char *buf;
144 int j;
145
146 /* We need buffer that we know we can write to. */
147 buf = malloc(strlen(str) + 1);
148 if (!buf)
149 return -ENOMEM;
150
151 strcpy(buf, str);
152
153 tok = strtok_r((char *)buf, ",", &saveptr);
154
155 while (tok) {
156 for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
a30450e6 157 struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);
ce1e22b0 158
4ba2452c
LY
159 if (!e->tag)
160 continue;
161
ce1e22b0
JO
162 if (strstr(e->tag, tok))
163 e->record = found = true;
164 }
165
166 tok = strtok_r(NULL, ",", &saveptr);
167 }
168
169 free(buf);
170
171 if (found)
172 return 0;
173
174 pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
175 return -1;
176}
54fbad54 177
db95c2ce 178static bool perf_pmu__mem_events_supported(const char *mnt, struct perf_pmu *pmu,
a4320085 179 struct perf_mem_event *e)
e7ce8d11
JY
180{
181 char path[PATH_MAX];
182 struct stat st;
183
db95c2ce
KL
184 if (!e->event_name)
185 return true;
186
187 scnprintf(path, PATH_MAX, "%s/devices/%s/events/%s", mnt, pmu->name, e->event_name);
188
e7ce8d11
JY
189 return !stat(path, &st);
190}
191
a30450e6 192int perf_pmu__mem_events_init(struct perf_pmu *pmu)
54fbad54
JO
193{
194 const char *mnt = sysfs__mount();
195 bool found = false;
196 int j;
197
198 if (!mnt)
199 return -ENOENT;
200
201 for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
a30450e6 202 struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);
54fbad54 203
4ba2452c
LY
204 /*
205 * If the event entry isn't valid, skip initialization
206 * and "e->supported" will keep false.
207 */
208 if (!e->tag)
209 continue;
210
db95c2ce 211 e->supported |= perf_pmu__mem_events_supported(mnt, pmu, e);
a30450e6
KL
212 if (e->supported)
213 found = true;
54fbad54
JO
214 }
215
216 return found ? 0 : -ENOENT;
217}
0c877d75 218
/* Print the mem events of @pmu to stderr, one per line (for -e list). */
void perf_pmu__mem_events_list(struct perf_pmu *pmu)
{
	int j;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

		/*
		 * Width specifiers collapse to 0 for the sentinel entry
		 * (NULL tag), so it prints as an empty string.  The
		 * formatted event name is only shown in verbose mode.
		 */
		fprintf(stderr, "%-*s%-*s%s",
			e->tag ? 13 : 0,
			e->tag ? : "",
			e->tag && verbose > 0 ? 25 : 0,
			e->tag && verbose > 0 ? perf_pmu__mem_events_name(j, pmu) : "",
			e->supported ? ": available\n" : "");
	}
}
234
70f4b20d 235int perf_mem_events__record_args(const char **rec_argv, int *argv_nr)
4a9086ad 236{
a4320085 237 const char *mnt = sysfs__mount();
a30450e6 238 struct perf_pmu *pmu = NULL;
4a9086ad 239 struct perf_mem_event *e;
70f4b20d
KL
240 int i = *argv_nr;
241 const char *s;
242 char *copy;
4a9086ad 243
a30450e6
KL
244 while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
245 for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
246 e = perf_pmu__mem_events_ptr(pmu, j);
4a9086ad 247
a30450e6
KL
248 if (!e->record)
249 continue;
85f73c37 250
4a9086ad 251 if (!e->supported) {
a30450e6 252 pr_err("failed: event '%s' not supported\n",
abbdd79b 253 perf_pmu__mem_events_name(j, pmu));
4a9086ad
JY
254 return -1;
255 }
256
70f4b20d
KL
257 s = perf_pmu__mem_events_name(j, pmu);
258 if (!s || !perf_pmu__mem_events_supported(mnt, pmu, e))
259 continue;
a4320085 260
70f4b20d
KL
261 copy = strdup(s);
262 if (!copy)
263 return -1;
4a9086ad 264
70f4b20d
KL
265 rec_argv[i++] = "-e";
266 rec_argv[i++] = copy;
4a9086ad
JY
267 }
268 }
269
270 *argv_nr = i;
4a9086ad
JY
271 return 0;
272}
273
0c877d75
JO
/*
 * Labels for the PERF_MEM_TLB_* bits of data_src.mem_dtlb, indexed by
 * bit position (HIT/MISS are handled separately by the printer).
 */
static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};
283
b1a5fbea 284int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
0c877d75
JO
285{
286 size_t l = 0, i;
287 u64 m = PERF_MEM_TLB_NA;
288 u64 hit, miss;
289
290 sz -= 1; /* -1 for null termination */
291 out[0] = '\0';
292
293 if (mem_info)
294 m = mem_info->data_src.mem_dtlb;
295
296 hit = m & PERF_MEM_TLB_HIT;
297 miss = m & PERF_MEM_TLB_MISS;
298
299 /* already taken care of */
300 m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
301
302 for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
303 if (!(m & 0x1))
304 continue;
305 if (l) {
306 strcat(out, " or ");
307 l += 4;
308 }
b1a5fbea 309 l += scnprintf(out + l, sz - l, tlb_access[i]);
0c877d75
JO
310 }
311 if (*out == '\0')
b1a5fbea 312 l += scnprintf(out, sz - l, "N/A");
0c877d75 313 if (hit)
b1a5fbea 314 l += scnprintf(out + l, sz - l, " hit");
0c877d75 315 if (miss)
b1a5fbea
JO
316 l += scnprintf(out + l, sz - l, " miss");
317
318 return l;
0c877d75 319}
071e9a1e
JO
320
/*
 * Labels for the legacy PERF_MEM_LVL_* bits of data_src.mem_lvl,
 * indexed by bit position.
 */
static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB/MAB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};
337
/*
 * Labels for PERF_MEM_LVLNUM_* values of data_src.mem_lvl_num;
 * values without an entry here are printed as "L<n>" by the caller.
 */
static const char * const mem_lvlnum[] = {
	[PERF_MEM_LVLNUM_UNC] = "Uncached",
	[PERF_MEM_LVLNUM_CXL] = "CXL",
	[PERF_MEM_LVLNUM_IO] = "I/O",
	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
	[PERF_MEM_LVLNUM_LFB] = "LFB/MAB",
	[PERF_MEM_LVLNUM_RAM] = "RAM",
	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
	[PERF_MEM_LVLNUM_NA] = "N/A",
};
348
cae1d759
KJ
/* Labels for the data_src.mem_hops field, indexed by hop count. */
static const char * const mem_hops[] = {
	"N/A",
	/*
	 * While printing, 'Remote' will be added to represent
	 * 'Remote core, same node' accesses as remote field need
	 * to be set with mem_hops field.
	 */
	"core, same node",
	"node, same socket",
	"socket, same board",
	"board",
};
361
fdefc375
LY
362static int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
363{
364 u64 op = PERF_MEM_LOCK_NA;
365 int l;
366
367 if (mem_info)
368 op = mem_info->data_src.mem_op;
369
370 if (op & PERF_MEM_OP_NA)
371 l = scnprintf(out, sz, "N/A");
372 else if (op & PERF_MEM_OP_LOAD)
373 l = scnprintf(out, sz, "LOAD");
374 else if (op & PERF_MEM_OP_STORE)
375 l = scnprintf(out, sz, "STORE");
376 else if (op & PERF_MEM_OP_PFETCH)
377 l = scnprintf(out, sz, "PFETCH");
378 else if (op & PERF_MEM_OP_EXEC)
379 l = scnprintf(out, sz, "EXEC");
380 else
381 l = scnprintf(out, sz, "No");
382
383 return l;
384}
385
96907563 386int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
071e9a1e 387{
ddeac198 388 union perf_mem_data_src data_src;
7fbddf40 389 int printed = 0;
ddeac198
RB
390 size_t l = 0;
391 size_t i;
392 int lvl;
393 char hit_miss[5] = {0};
071e9a1e
JO
394
395 sz -= 1; /* -1 for null termination */
396 out[0] = '\0';
397
ddeac198
RB
398 if (!mem_info)
399 goto na;
071e9a1e 400
ddeac198 401 data_src = mem_info->data_src;
071e9a1e 402
ddeac198
RB
403 if (data_src.mem_lvl & PERF_MEM_LVL_HIT)
404 memcpy(hit_miss, "hit", 3);
405 else if (data_src.mem_lvl & PERF_MEM_LVL_MISS)
406 memcpy(hit_miss, "miss", 4);
52839e65 407
ddeac198
RB
408 lvl = data_src.mem_lvl_num;
409 if (lvl && lvl != PERF_MEM_LVLNUM_NA) {
410 if (data_src.mem_remote) {
411 strcat(out, "Remote ");
412 l += 7;
071e9a1e 413 }
ddeac198
RB
414
415 if (data_src.mem_hops)
416 l += scnprintf(out + l, sz - l, "%s ", mem_hops[data_src.mem_hops]);
417
418 if (mem_lvlnum[lvl])
419 l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]);
420 else
421 l += scnprintf(out + l, sz - l, "L%d", lvl);
422
423 l += scnprintf(out + l, sz - l, " %s", hit_miss);
424 return l;
071e9a1e 425 }
52839e65 426
ddeac198
RB
427 lvl = data_src.mem_lvl;
428 if (!lvl)
429 goto na;
430
431 lvl &= ~(PERF_MEM_LVL_NA | PERF_MEM_LVL_HIT | PERF_MEM_LVL_MISS);
432 if (!lvl)
433 goto na;
434
435 for (i = 0; lvl && i < ARRAY_SIZE(mem_lvl); i++, lvl >>= 1) {
436 if (!(lvl & 0x1))
437 continue;
52839e65
AK
438 if (printed++) {
439 strcat(out, " or ");
440 l += 4;
441 }
ddeac198 442 l += scnprintf(out + l, sz - l, mem_lvl[i]);
52839e65
AK
443 }
444
ddeac198
RB
445 if (printed) {
446 l += scnprintf(out + l, sz - l, " %s", hit_miss);
447 return l;
448 }
96907563 449
ddeac198
RB
450na:
451 strcat(out, "N/A");
452 return 3;
071e9a1e 453}
2c07af13
JO
454
/* Labels for the PERF_MEM_SNOOP_* bits, indexed by bit position. */
static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Hit",
	"Miss",
	"HitM",
};

/* Labels for the extended PERF_MEM_SNOOPX_* bits, indexed by bit position. */
static const char * const snoopx_access[] = {
	"Fwd",
	"Peer",
};
467
149d7507 468int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
2c07af13
JO
469{
470 size_t i, l = 0;
471 u64 m = PERF_MEM_SNOOP_NA;
472
473 sz -= 1; /* -1 for null termination */
474 out[0] = '\0';
475
476 if (mem_info)
477 m = mem_info->data_src.mem_snoop;
478
479 for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
480 if (!(m & 0x1))
481 continue;
482 if (l) {
483 strcat(out, " or ");
484 l += 4;
485 }
149d7507 486 l += scnprintf(out + l, sz - l, snoop_access[i]);
2c07af13 487 }
f78d6250
LY
488
489 m = 0;
490 if (mem_info)
491 m = mem_info->data_src.mem_snoopx;
492
493 for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
494 if (!(m & 0x1))
495 continue;
496
52839e65
AK
497 if (l) {
498 strcat(out, " or ");
499 l += 4;
500 }
f78d6250 501 l += scnprintf(out + l, sz - l, snoopx_access[i]);
52839e65 502 }
2c07af13
JO
503
504 if (*out == '\0')
149d7507
JO
505 l += scnprintf(out, sz - l, "N/A");
506
507 return l;
2c07af13 508}
69a77275 509
8b0819c8 510int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
69a77275
JO
511{
512 u64 mask = PERF_MEM_LOCK_NA;
8b0819c8 513 int l;
69a77275
JO
514
515 if (mem_info)
516 mask = mem_info->data_src.mem_lock;
517
518 if (mask & PERF_MEM_LOCK_NA)
8b0819c8 519 l = scnprintf(out, sz, "N/A");
69a77275 520 else if (mask & PERF_MEM_LOCK_LOCKED)
8b0819c8 521 l = scnprintf(out, sz, "Yes");
69a77275 522 else
8b0819c8
JO
523 l = scnprintf(out, sz, "No");
524
525 return l;
69a77275 526}
c19ac912 527
a054c298
KL
528int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
529{
530 size_t l = 0;
531 u64 mask = PERF_MEM_BLK_NA;
532
533 sz -= 1; /* -1 for null termination */
534 out[0] = '\0';
535
536 if (mem_info)
537 mask = mem_info->data_src.mem_blk;
538
539 if (!mask || (mask & PERF_MEM_BLK_NA)) {
540 l += scnprintf(out + l, sz - l, " N/A");
541 return l;
542 }
543 if (mask & PERF_MEM_BLK_DATA)
544 l += scnprintf(out + l, sz - l, " Data");
545 if (mask & PERF_MEM_BLK_ADDR)
546 l += scnprintf(out + l, sz - l, " Addr");
547
548 return l;
549}
550
c19ac912
JO
/*
 * Print every decoded data_src field of @mem_info into @out as a
 * "|OP ...|LVL ...|SNP ...|TLB ...|LCK ...|BLK ..." record.
 * Returns the number of characters written.
 */
int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	int i = 0;

	i += scnprintf(out, sz, "|OP ");
	i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LVL ");
	/* Fix: pass the remaining space (sz - i), not the full sz. */
	i += perf_mem__lvl_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|SNP ");
	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|TLB ");
	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LCK ");
	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|BLK ");
	i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);

	return i;
}
aadddd68
JO
570
/*
 * c2c_decode_stats - decode one sample's data_src into c2c_stats counters
 * @stats: counters to update (one or more fields are incremented)
 * @mi:    resolved mem info for the sample
 *
 * Returns 0 on success, -1 when the sample cannot be fully accounted
 * (missing data address, unparsable data_src, or unresolved maps); the
 * corresponding error counter is still incremented in those cases.
 */
int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
{
	union perf_mem_data_src *data_src = &mi->data_src;
	u64 daddr = mi->daddr.addr;
	u64 op = data_src->mem_op;
	u64 lvl = data_src->mem_lvl;
	u64 snoop = data_src->mem_snoop;
	u64 snoopx = data_src->mem_snoopx;
	u64 lock = data_src->mem_lock;
	u64 blk = data_src->mem_blk;
	/*
	 * Skylake might report unknown remote level via this
	 * bit, consider it when evaluating remote HITMs.
	 *
	 * Incase of power, remote field can also be used to denote cache
	 * accesses from the another core of same node. Hence, setting
	 * mrem only when HOPS is zero along with set remote field.
	 */
	bool mrem = (data_src->mem_remote && !data_src->mem_hops);
	int err = 0;

/* Bump a specific HITM counter along with the HITM total. */
#define HITM_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_hitm++;	\
} while (0)

/* Bump a specific peer counter along with the peer total. */
#define PEER_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_peer++;	\
} while (0)

#define P(a, b) PERF_MEM_##a##_##b

	stats->nr_entries++;

	if (lock & P(LOCK, LOCKED)) stats->locks++;

	if (blk & P(BLK, DATA)) stats->blk_data++;
	if (blk & P(BLK, ADDR)) stats->blk_addr++;

	if (op & P(OP, LOAD)) {
		/* load */
		stats->load++;

		if (!daddr) {
			stats->ld_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
			if (lvl & P(LVL, IO)) stats->ld_io++;
			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
			if (lvl & P(LVL, L2)) {
				stats->ld_l2hit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}
			if (lvl & P(LVL, L3 )) {
				/* An L3 hit with HITM counts as a local HITM. */
				if (snoop & P(SNOOP, HITM))
					HITM_INC(lcl_hitm);
				else
					stats->ld_llchit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}

			if (lvl & P(LVL, LOC_RAM)) {
				stats->lcl_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}

			if ((lvl & P(LVL, REM_RAM1)) ||
			    (lvl & P(LVL, REM_RAM2)) ||
			     mrem) {
				stats->rmt_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}
		}

		/* Remote cache accesses (see the mrem note above). */
		if ((lvl & P(LVL, REM_CCE1)) ||
		    (lvl & P(LVL, REM_CCE2)) ||
		     mrem) {
			if (snoop & P(SNOOP, HIT)) {
				stats->rmt_hit++;
			} else if (snoop & P(SNOOP, HITM)) {
				HITM_INC(rmt_hitm);
			} else if (snoopx & P(SNOOPX, PEER)) {
				stats->rmt_hit++;
				PEER_INC(rmt_peer);
			}
		}

		if ((lvl & P(LVL, MISS)))
			stats->ld_miss++;

	} else if (op & P(OP, STORE)) {
		/* store */
		stats->store++;

		if (!daddr) {
			stats->st_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->st_uncache++;
			if (lvl & P(LVL, L1 )) stats->st_l1hit++;
		}
		if (lvl & P(LVL, MISS))
			if (lvl & P(LVL, L1)) stats->st_l1miss++;
		if (lvl & P(LVL, NA))
			stats->st_na++;
	} else {
		/* unparsable data_src? */
		stats->noparse++;
		return -1;
	}

	/* Both the data and instruction address must resolve to a map. */
	if (!mi->daddr.ms.map || !mi->iaddr.ms.map) {
		stats->nomap++;
		return -1;
	}

#undef P
#undef HITM_INC
	return err;
}
0a9a24cc
JO
710
711void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
712{
713 stats->nr_entries += add->nr_entries;
714
715 stats->locks += add->locks;
716 stats->store += add->store;
717 stats->st_uncache += add->st_uncache;
718 stats->st_noadrs += add->st_noadrs;
719 stats->st_l1hit += add->st_l1hit;
720 stats->st_l1miss += add->st_l1miss;
98450637 721 stats->st_na += add->st_na;
0a9a24cc
JO
722 stats->load += add->load;
723 stats->ld_excl += add->ld_excl;
724 stats->ld_shared += add->ld_shared;
725 stats->ld_uncache += add->ld_uncache;
726 stats->ld_io += add->ld_io;
727 stats->ld_miss += add->ld_miss;
728 stats->ld_noadrs += add->ld_noadrs;
729 stats->ld_fbhit += add->ld_fbhit;
730 stats->ld_l1hit += add->ld_l1hit;
731 stats->ld_l2hit += add->ld_l2hit;
732 stats->ld_llchit += add->ld_llchit;
733 stats->lcl_hitm += add->lcl_hitm;
734 stats->rmt_hitm += add->rmt_hitm;
dba8ab93 735 stats->tot_hitm += add->tot_hitm;
e843dec5
LY
736 stats->lcl_peer += add->lcl_peer;
737 stats->rmt_peer += add->rmt_peer;
738 stats->tot_peer += add->tot_peer;
0a9a24cc
JO
739 stats->rmt_hit += add->rmt_hit;
740 stats->lcl_dram += add->lcl_dram;
741 stats->rmt_dram += add->rmt_dram;
d9d5d767
KL
742 stats->blk_data += add->blk_data;
743 stats->blk_addr += add->blk_addr;
0a9a24cc
JO
744 stats->nomap += add->nomap;
745 stats->noparse += add->noparse;
746}