// SPDX-License-Identifier: GPL-2.0
/*
 * numa.c
 *
 * numa: Simulate a NUMA-sensitive workload and measure its NUMA performance
 */

#include <inttypes.h>
/* For the CLR_() macros */
#include <pthread.h>

#include "../perf.h"
#include "../builtin.h"
#include "../util/util.h"
#include <subcmd/parse-options.h>
#include "../util/cloexec.h"

#include "bench.h"

#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <assert.h>
#include <malloc.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/wait.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <linux/kernel.h>
#include <linux/time64.h>

#include <numa.h>
#include <numaif.h>

/*
 * Regular printout to the terminal, suppressed if -q is specified:
 */
#define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)

/*
 * Debug printf:
 */
#undef dprintf
#define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0)

struct thread_data {
        int                     curr_cpu;
        cpu_set_t               bind_cpumask;
        int                     bind_node;
        u8                      *process_data;
        int                     process_nr;
        int                     thread_nr;
        int                     task_nr;
        unsigned int            loops_done;
        u64                     val;
        u64                     runtime_ns;
        u64                     system_time_ns;
        u64                     user_time_ns;
        double                  speed_gbs;
        pthread_mutex_t         *process_lock;
};

/* Parameters set by options: */

struct params {
        /* Startup synchronization: */
        bool                    serialize_startup;

        /* Task hierarchy: */
        int                     nr_proc;
        int                     nr_threads;

        /* Working set sizes: */
        const char              *mb_global_str;
        const char              *mb_proc_str;
        const char              *mb_proc_locked_str;
        const char              *mb_thread_str;

        double                  mb_global;
        double                  mb_proc;
        double                  mb_proc_locked;
        double                  mb_thread;

        /* Access patterns to the working set: */
        bool                    data_reads;
        bool                    data_writes;
        bool                    data_backwards;
        bool                    data_zero_memset;
        bool                    data_rand_walk;
        u32                     nr_loops;
        u32                     nr_secs;
        u32                     sleep_usecs;

        /* Working set initialization: */
        bool                    init_zero;
        bool                    init_random;
        bool                    init_cpu0;

        /* Misc options: */
        int                     show_details;
        int                     run_all;
        int                     thp;

        long                    bytes_global;
        long                    bytes_process;
        long                    bytes_process_locked;
        long                    bytes_thread;

        int                     nr_tasks;
        bool                    show_quiet;

        bool                    show_convergence;
        bool                    measure_convergence;

        int                     perturb_secs;
        int                     nr_cpus;
        int                     nr_nodes;

        /* Affinity options -C and -M: */
        char                    *cpu_list_str;
        char                    *node_list_str;
};


/* Global, read-writable area, accessible to all processes and threads: */

struct global_info {
        u8                      *data;

        pthread_mutex_t         startup_mutex;
        int                     nr_tasks_started;

        pthread_mutex_t         startup_done_mutex;

        pthread_mutex_t         start_work_mutex;
        int                     nr_tasks_working;

        pthread_mutex_t         stop_work_mutex;
        u64                     bytes_done;

        struct thread_data      *threads;

        /* Convergence latency measurement: */
        bool                    all_converged;
        bool                    stop_work;

        int                     print_once;

        struct params           p;
};

static struct global_info       *g = NULL;

static int parse_cpus_opt(const struct option *opt, const char *arg, int unset);
static int parse_nodes_opt(const struct option *opt, const char *arg, int unset);

struct params p0;

static const struct option options[] = {
        OPT_INTEGER('p', "nr_proc"      , &p0.nr_proc,          "number of processes"),
        OPT_INTEGER('t', "nr_threads"   , &p0.nr_threads,       "number of threads per process"),

        OPT_STRING('G', "mb_global"     , &p0.mb_global_str,    "MB", "global  memory (MBs)"),
        OPT_STRING('P', "mb_proc"       , &p0.mb_proc_str,      "MB", "process memory (MBs)"),
        OPT_STRING('L', "mb_proc_locked", &p0.mb_proc_locked_str,"MB", "process serialized/locked memory access (MBs), <= process_memory"),
        OPT_STRING('T', "mb_thread"     , &p0.mb_thread_str,    "MB", "thread  memory (MBs)"),

        OPT_UINTEGER('l', "nr_loops"    , &p0.nr_loops,         "max number of loops to run (default: unlimited)"),
        OPT_UINTEGER('s', "nr_secs"     , &p0.nr_secs,          "max number of seconds to run (default: 5 secs)"),
        OPT_UINTEGER('u', "usleep"      , &p0.sleep_usecs,      "usecs to sleep per loop iteration"),
        OPT_BOOLEAN('R', "data_reads"   , &p0.data_reads,       "access the data via reads (can be mixed with -W)"),
        OPT_BOOLEAN('W', "data_writes"  , &p0.data_writes,      "access the data via writes (can be mixed with -R)"),
        OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards,  "access the data backwards as well"),
        OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"),
        OPT_BOOLEAN('r', "data_rand_walk", &p0.data_rand_walk,  "access the data with random (32bit LFSR) walk"),


        OPT_BOOLEAN('z', "init_zero"    , &p0.init_zero,        "bzero the initial allocations"),
        OPT_BOOLEAN('I', "init_random"  , &p0.init_random,      "randomize the contents of the initial allocations"),
        OPT_BOOLEAN('0', "init_cpu0"    , &p0.init_cpu0,        "do the initial allocations on CPU#0"),
        OPT_INTEGER('x', "perturb_secs", &p0.perturb_secs,      "perturb thread 0/0 every X secs, to test convergence stability"),

        OPT_INCR   ('d', "show_details" , &p0.show_details,     "Show details"),
        OPT_INCR   ('a', "all"          , &p0.run_all,          "Run all tests in the suite"),
        OPT_INTEGER('H', "thp"          , &p0.thp,              "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"),
        OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details, "
                    "convergence is reached when each process (all its threads) is running on a single NUMA node."),
        OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"),
        OPT_BOOLEAN('q', "quiet"        , &p0.show_quiet,       "quiet mode"),
        OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),

        /* Special option string parsing callbacks: */
        OPT_CALLBACK('C', "cpus", NULL, "cpu[,cpu2,...cpuN]",
                        "bind the first N tasks to these specific cpus (the rest is unbound)",
                        parse_cpus_opt),
        OPT_CALLBACK('M', "memnodes", NULL, "node[,node2,...nodeN]",
                        "bind the first N tasks to these specific memory nodes (the rest is unbound)",
                        parse_nodes_opt),
        OPT_END()
};

static const char * const bench_numa_usage[] = {
        "perf bench numa <options>",
        NULL
};

static const char * const numa_usage[] = {
        "perf bench numa mem [<options>]",
        NULL
};

static cpu_set_t bind_to_cpu(int target_cpu)
{
        cpu_set_t orig_mask, mask;
        int ret;

        ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
        BUG_ON(ret);

        CPU_ZERO(&mask);

        if (target_cpu == -1) {
                int cpu;

                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
                        CPU_SET(cpu, &mask);
        } else {
                BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus);
                CPU_SET(target_cpu, &mask);
        }

        ret = sched_setaffinity(0, sizeof(mask), &mask);
        BUG_ON(ret);

        return orig_mask;
}

static cpu_set_t bind_to_node(int target_node)
{
        int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes;
        cpu_set_t orig_mask, mask;
        int cpu;
        int ret;

        BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus);
        BUG_ON(!cpus_per_node);

        ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
        BUG_ON(ret);

        CPU_ZERO(&mask);

        if (target_node == -1) {
                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
                        CPU_SET(cpu, &mask);
        } else {
                int cpu_start = (target_node + 0) * cpus_per_node;
                int cpu_stop  = (target_node + 1) * cpus_per_node;

                BUG_ON(cpu_stop > g->p.nr_cpus);

                for (cpu = cpu_start; cpu < cpu_stop; cpu++)
                        CPU_SET(cpu, &mask);
        }

        ret = sched_setaffinity(0, sizeof(mask), &mask);
        BUG_ON(ret);

        return orig_mask;
}

static void bind_to_cpumask(cpu_set_t mask)
{
        int ret;

        ret = sched_setaffinity(0, sizeof(mask), &mask);
        BUG_ON(ret);
}

static void mempol_restore(void)
{
        int ret;

        ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1);

        BUG_ON(ret);
}

static void bind_to_memnode(int node)
{
        unsigned long nodemask;
        int ret;

        if (node == -1)
                return;

        BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask)*8);
        nodemask = 1L << node;

        ret = set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask)*8);
        dprintf("binding to node %d, mask: %016lx => %d\n", node, nodemask, ret);

        BUG_ON(ret);
}

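
/*
 * Illustrative example: "perf bench numa mem -p 2 -t 4 -P 512 -s 10"
 * runs 2 processes of 4 threads each, against 512 MB of process
 * memory, for at most 10 seconds.
 */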
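/*
 * 2MB - the typical x86 transparent huge page size. alloc_data() below
 * over-allocates by this much and then rounds the buffer up to a 2MB
 * boundary, so that the THP madvise() hints can cover the whole
 * working set.
 */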
#define HPSIZE (2*1024*1024)

#define set_taskname(fmt...)                            \
do {                                                    \
        char name[20];                                  \
                                                        \
        snprintf(name, 20, fmt);                        \
        prctl(PR_SET_NAME, name);                       \
} while (0)

static u8 *alloc_data(ssize_t bytes0, int map_flags,
                      int init_zero, int init_cpu0, int thp, int init_random)
{
        cpu_set_t orig_mask;
        ssize_t bytes;
        u8 *buf;
        int ret;

        if (!bytes0)
                return NULL;

        /* Allocate and initialize all memory on CPU#0: */
        if (init_cpu0) {
                orig_mask = bind_to_node(0);
                bind_to_memnode(0);
        }

        bytes = bytes0 + HPSIZE;

        buf = (void *)mmap(0, bytes, PROT_READ|PROT_WRITE, MAP_ANON|map_flags, -1, 0);
        BUG_ON(buf == (void *)-1);

        if (map_flags == MAP_PRIVATE) {
                if (thp > 0) {
                        ret = madvise(buf, bytes, MADV_HUGEPAGE);
                        if (ret && !g->print_once) {
                                g->print_once = 1;
                                printf("WARNING: Could not enable THP - do: 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled'\n");
                        }
                }
                if (thp < 0) {
                        ret = madvise(buf, bytes, MADV_NOHUGEPAGE);
                        if (ret && !g->print_once) {
                                g->print_once = 1;
                                printf("WARNING: Could not disable THP: run a CONFIG_TRANSPARENT_HUGEPAGE kernel?\n");
                        }
                }
        }

        if (init_zero) {
                bzero(buf, bytes);
        } else {
                /* Initialize random contents, different in each word: */
                if (init_random) {
                        u64 *wbuf = (void *)buf;
                        long off = rand();
                        long i;

                        for (i = 0; i < bytes/8; i++)
                                wbuf[i] = i + off;
                }
        }

        /* Align to 2MB boundary: */
        buf = (void *)(((unsigned long)buf + HPSIZE-1) & ~(HPSIZE-1));

        /* Restore affinity: */
        if (init_cpu0) {
                bind_to_cpumask(orig_mask);
                mempol_restore();
        }

        return buf;
}

static void free_data(void *data, ssize_t bytes)
{
        int ret;

        if (!data)
                return;

        ret = munmap(data, bytes);
        BUG_ON(ret);
}

/*
 * Create a shared memory buffer that can be shared between processes, zeroed:
 */
static void * zalloc_shared_data(ssize_t bytes)
{
        return alloc_data(bytes, MAP_SHARED, 1, g->p.init_cpu0,  g->p.thp, g->p.init_random);
}

/*
 * Create a shared memory buffer that can be shared between processes:
 */
static void * setup_shared_data(ssize_t bytes)
{
        return alloc_data(bytes, MAP_SHARED, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
}

/*
 * Allocate process-local memory - this will either be shared between
 * threads of this process, or only be accessed by this thread:
 */
static void * setup_private_data(ssize_t bytes)
{
        return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
}

/*
 * Initialize a process-shared (global) mutex:
 */
static void init_global_mutex(pthread_mutex_t *mutex)
{
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
        pthread_mutex_init(mutex, &attr);
}
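
/*
 * Note that these mutexes live in the MAP_SHARED global_info area set up
 * in init(), so PTHREAD_PROCESS_SHARED is what makes the locking work
 * across the forked worker processes, not just between the threads of
 * one process.
 */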

static int parse_cpu_list(const char *arg)
{
        p0.cpu_list_str = strdup(arg);

        dprintf("got CPU list: {%s}\n", p0.cpu_list_str);

        return 0;
}

static int parse_setup_cpu_list(void)
{
        struct thread_data *td;
        char *str0, *str;
        int t;

        if (!g->p.cpu_list_str)
                return 0;

        dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);

        str0 = str = strdup(g->p.cpu_list_str);
        t = 0;

        BUG_ON(!str);

        tprintf("# binding tasks to CPUs:\n");
        tprintf("#  ");

        while (true) {
                int bind_cpu, bind_cpu_0, bind_cpu_1;
                char *tok, *tok_end, *tok_step, *tok_len, *tok_mul;
                int bind_len;
                int step;
                int mul;

                tok = strsep(&str, ",");
                if (!tok)
                        break;

                tok_end = strstr(tok, "-");

                dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
                if (!tok_end) {
                        /* Single CPU specified: */
                        bind_cpu_0 = bind_cpu_1 = atol(tok);
                } else {
                        /* CPU range specified (for example: "5-11"): */
                        bind_cpu_0 = atol(tok);
                        bind_cpu_1 = atol(tok_end + 1);
                }

                step = 1;
                tok_step = strstr(tok, "#");
                if (tok_step) {
                        step = atol(tok_step + 1);
                        BUG_ON(step <= 0 || step >= g->p.nr_cpus);
                }

                /*
                 * Mask length.
                 * E.g.: "--cpus 8_4-16#4" means: '--cpus 8_4,12_4,16_4',
                 * where the _4 means the next 4 CPUs are allowed.
                 */
                bind_len = 1;
                tok_len = strstr(tok, "_");
                if (tok_len) {
                        bind_len = atol(tok_len + 1);
                        BUG_ON(bind_len <= 0 || bind_len > g->p.nr_cpus);
                }

                /* Multiplier shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */
                mul = 1;
                tok_mul = strstr(tok, "x");
                if (tok_mul) {
                        mul = atol(tok_mul + 1);
                        BUG_ON(mul <= 0);
                }

                dprintf("CPUs: %d_%d-%d#%dx%d\n", bind_cpu_0, bind_len, bind_cpu_1, step, mul);

                if (bind_cpu_0 >= g->p.nr_cpus || bind_cpu_1 >= g->p.nr_cpus) {
                        printf("\nTest not applicable, system has only %d CPUs.\n", g->p.nr_cpus);
                        return -1;
                }

                BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0);
                BUG_ON(bind_cpu_0 > bind_cpu_1);

                for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
                        int i;

                        for (i = 0; i < mul; i++) {
                                int cpu;

                                if (t >= g->p.nr_tasks) {
                                        printf("\n# NOTE: ignoring bind CPUs starting at CPU#%d\n #", bind_cpu);
                                        goto out;
                                }
                                td = g->threads + t;

                                if (t)
                                        tprintf(",");
                                if (bind_len > 1) {
                                        tprintf("%2d/%d", bind_cpu, bind_len);
                                } else {
                                        tprintf("%2d", bind_cpu);
                                }

                                CPU_ZERO(&td->bind_cpumask);
                                for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
                                        BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus);
                                        CPU_SET(cpu, &td->bind_cpumask);
                                }
                                t++;
                        }
                }
        }
out:

        tprintf("\n");

        if (t < g->p.nr_tasks)
                printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t);

        free(str0);
        return 0;
}
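
/*
 * A few illustrative examples of the -C/--cpus syntax parsed above:
 *
 *   --cpus 0,4,8   bind task #0 to CPU 0, task #1 to CPU 4, task #2 to CPU 8
 *   --cpus 0-7#2   bind tasks to CPUs 0, 2, 4 and 6 ('#' sets the step)
 *   --cpus 8_4     allow task #0 to run on CPUs 8-11 ('_' sets the mask length)
 *   --cpus 0x4     bind the first 4 tasks to CPU 0 ('x' is the multiplier)
 */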

static int parse_cpus_opt(const struct option *opt __maybe_unused,
                          const char *arg, int unset __maybe_unused)
{
        if (!arg)
                return -1;

        return parse_cpu_list(arg);
}

static int parse_node_list(const char *arg)
{
        p0.node_list_str = strdup(arg);

        dprintf("got NODE list: {%s}\n", p0.node_list_str);

        return 0;
}

static int parse_setup_node_list(void)
{
        struct thread_data *td;
        char *str0, *str;
        int t;

        if (!g->p.node_list_str)
                return 0;

        dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);

        str0 = str = strdup(g->p.node_list_str);
        t = 0;

        BUG_ON(!str);

        tprintf("# binding tasks to NODEs:\n");
        tprintf("# ");

        while (true) {
                int bind_node, bind_node_0, bind_node_1;
                char *tok, *tok_end, *tok_step, *tok_mul;
                int step;
                int mul;

                tok = strsep(&str, ",");
                if (!tok)
                        break;

                tok_end = strstr(tok, "-");

                dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
                if (!tok_end) {
                        /* Single NODE specified: */
                        bind_node_0 = bind_node_1 = atol(tok);
                } else {
                        /* NODE range specified (for example: "5-11"): */
                        bind_node_0 = atol(tok);
                        bind_node_1 = atol(tok_end + 1);
                }

                step = 1;
                tok_step = strstr(tok, "#");
                if (tok_step) {
                        step = atol(tok_step + 1);
                        BUG_ON(step <= 0 || step >= g->p.nr_nodes);
                }

                /* Multiplier shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */
                mul = 1;
                tok_mul = strstr(tok, "x");
                if (tok_mul) {
                        mul = atol(tok_mul + 1);
                        BUG_ON(mul <= 0);
                }

                dprintf("NODEs: %d-%d #%d\n", bind_node_0, bind_node_1, step);

                if (bind_node_0 >= g->p.nr_nodes || bind_node_1 >= g->p.nr_nodes) {
                        printf("\nTest not applicable, system has only %d nodes.\n", g->p.nr_nodes);
                        return -1;
                }

                BUG_ON(bind_node_0 < 0 || bind_node_1 < 0);
                BUG_ON(bind_node_0 > bind_node_1);

                for (bind_node = bind_node_0; bind_node <= bind_node_1; bind_node += step) {
                        int i;

                        for (i = 0; i < mul; i++) {
                                if (t >= g->p.nr_tasks) {
                                        printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
                                        goto out;
                                }
                                td = g->threads + t;

                                if (!t)
                                        tprintf(" %2d", bind_node);
                                else
                                        tprintf(",%2d", bind_node);

                                td->bind_node = bind_node;
                                t++;
                        }
                }
        }
out:

        tprintf("\n");

        if (t < g->p.nr_tasks)
                printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t);

        free(str0);
        return 0;
}

static int parse_nodes_opt(const struct option *opt __maybe_unused,
                          const char *arg, int unset __maybe_unused)
{
        if (!arg)
                return -1;

        return parse_node_list(arg);
}

#define BIT(x) (1ul << x)

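/*
 * 32-bit Galois LFSR, stepped to the right: whenever the shifted-out
 * low bit is 1, the tap mask gets XOR-ed in. This gives the
 * -r/--data_rand_walk mode below a cheap, deterministic pseudo-random
 * sequence.
 */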
static inline uint32_t lfsr_32(uint32_t lfsr)
{
        const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);
        return (lfsr>>1) ^ ((0x0u - (lfsr & 0x1u)) & taps);
}

/*
 * Make sure there's a real data dependency on RAM (when read
 * accesses are enabled), so that the compiler, the CPU and the
 * kernel (KSM, zero page, etc.) cannot optimize the RAM
 * accesses away:
 */
static inline u64 access_data(u64 *data, u64 val)
{
        if (g->p.data_reads)
                val += *data;
        if (g->p.data_writes)
                *data = val + 1;
        return val;
}

/*
 * The worker process does two types of work, a forwards going
 * loop and a backwards going loop.
 *
 * We do this so that on multiprocessor systems we do not create
 * a 'train' of processing, with highly synchronized processes,
 * skewing the whole benchmark.
 */
static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val)
{
        long words = bytes/sizeof(u64);
        u64 *data = (void *)__data;
        long chunk_0, chunk_1;
        u64 *d0, *d, *d1;
        long off;
        long i;

        BUG_ON(!data && words);
        BUG_ON(data && !words);

        if (!data)
                return val;

        /* Very simple memset() work variant: */
        if (g->p.data_zero_memset && !g->p.data_rand_walk) {
                bzero(data, bytes);
                return val;
        }

        /* Spread out by PID/TID nr and by loop nr: */
        chunk_0 = words/nr_max;
        chunk_1 = words/g->p.nr_loops;
        off = nr*chunk_0 + loop*chunk_1;

        while (off >= words)
                off -= words;

        if (g->p.data_rand_walk) {
                u32 lfsr = nr + loop + val;
                int j;

                for (i = 0; i < words/1024; i++) {
                        long start, end;

                        lfsr = lfsr_32(lfsr);

                        start = lfsr % words;
                        end = min(start + 1024, words-1);

                        if (g->p.data_zero_memset) {
                                bzero(data + start, (end-start) * sizeof(u64));
                        } else {
                                for (j = start; j < end; j++)
                                        val = access_data(data + j, val);
                        }
                }
        } else if (!g->p.data_backwards || (nr + loop) & 1) {

                d0 = data + off;
                d  = data + off + 1;
                d1 = data + words;

                /* Process data forwards: */
                for (;;) {
                        if (unlikely(d >= d1))
                                d = data;
                        if (unlikely(d == d0))
                                break;

                        val = access_data(d, val);

                        d++;
                }
        } else {
                /* Process data backwards: */

                d0 = data + off;
                d  = data + off - 1;
                d1 = data + words;

                for (;;) {
                        if (unlikely(d < data))
                                d = data + words-1;
                        if (unlikely(d == d0))
                                break;

                        val = access_data(d, val);

                        d--;
                }
        }

        return val;
}

static void update_curr_cpu(int task_nr, unsigned long bytes_worked)
{
        unsigned int cpu;

        cpu = sched_getcpu();

        g->threads[task_nr].curr_cpu = cpu;
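        /*
         * prctl() option 0 is not a valid option, so this call always
         * fails - presumably it is only here as a cheap, trace-visible
         * system call that publishes the amount of work done:
         */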
        prctl(0, bytes_worked);
}

#define MAX_NR_NODES    64

/*
 * Count the number of nodes a process's threads
 * are spread out on.
 *
 * A count of 1 means that the process is compressed
 * to a single node. A count of g->p.nr_nodes means it's
 * spread out on the whole system.
 */
static int count_process_nodes(int process_nr)
{
        char node_present[MAX_NR_NODES] = { 0, };
        int nodes;
        int n, t;

        for (t = 0; t < g->p.nr_threads; t++) {
                struct thread_data *td;
                int task_nr;
                int node;

                task_nr = process_nr*g->p.nr_threads + t;
                td = g->threads + task_nr;

                node = numa_node_of_cpu(td->curr_cpu);
                if (node < 0) /* curr_cpu was likely still -1 */
                        return 0;

                node_present[node] = 1;
        }

        nodes = 0;

        for (n = 0; n < MAX_NR_NODES; n++)
                nodes += node_present[n];

        return nodes;
}

/*
 * Count the number of distinct processes that have at least
 * one thread running on a given node.
 *
 * A count of 1 means that the node contains only a single
 * process. If all nodes on the system contain at most one
 * process then we are well-converged.
 */
static int count_node_processes(int node)
{
        int processes = 0;
        int t, p;

        for (p = 0; p < g->p.nr_proc; p++) {
                for (t = 0; t < g->p.nr_threads; t++) {
                        struct thread_data *td;
                        int task_nr;
                        int n;

                        task_nr = p*g->p.nr_threads + t;
                        td = g->threads + task_nr;

                        n = numa_node_of_cpu(td->curr_cpu);
                        if (n == node) {
                                processes++;
                                break;
                        }
                }
        }

        return processes;
}

static void calc_convergence_compression(int *strong)
{
        unsigned int nodes_min, nodes_max;
        int p;

        nodes_min = -1;
        nodes_max =  0;

        for (p = 0; p < g->p.nr_proc; p++) {
                unsigned int nodes = count_process_nodes(p);

                if (!nodes) {
                        *strong = 0;
                        return;
                }

                nodes_min = min(nodes, nodes_min);
                nodes_max = max(nodes, nodes_max);
        }

        /* Strong convergence: all threads compress on a single node: */
        if (nodes_min == 1 && nodes_max == 1) {
                *strong = 1;
        } else {
                *strong = 0;
                tprintf(" {%d-%d}", nodes_min, nodes_max);
        }
}

static void calc_convergence(double runtime_ns_max, double *convergence)
{
        unsigned int loops_done_min, loops_done_max;
        int process_groups;
        int nodes[MAX_NR_NODES];
        int distance;
        int nr_min;
        int nr_max;
        int strong;
        int sum;
        int nr;
        int node;
        int cpu;
        int t;

        if (!g->p.show_convergence && !g->p.measure_convergence)
                return;

        for (node = 0; node < g->p.nr_nodes; node++)
                nodes[node] = 0;

        loops_done_min = -1;
        loops_done_max = 0;

        for (t = 0; t < g->p.nr_tasks; t++) {
                struct thread_data *td = g->threads + t;
                unsigned int loops_done;

                cpu = td->curr_cpu;

                /* Not all threads have written it yet: */
                if (cpu < 0)
                        continue;

                node = numa_node_of_cpu(cpu);

                nodes[node]++;

                loops_done = td->loops_done;
                loops_done_min = min(loops_done, loops_done_min);
                loops_done_max = max(loops_done, loops_done_max);
        }

        nr_max = 0;
        nr_min = g->p.nr_tasks;
        sum = 0;

        for (node = 0; node < g->p.nr_nodes; node++) {
                nr = nodes[node];
                nr_min = min(nr, nr_min);
                nr_max = max(nr, nr_max);
                sum += nr;
        }
        BUG_ON(nr_min > nr_max);

        BUG_ON(sum > g->p.nr_tasks);

        if (0 && (sum < g->p.nr_tasks))
                return;

        /*
         * Count the number of distinct process groups present
         * on nodes - when we are converged this will decrease
         * to g->p.nr_proc:
         */
        process_groups = 0;

        for (node = 0; node < g->p.nr_nodes; node++) {
                int processes = count_node_processes(node);

                nr = nodes[node];
                tprintf(" %2d/%-2d", nr, processes);

                process_groups += processes;
        }

        distance = nr_max - nr_min;

        tprintf(" [%2d/%-2d]", distance, process_groups);

        tprintf(" l:%3d-%-3d (%3d)",
                loops_done_min, loops_done_max, loops_done_max-loops_done_min);

        if (loops_done_min && loops_done_max) {
                double skew = 1.0 - (double)loops_done_min/loops_done_max;

                tprintf(" [%4.1f%%]", skew * 100.0);
        }

        calc_convergence_compression(&strong);

        if (strong && process_groups == g->p.nr_proc) {
                if (!*convergence) {
                        *convergence = runtime_ns_max;
                        tprintf(" (%6.1fs converged)\n", *convergence / NSEC_PER_SEC);
                        if (g->p.measure_convergence) {
                                g->all_converged = true;
                                g->stop_work = true;
                        }
                }
        } else {
                if (*convergence) {
                        tprintf(" (%6.1fs de-converged)", runtime_ns_max / NSEC_PER_SEC);
                        *convergence = 0;
                }
                tprintf("\n");
        }
}

static void show_summary(double runtime_ns_max, int l, double *convergence)
{
        tprintf("\r #  %5.1f%%  [%.1f mins]",
                (double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max / NSEC_PER_SEC / 60.0);

        calc_convergence(runtime_ns_max, convergence);

        if (g->p.show_details >= 0)
                fflush(stdout);
}

static void *worker_thread(void *__tdata)
{
        struct thread_data *td = __tdata;
        struct timeval start0, start, stop, diff;
        int process_nr = td->process_nr;
        int thread_nr = td->thread_nr;
        unsigned long last_perturbance;
        int task_nr = td->task_nr;
        int details = g->p.show_details;
        int first_task, last_task;
        double convergence = 0;
        u64 val = td->val;
        double runtime_ns_max;
        u8 *global_data;
        u8 *process_data;
        u8 *thread_data;
        u64 bytes_done;
        long work_done;
        u32 l;
        struct rusage rusage;

        bind_to_cpumask(td->bind_cpumask);
        bind_to_memnode(td->bind_node);

        set_taskname("thread %d/%d", process_nr, thread_nr);

        global_data = g->data;
        process_data = td->process_data;
        thread_data = setup_private_data(g->p.bytes_thread);

        bytes_done = 0;

        last_task = 0;
        if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1)
                last_task = 1;

        first_task = 0;
        if (process_nr == 0 && thread_nr == 0)
                first_task = 1;

        if (details >= 2) {
                printf("#  thread %2d / %2d global mem: %p, process mem: %p, thread mem: %p\n",
                        process_nr, thread_nr, global_data, process_data, thread_data);
        }

        if (g->p.serialize_startup) {
                pthread_mutex_lock(&g->startup_mutex);
                g->nr_tasks_started++;
                pthread_mutex_unlock(&g->startup_mutex);

                /* Here we will wait for the main process to start us all at once: */
                pthread_mutex_lock(&g->start_work_mutex);
                g->nr_tasks_working++;

                /* The last one wakes the main process: */
                if (g->nr_tasks_working == g->p.nr_tasks)
                        pthread_mutex_unlock(&g->startup_done_mutex);

                pthread_mutex_unlock(&g->start_work_mutex);
        }

        gettimeofday(&start0, NULL);

        start = stop = start0;
        last_perturbance = start.tv_sec;

        for (l = 0; l < g->p.nr_loops; l++) {
                start = stop;

                if (g->stop_work)
                        break;

                val += do_work(global_data,  g->p.bytes_global,  process_nr, g->p.nr_proc,      l, val);
                val += do_work(process_data, g->p.bytes_process, thread_nr,  g->p.nr_threads,   l, val);
                val += do_work(thread_data,  g->p.bytes_thread,  0,          1,         l, val);

                if (g->p.sleep_usecs) {
                        pthread_mutex_lock(td->process_lock);
                        usleep(g->p.sleep_usecs);
                        pthread_mutex_unlock(td->process_lock);
                }
                /*
                 * Amount of work to be done under a process-global lock:
                 */
                if (g->p.bytes_process_locked) {
                        pthread_mutex_lock(td->process_lock);
                        val += do_work(process_data, g->p.bytes_process_locked, thread_nr,  g->p.nr_threads,    l, val);
                        pthread_mutex_unlock(td->process_lock);
                }

                work_done = g->p.bytes_global + g->p.bytes_process +
                            g->p.bytes_process_locked + g->p.bytes_thread;

                update_curr_cpu(task_nr, work_done);
                bytes_done += work_done;

                if (details < 0 && !g->p.perturb_secs && !g->p.measure_convergence && !g->p.nr_secs)
                        continue;

                td->loops_done = l;

                gettimeofday(&stop, NULL);

                /* Check whether our max runtime timed out: */
                if (g->p.nr_secs) {
                        timersub(&stop, &start0, &diff);
                        if ((u32)diff.tv_sec >= g->p.nr_secs) {
                                g->stop_work = true;
                                break;
                        }
                }

                /* Update the summary at most once per second: */
                if (start.tv_sec == stop.tv_sec)
                        continue;

                /*
                 * Perturb the first task's equilibrium every g->p.perturb_secs seconds,
                 * by migrating to CPU#0:
                 */
                if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
                        cpu_set_t orig_mask;
                        int target_cpu;
                        int this_cpu;

                        last_perturbance = stop.tv_sec;

                        /*
                         * Depending on where we are running, move into
                         * the other half of the system, to create some
                         * real disturbance:
                         */
                        this_cpu = g->threads[task_nr].curr_cpu;
                        if (this_cpu < g->p.nr_cpus/2)
                                target_cpu = g->p.nr_cpus-1;
                        else
                                target_cpu = 0;

                        orig_mask = bind_to_cpu(target_cpu);

                        /* Here we are running on the target CPU already */
                        if (details >= 1)
                                printf(" (injecting perturbance, moved to CPU#%d)\n", target_cpu);

                        bind_to_cpumask(orig_mask);
                }

                if (details >= 3) {
                        timersub(&stop, &start, &diff);
                        runtime_ns_max = diff.tv_sec * NSEC_PER_SEC;
                        runtime_ns_max += diff.tv_usec * NSEC_PER_USEC;

                        if (details >= 0) {
                                printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016"PRIx64"]\n",
                                        process_nr, thread_nr, runtime_ns_max / bytes_done, val);
                        }
                        fflush(stdout);
                }
                if (!last_task)
                        continue;

                timersub(&stop, &start0, &diff);
                runtime_ns_max = diff.tv_sec * NSEC_PER_SEC;
                runtime_ns_max += diff.tv_usec * NSEC_PER_USEC;

                show_summary(runtime_ns_max, l, &convergence);
        }

        gettimeofday(&stop, NULL);
        timersub(&stop, &start0, &diff);
        td->runtime_ns = diff.tv_sec * NSEC_PER_SEC;
        td->runtime_ns += diff.tv_usec * NSEC_PER_USEC;
        td->speed_gbs = bytes_done / (td->runtime_ns / NSEC_PER_SEC) / 1e9;

        getrusage(RUSAGE_THREAD, &rusage);
        td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC;
        td->system_time_ns += rusage.ru_stime.tv_usec * NSEC_PER_USEC;
        td->user_time_ns = rusage.ru_utime.tv_sec * NSEC_PER_SEC;
        td->user_time_ns += rusage.ru_utime.tv_usec * NSEC_PER_USEC;

        free_data(thread_data, g->p.bytes_thread);

        pthread_mutex_lock(&g->stop_work_mutex);
        g->bytes_done += bytes_done;
        pthread_mutex_unlock(&g->stop_work_mutex);

        return NULL;
}

/*
 * A worker process starts a couple of threads:
 */
static void worker_process(int process_nr)
{
        pthread_mutex_t process_lock;
        struct thread_data *td;
        pthread_t *pthreads;
        u8 *process_data;
        int task_nr;
        int ret;
        int t;

        pthread_mutex_init(&process_lock, NULL);
        set_taskname("process %d", process_nr);

        /*
         * Pick up the memory policy and the CPU binding of our first thread,
         * so that we initialize memory accordingly:
         */
        task_nr = process_nr*g->p.nr_threads;
        td = g->threads + task_nr;

        bind_to_memnode(td->bind_node);
        bind_to_cpumask(td->bind_cpumask);

        pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t));
        process_data = setup_private_data(g->p.bytes_process);

        if (g->p.show_details >= 3) {
                printf(" # process %2d global mem: %p, process mem: %p\n",
                        process_nr, g->data, process_data);
        }

        for (t = 0; t < g->p.nr_threads; t++) {
                task_nr = process_nr*g->p.nr_threads + t;
                td = g->threads + task_nr;

                td->process_data = process_data;
                td->process_nr   = process_nr;
                td->thread_nr    = t;
                td->task_nr      = task_nr;
                td->val          = rand();
                td->curr_cpu     = -1;
                td->process_lock = &process_lock;

                ret = pthread_create(pthreads + t, NULL, worker_thread, td);
                BUG_ON(ret);
        }

        for (t = 0; t < g->p.nr_threads; t++) {
                ret = pthread_join(pthreads[t], NULL);
                BUG_ON(ret);
        }

        free_data(process_data, g->p.bytes_process);
        free(pthreads);
}

static void print_summary(void)
{
        if (g->p.show_details < 0)
                return;

        printf("\n ###\n");
        printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
                g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus);
        printf(" #      %5dx %5ldMB global  shared mem operations\n",
                        g->p.nr_loops, g->p.bytes_global/1024/1024);
        printf(" #      %5dx %5ldMB process shared mem operations\n",
                        g->p.nr_loops, g->p.bytes_process/1024/1024);
        printf(" #      %5dx %5ldMB thread  local  mem operations\n",
                        g->p.nr_loops, g->p.bytes_thread/1024/1024);

        printf(" ###\n");

        printf("\n ###\n"); fflush(stdout);
}

static void init_thread_data(void)
{
        ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
        int t;

        g->threads = zalloc_shared_data(size);

        for (t = 0; t < g->p.nr_tasks; t++) {
                struct thread_data *td = g->threads + t;
                int cpu;

                /* Allow all nodes by default: */
                td->bind_node = -1;

                /* Allow all CPUs by default: */
                CPU_ZERO(&td->bind_cpumask);
                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
                        CPU_SET(cpu, &td->bind_cpumask);
        }
}

static void deinit_thread_data(void)
{
        ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;

        free_data(g->threads, size);
}

static int init(void)
{
        g = (void *)alloc_data(sizeof(*g), MAP_SHARED, 1, 0, 0 /* THP */, 0);

        /* Copy over options: */
        g->p = p0;

        g->p.nr_cpus = numa_num_configured_cpus();

        g->p.nr_nodes = numa_max_node() + 1;

        /* char array in count_process_nodes(): */
        BUG_ON(g->p.nr_nodes > MAX_NR_NODES || g->p.nr_nodes < 0);

        if (g->p.show_quiet && !g->p.show_details)
                g->p.show_details = -1;

        /* Some memory should be specified: */
        if (!g->p.mb_global_str && !g->p.mb_proc_str && !g->p.mb_thread_str)
                return -1;

        if (g->p.mb_global_str) {
                g->p.mb_global = atof(g->p.mb_global_str);
                BUG_ON(g->p.mb_global < 0);
        }

        if (g->p.mb_proc_str) {
                g->p.mb_proc = atof(g->p.mb_proc_str);
                BUG_ON(g->p.mb_proc < 0);
        }

        if (g->p.mb_proc_locked_str) {
                g->p.mb_proc_locked = atof(g->p.mb_proc_locked_str);
                BUG_ON(g->p.mb_proc_locked < 0);
                BUG_ON(g->p.mb_proc_locked > g->p.mb_proc);
        }

        if (g->p.mb_thread_str) {
                g->p.mb_thread = atof(g->p.mb_thread_str);
                BUG_ON(g->p.mb_thread < 0);
        }

        BUG_ON(g->p.nr_threads <= 0);
        BUG_ON(g->p.nr_proc <= 0);

        g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads;

        g->p.bytes_global               = g->p.mb_global        *1024L*1024L;
        g->p.bytes_process              = g->p.mb_proc          *1024L*1024L;
        g->p.bytes_process_locked       = g->p.mb_proc_locked   *1024L*1024L;
        g->p.bytes_thread               = g->p.mb_thread        *1024L*1024L;

        g->data = setup_shared_data(g->p.bytes_global);

        /* Startup serialization: */
        init_global_mutex(&g->start_work_mutex);
        init_global_mutex(&g->startup_mutex);
        init_global_mutex(&g->startup_done_mutex);
        init_global_mutex(&g->stop_work_mutex);

        init_thread_data();

        tprintf("#\n");
        if (parse_setup_cpu_list() || parse_setup_node_list())
                return -1;
        tprintf("#\n");

        print_summary();

        return 0;
}

static void deinit(void)
{
        free_data(g->data, g->p.bytes_global);
        g->data = NULL;

        deinit_thread_data();

        free_data(g, sizeof(*g));
        g = NULL;
}

/*
 * Print a short or long result, depending on the verbosity setting:
 */
static void print_res(const char *name, double val,
                      const char *txt_unit, const char *txt_short, const char *txt_long)
{
        if (!name)
                name = "main,";

        if (!g->p.show_quiet)
                printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short);
        else
                printf(" %14.3f %s\n", val, txt_long);
}

static int __bench_numa(const char *name)
{
        struct timeval start, stop, diff;
        u64 runtime_ns_min, runtime_ns_sum;
        pid_t *pids, pid, wpid;
        double delta_runtime;
        double runtime_avg;
        double runtime_sec_max;
        double runtime_sec_min;
        int wait_stat;
        double bytes;
        int i, t, p;

        if (init())
                return -1;

        pids = zalloc(g->p.nr_proc * sizeof(*pids));
        pid = -1;

        /* All threads try to acquire it; this way we can wait for them to start up: */
1453         pthread_mutex_lock(&g->start_work_mutex);
1454
1455         if (g->p.serialize_startup) {
1456                 tprintf(" #\n");
1457                 tprintf(" # Startup synchronization: ..."); fflush(stdout);
1458         }
1459
1460         gettimeofday(&start, NULL);
1461
1462         for (i = 0; i < g->p.nr_proc; i++) {
1463                 pid = fork();
1464                 dprintf(" # process %2d: PID %d\n", i, pid);
1465
1466                 BUG_ON(pid < 0);
1467                 if (!pid) {
1468                         /* Child process: */
1469                         worker_process(i);
1470
1471                         exit(0);
1472                 }
1473                 pids[i] = pid;
1474
1475         }
1476         /* Wait for all the threads to start up: */
1477         while (g->nr_tasks_started != g->p.nr_tasks)
1478                 usleep(USEC_PER_MSEC);
1479
1480         BUG_ON(g->nr_tasks_started != g->p.nr_tasks);
1481
1482         if (g->p.serialize_startup) {
1483                 double startup_sec;
1484
1485                 pthread_mutex_lock(&g->startup_done_mutex);
1486
1487                 /* This will start all threads: */
1488                 pthread_mutex_unlock(&g->start_work_mutex);
1489
1490                 /* This mutex is locked - the last started thread will wake us: */
1491                 pthread_mutex_lock(&g->startup_done_mutex);
1492
1493                 gettimeofday(&stop, NULL);
1494
1495                 timersub(&stop, &start, &diff);
1496
1497                 startup_sec = diff.tv_sec * NSEC_PER_SEC;
1498                 startup_sec += diff.tv_usec * NSEC_PER_USEC;
1499                 startup_sec /= NSEC_PER_SEC;
1500
1501                 tprintf(" threads initialized in %.6f seconds.\n", startup_sec);
1502                 tprintf(" #\n");
1503
1504                 start = stop;
1505                 pthread_mutex_unlock(&g->startup_done_mutex);
1506         } else {
1507                 gettimeofday(&start, NULL);
1508         }
1509
1510         /* Parent process: */
1511
1512
1513         for (i = 0; i < g->p.nr_proc; i++) {
1514                 wpid = waitpid(pids[i], &wait_stat, 0);
1515                 BUG_ON(wpid < 0);
1516                 BUG_ON(!WIFEXITED(wait_stat));
1517
1518         }
1519
1520         runtime_ns_sum = 0;
1521         runtime_ns_min = -1LL;
1522
1523         for (t = 0; t < g->p.nr_tasks; t++) {
1524                 u64 thread_runtime_ns = g->threads[t].runtime_ns;
1525
1526                 runtime_ns_sum += thread_runtime_ns;
1527                 runtime_ns_min = min(thread_runtime_ns, runtime_ns_min);
1528         }
1529
1530         gettimeofday(&stop, NULL);
1531         timersub(&stop, &start, &diff);
1532
1533         BUG_ON(bench_format != BENCH_FORMAT_DEFAULT);
1534
1535         tprintf("\n ###\n");
1536         tprintf("\n");
1537
1538         runtime_sec_max = diff.tv_sec * NSEC_PER_SEC;
1539         runtime_sec_max += diff.tv_usec * NSEC_PER_USEC;
1540         runtime_sec_max /= NSEC_PER_SEC;
1541
1542         runtime_sec_min = runtime_ns_min / NSEC_PER_SEC;
1543
1544         bytes = g->bytes_done;
1545         runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / NSEC_PER_SEC;
1546
1547         if (g->p.measure_convergence) {
1548                 print_res(name, runtime_sec_max,
1549                         "secs,", "NUMA-convergence-latency", "secs latency to NUMA-converge");
1550         }
1551
1552         print_res(name, runtime_sec_max,
1553                 "secs,", "runtime-max/thread",  "secs slowest (max) thread-runtime");
1554
1555         print_res(name, runtime_sec_min,
1556                 "secs,", "runtime-min/thread",  "secs fastest (min) thread-runtime");
1557
1558         print_res(name, runtime_avg,
1559                 "secs,", "runtime-avg/thread",  "secs average thread-runtime");
1560
	delta_runtime = (runtime_sec_max - runtime_sec_min) / 2.0;
	print_res(name, delta_runtime / runtime_sec_max * 100.0,
		"%,", "spread-runtime/thread",	"% difference between max/avg runtime");

	print_res(name, bytes / g->p.nr_tasks / 1e9,
		"GB,", "data/thread",		"GB data processed, per thread");

	print_res(name, bytes / 1e9,
		"GB,", "data-total",		"GB data processed, total");

	print_res(name, runtime_sec_max * NSEC_PER_SEC / (bytes / g->p.nr_tasks),
		"nsecs,", "runtime/byte/thread", "nsecs/byte/thread runtime");

	print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max,
		"GB/sec,", "thread-speed",	"GB/sec/thread speed");

	print_res(name, bytes / runtime_sec_max / 1e9,
		"GB/sec,", "total-speed",	"GB/sec total speed");

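	/* At the highest detail level, also print per-thread speed and CPU-time stats: */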
	if (g->p.show_details >= 2) {
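		/* "process%d:thread%d": 14 literal chars + two up-to-10-digit ints + NUL: */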
		char tname[14 + 2 * 10 + 1];
		struct thread_data *td;

		for (p = 0; p < g->p.nr_proc; p++) {
			for (t = 0; t < g->p.nr_threads; t++) {
				memset(tname, 0, sizeof(tname));
				td = g->threads + p * g->p.nr_threads + t;
				snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
				print_res(tname, td->speed_gbs,
					"GB/sec",	"thread-speed", "GB/sec/thread speed");
				print_res(tname, (double)td->system_time_ns / NSEC_PER_SEC,
					"secs", "thread-system-time", "system CPU time/thread");
				print_res(tname, (double)td->user_time_ns / NSEC_PER_SEC,
					"secs", "thread-user-time", "user CPU time/thread");
			}
		}
	}

	free(pids);

	deinit();

	return 0;
}

#define MAX_ARGS 50

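/* Count the argv entries of one test vector, up to the terminating NULL: */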
static int command_size(const char **argv)
{
	int size = 0;

	while (*argv) {
		size++;
		argv++;
	}

	BUG_ON(size >= MAX_ARGS);

	return size;
}

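/* Print the effective command line, then zero all parameters and set the nonzero defaults: */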
static void init_params(struct params *p, const char *name, int argc, const char **argv)
{
	int i;

	printf("\n # Running %s \"perf bench numa", name);

	for (i = 0; i < argc; i++)
		printf(" %s", argv[i]);

	printf("\"\n");

	memset(p, 0, sizeof(*p));

	/* Initialize nonzero defaults: */

	p->serialize_startup		= true;
	p->data_reads			= true;
	p->data_writes			= true;
	p->data_backwards		= true;
	p->data_rand_walk		= true;
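	/* -1 in the u32 nr_loops wraps to UINT32_MAX, i.e. practically no loop limit: */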
	p->nr_loops			= -1;
	p->init_random			= true;
	p->mb_global_str		= "1";
	p->nr_proc			= 1;
	p->nr_threads			= 1;
	p->nr_secs			= 5;
	p->run_all			= (argc == 1);
}

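/* Run a single test-suite entry: set up its parameters, parse them and execute the benchmark: */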
static int run_bench_numa(const char *name, const char **argv)
{
	int argc = command_size(argv);

	init_params(&p0, name, argc, argv);
	argc = parse_options(argc, argv, options, bench_numa_usage, 0);
	if (argc)
		goto err;

	if (__bench_numa(name))
		goto err;

	return 0;

err:
	return -1;
}

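/*
 * Shared option bundles for the test-suite below. The _NOTHP variants
 * re-specify "--thp -1", which (as the NOTHP naming suggests) runs the
 * same test with transparent hugepages disabled.
 */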
#define OPT_BW_RAM		"-s",  "20", "-zZq",    "--thp", " 1", "--no-data_rand_walk"
#define OPT_BW_RAM_NOTHP	OPT_BW_RAM,		"--thp", "-1"

#define OPT_CONV		"-s", "100", "-zZ0qcm", "--thp", " 1"
#define OPT_CONV_NOTHP		OPT_CONV,		"--thp", "-1"

#define OPT_BW			"-s",  "20", "-zZ0q",   "--thp", " 1"
#define OPT_BW_NOTHP		OPT_BW,			"--thp", "-1"

/*
 * The built-in test-suite executed by "perf bench numa -a".
 *
 * (A minimum of 4 nodes and 16 GB of RAM is recommended.)
 */
static const char *tests[][MAX_ARGS] = {
   /* Basic single-stream NUMA bandwidth measurements: */
   { "RAM-bw-local,",     "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
                          "-C",    "0", "-M",   "0", OPT_BW_RAM },
   { "RAM-bw-local-NOTHP,",
                          "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
                          "-C",    "0", "-M",   "0", OPT_BW_RAM_NOTHP },
   { "RAM-bw-remote,",    "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
                          "-C",    "0", "-M",   "1", OPT_BW_RAM },

   /* 2-stream NUMA bandwidth measurements: */
   { "RAM-bw-local-2x,",  "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
                           "-C", "0,2", "-M", "0x2", OPT_BW_RAM },
   { "RAM-bw-remote-2x,", "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
                           "-C", "0,2", "-M", "1x2", OPT_BW_RAM },

   /* Cross-stream NUMA bandwidth measurement: */
   { "RAM-bw-cross,",     "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
                           "-C", "0,8", "-M", "1,0", OPT_BW_RAM },

   /* Convergence latency measurements: */
   { " 1x3-convergence,", "mem",  "-p",  "1", "-t",  "3", "-P",  "512", OPT_CONV },
   { " 1x4-convergence,", "mem",  "-p",  "1", "-t",  "4", "-P",  "512", OPT_CONV },
   { " 1x6-convergence,", "mem",  "-p",  "1", "-t",  "6", "-P", "1020", OPT_CONV },
1706    { " 2x3-convergence,", "mem",  "-p",  "3", "-t",  "3", "-P", "1020", OPT_CONV },
   { " 3x3-convergence,", "mem",  "-p",  "3", "-t",  "3", "-P", "1020", OPT_CONV },
   { " 4x4-convergence,", "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_CONV },
   { " 4x4-convergence-NOTHP,",
                          "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_CONV_NOTHP },
   { " 4x6-convergence,", "mem",  "-p",  "4", "-t",  "6", "-P", "1020", OPT_CONV },
   { " 4x8-convergence,", "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_CONV },
   { " 8x4-convergence,", "mem",  "-p",  "8", "-t",  "4", "-P",  "512", OPT_CONV },
   { " 8x4-convergence-NOTHP,",
                          "mem",  "-p",  "8", "-t",  "4", "-P",  "512", OPT_CONV_NOTHP },
   { " 3x1-convergence,", "mem",  "-p",  "3", "-t",  "1", "-P",  "512", OPT_CONV },
   { " 4x1-convergence,", "mem",  "-p",  "4", "-t",  "1", "-P",  "512", OPT_CONV },
   { " 8x1-convergence,", "mem",  "-p",  "8", "-t",  "1", "-P",  "512", OPT_CONV },
   { "16x1-convergence,", "mem",  "-p", "16", "-t",  "1", "-P",  "256", OPT_CONV },
   { "32x1-convergence,", "mem",  "-p", "32", "-t",  "1", "-P",  "128", OPT_CONV },

   /* Various NUMA process/thread layout bandwidth measurements: */
   { " 2x1-bw-process,",  "mem",  "-p",  "2", "-t",  "1", "-P", "1024", OPT_BW },
   { " 3x1-bw-process,",  "mem",  "-p",  "3", "-t",  "1", "-P", "1024", OPT_BW },
   { " 4x1-bw-process,",  "mem",  "-p",  "4", "-t",  "1", "-P", "1024", OPT_BW },
   { " 8x1-bw-process,",  "mem",  "-p",  "8", "-t",  "1", "-P", " 512", OPT_BW },
   { " 8x1-bw-process-NOTHP,",
                          "mem",  "-p",  "8", "-t",  "1", "-P", " 512", OPT_BW_NOTHP },
   { "16x1-bw-process,",  "mem",  "-p", "16", "-t",  "1", "-P",  "256", OPT_BW },

   { " 4x1-bw-thread,",   "mem",  "-p",  "1", "-t",  "4", "-T",  "256", OPT_BW },
   { " 8x1-bw-thread,",   "mem",  "-p",  "1", "-t",  "8", "-T",  "256", OPT_BW },
   { "16x1-bw-thread,",   "mem",  "-p",  "1", "-t", "16", "-T",  "128", OPT_BW },
   { "32x1-bw-thread,",   "mem",  "-p",  "1", "-t", "32", "-T",   "64", OPT_BW },

   { " 2x3-bw-thread,",   "mem",  "-p",  "2", "-t",  "3", "-P",  "512", OPT_BW },
   { " 4x4-bw-thread,",   "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_BW },
   { " 4x6-bw-thread,",   "mem",  "-p",  "4", "-t",  "6", "-P",  "512", OPT_BW },
   { " 4x8-bw-thread,",   "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW },
   { " 4x8-bw-thread-NOTHP,",
                          "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW_NOTHP },
   { " 3x3-bw-thread,",   "mem",  "-p",  "3", "-t",  "3", "-P",  "512", OPT_BW },
   { " 5x5-bw-thread,",   "mem",  "-p",  "5", "-t",  "5", "-P",  "512", OPT_BW },

   { "2x16-bw-thread,",   "mem",  "-p",  "2", "-t", "16", "-P",  "512", OPT_BW },
   { "1x32-bw-thread,",   "mem",  "-p",  "1", "-t", "32", "-P", "2048", OPT_BW },

   { "numa02-bw,",        "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW },
   { "numa02-bw-NOTHP,",  "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW_NOTHP },
   { "numa01-bw-thread,", "mem",  "-p",  "2", "-t", "16", "-T",  "192", OPT_BW },
   { "numa01-bw-thread-NOTHP,",
                          "mem",  "-p",  "2", "-t", "16", "-T",  "192", OPT_BW_NOTHP },
};

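/* Run every entry of the built-in suite, after printing a banner that identifies the test machine: */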
static int bench_all(void)
{
	int nr = ARRAY_SIZE(tests);
	int ret;
	int i;

	ret = system("echo ' #'; echo ' # Running test on: '$(uname -a); echo ' #'");
	BUG_ON(ret < 0);

	for (i = 0; i < nr; i++)
		run_bench_numa(tests[i][0], tests[i] + 1);

	printf("\n");

	return 0;
}

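/*
 * Entry point of "perf bench numa": run the built-in suite when run_all
 * is set (see the "-a" note above the tests[] table), otherwise execute
 * a single parameterized benchmark run.
 */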
int bench_numa(int argc, const char **argv)
{
	init_params(&p0, "main,", argc, argv);
	argc = parse_options(argc, argv, options, bench_numa_usage, 0);
	if (argc)
		goto err;

	if (p0.run_all)
		return bench_all();

	if (__bench_numa(NULL))
		goto err;

	return 0;

err:
	usage_with_options(numa_usage, options);
	return -1;
}