2 * fio - the flexible io tester
4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33 #include <sys/types.h>
36 #include <semaphore.h>
39 #include <asm/unistd.h>
/* Upper bound on configured jobs; sizes the shared-memory thread_data array. */
41 #define MAX_JOBS (1024)
/*
 * Per-architecture ioprio_set/ioprio_get syscall numbers; at the time
 * glibc shipped no wrappers, so they are invoked via syscall(2) below.
 * NOTE(review): this listing elides lines — the arch #if that pairs with
 * the first number block (289/290, presumably __i386__) is not visible.
 */
44 * assume we don't have _get either, if _set isn't defined
46 #ifndef __NR_ioprio_set
49 #define __NR_ioprio_set 289
50 #define __NR_ioprio_get 290
51 #elif defined(__powerpc__) || defined(__powerpc64__)
52 #define __NR_ioprio_set 273
53 #define __NR_ioprio_get 274
54 #elif defined(__x86_64__)
55 #define __NR_ioprio_set 251
56 #define __NR_ioprio_get 252
57 #elif defined(__ia64__)
58 #define __NR_ioprio_set 1274
59 #define __NR_ioprio_get 1275
60 #elif defined(__alpha__)
61 #define __NR_ioprio_set 442
62 #define __NR_ioprio_get 443
63 #elif defined(__s390x__) || defined(__s390__)
64 #define __NR_ioprio_set 282
65 #define __NR_ioprio_get 283
/* Refuse to build on architectures with unknown syscall numbers. */
67 #error "Unsupported arch"
/*
 * Thin wrapper invoking the raw ioprio_set(2) syscall (no libc wrapper
 * existed when this was written).  Returns the syscall's result directly.
 */
72 static int ioprio_set(int which, int who, int ioprio)
74 return syscall(__NR_ioprio_set, which, who, ioprio);
/* ioprio "which" selector used with ioprio_set() in thread_main(). */
78 IOPRIO_WHO_PROCESS = 1,
/* I/O priority encoding: class lives in the top bits, level in the low byte. */
83 #define IOPRIO_CLASS_SHIFT 13
/* Built-in defaults, overridable from the command line / job file. */
88 #define DEF_TIMEOUT (30)
89 #define DEF_RATE_CYCLE (1000)
90 #define DEF_ODIRECT (1)
91 #define DEF_SEQUENTIAL (1)
92 #define DEF_WRITESTAT (0)
93 #define DEF_RAND_REPEAT (1)
/*
 * Round a malloc'ed pointer up to the next alignment boundary (MASK is
 * defined on a line elided from this listing); needed for O_DIRECT buffers.
 */
95 #define ALIGN(buf) (char *) (((unsigned long) (buf) + MASK) & ~(MASK))
/* Global options set by parse_options(), consumed by the worker paths. */
97 static int write_stat = DEF_WRITESTAT;
98 static int repeatable = DEF_RAND_REPEAT;
99 static int rate_quit = 1;
/* Number of jobs configured so far; index into the shared threads[] array. */
101 static int thread_number;
/* Job file name from -f, or NULL when jobs come from the command line. */
102 static char *ini_file;
/*
 * NOTE(review): the enclosing "struct thread_data {" line (and several
 * members, e.g. fd, bs, ddir, rate, pid, mutex, error, buf, stat_fd,
 * cpumask, ioprio, file_name, thread_number) fall on lines elided from
 * this listing; the fields below are the visible subset.
 */
/* Run control, written across processes via the shared memory segment. */
130 volatile int terminate;
131 volatile int runstate;
/* Job configuration. */
134 unsigned int sequential;
136 unsigned int odirect;
137 unsigned int delay_sleep;
138 unsigned int fsync_blocks;
139 unsigned int start_delay;
140 unsigned int timeout;
141 unsigned int use_aio;
/* Async I/O state (libaio context, iocb pool and per-slot busy flags). */
144 io_context_t *aio_ctx;
145 struct iocb *aio_iocbs;
146 unsigned int aio_depth;
147 unsigned int aio_cur_depth;
148 struct io_event *aio_events;
149 char *aio_iocbs_status;
/* Rate throttling / minimum-rate policing state. */
152 unsigned int ratemin;
153 unsigned int ratecycle;
154 unsigned long rate_usec_cycle;
155 long rate_pending_usleep;
156 unsigned long rate_blocks;
157 struct timeval lastrate;
/* Results. */
159 unsigned long max_latency; /* msec */
160 unsigned long min_latency; /* msec */
161 unsigned long runtime; /* sec */
162 unsigned long blocks;
163 unsigned long io_blocks;
164 unsigned long last_block;
/* Reentrant drand48 state for random offsets. */
166 struct drand48_data random_state;
/* Per-thread bandwidth-stat accumulators (see add_stat_sample()). */
171 unsigned long stat_time;
172 unsigned long stat_time_last;
173 unsigned long stat_blocks_last;
/* Job start time; basis for runtime and rate calculations. */
175 struct timeval start;
/* Shared-memory array of all jobs (attached via shmat in main()). */
178 static struct thread_data *threads;
/* Template job holding defaults that get_new_job() copies into new jobs. */
179 static struct thread_data def_thread;
/* Parent/child handshake semaphore used in run_threads()/thread_main(). */
181 static sem_t startup_sem;
/*
 * SIGINT handler: walks every configured thread.  The loop body is
 * mostly elided from this listing; presumably it sets td->terminate to
 * request shutdown — confirm against the full source.
 */
183 static void sig_handler(int sig)
187 for (i = 0; i < thread_number; i++) {
188 struct thread_data *td = &threads[i];
/*
 * Seed this job's private drand48 state.  A fixed seed (123) gives
 * repeatable runs; otherwise the seed is read from /dev/random (the
 * branch structure is partly elided from this listing).  Fails if the
 * read returns fewer than sizeof(seed) bytes.
 */
195 static int init_random_state(struct thread_data *td)
197 unsigned long seed = 123;
203 int fd = open("/dev/random", O_RDONLY);
210 if (read(fd, &seed, sizeof(seed)) < (int) sizeof(seed)) {
219 srand48_r(seed, &td->random_state);
/* Close the per-thread stat file if one was opened (body partly elided). */
223 static void shutdown_stat_file(struct thread_data *td)
225 if (td->stat_fd != -1) {
/*
 * Create/truncate "fio_thread<N>.stat" for this job and store the fd in
 * td->stat_fd; reports failure via perror.  NOTE(review): the buffer
 * declaration for 'n' is on an elided line.
 */
231 static int init_stat_file(struct thread_data *td)
238 sprintf(n, "fio_thread%d.stat", td->thread_number);
239 td->stat_fd = open(n, O_WRONLY | O_CREAT | O_TRUNC, 0644);
240 if (td->stat_fd == -1) {
241 perror("open stat file");
/*
 * Microseconds elapsed from *s to *e.  The sec>0/usec<0 branch borrows
 * a second into usec (borrow body elided in this listing) before
 * combining sec*1e6 + usec.
 */
249 static unsigned long utime_since(struct timeval *s, struct timeval *e)
253 sec = e->tv_sec - s->tv_sec;
254 usec = e->tv_usec - s->tv_usec;
255 if (sec > 0 && usec < 0) {
260 sec *= (double) 1000000;
/*
 * Milliseconds elapsed from *s to *e; same borrow handling as
 * utime_since(), then sec*1000 + usec/1000.
 */
265 static unsigned long mtime_since(struct timeval *s, struct timeval *e)
269 sec = e->tv_sec - s->tv_sec;
270 usec = e->tv_usec - s->tv_usec;
271 if (sec > 0 && usec < 0) {
276 sec *= (double) 1000;
277 usec /= (double) 1000;
/* Convert a timeval to an absolute millisecond count. */
282 static inline unsigned long msec_now(struct timeval *s)
284 return s->tv_sec * 1000 + s->tv_usec / 1000;
/*
 * Pick the next block number for this job: random via lrand48_r for
 * random workloads (the sequential branch is elided from this listing).
 * NOTE(review): the scaling divides by RAND_MAX+1.0 but lrand48_r
 * yields values in [0, 2^31); verify the distribution against the full
 * source before relying on it.
 */
287 static unsigned long get_next_offset(struct thread_data *td)
292 if (!td->sequential) {
293 lrand48_r(&td->random_state, &r);
294 b = (1+(double) (td->blocks-1) * r / (RAND_MAX+1.0));
/*
 * Record one completed-I/O latency sample: write "io_blocks, msec" to
 * the stat file, accumulate into the running counters, and every time
 * the accumulated window reaches 500 msec emit a "time, KiB/sec rate"
 * line and reset the window.  Return values of write() are ignored
 * (stats are best-effort).
 */
303 static void add_stat_sample(struct thread_data *td, unsigned long msec)
311 sprintf(sample, "%lu, %lu\n", td->io_blocks, msec);
312 write(td->stat_fd, sample, strlen(sample));
314 td->stat_time += msec;
315 td->stat_time_last += msec;
316 td->stat_blocks_last++;
/* Flush a bandwidth sample roughly twice a second. */
318 if (td->stat_time_last >= 500) {
319 unsigned long rate = td->stat_blocks_last * td->bs / (td->stat_time_last);
321 td->stat_time_last = 0;
322 td->stat_blocks_last = 0;
323 sprintf(sample, "%lu, %lu\n", td->stat_time, rate);
324 //sprintf(sample, "%lu, %lu\n", td->io_blocks, msec);
325 write(td->stat_fd, sample, strlen(sample));
/*
 * Sleep for 'usec' microseconds via nanosleep, retrying with the
 * remainder after interruption (the retry loop is partly elided from
 * this listing).
 */
330 static void usec_sleep(int usec)
332 struct timespec req = { .tv_sec = 0, .tv_nsec = usec * 1000 };
336 rem.tv_sec = rem.tv_nsec = 0;
337 nanosleep(&req, &rem);
/* Continue sleeping for whatever time remains after an interruption. */
341 req.tv_nsec = rem.tv_nsec;
/*
 * Pace I/O to the configured rate.  If this I/O finished faster than
 * one rate cycle, bank the surplus as pending sleep and actually sleep
 * once >= 100 msec has accumulated (avoids many tiny sleeps); if it
 * took longer, debit the overshoot from the pending-sleep balance.
 */
345 static void rate_throttle(struct thread_data *td, unsigned long time_spent)
350 if (time_spent < td->rate_usec_cycle) {
351 unsigned long s = td->rate_usec_cycle - time_spent;
353 td->rate_pending_usleep += s;
354 if (td->rate_pending_usleep >= 100000) {
355 usec_sleep(td->rate_pending_usleep);
356 td->rate_pending_usleep = 0;
359 long overtime = time_spent - td->rate_usec_cycle;
361 td->rate_pending_usleep -= overtime;
/*
 * Police the job's minimum-rate requirement.  After an initial settle
 * period, measure throughput over each 'ratecycle' msec window; if the
 * achieved KiB/sec falls below ratemin, complain and (per the visible
 * caller) signal failure.  Returns nonzero on rate violation.
 */
365 static int check_min_rate(struct thread_data *td, struct timeval *now)
367 unsigned long spent = mtime_since(&td->start, now);
371 * allow a 2 second settle period in the beginning
377 * if rate blocks is set, sample is running
379 if (td->rate_blocks) {
380 spent = mtime_since(&td->lastrate, now);
/* Window not complete yet — nothing to check. */
381 if (spent < td->ratecycle)
384 rate = ((td->io_blocks - td->rate_blocks) * td->bs) / spent;
385 if (rate < td->ratemin) {
386 printf("Client%d: min rate %d not met, got %ldKiB/sec\n", td->thread_number, td->ratemin, rate);
/* Start the next measurement window from here. */
393 td->rate_blocks = td->io_blocks;
394 memcpy(&td->lastrate, now, sizeof(*now));
/* True once the job has been running longer than its timeout (seconds). */
398 static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
400 if (mtime_since(&td->start, t) >= td->timeout * 1000)
/* Buffered writes need explicit fsync; O_DIRECT writes do not. */
406 #define should_fsync(td) ((td)->ddir == DDIR_WRITE && !(td)->odirect)
/*
 * Synchronous I/O engine: for each block, seek to the next offset,
 * optionally delay, time one read()/write() of td->bs bytes, then feed
 * the latency into throttling, min-rate policing, stats and the
 * min/max latency trackers.  Stops early on short transfer, rate
 * violation or timeout.  (Error/break paths are partly elided from
 * this listing.)
 */
408 static void do_sync_io(struct thread_data *td)
411 unsigned long blocks, msec, usec;
413 for (blocks = 0; blocks < td->blocks; blocks++) {
414 off_t offset = get_next_offset(td);
420 if (lseek(td->fd, offset, SEEK_SET) == -1) {
/* Optional per-I/O artificial delay (usec). */
426 usec_sleep(td->delay_sleep);
428 gettimeofday(&s, NULL);
430 if (td->ddir == DDIR_READ)
431 ret = read(td->fd, td->buf, td->bs);
433 ret = write(td->fd, td->buf, td->bs);
/* Short transfer or error ends the run. */
435 if (ret < (int) td->bs) {
/* Periodic fsync every fsync_blocks completed writes. */
443 if (should_fsync(td) && td->fsync_blocks &&
444 (td->io_blocks % td->fsync_blocks) == 0)
447 gettimeofday(&e, NULL);
449 usec = utime_since(&s, &e);
451 rate_throttle(td, usec);
453 if (check_min_rate(td, &e)) {
459 add_stat_sample(td, msec);
461 if (msec < td->min_latency)
462 td->min_latency = msec;
463 if (msec > td->max_latency)
464 td->max_latency = msec;
466 if (runtime_exceeded(td, &e))
/* Final flush for buffered write workloads. */
470 if (should_fsync(td))
/*
 * Return an iocb to the free pool: compute its slot index from its
 * address within the aio_iocbs array and clear the busy flag.
 */
474 static void aio_put_iocb(struct thread_data *td, struct iocb *iocb)
476 long offset = ((long) iocb - (long) td->aio_iocbs)/ sizeof(struct iocb);
478 td->aio_iocbs_status[offset] = 0;
/*
 * Grab a free iocb slot (linear scan of the busy flags), prep it as a
 * pread/pwrite of td->bs bytes at the next offset using slot i's region
 * of the shared buffer, and stash the submit time (msec) in the iocb's
 * callback/data field for later latency calculation (see iocb_time()).
 * Returns NULL handling, if any, is on elided lines.
 */
482 static struct iocb *aio_get_iocb(struct thread_data *td, struct timeval *t)
484 struct iocb *iocb = NULL;
487 for (i = 0; i < td->aio_depth; i++) {
488 if (td->aio_iocbs_status[i] == 0) {
489 td->aio_iocbs_status[i] = 1;
490 iocb = &td->aio_iocbs[i];
496 off_t off = get_next_offset(td);
/* Each slot gets its own td->bs sized chunk of the aligned buffer. */
497 char *p = td->buf + i * td->bs;
499 if (td->ddir == DDIR_READ)
500 io_prep_pread(iocb, td->fd, p, td->bs, off);
502 io_prep_pwrite(iocb, td->fd, p, td->bs, off);
/* Abuse the callback slot to carry the submit timestamp. */
504 io_set_callback(iocb, (io_callback_t) msec_now(t));
/*
 * Submit a single iocb; the visible EAGAIN branch suggests a retry path
 * (retry body elided from this listing).
 */
510 static int aio_submit(struct thread_data *td, struct iocb *iocb)
515 ret = io_submit(*td->aio_ctx, 1, &iocb);
521 else if (errno == EAGAIN)
/* Recover the submit timestamp stashed by aio_get_iocb(). */
530 #define iocb_time(iocb) ((unsigned long) (iocb)->data)
/*
 * Async (libaio) I/O engine: per block, get and submit an iocb, then
 * reap completions — non-blocking (zero timeout) while the queue is not
 * full, blocking for at least min_evts otherwise (the min_evts/timeout
 * selection lines are partly elided).  Each reaped event contributes a
 * latency sample computed from the timestamp stashed in the iocb.
 */
532 static void do_async_io(struct thread_data *td)
535 unsigned long blocks, msec, usec;
537 for (blocks = 0; blocks < td->blocks; blocks++) {
538 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
539 struct timespec *timeout;
540 int ret, i, min_evts = 0;
547 usec_sleep(td->delay_sleep);
549 gettimeofday(&s, NULL);
551 iocb = aio_get_iocb(td, &s);
553 ret = aio_submit(td, iocb);
/* Queue not yet full: poll for completions without blocking. */
561 if (td->aio_cur_depth < td->aio_depth) {
569 ret = io_getevents(*td->aio_ctx, min_evts, td->aio_cur_depth, td->aio_events, timeout);
576 gettimeofday(&e, NULL);
578 for (i = 0; i < ret; i++) {
579 struct io_event *ev = td->aio_events + i;
/* Latency = completion time minus the submit stamp in the iocb. */
585 msec = msec_now(&e) - iocb_time(iocb);
586 add_stat_sample(td, msec);
588 if (msec < td->min_latency)
589 td->min_latency = msec;
590 if (msec > td->max_latency)
591 td->max_latency = msec;
593 aio_put_iocb(td, iocb);
597 * the rate is batched for now, it should work for batches
598 * of completions except the very first one which may look
601 usec = utime_since(&s, &e);
603 rate_throttle(td, usec);
605 if (check_min_rate(td, &e)) {
610 if (runtime_exceeded(td, &e))
/*
 * Drain outstanding async I/O at shutdown: first reap anything already
 * complete (zero timeout), then cancel the still-busy slots, and
 * finally block until the remaining in-flight events are returned.
 */
615 static void cleanup_pending_aio(struct thread_data *td)
617 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
622 * get immediately available events, if any
624 r = io_getevents(*td->aio_ctx, 0, td->aio_cur_depth, td->aio_events, &ts);
626 for (i = 0; i < r; i++)
627 aio_put_iocb(td, &td->aio_iocbs[i]);
631 * now cancel remaining active events
633 for (i = 0; i < td->aio_depth; i++) {
/* Free slot — nothing to cancel. */
634 if (td->aio_iocbs_status[i] == 0)
637 r = io_cancel(*td->aio_ctx, &td->aio_iocbs[i], td->aio_events);
639 aio_put_iocb(td, &td->aio_iocbs[i]);
/* Wait for whatever could not be cancelled. */
642 if (td->aio_cur_depth)
643 io_getevents(*td->aio_ctx, td->aio_cur_depth, td->aio_cur_depth, td->aio_events, NULL);
/*
 * Tear down the job's aio state: drain pending I/O, destroy the
 * context, and free the per-job arrays (some free()s and NULL checks
 * are on elided lines).
 */
646 static void cleanup_aio(struct thread_data *td)
648 if (td->aio_cur_depth)
649 cleanup_pending_aio(td)
652 io_destroy(*td->aio_ctx);
658 free(td->aio_events);
659 if (td->aio_iocbs_status)
660 free(td->aio_iocbs_status);
/*
 * Allocate the aio context plus per-depth iocb/event/status arrays and
 * initialize the io queue.  NOTE(review): the malloc() results are not
 * checked on the visible lines — worth verifying against the full
 * source before depending on OOM behavior.
 */
663 static int init_aio(struct thread_data *td)
665 td->aio_ctx = malloc(sizeof(*td->aio_ctx));
667 if (io_queue_init(td->aio_depth, td->aio_ctx)) {
672 td->aio_iocbs = malloc(td->aio_depth * sizeof(struct iocb));
673 td->aio_events = malloc(td->aio_depth * sizeof(struct io_event));
674 td->aio_iocbs_status = malloc(td->aio_depth * sizeof(char));
/*
 * Per-job entry point, run in a forked child: attach the shared memory
 * segment, locate this job's thread_data by offset, pin CPU affinity,
 * open the target file, initialize aio/random/stat state, set the I/O
 * priority, handshake with the parent via startup_sem / td->mutex,
 * then run the I/O engine and record the runtime.  Error/exit paths
 * are partly elided from this listing.
 */
678 static void *thread_main(int shm_id, int offset, char *argv[])
680 struct thread_data *td;
682 void *data, *ptr = NULL;
688 data = shmat(shm_id, NULL, 0);
689 td = data + offset * sizeof(struct thread_data);
694 if (sched_setaffinity(td->pid, sizeof(td->cpumask), &td->cpumask) == -1) {
699 printf("Thread (%s) (pid=%u) (f=%s) (aio=%d) started\n", td->ddir == DDIR_READ ? "read" : "write", td->pid, td->file_name, td->aio_depth);
/* Rename the process so ps shows which job this is. */
702 sprintf(argv[0], "fio%d", offset);
708 if (td->ddir == DDIR_READ)
709 td->fd = open(td->file_name, flags | O_RDONLY);
711 td->fd = open(td->file_name, flags | O_WRONLY | O_CREAT | O_TRUNC, 0644);
718 if (td->use_aio && init_aio(td))
721 if (init_random_state(td))
723 if (init_stat_file(td))
/* Reads are bounded by the existing file size ... */
726 if (td->ddir == DDIR_READ) {
727 if (fstat(td->fd, &st) == -1) {
732 td->blocks = st.st_size / td->bs;
/* ... writes default to a 1 GiB working set. */
738 td->blocks = 1024 * 1024 * 1024 / td->bs;
741 if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
/* Tell the parent we are ready, then wait for the collective start. */
747 sem_post(&startup_sem);
748 sem_wait(&td->mutex);
750 gettimeofday(&td->start, NULL);
753 memcpy(&td->lastrate, &td->start, sizeof(td->start));
/* Over-allocate and align the I/O buffer (needed for O_DIRECT). */
756 ptr = malloc(td->bs + MASK);
757 td->buf = ALIGN(ptr);
/* aio needs one bs-sized region per queue slot. */
760 ptr = malloc(td->bs * td->aio_depth + MASK);
761 td->buf = ALIGN(ptr);
765 gettimeofday(&end, NULL);
766 td->runtime = mtime_since(&td->start, &end);
771 shutdown_stat_file(td);
/* Error path: still complete the handshake so the parent never hangs. */
779 sem_post(&startup_sem);
780 sem_wait(&td->mutex);
784 td->runstate = TD_EXITED;
789 static void free_shm(void)
/*
 * Print one job's summary line (error, priority class/level, max
 * latency, total I/O in MiB, bandwidth in KiB/sec).  Jobs that did no
 * I/O and had no error are skipped.
 */
794 static void show_thread_status(struct thread_data *td)
796 int prio, prio_class;
797 unsigned long bw = 0;
799 if (!td->io_blocks && !td->error)
803 bw = (td->io_blocks * td->bs) / td->runtime;
/* Decode the packed ioprio value set in add_job(). */
805 prio = td->ioprio & 0xff;
806 prio_class = td->ioprio >> IOPRIO_CLASS_SHIFT;
808 printf("thread%d (%s): err=%2d, prio=%1d/%1d maxl=%5lumsec, io=%6luMiB, bw=%6luKiB/sec\n", td->thread_number, td->ddir == DDIR_READ ? " read": "write", td->error, prio_class, prio, td->max_latency, td->io_blocks * td->bs >> 20, bw);
/*
 * Convert the KiB/sec rate option into a per-I/O microsecond budget
 * (rate_usec_cycle) used by rate_throttle().  Rejects a ratemin above
 * the nominal rate.
 */
811 static int setup_rate(struct thread_data *td)
813 int nr_reads_per_sec;
818 if (td->rate < td->ratemin) {
819 fprintf(stderr, "min rate larger than nominal rate\n");
823 nr_reads_per_sec = td->rate * 1024 / td->bs;
824 td->rate_usec_cycle = 1000000 / nr_reads_per_sec;
825 td->rate_pending_usleep = 0;
/*
 * Allocate the next slot in the shared threads[] array (NULL once
 * MAX_JOBS is hit — the visible guard's return is elided) and seed it
 * with def_thread's defaults.  The 'global' parameter's handling is on
 * elided lines.
 */
829 static struct thread_data *get_new_job(int global)
831 struct thread_data *td;
835 if (thread_number >= MAX_JOBS)
838 td = &threads[thread_number++];
839 memset(td, 0, sizeof(*td));
/* 1-based display number. */
841 td->thread_number = thread_number;
842 td->ddir = def_thread.ddir;
843 td->bs = def_thread.bs;
844 td->odirect = def_thread.odirect;
845 td->ratecycle = def_thread.ratecycle;
846 td->sequential = def_thread.sequential;
847 td->timeout = def_thread.timeout;
848 memcpy(&td->cpumask, &def_thread.cpumask, sizeof(td->cpumask));
/*
 * Discard a job slot handed out by get_new_job() by zeroing it
 * (thread_number decrement, if any, is on an elided line).
 */
853 static void put_job(struct thread_data *td)
855 memset(&threads[td->thread_number - 1], 0, sizeof(*td));
/*
 * Finalize a parsed job: record the file name, init the cross-process
 * mutex, pack the ioprio value and print the job banner.  The global
 * template (def_thread) is never "added".  NOTE(review): strcpy() into
 * td->file_name is unbounded on the visible line — the destination size
 * is declared on an elided line; verify against the full source.
 */
859 static int add_job(struct thread_data *td, const char *filename, int prioclass,
862 if (td == &def_thread)
865 strcpy(td->file_name, filename);
/* pshared=1: the semaphore lives in shared memory, used across fork. */
867 sem_init(&td->mutex, 1, 0);
868 td->min_latency = 10000000;
869 td->ioprio = (prioclass << IOPRIO_CLASS_SHIFT) | prio;
/* aio without an explicit depth gets a default (elided line). */
871 if (td->use_aio && !td->aio_depth)
877 printf("Client%d: file=%s, rw=%d, prio=%d, seq=%d, odir=%d, bs=%d, rate=%d, aio=%d, aio_depth=%d\n", td->thread_number, filename, td->ddir, td->ioprio, td->sequential, td->odirect, td->bs, td->rate, td->use_aio, td->aio_depth);
/*
 * Build a cpu_set_t from the cpumask option's bit pattern.
 * NOTE(review): 'cpumask' is passed BY VALUE, so CPU_SET() here mutates
 * a local copy that is discarded — the caller's mask is never updated.
 * Fixing this requires changing the signature to take a pointer (and
 * updating elided callers), so it is only flagged here.
 */
881 static void fill_cpu_mask(cpu_set_t cpumask, int cpu)
887 for (i = 0; i < sizeof(int) * 8; i++) {
889 CPU_SET(i, &cpumask);
/*
 * Copy an option value out of a command-line token, stopping at ',',
 * '}' or end of string.  Initialization of 'i', the skip past the '='
 * and the terminating NUL are on elided lines.
 */
893 static void fill_option(const char *input, char *output)
898 while (input[i] != ',' && input[i] != '}' && input[i] != '\0') {
899 output[i] = input[i];
/*
 * Parse job descriptions given directly on the command line (argv[index]
 * onward).  For each argument: allocate a job, then strstr() for each
 * known key=value option, extract the value with fill_option() and
 * convert with strtoul.  The pointer advance past each "key=" prefix
 * and several flag-setting bodies are on elided lines.  Each completed
 * job is registered via add_job().
 */
914 static void parse_jobs_cmd(int argc, char *argv[], int index)
916 struct thread_data *td;
917 unsigned int prio, prioclass, cpu;
918 char *string, *filename, *p, *c;
/* Scratch buffers reused for every option value. */
921 string = malloc(256);
922 filename = malloc(256);
924 for (i = index; i < argc; i++) {
940 c = strstr(p, "rw=");
944 td->ddir = DDIR_READ;
946 td->ddir = DDIR_WRITE;
949 c = strstr(p, "prio=");
955 c = strstr(p, "prioclass=");
/* Single-digit priority class. */
958 prioclass = *c - '0';
961 c = strstr(p, "file=");
964 fill_option(c, filename);
967 c = strstr(p, "bs=");
970 fill_option(c, string);
971 td->bs = strtoul(string, NULL, 10);
975 c = strstr(p, "direct=");
984 c = strstr(p, "delay=");
987 fill_option(c, string);
988 td->delay_sleep = strtoul(string, NULL, 10);
991 c = strstr(p, "rate=");
994 fill_option(c, string);
995 td->rate = strtoul(string, NULL, 10);
998 c = strstr(p, "ratemin=");
1001 fill_option(c, string);
1002 td->ratemin = strtoul(string, NULL, 10);
1005 c = strstr(p, "ratecycle=");
1008 fill_option(c, string);
1009 td->ratecycle = strtoul(string, NULL, 10);
1012 c = strstr(p, "cpumask=");
1015 fill_option(c, string);
1016 cpu = strtoul(string, NULL, 10);
1017 fill_cpu_mask(td->cpumask, cpu);
1020 c = strstr(p, "fsync=");
1023 fill_option(c, string);
1024 td->fsync_blocks = strtoul(string, NULL, 10);
1027 c = strstr(p, "startdelay=");
1030 fill_option(c, string);
1031 td->start_delay = strtoul(string, NULL, 10);
1034 c = strstr(p, "timeout=");
1037 fill_option(c, string);
1038 td->timeout = strtoul(string, NULL, 10);
1041 c = strstr(p, "aio_depth=");
1044 fill_option(c, string);
1045 td->aio_depth = strtoul(string, NULL, 10);
/* Bare flags (no '='): aio / random / sequential. */
1048 c = strstr(p, "aio");
1052 c = strstr(p, "random");
1055 c = strstr(p, "sequential");
1059 if (add_job(td, filename, prioclass, prio))
/*
 * Try to parse "name=value" or "name = value" from an ini line into
 * *val.  Builds the sscanf format at runtime from 'name'; the str
 * buffer declaration and return statements fall on elided lines.
 */
1067 static int check_int(char *p, char *name, unsigned int *val)
1071 sprintf(str, "%s=%%d", name);
1072 if (sscanf(p, str, val) == 1)
1075 sprintf(str, "%s = %%d", name);
1076 if (sscanf(p, str, val) == 1)
/*
 * True for blank/comment ini lines.  Scans for the first
 * non-space/non-control character (the comment-char test and returns
 * are on elided lines).  Note strlen() is re-evaluated per iteration.
 */
1082 static int is_empty_or_comment(char *line)
1086 for (i = 0; i < strlen(line); i++) {
1089 if (!isspace(line[i]) && !iscntrl(line[i]))
/*
 * Parse the -f job file: each "[name]" section becomes a job ([global]
 * updates def_thread instead), with key=value lines handled via
 * check_int() plus bare "sequential"/"random"/"aio" flags.
 * NOTE(review): sscanf(p, "[%s]", name) cannot match the closing ']' —
 * %s consumes it — which is why the trailing character is chopped off
 * afterwards; verify this still yields the intended section name.
 */
1096 static int parse_jobs_ini(char *file)
1098 unsigned int prioclass, prio, cpu, global;
1099 struct thread_data *td;
1100 char *string, *name;
1105 f = fopen(file, "r");
1111 string = malloc(4096);
/* Outer loop: find the next "[section]" header. */
1114 while ((p = fgets(string, 4096, f)) != NULL) {
1115 if (is_empty_or_comment(p))
1117 if (sscanf(p, "[%s]", name) != 1)
1120 global = !strncmp(name, "global", 6);
/* Strip the trailing ']' swallowed into name by %s. */
1122 name[strlen(name) - 1] = '\0';
1124 td = get_new_job(global);
/* Inner loop: option lines until the next section. */
1132 while ((p = fgets(string, 4096, f)) != NULL) {
1133 if (is_empty_or_comment(p))
1137 if (!check_int(p, "bs", &td->bs)) {
1142 if (!check_int(p, "rw", &td->ddir)) {
1146 if (!check_int(p, "prio", &prio)) {
1150 if (!check_int(p, "prioclass", &prioclass)) {
1154 if (!check_int(p, "direct", &td->odirect)) {
1158 if (!check_int(p, "rate", &td->rate)) {
1162 if (!check_int(p, "ratemin", &td->ratemin)) {
1166 if (!check_int(p, "ratecycle", &td->ratecycle)) {
1170 if (!check_int(p, "delay", &td->delay_sleep)) {
1174 if (!check_int(p, "cpumask", &cpu)) {
1175 fill_cpu_mask(td->cpumask, cpu);
1179 if (!check_int(p, "fsync", &td->fsync_blocks)) {
1183 if (!check_int(p, "startdelay", &td->start_delay)) {
1187 if (!check_int(p, "timeout", &td->timeout)) {
1191 if (!check_int(p, "aio_depth", &td->aio_depth)) {
1195 if (!strncmp(p, "sequential", 10)) {
1200 if (!strncmp(p, "random", 6)) {
1205 if (!strncmp(p, "aio", 3)) {
1211 printf("Client%d: bad option %s\n",td->thread_number,p);
1215 if (add_job(td, name, prioclass, prio))
/*
 * Parse global command-line switches into def_thread and the option
 * globals; returns the index of the first non-option argument (return
 * statements and the per-switch dispatch are partly elided).  A bad
 * block size falls back to DEF_BS with a warning.
 */
1225 static int parse_options(int argc, char *argv[])
1229 for (i = 1; i < argc; i++) {
1230 char *parm = argv[i];
1239 def_thread.sequential = !!atoi(parm);
/* Block size is given in KiB on the command line. */
1243 def_thread.bs = atoi(parm);
1244 def_thread.bs <<= 10;
1245 if (!def_thread.bs) {
1246 printf("bad block size\n");
1247 def_thread.bs = DEF_BS;
1252 def_thread.timeout = atoi(parm);
1256 write_stat = !!atoi(parm);
1260 repeatable = !!atoi(parm);
1264 rate_quit = !!atoi(parm);
1268 def_thread.odirect = !!atoi(parm);
1271 if (i + 1 >= argc) {
1272 printf("-f needs file as arg\n");
1275 ini_file = strdup(argv[i+1]);
1278 printf("bad option %s\n", argv[i]);
/*
 * Collect finished job processes: every thread in TD_EXITED state is
 * waitpid()'ed, marked TD_REAPED, and its rates are subtracted from
 * the running totals; then print the updated running count.
 */
1286 static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
1290 for (i = 0; i < thread_number; i++) {
1291 struct thread_data *td = &threads[i];
1293 if (td->runstate != TD_EXITED)
1296 td->runstate = TD_REAPED;
1297 waitpid(td->pid, NULL, 0);
1299 (*m_rate) -= td->ratemin;
1300 (*t_rate) -= td->rate;
1305 printf("Threads now running: %d", *nr_running);
1306 if (*m_rate || *t_rate)
1307 printf(", rate %d/%dKiB/sec", *t_rate, *m_rate);
/*
 * Launch and supervise all jobs.  First pass forks each job that is
 * due (honoring start_delay relative to 'genesis') and waits on
 * startup_sem until the child is initialized; second pass flips
 * TD_CREATED jobs to TD_STARTED and posts their mutex so they begin
 * I/O together; then loop reaping until all are done.  The fork() call
 * itself and some loop plumbing fall on elided lines.
 */
1312 static void run_threads(char *argv[])
1314 struct timeval genesis, now;
1315 struct thread_data *td;
1316 unsigned long spent;
1317 int i, todo, nr_running, m_rate, t_rate;
1319 gettimeofday(&genesis, NULL);
1321 printf("Starting %d threads\n", thread_number);
1324 signal(SIGINT, sig_handler);
1326 todo = thread_number;
1328 m_rate = t_rate = 0;
1331 for (i = 0; i < thread_number; i++) {
1334 if (td->runstate != TD_NOT_CREATED)
1338 * never got a chance to start, killed by other
1339 * thread for some reason
1341 if (td->terminate) {
/* Not yet due: honor this job's start delay. */
1346 if (td->start_delay) {
1347 gettimeofday(&now, NULL);
1348 spent = mtime_since(&genesis, &now);
1350 if (td->start_delay * 1000 > spent)
1354 td->runstate = TD_CREATED;
1355 sem_init(&startup_sem, 1, 1);
/* Block until the child signals it finished initializing. */
1359 sem_wait(&startup_sem);
/* Child path: run the job body (fork() is on an elided line). */
1361 thread_main(shm_id, i, argv);
/* Release all initialized jobs at once for a synchronized start. */
1366 for (i = 0; i < thread_number; i++) {
1367 struct thread_data *td = &threads[i];
1369 if (td->runstate == TD_CREATED) {
1370 td->runstate = TD_STARTED;
1372 m_rate += td->ratemin;
1374 sem_post(&td->mutex);
1376 printf("Threads now running: %d", nr_running);
1377 if (m_rate || t_rate)
1378 printf(", rate %d/%dKiB/sec", t_rate, m_rate);
1383 reap_threads(&nr_running, &t_rate, &m_rate);
1389 while (nr_running) {
1390 reap_threads(&nr_running, &t_rate, &m_rate);
/*
 * Program entry: create/attach the shared-memory threads[] array, set
 * def_thread defaults, parse options and jobs (ini file or command
 * line), run all jobs, then aggregate per-direction (read=0/write=1)
 * totals and print per-thread and overall summaries.  The run_threads()
 * invocation and some returns are on elided lines.
 */
1395 int main(int argc, char *argv[])
/* Indexed by data direction: [0]=read, [1]=write. */
1397 static unsigned long max_run[2], min_run[2], total_blocks[2];
1398 static unsigned long max_bw[2], min_bw[2], maxl[2], minl[2];
1399 static unsigned long read_mb, write_mb, read_agg, write_agg;
/* Shared memory so forked job processes can publish results back. */
1402 shm_id = shmget(0, MAX_JOBS * sizeof(struct thread_data), IPC_CREAT | 0600);
1408 threads = shmat(shm_id, NULL, 0);
1409 if (threads == (void *) -1 ) {
/* Default CPU mask = whatever this process is allowed to run on. */
1416 if (sched_getaffinity(getpid(), sizeof(cpu_set_t), &def_thread.cpumask) == -1) {
1417 perror("sched_getaffinity");
/* Template defaults copied into every new job. */
1424 def_thread.ddir = DDIR_READ;
1425 def_thread.bs = DEF_BS;
1426 def_thread.odirect = 1;
1427 def_thread.ratecycle = DEF_RATE_CYCLE;
1428 def_thread.sequential = 1;
1429 def_thread.timeout = DEF_TIMEOUT;
1431 i = parse_options(argc, argv);
1434 if (parse_jobs_ini(ini_file))
1437 parse_jobs_cmd(argc, argv, i);
1439 if (!thread_number) {
1440 printf("Nothing to do\n");
1444 printf("%s: %s, bs=%uKiB, timeo=%u, write_stat=%u, odirect=%d\n", argv[0], def_thread.sequential ? "sequential" : "random", def_thread.bs >> 10, def_thread.timeout, write_stat, def_thread.odirect);
/* Seed the minima so the first sample always wins. */
1448 min_bw[0] = min_run[0] = ~0UL;
1449 min_bw[1] = min_run[1] = ~0UL;
1450 minl[0] = minl[1] = ~0UL;
1451 for (i = 0; i < thread_number; i++) {
1452 struct thread_data *td = &threads[i];
1453 unsigned long bw = 0;
1458 if (td->runtime < min_run[td->ddir])
1459 min_run[td->ddir] = td->runtime;
1460 if (td->runtime > max_run[td->ddir])
1461 max_run[td->ddir] = td->runtime;
1464 bw = (td->io_blocks * td->bs) / td->runtime;
1465 if (bw < min_bw[td->ddir])
1466 min_bw[td->ddir] = bw;
1467 if (bw > max_bw[td->ddir])
1468 max_bw[td->ddir] = bw;
/* Min/max of each thread's worst-case latency. */
1469 if (td->max_latency < minl[td->ddir])
1470 minl[td->ddir] = td->max_latency;
1471 if (td->max_latency > maxl[td->ddir])
1472 maxl[td->ddir] = td->max_latency;
1474 total_blocks[td->ddir] += td->io_blocks;
1476 if (td->ddir == DDIR_READ) {
1477 read_mb += (td->bs * td->io_blocks) >> 20;
1479 read_agg += (td->io_blocks * td->bs) / td->runtime;
1481 if (td->ddir == DDIR_WRITE) {
1482 write_mb += (td->bs * td->io_blocks) >> 20;
1484 write_agg += (td->io_blocks * td->bs) / td->runtime;
1488 show_thread_status(td);
1491 printf("Run status:\n");
1492 if (max_run[DDIR_READ])
1493 printf(" READ: io=%luMiB, aggrb=%lu, minl=%lu, maxl=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", read_mb, read_agg, minl[0], maxl[0], min_bw[0], max_bw[0], min_run[0], max_run[0]);
1494 if (max_run[DDIR_WRITE])
1495 printf(" WRITE: io=%luMiB, aggrb=%lu, minl=%lu, maxl=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", write_mb, write_agg, minl[1], maxl[1], min_bw[1], max_bw[1], min_run[1], max_run[1]);