2 * fio - the flexible io tester
4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33 #include <sys/types.h>
36 #include <semaphore.h>
39 #include <asm/unistd.h>
#define MAX_JOBS	(1024)

/*
 * Per-arch ioprio syscall numbers for kernels whose headers don't
 * export them yet; assume we don't have _get either, if _set isn't
 * defined. The visible fragment was missing the arch #if/#endif
 * structure -- restored here.
 */
#ifndef __NR_ioprio_set
#if defined(__i386__)
#define __NR_ioprio_set		289
#define __NR_ioprio_get		290
#elif defined(__powerpc__) || defined(__powerpc64__)
#define __NR_ioprio_set		273
#define __NR_ioprio_get		274
#elif defined(__x86_64__)
#define __NR_ioprio_set		251
#define __NR_ioprio_get		252
#elif defined(__ia64__)
#define __NR_ioprio_set		1274
#define __NR_ioprio_get		1275
#elif defined(__alpha__)
#define __NR_ioprio_set		442
#define __NR_ioprio_get		443
#elif defined(__s390x__) || defined(__s390__)
#define __NR_ioprio_set		282
#define __NR_ioprio_get		283
#else
#error "Unsupported arch"
#endif
#endif
/*
 * Wrapper for the ioprio_set(2) syscall (no glibc wrapper at the
 * time): set the io priority of `who` (per `which`, e.g.
 * IOPRIO_WHO_PROCESS) to `ioprio`. Returns 0 on success, -1 with
 * errno set on failure.
 */
static int ioprio_set(int which, int who, int ioprio)
{
	return syscall(__NR_ioprio_set, which, who, ioprio);
}
/*
 * Targets for ioprio_set(); values match the kernel ioprio ABI.
 * The enum braces were missing from the visible fragment -- restored.
 */
enum {
	IOPRIO_WHO_PROCESS = 1,
	IOPRIO_WHO_PGRP,
	IOPRIO_WHO_USER,
};

/* the io priority class lives in the top bits of the ioprio value */
#define IOPRIO_CLASS_SHIFT	13
/*
 * Default option values: 30 sec runtime, 1000 msec rate-check cycle,
 * O_DIRECT and sequential access on, per-thread stat files off,
 * repeatable (fixed-seed) randomness on.
 */
#define DEF_TIMEOUT	(30)
#define DEF_RATE_CYCLE	(1000)
#define DEF_ODIRECT	(1)
#define DEF_SEQUENTIAL	(1)
#define DEF_WRITESTAT	(0)
#define DEF_RAND_REPEAT	(1)

/* round buf up to the next MASK boundary (MASK assumed 2^k - 1; its define is not visible here) */
#define ALIGN(buf)	(char *) (((unsigned long) (buf) + MASK) & ~(MASK))

static int write_stat = DEF_WRITESTAT;	/* emit per-thread stat files */
static int repeatable = DEF_RAND_REPEAT;	/* fixed random seed when set */
static int rate_quit = 1;	/* NOTE(review): presumably "stop job when min rate not met" -- confirm, use not visible here */

static int thread_number;	/* number of jobs defined so far */
static char *ini_file;		/* job file passed with -f */
	/* thread control and lifecycle */
	volatile int terminate;		/* set to ask this job to stop */
	volatile int runstate;		/* TD_* state machine */

	/* job options */
	unsigned int sequential;	/* sequential (vs random) access */
	unsigned int odirect;		/* use O_DIRECT */
	unsigned int delay_sleep;	/* usec to sleep between ios */
	unsigned int fsync_blocks;	/* fsync every N blocks (buffered writes only) */
	unsigned int start_delay;	/* seconds to delay job start */
	unsigned int timeout;		/* seconds to run */
	unsigned int use_aio;		/* use the libaio engine */

	/* async io engine state */
	io_context_t *aio_ctx;
	struct iocb *aio_iocbs;		/* aio_depth preallocated iocbs */
	unsigned int aio_depth;		/* max ios in flight */
	unsigned int aio_cur_depth;	/* ios currently in flight */
	struct io_event *aio_events;
	char *aio_iocbs_status;		/* per-slot in-use flags (0 = free) */

	/* rate control (see setup_rate()/rate_throttle()/check_min_rate()) */
	unsigned int ratemin;		/* KiB/sec floor */
	unsigned int ratecycle;		/* msec between min-rate samples */
	unsigned long rate_usec_cycle;	/* usec budget per block at the target rate */
	long rate_pending_usleep;	/* accumulated sleep debt, usec */
	unsigned long rate_blocks;	/* io_blocks at the last sample */
	struct timeval lastrate;	/* time of the last sample */

	/* results */
	unsigned long max_latency; /* msec */
	unsigned long min_latency; /* msec */
	unsigned long runtime; /* msec -- set from mtime_since(); the original "sec" comment was wrong */
	unsigned long blocks;		/* total blocks to transfer */
	unsigned long io_blocks;	/* blocks completed so far */
	unsigned long last_block;

	struct drand48_data random_state;	/* per-job PRNG for random offsets */

	/* stat-file accounting, see add_stat_sample() */
	unsigned long stat_time;
	unsigned long stat_time_last;
	unsigned long stat_blocks_last;

	struct timeval start;		/* job start time */
static struct thread_data *threads;	/* shared-memory array of all jobs (see main()) */
static struct thread_data def_thread;	/* template holding the option defaults */

static sem_t startup_sem;	/* handshake: posted when a forked job finishes init */
/*
 * SIGINT handler: walk every job and flag it for termination
 * (the flag assignment is on a line not visible in this fragment).
 */
static void sig_handler(int sig)
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];
/*
 * Seed the per-job drand48 state. The fixed seed 123 gives repeatable
 * runs; otherwise (presumably gated on `repeatable`, the branch is not
 * visible here) a seed is read from /dev/random.
 *
 * NOTE(review): /dev/random can block when entropy is low;
 * /dev/urandom is the usual choice for a benchmark seed -- confirm.
 */
static int init_random_state(struct thread_data *td)
	unsigned long seed = 123;
	int fd = open("/dev/random", O_RDONLY);
	if (read(fd, &seed, sizeof(seed)) < (int) sizeof(seed)) {
	srand48_r(seed, &td->random_state);
/* close the per-thread stat file, if one was opened */
static void shutdown_stat_file(struct thread_data *td)
	if (td->stat_fd != -1) {
/*
 * Open fio_thread<N>.stat for this job's rate samples
 * (presumably gated on `write_stat`; the guard is not visible here).
 */
static int init_stat_file(struct thread_data *td)
	sprintf(n, "fio_thread%d.stat", td->thread_number);
	td->stat_fd = open(n, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (td->stat_fd == -1) {
		perror("open stat file");
/*
 * Microseconds elapsed from *s to *e (callers always pass e >= s).
 * The usec borrow is handled explicitly so both intermediates stay
 * positive; math is in double like the rest of the time helpers.
 * The declaration/borrow/return lines were missing from the visible
 * fragment -- restored.
 */
static unsigned long utime_since(struct timeval *s, struct timeval *e)
{
	double sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= (double) 1000000;

	return sec + usec;
}
/*
 * Milliseconds elapsed from *s to *e; same borrow handling as
 * utime_since(). The declaration/borrow/return lines were missing
 * from the visible fragment -- restored.
 */
static unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
	double sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= (double) 1000;
	usec /= (double) 1000;

	return sec + usec;
}
/*
 * Absolute timeval collapsed to milliseconds; used to timestamp
 * submitted iocbs (see aio_get_iocb()/iocb_time()).
 */
static inline unsigned long msec_now(struct timeval *s)
{
	return s->tv_sec * 1000 + s->tv_usec / 1000;
}
/*
 * Pick the block for the next io. Random jobs scale the lrand48()
 * output into [1, blocks]; the sequential path and the conversion to
 * a byte offset are on lines not visible in this fragment.
 *
 * NOTE(review): scaling by RAND_MAX assumes RAND_MAX == 2^31 - 1
 * (true on glibc, where it matches lrand48()'s range) -- confirm.
 */
static unsigned long get_next_offset(struct thread_data *td)
	if (!td->sequential) {
		lrand48_r(&td->random_state, &r);
		b = (1+(double) (td->blocks-1) * r / (RAND_MAX+1.0));
/*
 * Account one completed io of `msec` latency; roughly every 500 msec
 * of accumulated time, flush a "total_msec, KiB/sec" line to the
 * per-thread stat file.
 */
static void add_stat_sample(struct thread_data *td, unsigned long msec)
	td->stat_time += msec;
	td->stat_time_last += msec;
	td->stat_blocks_last++;

	if (td->stat_time_last >= 500) {
		/* bandwidth over the last window, KiB/sec */
		unsigned long rate = td->stat_blocks_last * td->bs / (td->stat_time_last);

		td->stat_time_last = 0;
		td->stat_blocks_last = 0;
		sprintf(sample, "%lu, %lu\n", td->stat_time, rate);
		/* NOTE(review): write() return ignored; presumably fine for best-effort stats -- confirm stat_fd is valid here */
		write(td->stat_fd, sample, strlen(sample));
/*
 * Sleep for `usec` microseconds, resuming after signal interruption
 * until the full interval has elapsed.
 *
 * Fix: the original initialized tv_nsec to usec * 1000 in one shot,
 * which exceeds nanosleep's legal tv_nsec range (< 1e9) and fails
 * with EINVAL whenever usec >= 1000000 -- and rate_pending_usleep can
 * easily grow past one second. Split into sec + nsec instead.
 */
static void usec_sleep(int usec)
{
	struct timespec req = {
		.tv_sec = usec / 1000000,
		.tv_nsec = (usec % 1000000) * 1000L,
	};
	struct timespec rem;

	do {
		rem.tv_sec = rem.tv_nsec = 0;
		if (nanosleep(&req, &rem) == 0)
			break;

		/* interrupted: retry with whatever time remains */
		req = rem;
	} while (rem.tv_sec || rem.tv_nsec);
}
/*
 * Rate limiting: when an io finished faster than the per-block usec
 * budget (rate_usec_cycle), bank the difference and sleep once at
 * least 100 msec of debt has accumulated -- batching avoids many tiny
 * sleeps. When we ran over budget, pay the overtime back instead.
 */
static void rate_throttle(struct thread_data *td, unsigned long time_spent)
	if (time_spent < td->rate_usec_cycle) {
		unsigned long s = td->rate_usec_cycle - time_spent;

		td->rate_pending_usleep += s;
		if (td->rate_pending_usleep >= 100000) {
			usec_sleep(td->rate_pending_usleep);
			td->rate_pending_usleep = 0;
		/* else-branch: we were slower than the target cycle */
		long overtime = time_spent - td->rate_usec_cycle;

		td->rate_pending_usleep -= overtime;
/*
 * Returns non-zero when the job's observed bandwidth fell below the
 * configured ratemin; samples at most once per `ratecycle` msec.
 */
static int check_min_rate(struct thread_data *td, struct timeval *now)
	/* allow a 2 second settle period in the beginning */
	if (mtime_since(&td->start, now) < 2000)

	/* if rate_blocks is set, a sample window is already running */
	if (td->rate_blocks) {
		spent = mtime_since(&td->lastrate, now);
		if (spent < td->ratecycle)

		/* KiB/sec moved since the last window */
		rate = ((td->io_blocks - td->rate_blocks) * td->bs) / spent;
		if (rate < td->ratemin) {
			/* NOTE(review): %d formats the unsigned ratemin; %u would match the type */
			printf("Client%d: min rate %d not met, got %ldKiB/sec\n", td->thread_number, td->ratemin, rate);

	/* arm the next sample window */
	td->rate_blocks = td->io_blocks;
	memcpy(&td->lastrate, now, sizeof(*now));
/* true once the job has run for at least `timeout` seconds */
static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
	if (mtime_since(&td->start, t) >= td->timeout * 1000)

/* fsync is only meaningful for buffered (non-O_DIRECT) writes */
#define should_fsync(td) ((td)->ddir == DDIR_WRITE && !(td)->odirect)
/*
 * Synchronous engine: per block, seek to the next offset and issue a
 * single read()/write() of td->bs bytes, with optional inter-io
 * sleep, periodic fsync for buffered writes, latency accounting and
 * rate policing.
 */
static void do_sync_io(struct thread_data *td)
	unsigned long blocks, msec, usec;

	for (blocks = 0; blocks < td->blocks; blocks++) {
		off_t offset = get_next_offset(td);

		if (lseek(td->fd, offset, SEEK_SET) == -1) {

		usec_sleep(td->delay_sleep);

		gettimeofday(&s, NULL);

		if (td->ddir == DDIR_READ)
			ret = read(td->fd, td->buf, td->bs);
			ret = write(td->fd, td->buf, td->bs);

		/* a short transfer is treated as an error */
		if (ret < (int) td->bs) {

		/* periodic fsync for buffered writes */
		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks % td->fsync_blocks) == 0)

		gettimeofday(&e, NULL);

		usec = utime_since(&s, &e);

		rate_throttle(td, usec);

		if (check_min_rate(td, &e)) {

		add_stat_sample(td, msec);

		if (msec < td->min_latency)
			td->min_latency = msec;
		if (msec > td->max_latency)
			td->max_latency = msec;

		if (runtime_exceeded(td, &e))

	/* final flush for buffered writes */
	if (should_fsync(td))
/*
 * Release an iocb slot: recover its array index from the pointer
 * difference and clear the in-use flag.
 */
static void aio_put_iocb(struct thread_data *td, struct iocb *iocb)
	long offset = ((long) iocb - (long) td->aio_iocbs)/ sizeof(struct iocb);

	td->aio_iocbs_status[offset] = 0;
/*
 * Claim a free iocb slot, prep it for a read/write at the next offset
 * and stamp it with the submit time -- smuggled through the callback
 * field and read back with iocb_time().
 */
static struct iocb *aio_get_iocb(struct thread_data *td, struct timeval *t)
	struct iocb *iocb = NULL;

	/* linear scan for the first free slot */
	for (i = 0; i < td->aio_depth; i++) {
		if (td->aio_iocbs_status[i] == 0) {
			td->aio_iocbs_status[i] = 1;
			iocb = &td->aio_iocbs[i];

		off_t off = get_next_offset(td);
		/* slot i owns its own td->bs-sized chunk of td->buf */
		char *p = td->buf + i * td->bs;

		if (td->ddir == DDIR_READ)
			io_prep_pread(iocb, td->fd, p, td->bs, off);
			io_prep_pwrite(iocb, td->fd, p, td->bs, off);

		io_set_callback(iocb, (io_callback_t) msec_now(t));
/*
 * Submit one iocb, retrying on transient queue-full conditions.
 */
static int aio_submit(struct thread_data *td, struct iocb *iocb)
	ret = io_submit(*td->aio_ctx, 1, &iocb);
	/*
	 * NOTE(review): libaio's io_submit() returns a negative error
	 * code and does NOT set errno, so this errno test looks wrong --
	 * it should probably be ret == -EAGAIN. Confirm against the
	 * libaio version in use.
	 */
	else if (errno == EAGAIN)

/* submit timestamp stashed in the iocb's callback/data field by aio_get_iocb() */
#define iocb_time(iocb) ((unsigned long) (iocb)->data)
/*
 * Async engine: keep up to aio_depth iocbs in flight. While ramping
 * up, poll for completions without blocking; at full depth, wait for
 * at least one. Latency is measured per completion event from the
 * submit stamp carried in the iocb.
 */
static void do_async_io(struct thread_data *td)
	unsigned long blocks, msec, usec;

	for (blocks = 0; blocks < td->blocks; blocks++) {
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
		struct timespec *timeout;
		int ret, i, min_evts = 0;

		usec_sleep(td->delay_sleep);

		gettimeofday(&s, NULL);

		iocb = aio_get_iocb(td, &s);

		ret = aio_submit(td, iocb);

		/* queue not full yet: poll (min_evts stays 0, zero timeout) */
		if (td->aio_cur_depth < td->aio_depth) {

		ret = io_getevents(*td->aio_ctx, min_evts, td->aio_cur_depth, td->aio_events, timeout);

		gettimeofday(&e, NULL);

		for (i = 0; i < ret; i++) {
			struct io_event *ev = td->aio_events + i;

			/* NOTE(review): `iocb` is presumably reset from ev->obj on a line not visible here -- confirm */
			msec = msec_now(&e) - iocb_time(iocb);
			add_stat_sample(td, msec);

			if (msec < td->min_latency)
				td->min_latency = msec;
			if (msec > td->max_latency)
				td->max_latency = msec;

			aio_put_iocb(td, iocb);

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one which may look
		 * worse (rest of the original comment not visible here)
		 */
		usec = utime_since(&s, &e);

		rate_throttle(td, usec);

		if (check_min_rate(td, &e)) {

		if (runtime_exceeded(td, &e))
/*
 * Drain or cancel everything still in flight before tearing down the
 * aio context.
 */
static void cleanup_pending_aio(struct thread_data *td)
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};

	/* get immediately available events, if any (zero timeout) */
	r = io_getevents(*td->aio_ctx, 0, td->aio_cur_depth, td->aio_events, &ts);
	for (i = 0; i < r; i++)
		/*
		 * NOTE(review): this frees slot i, not the iocb the i-th
		 * event actually completed (td->aio_events[i].obj) -- looks
		 * wrong when completions arrive out of order; confirm.
		 */
		aio_put_iocb(td, &td->aio_iocbs[i]);

	/* now cancel the remaining active requests */
	for (i = 0; i < td->aio_depth; i++) {
		if (td->aio_iocbs_status[i] == 0)
		r = io_cancel(*td->aio_ctx, &td->aio_iocbs[i], td->aio_events);
		aio_put_iocb(td, &td->aio_iocbs[i]);

	/* wait out anything io_cancel could not take back */
	if (td->aio_cur_depth)
		io_getevents(*td->aio_ctx, td->aio_cur_depth, td->aio_cur_depth, td->aio_events, NULL);
/*
 * Tear down the aio engine: drain in-flight ios, destroy the context
 * and free the per-job arrays.
 */
static void cleanup_aio(struct thread_data *td)
	if (td->aio_cur_depth)
		cleanup_pending_aio(td);

	io_destroy(*td->aio_ctx);

	free(td->aio_events);
	/* NOTE(review): free(NULL) is a no-op, so this guard is redundant */
	if (td->aio_iocbs_status)
		free(td->aio_iocbs_status);
/*
 * Set up the per-job aio context plus the iocb/event/status arrays,
 * aio_depth entries each.
 */
static int init_aio(struct thread_data *td)
	td->aio_ctx = malloc(sizeof(*td->aio_ctx));
	if (io_queue_init(td->aio_depth, td->aio_ctx)) {

	/* NOTE(review): these malloc returns are unchecked -- confirm the intended OOM policy */
	td->aio_iocbs = malloc(td->aio_depth * sizeof(struct iocb));
	td->aio_events = malloc(td->aio_depth * sizeof(struct io_event));
	td->aio_iocbs_status = malloc(td->aio_depth * sizeof(char));
/*
 * Per-job entry point, run in a forked child: attach the shared
 * memory segment, apply cpu affinity and io priority, open the target
 * file, run the sync or async engine, then publish results and exit.
 */
static void *thread_main(int shm_id, int offset, char *argv[])
	struct thread_data *td;
	void *data, *ptr = NULL;

	/* locate our slot inside the shared-memory job array */
	data = shmat(shm_id, NULL, 0);
	td = data + offset * sizeof(struct thread_data);

	if (sched_setaffinity(td->pid, sizeof(td->cpumask), &td->cpumask) == -1) {

	printf("Thread (%s) (pid=%u) (f=%s) (aio=%d) started\n", td->ddir == DDIR_READ ? "read" : "write", td->pid, td->file_name, td->aio_depth);

	/* cosmetic: rename the process to fio<n> */
	sprintf(argv[0], "fio%d", offset);

	/* reads need an existing file; writes create/truncate */
	if (td->ddir == DDIR_READ)
		td->fd = open(td->file_name, flags | O_RDONLY);
		td->fd = open(td->file_name, flags | O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (td->use_aio && init_aio(td))
	if (init_random_state(td))
	if (init_stat_file(td))

	if (td->ddir == DDIR_READ) {
		if (fstat(td->fd, &st) == -1) {
		/* read jobs cover the whole existing file */
		td->blocks = st.st_size / td->bs;
		/* write jobs default to a 1 GiB working set */
		td->blocks = 1024 * 1024 * 1024 / td->bs;

	if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {

	/* tell the parent we are ready, then block until released */
	sem_post(&startup_sem);
	sem_wait(&td->mutex);

	gettimeofday(&td->start, NULL);
	memcpy(&td->lastrate, &td->start, sizeof(td->start));

	/* sync engine: a single bs-sized, MASK-aligned buffer */
	ptr = malloc(td->bs + MASK);
	td->buf = ALIGN(ptr);
	/* async engine: one bs-sized chunk per queue slot */
	ptr = malloc(td->bs * td->aio_depth + MASK);
	td->buf = ALIGN(ptr);

	gettimeofday(&end, NULL);
	/* runtime in msec */
	td->runtime = mtime_since(&td->start, &end);

	shutdown_stat_file(td);

	sem_post(&startup_sem);
	sem_wait(&td->mutex);

	/* parent reaps us once it sees this state */
	td->runstate = TD_EXITED;
/* release the shared memory segment holding the job array (body not visible in this fragment) */
static void free_shm(void)
/*
 * Print the final one-line summary for a job: error, io priority,
 * worst latency, total io moved and mean bandwidth.
 */
static void show_thread_status(struct thread_data *td)
	int prio, prio_class;
	unsigned long bw = 0;

	/* nothing to report for a job that never did io and never failed */
	if (!td->io_blocks && !td->error)

	/* KiB/sec over the msec runtime; assumes runtime != 0 here (guard not visible) */
	bw = (td->io_blocks * td->bs) / td->runtime;

	/* split the packed ioprio value back into class and data */
	prio = td->ioprio & 0xff;
	prio_class = td->ioprio >> IOPRIO_CLASS_SHIFT;

	printf("thread%d (%s): err=%2d, prio=%1d/%1d maxl=%5lumsec, io=%6luMiB, bw=%6luKiB/sec\n", td->thread_number, td->ddir == DDIR_READ ? " read": "write", td->error, prio_class, prio, td->max_latency, td->io_blocks * td->bs >> 20, bw);
/*
 * Convert the KiB/sec `rate` option into the usec-per-block budget
 * consumed by rate_throttle().
 */
static int setup_rate(struct thread_data *td)
	int nr_reads_per_sec;

	if (td->rate < td->ratemin) {
		fprintf(stderr, "min rate larger than nominal rate\n");

	/*
	 * NOTE(review): if rate * 1024 < bs this truncates to 0 and the
	 * next line divides by zero -- confirm an earlier guard exists on
	 * a line not visible here.
	 */
	nr_reads_per_sec = td->rate * 1024 / td->bs;
	td->rate_usec_cycle = 1000000 / nr_reads_per_sec;
	td->rate_pending_usleep = 0;
/*
 * Reserve the next job slot and seed it from the def_thread defaults
 * template; `global` presumably selects the [global] template instead
 * (that branch is not visible in this fragment).
 */
static struct thread_data *get_new_job(int global)
	struct thread_data *td;

	if (thread_number >= MAX_JOBS)

	td = &threads[thread_number++];
	memset(td, 0, sizeof(*td));

	/* thread_number is 1-based in the job itself */
	td->thread_number = thread_number;
	td->ddir = def_thread.ddir;
	td->bs = def_thread.bs;
	td->odirect = def_thread.odirect;
	td->ratecycle = def_thread.ratecycle;
	td->sequential = def_thread.sequential;
	td->timeout = def_thread.timeout;
	memcpy(&td->cpumask, &def_thread.cpumask, sizeof(td->cpumask));
/* return a slot claimed by get_new_job() (thread_number is 1-based, hence the -1) */
static void put_job(struct thread_data *td)
	memset(&threads[td->thread_number - 1], 0, sizeof(*td));
/*
 * Finalize a parsed job: record file name and io priority, init the
 * start mutex, and print the option summary. The [global] defaults
 * template is never registered as a runnable job.
 */
static int add_job(struct thread_data *td, const char *filename, int prioclass,
	if (td == &def_thread)

	/* NOTE(review): unbounded strcpy; file_name's size is not visible here -- confirm it can hold any filename passed in */
	strcpy(td->file_name, filename);
	/* pshared=1: the semaphore lives in shared memory and is used across fork() */
	sem_init(&td->mutex, 1, 0);
	td->min_latency = 10000000;
	td->ioprio = (prioclass << IOPRIO_CLASS_SHIFT) | prio;

	/* aio without an explicit queue depth makes no sense */
	if (td->use_aio && !td->aio_depth)

	printf("Client%d: file=%s, rw=%d, prio=%d, seq=%d, odir=%d, bs=%d, rate=%d, aio=%d, aio_depth=%d\n", td->thread_number, filename, td->ddir, td->ioprio, td->sequential, td->odirect, td->bs, td->rate, td->use_aio, td->aio_depth);
/*
 * Populate a cpu affinity mask from the `cpu` option (presumably a
 * bitmask -- the bit-test line is not visible in this fragment).
 *
 * NOTE(review): cpumask is passed BY VALUE (cpu_set_t is a struct),
 * so CPU_SET here modifies a local copy and the caller's mask stays
 * untouched -- confirm; a cpu_set_t * parameter would make it stick.
 */
static void fill_cpu_mask(cpu_set_t cpumask, int cpu)
	for (i = 0; i < sizeof(int) * 8; i++) {
		CPU_SET(i, &cpumask);
/*
 * Copy the option value starting at `input` into `output`, stopping
 * at the first ',' or '}' (the separators of the command line job
 * syntax) or at end of string. Output is always NUL-terminated; the
 * caller must supply a buffer at least as large as the copied span.
 * The increment/terminator lines were missing from the visible
 * fragment -- restored.
 */
static void fill_option(const char *input, char *output)
{
	int i = 0;

	while (input[i] != ',' && input[i] != '}' && input[i] != '\0') {
		output[i] = input[i];
		i++;
	}

	output[i] = '\0';
}
/*
 * Parse jobs given directly on the command line (argv[index..]).
 * Each option inside a job description is located with strstr() and
 * its value cut out with fill_option(), then applied to a freshly
 * allocated job.
 */
static void parse_jobs_cmd(int argc, char *argv[], int index)
	struct thread_data *td;
	unsigned int prio, prioclass, cpu;
	char *string, *filename, *p, *c;

	string = malloc(256);
	filename = malloc(256);

	for (i = index; i < argc; i++) {

		/* rw=0 means read, otherwise write */
		c = strstr(p, "rw=");
			td->ddir = DDIR_READ;
			td->ddir = DDIR_WRITE;

		/* single-digit io priority and class */
		c = strstr(p, "prio=");
		c = strstr(p, "prioclass=");
			prioclass = *c - '0';

		c = strstr(p, "file=");
			fill_option(c, filename);

		c = strstr(p, "bs=");
			fill_option(c, string);
			/* NOTE(review): the KiB shift for bs presumably happens on a line not visible here -- confirm */
			td->bs = strtoul(string, NULL, 10);

		c = strstr(p, "direct=");

		c = strstr(p, "delay=");
			fill_option(c, string);
			td->delay_sleep = strtoul(string, NULL, 10);

		c = strstr(p, "rate=");
			fill_option(c, string);
			td->rate = strtoul(string, NULL, 10);

		c = strstr(p, "ratemin=");
			fill_option(c, string);
			td->ratemin = strtoul(string, NULL, 10);

		c = strstr(p, "ratecycle=");
			fill_option(c, string);
			td->ratecycle = strtoul(string, NULL, 10);

		c = strstr(p, "cpumask=");
			fill_option(c, string);
			cpu = strtoul(string, NULL, 10);
			fill_cpu_mask(td->cpumask, cpu);

		c = strstr(p, "fsync=");
			fill_option(c, string);
			td->fsync_blocks = strtoul(string, NULL, 10);

		c = strstr(p, "startdelay=");
			fill_option(c, string);
			td->start_delay = strtoul(string, NULL, 10);

		c = strstr(p, "timeout=");
			fill_option(c, string);
			td->timeout = strtoul(string, NULL, 10);

		c = strstr(p, "aio_depth=");
			fill_option(c, string);
			td->aio_depth = strtoul(string, NULL, 10);

		/* bare boolean flags */
		c = strstr(p, "aio");
		c = strstr(p, "random");
		c = strstr(p, "sequential");

		if (add_job(td, filename, prioclass, prio))
/*
 * Look for "name=val" or "name = val" at the start of job-file line
 * `p` and store the parsed value in *val. Returns 0 on a successful
 * match, 1 otherwise -- callers test with !check_int(...).
 *
 * Fixes vs the fragment: bounded snprintf instead of sprintf (name
 * comes from the parser), and %u instead of %d to match the
 * `unsigned int *` destination every caller passes.
 */
static int check_int(char *p, char *name, unsigned int *val)
{
	char str[128];

	snprintf(str, sizeof(str), "%s=%%u", name);
	if (sscanf(p, str, val) == 1)
		return 0;

	snprintf(str, sizeof(str), "%s = %%u", name);
	if (sscanf(p, str, val) == 1)
		return 0;

	return 1;
}
/*
 * Return 1 when a job-file line can be skipped: blank / control
 * characters only, or a ';' comment. Return 0 for real content.
 *
 * Improvements vs the fragment: strlen hoisted out of the loop
 * condition (was O(n^2)) and ctype calls fed unsigned char values,
 * as the ctype contract requires.
 */
static int is_empty_or_comment(char *line)
{
	size_t i, len = strlen(line);

	for (i = 0; i < len; i++) {
		if (line[i] == ';')
			return 1;
		if (!isspace((unsigned char) line[i]) &&
		    !iscntrl((unsigned char) line[i]))
			return 0;
	}

	return 1;
}
/*
 * Parse the ini-style job file: each [name] section becomes one job
 * ([global] updates the defaults template instead); option lines are
 * matched with check_int() or by keyword.
 */
static int parse_jobs_ini(char *file)
	unsigned int prioclass, prio, cpu, global;
	struct thread_data *td;
	char *string, *name;

	f = fopen(file, "r");

	string = malloc(4096);

	/* outer loop: one iteration per [section] header */
	while ((p = fgets(string, 4096, f)) != NULL) {
		if (is_empty_or_comment(p))
		/*
		 * NOTE(review): %s stops at whitespace and happily eats the
		 * ']' -- hence the manual strip below; a scanset like
		 * "[%[^]]]" would be cleaner. Also confirm `name` is
		 * allocated large enough (its malloc is not visible here).
		 */
		if (sscanf(p, "[%s]", name) != 1)

		global = !strncmp(name, "global", 6);

		/* chop the trailing ']' that %s captured */
		name[strlen(name) - 1] = '\0';

		td = get_new_job(global);

		/* inner loop: option lines for this section */
		while ((p = fgets(string, 4096, f)) != NULL) {
			if (is_empty_or_comment(p))

			if (!check_int(p, "bs", &td->bs)) {
			if (!check_int(p, "rw", &td->ddir)) {
			if (!check_int(p, "prio", &prio)) {
			if (!check_int(p, "prioclass", &prioclass)) {
			if (!check_int(p, "direct", &td->odirect)) {
			if (!check_int(p, "rate", &td->rate)) {
			if (!check_int(p, "ratemin", &td->ratemin)) {
			if (!check_int(p, "ratecycle", &td->ratecycle)) {
			if (!check_int(p, "delay", &td->delay_sleep)) {
			if (!check_int(p, "cpumask", &cpu)) {
				fill_cpu_mask(td->cpumask, cpu);
			if (!check_int(p, "fsync", &td->fsync_blocks)) {
			if (!check_int(p, "startdelay", &td->start_delay)) {
			if (!check_int(p, "timeout", &td->timeout)) {
			if (!check_int(p, "aio_depth", &td->aio_depth)) {
			if (!strncmp(p, "sequential", 10)) {
			if (!strncmp(p, "random", 6)) {
			if (!strncmp(p, "aio", 3)) {

			printf("Client%d: bad option %s\n",td->thread_number,p);

		/* section done: register the job under its section name */
		if (add_job(td, name, prioclass, prio))
/*
 * Parse the short command line flags into the def_thread defaults
 * (the flag-letter dispatch lines are not visible in this fragment)
 * and return the index of the first non-option argument.
 */
static int parse_options(int argc, char *argv[])
	for (i = 1; i < argc; i++) {
		char *parm = argv[i];

			def_thread.sequential = !!atoi(parm);

			/* block size argument is given in KiB */
			def_thread.bs = atoi(parm);
			def_thread.bs <<= 10;
			if (!def_thread.bs) {
				printf("bad block size\n");
				def_thread.bs = DEF_BS;

			def_thread.timeout = atoi(parm);

			write_stat = !!atoi(parm);

			repeatable = !!atoi(parm);

			rate_quit = !!atoi(parm);

			def_thread.odirect = !!atoi(parm);

			/* -f takes the job file as a separate argument */
			if (i + 1 >= argc) {
				printf("-f needs file as arg\n");
			ini_file = strdup(argv[i+1]);

			printf("bad option %s\n", argv[i]);
/*
 * Collect children that have exited: waitpid() them, mark them
 * reaped, and subtract their rates from the running totals.
 */
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		if (td->runstate != TD_EXITED)

		td->runstate = TD_REAPED;
		waitpid(td->pid, NULL, 0);
		(*m_rate) -= td->ratemin;
		(*t_rate) -= td->rate;

	printf("Threads now running: %d", *nr_running);
	if (*m_rate || *t_rate)
		printf(", rate %d/%dKiB/sec", *t_rate, *m_rate);
/*
 * Parent main loop: fork each job (honouring its start_delay),
 * release them together via their per-job mutexes, then wait for all
 * of them to exit.
 */
static void run_threads(char *argv[])
	struct timeval genesis, now;
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate;

	gettimeofday(&genesis, NULL);

	printf("Starting %d threads\n", thread_number);

	signal(SIGINT, sig_handler);

	todo = thread_number;
	m_rate = t_rate = 0;

	/* creation loop */
	for (i = 0; i < thread_number; i++) {

		if (td->runstate != TD_NOT_CREATED)

		/*
		 * never got a chance to start, killed by other
		 * thread for some reason
		 */
		if (td->terminate) {

		/* honour start_delay: skip this job until its time comes */
		if (td->start_delay) {
			gettimeofday(&now, NULL);
			spent = mtime_since(&genesis, &now);

			if (td->start_delay * 1000 > spent)

		td->runstate = TD_CREATED;
		sem_init(&startup_sem, 1, 1);

		/* child side of the fork (the fork itself is on a line not visible here) runs the job */
		sem_wait(&startup_sem);
		thread_main(shm_id, i, argv);

	/* everyone created: flip them to STARTED and release their mutexes together */
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		if (td->runstate == TD_CREATED) {
			td->runstate = TD_STARTED;
			m_rate += td->ratemin;

		sem_post(&td->mutex);

	printf("Threads now running: %d", nr_running);
	if (m_rate || t_rate)
		printf(", rate %d/%dKiB/sec", t_rate, m_rate);

	reap_threads(&nr_running, &t_rate, &m_rate);

	/* wait for the stragglers */
	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
/*
 * Entry point: set up the shared-memory job array, parse options and
 * jobs, run them, then aggregate and print per-direction (read /
 * write) totals.
 */
int main(int argc, char *argv[])
	static unsigned long max_run[2], min_run[2], total_blocks[2];
	static unsigned long max_bw[2], min_bw[2], maxl[2], minl[2];
	static unsigned long read_mb, write_mb, read_agg, write_agg;

	/* jobs live in SysV shared memory so forked children can update them */
	shm_id = shmget(0, MAX_JOBS * sizeof(struct thread_data), IPC_CREAT | 0600);

	threads = shmat(shm_id, NULL, 0);
	if (threads == (void *) -1 ) {

	/* default cpumask: whatever the parent is allowed to run on */
	if (sched_getaffinity(getpid(), sizeof(cpu_set_t), &def_thread.cpumask) == -1) {
		perror("sched_getaffinity");

	/* defaults every job inherits via get_new_job() */
	def_thread.ddir = DDIR_READ;
	def_thread.bs = DEF_BS;
	def_thread.odirect = 1;
	def_thread.ratecycle = DEF_RATE_CYCLE;
	def_thread.sequential = 1;
	def_thread.timeout = DEF_TIMEOUT;

	i = parse_options(argc, argv);

	if (parse_jobs_ini(ini_file))
	parse_jobs_cmd(argc, argv, i);

	if (!thread_number) {
		printf("Nothing to do\n");

	printf("%s: %s, bs=%uKiB, timeo=%u, write_stat=%u, odirect=%d\n", argv[0], def_thread.sequential ? "sequential" : "random", def_thread.bs >> 10, def_thread.timeout, write_stat, def_thread.odirect);

	/* minimums start at "infinity" so the first sample always wins */
	min_bw[0] = min_run[0] = ~0UL;
	min_bw[1] = min_run[1] = ~0UL;
	minl[0] = minl[1] = ~0UL;
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];
		unsigned long bw = 0;

		if (td->runtime < min_run[td->ddir])
			min_run[td->ddir] = td->runtime;
		if (td->runtime > max_run[td->ddir])
			max_run[td->ddir] = td->runtime;

		/* KiB/sec; NOTE(review): divides by runtime -- presumably guarded against 0 on a line not visible here */
		bw = (td->io_blocks * td->bs) / td->runtime;
		if (bw < min_bw[td->ddir])
			min_bw[td->ddir] = bw;
		if (bw > max_bw[td->ddir])
			max_bw[td->ddir] = bw;
		/* minl/maxl track the spread of the per-thread *max* latencies */
		if (td->max_latency < minl[td->ddir])
			minl[td->ddir] = td->max_latency;
		if (td->max_latency > maxl[td->ddir])
			maxl[td->ddir] = td->max_latency;

		total_blocks[td->ddir] += td->io_blocks;

		if (td->ddir == DDIR_READ) {
			read_mb += (td->bs * td->io_blocks) >> 20;
			read_agg += (td->io_blocks * td->bs) / td->runtime;
		if (td->ddir == DDIR_WRITE) {
			write_mb += (td->bs * td->io_blocks) >> 20;
			write_agg += (td->io_blocks * td->bs) / td->runtime;

		show_thread_status(td);

	printf("Run status:\n");
	if (max_run[DDIR_READ])
		printf(" READ: io=%luMiB, aggrb=%lu, minl=%lu, maxl=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", read_mb, read_agg, minl[0], maxl[0], min_bw[0], max_bw[0], min_run[0], max_run[0]);
	if (max_run[DDIR_WRITE])
		printf(" WRITE: io=%luMiB, aggrb=%lu, minl=%lu, maxl=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", write_mb, write_agg, minl[1], maxl[1], min_bw[1], max_bw[1], min_run[1], max_run[1]);