2 * fio - the flexible io tester
4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
34 #include <sys/types.h>
37 #include <semaphore.h>
40 #include <asm/unistd.h>
42 #define MAX_JOBS (1024)
/*
 * ioprio_set(2)/ioprio_get(2) had no glibc wrappers when this was written,
 * so raw per-architecture syscall numbers are hardcoded below.
 * NOTE(review): this listing is elided -- the opening #if branch that the
 * 289/290 pair belongs to (presumably __i386__) is not visible here; the
 * chain as shown starts mid-way. Verify against the full file.
 */
45 * assume we don't have _get either, if _set isn't defined
47 #ifndef __NR_ioprio_set
50 #define __NR_ioprio_set 289
51 #define __NR_ioprio_get 290
52 #elif defined(__powerpc__) || defined(__powerpc64__)
53 #define __NR_ioprio_set 273
54 #define __NR_ioprio_get 274
55 #elif defined(__x86_64__)
56 #define __NR_ioprio_set 251
57 #define __NR_ioprio_get 252
58 #elif defined(__ia64__)
59 #define __NR_ioprio_set 1274
60 #define __NR_ioprio_get 1275
61 #elif defined(__alpha__)
62 #define __NR_ioprio_set 442
63 #define __NR_ioprio_get 443
64 #elif defined(__s390x__) || defined(__s390__)
65 #define __NR_ioprio_set 282
66 #define __NR_ioprio_get 283
68 #error "Unsupported arch"
/*
 * Thin wrapper for the ioprio_set(2) syscall; glibc of this era did not
 * export one, hence the raw syscall() with the per-arch __NR_ioprio_set
 * number defined above.
 */
static int ioprio_set(int which, int who, int ioprio)
{
	return syscall(__NR_ioprio_set, which, who, ioprio);
}
/* ioprio "who" selector: operate on a single process (see ioprio_set(2)) */
79 IOPRIO_WHO_PROCESS = 1,
/* priority class lives in the top bits of the ioprio value */
84 #define IOPRIO_CLASS_SHIFT 13
/*
 * Compile-time defaults for per-job parameters; copied into def_thread
 * in main() and inherited by each job in get_new_job().
 */
89 #define DEF_TIMEOUT (30)
90 #define DEF_RATE_CYCLE (1000)
91 #define DEF_ODIRECT (1)
92 #define DEF_SEQUENTIAL (1)
93 #define DEF_WRITESTAT (0)
94 #define DEF_RAND_REPEAT (1)
/*
 * Round a malloc'ed pointer up to an alignment boundary (for O_DIRECT I/O).
 * NOTE(review): MASK is defined outside this visible excerpt.
 */
96 #define ALIGN(buf) (char *) (((unsigned long) (buf) + MASK) & ~(MASK))
/* use a fixed random seed so runs are repeatable (cleared by -r0) */
98 static int repeatable = DEF_RAND_REPEAT;
/* quit a job when it fails to meet its minimum rate */
99 static int rate_quit = 1;
/* number of jobs configured so far; index into the threads[] shm array */
101 static int thread_number;
/* job file passed with -f, if any */
102 static char *ini_file;
/*
 * NOTE(review): the lines below are the interior of struct thread_data;
 * the struct header and several members (pid, fd, bs, ddir, rate, error,
 * mutex, cpumask, buf, file_name, ...) are elided from this listing --
 * they are referenced throughout the rest of the file.
 */
/* set asynchronously (signal handler / sibling threads) to request exit */
129 volatile int terminate;
/* TD_NOT_CREATED/TD_CREATED/TD_STARTED/TD_EXITED/TD_REAPED life cycle */
130 volatile int runstate;
133 unsigned int sequential;
135 unsigned int odirect;
/* usecs to sleep before each I/O, if configured */
136 unsigned int delay_sleep;
/* fsync every N blocks when writing buffered */
137 unsigned int fsync_blocks;
/* seconds to wait before this job starts */
138 unsigned int start_delay;
/* per-job runtime cap, seconds */
139 unsigned int timeout;
140 unsigned int use_aio;
/* libaio state: context, iocb pool, in-flight bookkeeping */
143 io_context_t *aio_ctx;
144 struct iocb *aio_iocbs;
145 unsigned int aio_depth;
146 unsigned int aio_cur_depth;
147 struct io_event *aio_events;
/* 0 = iocb slot free, 1 = busy (parallel to aio_iocbs[]) */
148 char *aio_iocbs_status;
/* rate limiting / minimum-rate policing state */
151 unsigned int ratemin;
152 unsigned int ratecycle;
153 unsigned long rate_usec_cycle;
154 long rate_pending_usleep;
155 unsigned long rate_blocks;
156 struct timeval lastrate;
158 unsigned long max_latency; /* msec */
159 unsigned long min_latency; /* msec */
160 unsigned long runtime; /* sec */
/* total blocks this job will transfer; io_blocks = completed so far */
161 unsigned long blocks;
162 unsigned long io_blocks;
163 unsigned long last_block;
/* per-job PRNG state for random offsets (srand48_r/lrand48_r) */
165 struct drand48_data random_state;
168 * bandwidth and latency stats
/* running sums / sums-of-squares for latency and bandwidth deviation */
170 unsigned long stat_time;
171 unsigned long stat_time_sq;
172 unsigned long stat_time_samples;
173 unsigned long stat_io_blocks;
174 unsigned long stat_bw;
175 unsigned long stat_bw_sq;
176 unsigned long stat_bw_samples;
177 struct timeval stat_sample_time;
179 struct timeval start;
/* shared-memory array of all jobs, plus the template job for defaults */
182 static struct thread_data *threads;
183 static struct thread_data def_thread;
/* handshake semaphore: child posts when initialized, parent waits */
185 static sem_t startup_sem;
/*
 * SIGINT handler: walks every configured job slot.
 * NOTE(review): loop body is elided in this listing -- presumably it sets
 * td->terminate on each job; confirm against the full file.
 */
187 static void sig_handler(int sig)
191 for (i = 0; i < thread_number; i++) {
192 struct thread_data *td = &threads[i];
/*
 * Seed this job's private drand48 state. Uses a fixed seed (123) for
 * repeatable runs; otherwise reads the seed from /dev/random.
 * NOTE(review): listing is elided -- the branch on `repeatable`, the
 * error paths for open()/read(), and close(fd) are not visible here.
 */
199 static int init_random_state(struct thread_data *td)
201 unsigned long seed = 123;
207 int fd = open("/dev/random", O_RDONLY);
214 if (read(fd, &seed, sizeof(seed)) < (int) sizeof(seed)) {
223 srand48_r(seed, &td->random_state);
/*
 * utime_since/mtime_since: elapsed time between two timevals in usecs
 * resp. msecs.
 * NOTE(review): listing is elided -- the usec-borrow body of the
 * `sec > 0 && usec < 0` branch and the final return statements are not
 * visible here; do not assume their exact form.
 */
227 static unsigned long utime_since(struct timeval *s, struct timeval *e)
231 sec = e->tv_sec - s->tv_sec;
232 usec = e->tv_usec - s->tv_usec;
233 if (sec > 0 && usec < 0) {
238 sec *= (double) 1000000;
243 static unsigned long mtime_since(struct timeval *s, struct timeval *e)
247 sec = e->tv_sec - s->tv_sec;
248 usec = e->tv_usec - s->tv_usec;
249 if (sec > 0 && usec < 0) {
254 sec *= (double) 1000;
255 usec /= (double) 1000;
260 static unsigned long mtime_since_now(struct timeval *s)
264 gettimeofday(&t, NULL);
265 return mtime_since(s, &t);
/*
 * Collapse an absolute timeval into a single millisecond count
 * (seconds scaled up, microseconds scaled down).
 */
static inline unsigned long msec_now(struct timeval *s)
{
	return s->tv_sec * 1000 + s->tv_usec / 1000;
}
/*
 * Pick the next block offset: random via the per-job drand48 state, or
 * (in the elided else-branch) sequential.
 * NOTE(review): the scaling divides lrand48_r output by RAND_MAX+1.0;
 * lrand48 yields values in [0, 2^31), which equals RAND_MAX+1 on glibc
 * but not necessarily elsewhere -- verify the intended range. The
 * sequential branch and the return are elided from this listing.
 */
273 static unsigned long get_next_offset(struct thread_data *td)
278 if (!td->sequential) {
279 lrand48_r(&td->random_state, &r);
280 b = (1+(double) (td->blocks-1) * r / (RAND_MAX+1.0));
/*
 * Fold one completion latency (msec) into the running latency sums, and
 * roughly once per sampling window fold a bandwidth sample as well.
 * Sums and sums-of-squares feed the mean/deviation math in
 * show_thread_status().
 * NOTE(review): elided lines likely gate the bandwidth sample on `spent`
 * exceeding a window and accumulate td->stat_bw -- confirm in full file.
 */
289 static void add_stat_sample(struct thread_data *td, unsigned long msec)
293 td->stat_time += msec;
294 td->stat_time_sq += msec * msec;
295 td->stat_time_samples++;
297 spent = mtime_since_now(&td->stat_sample_time);
/* KiB/sec over the window: blocks completed since last sample * bs / msec */
299 unsigned long rate = ((td->io_blocks - td->stat_io_blocks) * td->bs) / spent;
302 td->stat_bw_sq += rate * rate;
303 gettimeofday(&td->stat_sample_time, NULL);
304 td->stat_io_blocks = td->io_blocks;
305 td->stat_bw_samples++;
/*
 * Sleep for `usec` microseconds via nanosleep, retrying with the
 * remaining time if interrupted.
 * NOTE(review): the surrounding retry-loop construct (and the check on
 * rem.tv_nsec) is elided from this listing -- the re-assignment of
 * req.tv_nsec only makes sense inside such a loop.
 */
309 static void usec_sleep(int usec)
311 struct timespec req = { .tv_sec = 0, .tv_nsec = usec * 1000 };
315 rem.tv_sec = rem.tv_nsec = 0;
316 nanosleep(&req, &rem);
320 req.tv_nsec = rem.tv_nsec;
/*
 * Pace I/O to the configured rate. If this I/O finished faster than one
 * rate cycle, bank the difference and sleep once the debt reaches 100ms;
 * if it ran over, pay the overtime back out of the banked sleep.
 * NOTE(review): the early-out for unset td->rate and the else-branch
 * structure are elided from this listing.
 */
324 static void rate_throttle(struct thread_data *td, unsigned long time_spent)
329 if (time_spent < td->rate_usec_cycle) {
330 unsigned long s = td->rate_usec_cycle - time_spent;
332 td->rate_pending_usleep += s;
/* batch sleeps: only pay the debt once it is >= 100 msec */
333 if (td->rate_pending_usleep >= 100000) {
334 usec_sleep(td->rate_pending_usleep);
335 td->rate_pending_usleep = 0;
338 long overtime = time_spent - td->rate_usec_cycle;
340 td->rate_pending_usleep -= overtime;
/*
 * Police the configured minimum rate. Returns nonzero (in elided code)
 * when the job fails to sustain td->ratemin KiB/sec over a rate cycle;
 * callers then terminate the job if rate_quit is set.
 */
344 static int check_min_rate(struct thread_data *td, struct timeval *now)
350 * allow a 2 second settle period in the beginning
352 if (mtime_since(&td->start, now) < 2000)
356 * if rate blocks is set, sample is running
358 if (td->rate_blocks) {
359 spent = mtime_since(&td->lastrate, now);
/* not a full cycle yet -- keep sampling */
360 if (spent < td->ratecycle)
363 rate = ((td->io_blocks - td->rate_blocks) * td->bs) / spent;
364 if (rate < td->ratemin) {
365 printf("Client%d: min rate %d not met, got %ldKiB/sec\n", td->thread_number, td->ratemin, rate);
/* restart the sampling window from the current position */
372 td->rate_blocks = td->io_blocks;
373 memcpy(&td->lastrate, now, sizeof(*now));
/*
 * True once the job has run for at least td->timeout seconds.
 * NOTE(review): the return statements are elided from this listing.
 */
377 static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
379 if (mtime_since(&td->start, t) >= td->timeout * 1000)
/* buffered writes need an explicit fsync; O_DIRECT writes do not */
385 #define should_fsync(td) ((td)->ddir == DDIR_WRITE && !(td)->odirect)
/*
 * Synchronous I/O engine: for each block, seek to the next offset,
 * read or write one buffer, then apply rate throttling, min-rate
 * policing, latency stats, and the runtime cap. Error-handling bodies,
 * msec computation, and the loop-exit paths are elided in this listing.
 */
387 static void do_sync_io(struct thread_data *td)
390 unsigned long blocks, msec, usec;
392 for (blocks = 0; blocks < td->blocks; blocks++) {
393 off_t offset = get_next_offset(td);
399 if (lseek(td->fd, offset, SEEK_SET) == -1) {
/* optional per-I/O think time */
405 usec_sleep(td->delay_sleep);
407 gettimeofday(&s, NULL);
409 if (td->ddir == DDIR_READ)
410 ret = read(td->fd, td->buf, td->bs);
412 ret = write(td->fd, td->buf, td->bs);
/* short transfer counts as an error (elided branch body) */
414 if (ret < (int) td->bs) {
/* periodic fsync for buffered writes, every fsync_blocks blocks */
422 if (should_fsync(td) && td->fsync_blocks &&
423 (td->io_blocks % td->fsync_blocks) == 0)
426 gettimeofday(&e, NULL);
428 usec = utime_since(&s, &e);
430 rate_throttle(td, usec);
432 if (check_min_rate(td, &e)) {
438 add_stat_sample(td, msec)
440 if (msec < td->min_latency)
441 td->min_latency = msec;
442 if (msec > td->max_latency)
443 td->max_latency = msec;
445 if (runtime_exceeded(td, &e))
/* final flush for buffered writes after the loop */
449 if (should_fsync(td))
/*
 * Return an iocb to the free pool: compute its slot index by pointer
 * arithmetic against the iocb array and clear its busy flag.
 * NOTE(review): a decrement of td->aio_cur_depth is presumably elided
 * here -- callers treat aio_cur_depth as in-flight count.
 */
453 static void aio_put_iocb(struct thread_data *td, struct iocb *iocb)
455 long offset = ((long) iocb - (long) td->aio_iocbs)/ sizeof(struct iocb);
457 td->aio_iocbs_status[offset] = 0;
/*
 * Grab a free iocb slot, prep it for a read or write at the next offset
 * using the slot's region of the job buffer, and stash the submission
 * time (msec) in the iocb's callback/data field for latency accounting.
 * NOTE(review): the "all slots busy" path and the return are elided.
 */
461 static struct iocb *aio_get_iocb(struct thread_data *td, struct timeval *t)
463 struct iocb *iocb = NULL;
466 for (i = 0; i < td->aio_depth; i++) {
467 if (td->aio_iocbs_status[i] == 0) {
468 td->aio_iocbs_status[i] = 1;
469 iocb = &td->aio_iocbs[i];
475 off_t off = get_next_offset(td);
/* each iocb slot owns its own bs-sized slice of td->buf */
476 char *p = td->buf + i * td->bs;
478 if (td->ddir == DDIR_READ)
479 io_prep_pread(iocb, td->fd, p, td->bs, off);
481 io_prep_pwrite(iocb, td->fd, p, td->bs, off);
/* abuse the callback pointer to carry the submit timestamp (see iocb_time) */
483 io_set_callback(iocb, (io_callback_t) msec_now(t));
/*
 * Submit one iocb; the elided lines evidently retry on EAGAIN and
 * handle other errors. Return-value handling is not visible here.
 */
489 static int aio_submit(struct thread_data *td, struct iocb *iocb)
494 ret = io_submit(*td->aio_ctx, 1, &iocb);
500 else if (errno == EAGAIN)
/* recover the submit timestamp stowed by aio_get_iocb() */
509 #define iocb_time(iocb) ((unsigned long) (iocb)->data)
/*
 * libaio engine: keep up to aio_depth I/Os in flight. Each iteration
 * preps and submits one iocb, reaps completions (non-blocking while
 * below full depth, blocking for at least one when full -- the
 * min_evts/timeout selection lines are elided), computes per-event
 * latency from the timestamp stowed in the iocb, and applies the same
 * throttling/stats/timeout logic as do_sync_io().
 */
511 static void do_async_io(struct thread_data *td)
514 unsigned long blocks, msec, usec;
516 for (blocks = 0; blocks < td->blocks; blocks++) {
517 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
518 struct timespec *timeout;
519 int ret, i, min_evts = 0;
526 usec_sleep(td->delay_sleep);
528 gettimeofday(&s, NULL);
530 iocb = aio_get_iocb(td, &s);
532 ret = aio_submit(td, iocb);
/* below full depth: poll for completions without blocking (elided) */
540 if (td->aio_cur_depth < td->aio_depth) {
548 ret = io_getevents(*td->aio_ctx, min_evts, td->aio_cur_depth, td->aio_events, timeout);
555 gettimeofday(&e, NULL);
557 for (i = 0; i < ret; i++) {
558 struct io_event *ev = td->aio_events + i;
/* latency = now - submit time carried in the iocb */
564 msec = msec_now(&e) - iocb_time(iocb);
565 add_stat_sample(td, msec);
567 if (msec < td->min_latency)
568 td->min_latency = msec;
569 if (msec > td->max_latency)
570 td->max_latency = msec;
572 aio_put_iocb(td, iocb);
576 * the rate is batched for now, it should work for batches
577 * of completions except the very first one which may look
580 usec = utime_since(&s, &e);
582 rate_throttle(td, usec);
584 if (check_min_rate(td, &e)) {
589 if (runtime_exceeded(td, &e))
/*
 * Drain in-flight aio before teardown: first reap anything already
 * complete (zero timeout), then io_cancel the rest, and finally block
 * until any stragglers finish.
 * NOTE(review): indexing td->aio_iocbs[i] with the *event* index i in
 * the first loop looks suspect -- events are not guaranteed to arrive
 * in slot order; ev->obj would identify the right iocb. Confirm against
 * the elided lines before relying on this.
 */
594 static void cleanup_pending_aio(struct thread_data *td)
596 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
601 * get immediately available events, if any
603 r = io_getevents(*td->aio_ctx, 0, td->aio_cur_depth, td->aio_events, &ts);
605 for (i = 0; i < r; i++)
606 aio_put_iocb(td, &td->aio_iocbs[i]);
610 * now cancel remaining active events
612 for (i = 0; i < td->aio_depth; i++) {
/* slot free -- nothing in flight for this iocb */
613 if (td->aio_iocbs_status[i] == 0)
616 r = io_cancel(*td->aio_ctx, &td->aio_iocbs[i], td->aio_events);
618 aio_put_iocb(td, &td->aio_iocbs[i]);
/* wait for whatever could not be cancelled */
621 if (td->aio_cur_depth)
622 io_getevents(*td->aio_ctx, td->aio_cur_depth, td->aio_cur_depth, td->aio_events, NULL);
/*
 * Tear down the aio state: drain pending I/O, destroy the context and
 * free the allocations made in init_aio(). Guards for aio_ctx/aio_iocbs
 * appear elided; note `if (p) free(p)` is redundant -- free(NULL) is a
 * no-op.
 */
625 static void cleanup_aio(struct thread_data *td)
627 if (td->aio_cur_depth)
628 cleanup_pending_aio(td);
631 io_destroy(*td->aio_ctx);
637 free(td->aio_events);
638 if (td->aio_iocbs_status)
639 free(td->aio_iocbs_status);
/*
 * Allocate the aio context plus per-depth iocb/event/status arrays and
 * initialize the io queue. Returns nonzero on failure (elided branch).
 * NOTE(review): none of the malloc() results visible here are
 * NULL-checked; verify the elided lines or harden when touching this.
 */
642 static int init_aio(struct thread_data *td)
644 td->aio_ctx = malloc(sizeof(*td->aio_ctx));
646 if (io_queue_init(td->aio_depth, td->aio_ctx)) {
651 td->aio_iocbs = malloc(td->aio_depth * sizeof(struct iocb));
652 td->aio_events = malloc(td->aio_depth * sizeof(struct io_event));
653 td->aio_iocbs_status = malloc(td->aio_depth * sizeof(char));
/*
 * Per-job entry point (run in a forked child): attach the shared-memory
 * job array, pin CPU affinity, open the target file, set up aio/PRNG/
 * ioprio, size the job (file size for reads, fixed 1 GiB for writes),
 * handshake with the parent via startup_sem/td->mutex, run the chosen
 * I/O engine, and record runtime and exit state. Many error-path and
 * cleanup lines are elided from this listing.
 */
657 static void *thread_main(int shm_id, int offset, char *argv[])
659 struct thread_data *td;
660 void *data, *ptr = NULL;
666 data = shmat(shm_id, NULL, 0);
/* this job's slot inside the shared-memory array */
667 td = data + offset * sizeof(struct thread_data);
672 if (sched_setaffinity(td->pid, sizeof(td->cpumask), &td->cpumask) == -1) {
677 printf("Client%d (pid=%u) started\n", td->thread_number, td->pid);
/* rename the process so ps shows which job is which */
679 sprintf(argv[0], "fio%d", offset);
685 if (td->ddir == DDIR_READ)
686 td->fd = open(td->file_name, flags | O_RDONLY);
688 td->fd = open(td->file_name, flags | O_WRONLY | O_CREAT | O_TRUNC, 0644);
695 if (td->use_aio && init_aio(td))
698 if (init_random_state(td))
701 if (td->ddir == DDIR_READ) {
702 if (fstat(td->fd, &st) == -1) {
703 td->blocks = st.st_size / td->bs;
/* writes: fixed 1 GiB working set */
713 td->blocks = 1024 * 1024 * 1024 / td->bs;
716 if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
/* tell parent we're ready, then wait for the collective start signal */
722 sem_post(&startup_sem);
723 sem_wait(&td->mutex);
725 gettimeofday(&td->start, NULL);
728 memcpy(&td->lastrate, &td->start, sizeof(td->start));
730 memcpy(&td->stat_sample_time, &td->start, sizeof(td->start));
/* sync engine: one aligned buffer; aio: one slice per queue slot */
733 ptr = malloc(td->bs + MASK);
734 td->buf = ALIGN(ptr);
737 ptr = malloc(td->bs * td->aio_depth + MASK);
738 td->buf = ALIGN(ptr);
742 td->runtime = mtime_since_now(&td->start);
752 sem_post(&startup_sem);
753 sem_wait(&td->mutex);
757 td->runstate = TD_EXITED;
/* detach/remove the shared-memory segment (body elided in this listing) */
762 static void free_shm(void)
/*
 * Print one job's final results: total I/O, bandwidth, max latency, and
 * mean/deviation of the latency and bandwidth samples collected in
 * add_stat_sample().
 * NOTE(review): the deviation formula subtracts (mean^2)/n rather than
 * n*mean^2 (equivalently (sum)^2/n) from the sum of squares, which does
 * not match the usual sample-variance identity -- verify before reusing
 * these numbers.
 */
767 static void show_thread_status(struct thread_data *td)
769 int prio, prio_class;
770 unsigned long bw = 0;
771 double n_lat, n_bw, m_lat, m_bw, dev_lat, dev_bw;
/* nothing ran and no error: stay quiet */
773 if (!td->io_blocks && !td->error)
777 bw = (td->io_blocks * td->bs) / td->runtime;
/* split the combined ioprio value back into class and priority */
779 prio = td->ioprio & 0xff;
780 prio_class = td->ioprio >> IOPRIO_CLASS_SHIFT;
782 n_lat = (double) td->stat_time_samples;
783 n_bw = (double) td->stat_bw_samples;
785 m_lat = (double) td->stat_time / n_lat;
786 dev_lat = sqrt(((double) td->stat_time_sq - (m_lat * m_lat) / n_lat) / (n_lat - 1));
787 m_bw = (double) td->stat_bw / n_bw;
788 dev_bw = sqrt(((double) td->stat_bw_sq - (m_bw * m_bw) / n_bw) / (n_bw - 1));
790 printf("Client%d: err=%2d, io=%6luMiB, bw=%6luKiB/sec, latmax=%5lumsec, latavg=%5.02fmsec, latdev=%5.02fmsec, bwavg=%5.02fKiB/sec, bwdev=%5.02fKiB/sec\n", td->thread_number, td->error, td->io_blocks * td->bs >> 20, bw, td->max_latency, m_lat, dev_lat, m_bw, dev_bw);
/*
 * Derive the per-I/O pacing interval (usecs per I/O) from the requested
 * rate in KiB/sec and the block size. Rejects ratemin > rate.
 * NOTE(review): if rate*1024 < bs, nr_reads_per_sec is 0 and the
 * division below would trap -- check whether an elided guard exists.
 */
793 static int setup_rate(struct thread_data *td)
795 int nr_reads_per_sec;
800 if (td->rate < td->ratemin) {
801 fprintf(stderr, "min rate larger than nominal rate\n");
805 nr_reads_per_sec = td->rate * 1024 / td->bs;
806 td->rate_usec_cycle = 1000000 / nr_reads_per_sec;
807 td->rate_pending_usleep = 0;
/*
 * Claim the next slot in the shared job array (up to MAX_JOBS), zero it
 * and seed it with the defaults from def_thread. thread_number is the
 * 1-based id stored in the slot.
 * NOTE(review): the `global` branch (returning &def_thread) and the
 * return statement are elided from this listing.
 */
811 static struct thread_data *get_new_job(int global)
813 struct thread_data *td;
817 if (thread_number >= MAX_JOBS)
820 td = &threads[thread_number++];
821 memset(td, 0, sizeof(*td));
823 td->thread_number = thread_number;
824 td->ddir = def_thread.ddir;
825 td->bs = def_thread.bs;
826 td->odirect = def_thread.odirect;
827 td->ratecycle = def_thread.ratecycle;
828 td->sequential = def_thread.sequential;
829 td->timeout = def_thread.timeout;
830 memcpy(&td->cpumask, &def_thread.cpumask, sizeof(td->cpumask));
/*
 * Release a job slot claimed by get_new_job() (used when add_job fails).
 * NOTE(review): the matching thread_number decrement appears elided.
 */
835 static void put_job(struct thread_data *td)
837 memset(&threads[td->thread_number - 1], 0, sizeof(*td));
/*
 * Finalize a job: record the target file, init its start semaphore
 * (pshared, for the forked child), compose the ioprio value, and print
 * the job summary. The global template (def_thread) is never added.
 * NOTE(review): strcpy() into td->file_name is unbounded -- file_name's
 * size is declared outside this excerpt; a length check may exist in
 * elided code, verify before trusting user-supplied paths.
 */
841 static int add_job(struct thread_data *td, const char *filename, int prioclass,
844 if (td == &def_thread)
847 strcpy(td->file_name, filename);
/* pshared=1: the semaphore lives in shm and is used across fork() */
848 sem_init(&td->mutex, 1, 0);
/* start high so the first sample always lowers it */
849 td->min_latency = 10000000;
850 td->ioprio = (prioclass << IOPRIO_CLASS_SHIFT) | prio;
/* aio requested but no queue depth given (elided: default/abort) */
852 if (td->use_aio && !td->aio_depth)
858 printf("Client%d: file=%s, rw=%d, prio=%d/%d, seq=%d, odir=%d, bs=%d, rate=%d, aio=%d, aio_depth=%d\n", td->thread_number, filename, td->ddir, prioclass, prio, td->sequential, td->odirect, td->bs, td->rate, td->use_aio, td->aio_depth);
/*
 * Build a cpu_set_t from a bitmask of allowed CPUs (elided: the test of
 * bit i in `cpu` before CPU_SET).
 * NOTE(review): cpumask is passed *by value*, so the CPU_SET() writes
 * are lost to the caller -- this looks like a real bug unless an elided
 * line copies it back; confirm against the full file.
 */
862 static void fill_cpu_mask(cpu_set_t cpumask, int cpu)
868 for (i = 0; i < sizeof(int) * 8; i++) {
870 CPU_SET(i, &cpumask);
/*
 * Copy an option value out of a "key=value,..." command-line string,
 * stopping at ',', '}' or NUL. Termination of `output` is elided.
 * NOTE(review): no bound on `output` is visible -- callers pass 256-byte
 * malloc'ed buffers; verify input cannot exceed that.
 */
874 static void fill_option(const char *input, char *output)
879 while (input[i] != ',' && input[i] != '}' && input[i] != '\0') {
880 output[i] = input[i];
/*
 * Parse jobs given directly on the command line (one argv entry per
 * job, "{key=value,...}" style): for each entry, allocate a job slot
 * and scan it for every known option with strstr()+fill_option(),
 * then register the job via add_job(). Error paths, the per-option
 * advancing of `c`, and several branch bodies are elided here.
 * NOTE(review): strstr() matching means an option name that is a
 * substring of another (e.g. "rate" in "ratemin") depends on elided
 * ordering/advancing logic -- verify against the full file.
 */
895 static void parse_jobs_cmd(int argc, char *argv[], int index)
897 struct thread_data *td;
898 unsigned int prio, prioclass, cpu;
899 char *string, *filename, *p, *c;
/* scratch buffers for option values; fill_option() bounds are implicit */
902 string = malloc(256);
903 filename = malloc(256);
905 for (i = index; i < argc; i++) {
921 c = strstr(p, "rw=");
925 td->ddir = DDIR_READ;
927 td->ddir = DDIR_WRITE;
930 c = strstr(p, "prio=");
936 c = strstr(p, "prioclass=");
/* single-digit class parsed directly from the character */
939 prioclass = *c - '0';
942 c = strstr(p, "file=");
945 fill_option(c, filename);
948 c = strstr(p, "bs=");
951 fill_option(c, string);
952 td->bs = strtoul(string, NULL, 10);
956 c = strstr(p, "direct=");
965 c = strstr(p, "delay=");
968 fill_option(c, string);
969 td->delay_sleep = strtoul(string, NULL, 10);
972 c = strstr(p, "rate=");
975 fill_option(c, string);
976 td->rate = strtoul(string, NULL, 10);
979 c = strstr(p, "ratemin=");
982 fill_option(c, string);
983 td->ratemin = strtoul(string, NULL, 10);
986 c = strstr(p, "ratecycle=");
989 fill_option(c, string);
990 td->ratecycle = strtoul(string, NULL, 10);
993 c = strstr(p, "cpumask=");
996 fill_option(c, string);
997 cpu = strtoul(string, NULL, 10);
998 fill_cpu_mask(td->cpumask, cpu);
1001 c = strstr(p, "fsync=");
1004 fill_option(c, string);
1005 td->fsync_blocks = strtoul(string, NULL, 10);
1008 c = strstr(p, "startdelay=");
1011 fill_option(c, string);
1012 td->start_delay = strtoul(string, NULL, 10);
1015 c = strstr(p, "timeout=");
1018 fill_option(c, string);
1019 td->timeout = strtoul(string, NULL, 10);
1022 c = strstr(p, "aio_depth=");
1025 fill_option(c, string);
1026 td->aio_depth = strtoul(string, NULL, 10);
/* bare flags: aio / random / sequential (branch bodies elided) */
1029 c = strstr(p, "aio");
1033 c = strstr(p, "random");
1036 c = strstr(p, "sequential");
1040 if (add_job(td, filename, prioclass, prio))
/*
 * Try to parse "name=<int>" or "name = <int>" from an ini line into
 * *val; return values are elided (convention elsewhere: 0 on match).
 * NOTE(review): building a scanf format with sprintf into `str` -- the
 * declaration/size of `str` is elided; a long option name could
 * overflow it. Names passed in this file are short literals.
 */
1048 static int check_int(char *p, char *name, unsigned int *val)
1052 sprintf(str, "%s=%%d", name);
1053 if (sscanf(p, str, val) == 1)
/* allow spaces around '=' as a second try */
1056 sprintf(str, "%s = %%d", name);
1057 if (sscanf(p, str, val) == 1)
/*
 * True when an ini line holds only whitespace/control chars or (in an
 * elided branch, presumably) a comment marker.
 */
1063 static int is_empty_or_comment(char *line)
1067 for (i = 0; i < strlen(line); i++) {
1070 if (!isspace(line[i]) && !iscntrl(line[i]))
/*
 * Parse a job (ini) file: each "[name]" section becomes a job (the
 * special "[global]" section updates def_thread), and each following
 * line is matched against the known options via check_int(). Section
 * handling details, error paths, and the strncmp branch bodies are
 * elided in this listing.
 */
1077 static int parse_jobs_ini(char *file)
1079 unsigned int prioclass, prio, cpu, global;
1080 struct thread_data *td;
1081 char *string, *name;
1086 f = fopen(file, "r");
1092 string = malloc(4096);
/* outer loop: find the next [section] header */
1095 while ((p = fgets(string, 4096, f)) != NULL) {
1096 if (is_empty_or_comment(p))
1098 if (sscanf(p, "[%s]", name) != 1)
1101 global = !strncmp(name, "global", 6);
/* strip the trailing ']' captured by %s */
1103 name[strlen(name) - 1] = '\0';
1105 td = get_new_job(global);
/* inner loop: option lines until the next section (elided detection) */
1113 while ((p = fgets(string, 4096, f)) != NULL) {
1114 if (is_empty_or_comment(p))
1118 if (!check_int(p, "bs", &td->bs)) {
1123 if (!check_int(p, "rw", &td->ddir)) {
1127 if (!check_int(p, "prio", &prio)) {
1131 if (!check_int(p, "prioclass", &prioclass)) {
1135 if (!check_int(p, "direct", &td->odirect)) {
1139 if (!check_int(p, "rate", &td->rate)) {
1143 if (!check_int(p, "ratemin", &td->ratemin)) {
1147 if (!check_int(p, "ratecycle", &td->ratecycle)) {
1151 if (!check_int(p, "delay", &td->delay_sleep)) {
1155 if (!check_int(p, "cpumask", &cpu)) {
1156 fill_cpu_mask(td->cpumask, cpu);
1160 if (!check_int(p, "fsync", &td->fsync_blocks)) {
1164 if (!check_int(p, "startdelay", &td->start_delay)) {
1168 if (!check_int(p, "timeout", &td->timeout)) {
1172 if (!check_int(p, "aio_depth", &td->aio_depth)) {
1176 if (!strncmp(p, "sequential", 10)) {
1181 if (!strncmp(p, "random", 6)) {
1186 if (!strncmp(p, "aio", 3)) {
1192 printf("Client%d: bad option %s\n",td->thread_number,p);
1196 if (add_job(td, name, prioclass, prio))
/*
 * Parse global command-line switches into def_thread / module globals
 * (sequential, block size, timeout, repeatability, rate-quit, O_DIRECT,
 * -f job file). The switch/option-letter dispatch and the returned
 * index of the first non-option argument are elided in this listing.
 */
1206 static int parse_options(int argc, char *argv[])
1210 for (i = 1; i < argc; i++) {
1211 char *parm = argv[i];
1220 def_thread.sequential = !!atoi(parm);
/* block size is given in KiB on the command line */
1224 def_thread.bs = atoi(parm);
1225 def_thread.bs <<= 10;
1226 if (!def_thread.bs) {
1227 printf("bad block size\n");
1228 def_thread.bs = DEF_BS;
1233 def_thread.timeout = atoi(parm);
1237 repeatable = !!atoi(parm);
1241 rate_quit = !!atoi(parm);
1245 def_thread.odirect = !!atoi(parm);
1248 if (i + 1 >= argc) {
1249 printf("-f needs file as arg\n");
1252 ini_file = strdup(argv[i+1]);
1256 printf("bad option %s\n", argv[i]);
/*
 * Collect children that have flagged TD_EXITED: waitpid() them, mark
 * them TD_REAPED, subtract their rates from the running totals and
 * report the new thread/rate status. The nr_running decrement and the
 * "anything reaped?" guard before printing are elided.
 */
1264 static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
1268 for (i = 0; i < thread_number; i++) {
1269 struct thread_data *td = &threads[i];
1271 if (td->runstate != TD_EXITED)
1274 td->runstate = TD_REAPED;
1275 waitpid(td->pid, NULL, 0);
1277 (*m_rate) -= td->ratemin;
1278 (*t_rate) -= td->rate;
1283 printf("Threads now running: %d", *nr_running);
1284 if (*m_rate || *t_rate)
1285 printf(", rate %d/%dKiB/sec", *t_rate, *m_rate);
/*
 * Orchestrate the run: fork one child per job (honoring start_delay),
 * wait for each child's startup handshake on startup_sem, then release
 * all created jobs at once via their per-job mutexes, and loop reaping
 * finished children until none remain. fork()/exit plumbing and the
 * inner sleep/poll lines are elided in this listing.
 */
1290 static void run_threads(char *argv[])
1292 struct timeval genesis;
1293 struct thread_data *td;
1294 unsigned long spent;
1295 int i, todo, nr_running, m_rate, t_rate;
1297 gettimeofday(&genesis, NULL);
1299 printf("Starting %d threads\n", thread_number);
1302 signal(SIGINT, sig_handler);
1304 todo = thread_number;
1306 m_rate = t_rate = 0;
/* creation pass: fork jobs whose start_delay has expired */
1309 for (i = 0; i < thread_number; i++) {
1312 if (td->runstate != TD_NOT_CREATED)
1316 * never got a chance to start, killed by other
1317 * thread for some reason
1319 if (td->terminate) {
1324 if (td->start_delay) {
1325 spent = mtime_since_now(&genesis);
/* not yet due -- leave for a later pass */
1327 if (td->start_delay * 1000 > spent)
1331 td->runstate = TD_CREATED;
1332 sem_init(&startup_sem, 1, 1);
/* block until the new child posts its startup handshake */
1336 sem_wait(&startup_sem);
/* child path: run the job, never returns here (exit elided) */
1338 thread_main(shm_id, i, argv);
/* start pass: release every created job simultaneously */
1343 for (i = 0; i < thread_number; i++) {
1344 struct thread_data *td = &threads[i];
1346 if (td->runstate == TD_CREATED) {
1347 td->runstate = TD_STARTED;
1349 m_rate += td->ratemin;
1351 sem_post(&td->mutex);
1353 printf("Threads now running: %d", nr_running);
1354 if (m_rate || t_rate)
1355 printf(", rate %d/%dKiB/sec", t_rate, m_rate);
1360 reap_threads(&nr_running, &t_rate, &m_rate);
/* drain: keep reaping until every child has exited */
1366 while (nr_running) {
1367 reap_threads(&nr_running, &t_rate, &m_rate);
/*
 * Entry point: create the shared-memory job array, capture the parent's
 * CPU affinity as the default mask, fill def_thread defaults, parse
 * options plus either a job file (-f) or command-line jobs, run all
 * jobs, then aggregate and print per-direction (read/write) min/max
 * runtime, bandwidth, latency and totals. Error exits, the run_threads
 * call, and several guards are elided in this listing.
 */
1372 int main(int argc, char *argv[])
1374 static unsigned long max_run[2], min_run[2], total_blocks[2];
1375 static unsigned long max_bw[2], min_bw[2], maxl[2], minl[2];
1376 static unsigned long read_mb, write_mb, read_agg, write_agg;
1379 shm_id = shmget(0, MAX_JOBS * sizeof(struct thread_data), IPC_CREAT | 0600);
1385 threads = shmat(shm_id, NULL, 0);
1386 if (threads == (void *) -1 ) {
/* inherit the parent's affinity as the default job cpumask */
1393 if (sched_getaffinity(getpid(), sizeof(cpu_set_t), &def_thread.cpumask) == -1) {
1394 perror("sched_getaffinity");
1401 def_thread.ddir = DDIR_READ;
1402 def_thread.bs = DEF_BS;
1403 def_thread.odirect = 1;
1404 def_thread.ratecycle = DEF_RATE_CYCLE;
1405 def_thread.sequential = 1;
1406 def_thread.timeout = DEF_TIMEOUT;
/* i = index of first non-option arg (start of cmdline jobs) */
1408 i = parse_options(argc, argv);
1411 if (parse_jobs_ini(ini_file))
1414 parse_jobs_cmd(argc, argv, i);
1416 if (!thread_number) {
1417 printf("Nothing to do\n");
/* seed the minima so the first job always lowers them */
1423 min_bw[0] = min_run[0] = ~0UL;
1424 min_bw[1] = min_run[1] = ~0UL;
1425 minl[0] = minl[1] = ~0UL;
/* aggregate per-direction stats across all finished jobs */
1426 for (i = 0; i < thread_number; i++) {
1427 struct thread_data *td = &threads[i];
1428 unsigned long bw = 0;
1433 if (td->runtime < min_run[td->ddir])
1434 min_run[td->ddir] = td->runtime;
1435 if (td->runtime > max_run[td->ddir])
1436 max_run[td->ddir] = td->runtime;
1439 bw = (td->io_blocks * td->bs) / td->runtime;
1440 if (bw < min_bw[td->ddir])
1441 min_bw[td->ddir] = bw;
1442 if (bw > max_bw[td->ddir])
1443 max_bw[td->ddir] = bw;
1444 if (td->max_latency < minl[td->ddir])
1445 minl[td->ddir] = td->max_latency;
1446 if (td->max_latency > maxl[td->ddir])
1447 maxl[td->ddir] = td->max_latency;
1449 total_blocks[td->ddir] += td->io_blocks;
1451 if (td->ddir == DDIR_READ) {
1452 read_mb += (td->bs * td->io_blocks) >> 20;
1454 read_agg += (td->io_blocks * td->bs) / td->runtime;
1456 if (td->ddir == DDIR_WRITE) {
1457 write_mb += (td->bs * td->io_blocks) >> 20;
1459 write_agg += (td->io_blocks * td->bs) / td->runtime;
1463 show_thread_status(td);
1466 printf("Run status:\n");
1467 if (max_run[DDIR_READ])
1468 printf(" READ: io=%luMiB, aggrb=%lu, minl=%lu, maxl=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", read_mb, read_agg, minl[0], maxl[0], min_bw[0], max_bw[0], min_run[0], max_run[0]);
1469 if (max_run[DDIR_WRITE])
1470 printf(" WRITE: io=%luMiB, aggrb=%lu, minl=%lu, maxl=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", write_mb, write_agg, minl[1], maxl[1], min_bw[1], max_bw[1], min_run[1], max_run[1]);