2 * fio - the flexible io tester
4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
35 #include <sys/types.h>
38 #include <semaphore.h>
41 #include <asm/unistd.h>
43 #define MAX_JOBS (1024)
46 * assume we don't have _get either, if _set isn't defined
48 #ifndef __NR_ioprio_set
51 #define __NR_ioprio_set 289
52 #define __NR_ioprio_get 290
53 #elif defined(__powerpc__) || defined(__powerpc64__)
54 #define __NR_ioprio_set 273
55 #define __NR_ioprio_get 274
56 #elif defined(__x86_64__)
57 #define __NR_ioprio_set 251
58 #define __NR_ioprio_get 252
59 #elif defined(__ia64__)
60 #define __NR_ioprio_set 1274
61 #define __NR_ioprio_get 1275
62 #elif defined(__alpha__)
63 #define __NR_ioprio_set 442
64 #define __NR_ioprio_get 443
65 #elif defined(__s390x__) || defined(__s390__)
66 #define __NR_ioprio_set 282
67 #define __NR_ioprio_get 283
69 #error "Unsupported arch"
/*
 * ioprio_set(2) has no glibc wrapper on older systems, so invoke the
 * raw syscall.  Returns 0 on success, -1 with errno set on failure.
 */
static int ioprio_set(int which, int who, int ioprio)
{
	return syscall(__NR_ioprio_set, which, who, ioprio);
}
80 IOPRIO_WHO_PROCESS = 1,
85 #define IOPRIO_CLASS_SHIFT 13
90 #define DEF_TIMEOUT (30)
91 #define DEF_RATE_CYCLE (1000)
92 #define DEF_ODIRECT (1)
93 #define DEF_SEQUENTIAL (1)
94 #define DEF_RAND_REPEAT (1)
95 #define DEF_OVERWRITE (0)
96 #define DEF_CREATE (1)
98 #define ALIGN(buf) (char *) (((unsigned long) (buf) + MASK) & ~(MASK))
100 static int repeatable = DEF_RAND_REPEAT;
101 static int rate_quit = 1;
103 static int thread_number;
104 static char *ini_file;
106 static int max_jobs = MAX_JOBS;
126 #define td_read(td) ((td)->ddir == DDIR_READ)
127 #define should_fsync(td) (!td_read(td) && !(td)->odirect)
136 volatile int terminate;
137 volatile int runstate;
140 unsigned int sequential;
142 unsigned int odirect;
143 unsigned int delay_sleep;
144 unsigned int fsync_blocks;
145 unsigned int start_delay;
146 unsigned int timeout;
147 unsigned int use_aio;
148 unsigned int create_file;
149 unsigned int overwrite;
150 unsigned long long file_size;
151 unsigned long long file_offset;
154 io_context_t *aio_ctx;
155 struct iocb *aio_iocbs;
156 unsigned int aio_depth;
157 unsigned int aio_cur_depth;
158 struct io_event *aio_events;
159 char *aio_iocbs_status;
162 unsigned int ratemin;
163 unsigned int ratecycle;
164 unsigned long rate_usec_cycle;
165 long rate_pending_usleep;
166 unsigned long rate_blocks;
167 struct timeval lastrate;
169 unsigned long max_latency; /* msec */
170 unsigned long min_latency; /* msec */
171 unsigned long runtime; /* sec */
172 unsigned long blocks;
173 unsigned long io_blocks;
174 unsigned long last_block;
176 struct drand48_data random_state;
179 * bandwidth and latency stats
181 unsigned long stat_time;
182 unsigned long stat_time_sq;
183 unsigned long stat_time_samples;
184 unsigned long stat_io_blocks;
185 unsigned long stat_bw;
186 unsigned long stat_bw_sq;
187 unsigned long stat_bw_samples;
188 struct timeval stat_sample_time;
190 struct timeval start;
193 static struct thread_data *threads;
194 static struct thread_data def_thread;
196 static sem_t startup_sem;
/*
 * SIGINT handler: walk every job and request termination.  The loop
 * body that flags each thread (presumably setting td->terminate) is
 * elided in this view -- TODO confirm against the full source.
 */
static void sig_handler(int sig)
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];
/*
 * Seed this job's private drand48 state.  A fixed seed (123) is the
 * fallback -- used when runs must be repeatable; otherwise the seed is
 * read from /dev/random.
 *
 * NOTE(review): /dev/random can block when the entropy pool is empty;
 * /dev/urandom would be the non-blocking choice -- verify intent.
 */
static int init_random_state(struct thread_data *td)
	unsigned long seed = 123;
		int fd = open("/dev/random", O_RDONLY);
	/* a short read leaves the seed partially filled -- treated as error */
	if (read(fd, &seed, sizeof(seed)) < (int) sizeof(seed)) {
	srand48_r(seed, &td->random_state);
/*
 * Elapsed time between timevals 's' (start) and 'e' (end), in
 * microseconds.  Assumes e >= s; the sec--/usec+=1000000 step borrows
 * when the microsecond field of 'e' is smaller than that of 's'.
 */
static unsigned long utime_since(struct timeval *s, struct timeval *e)
{
	double sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= (double) 1000000;

	return sec + usec;
}
254 static unsigned long mtime_since(struct timeval *s, struct timeval *e)
258 sec = e->tv_sec - s->tv_sec;
259 usec = e->tv_usec - s->tv_usec;
260 if (sec > 0 && usec < 0) {
265 sec *= (double) 1000;
266 usec /= (double) 1000;
271 static unsigned long mtime_since_now(struct timeval *s)
275 gettimeofday(&t, NULL);
276 return mtime_since(s, &t);
/*
 * Convert an absolute timeval to milliseconds.  Used to stamp iocbs so
 * completion latency can be computed later.  NOTE: tv_sec * 1000 can
 * overflow a 32-bit long for post-2004 epoch times -- harmless here
 * because only *differences* of these stamps are used.
 */
static inline unsigned long msec_now(struct timeval *s)
{
	return s->tv_sec * 1000 + s->tv_usec / 1000;
}
/*
 * Pick the file offset (in bytes) for the next io unit.  Sequential
 * jobs step through block numbers in order (elided branch); random
 * jobs draw a block number from the job's private drand48 state.
 */
static unsigned long get_next_offset(struct thread_data *td)
	if (!td->sequential) {
		lrand48_r(&td->random_state, &r);
		/* scale the raw lrand48 value into [1, td->blocks] */
		b = (1+(double) (td->blocks-1) * r / (RAND_MAX+1.0));
	/* block number -> byte offset, honoring any configured file offset */
	return b * td->bs + td->file_offset;
/*
 * Account one completed io: accumulate latency (msec) and its square
 * for mean/deviation, and -- once enough wall time has passed since the
 * last bandwidth sample -- fold in a bandwidth observation as well.
 */
static void add_stat_sample(struct thread_data *td, unsigned long msec)
	td->stat_time += msec;
	td->stat_time_sq += msec * msec;	/* for stddev of latency */
	td->stat_time_samples++;

	spent = mtime_since_now(&td->stat_sample_time);
		/* KiB/sec over the window since the previous sample */
		unsigned long rate = ((td->io_blocks - td->stat_io_blocks) * td->bs) / spent;
		td->stat_bw_sq += rate * rate;
		gettimeofday(&td->stat_sample_time, NULL);
		td->stat_io_blocks = td->io_blocks;
		td->stat_bw_samples++;
/*
 * Sleep for 'usec' microseconds, resuming after signal interruption
 * until the full interval has elapsed.
 *
 * Fix: the previous version stuffed usec * 1000 straight into tv_nsec,
 * which (a) overflows int for usec > ~2.1M and (b) violates
 * nanosleep()'s requirement that tv_nsec < 1000000000, so any sleep of
 * one second or more failed with EINVAL.  rate_throttle() can request
 * such sleeps, so split the interval into seconds + nanoseconds.
 */
static void usec_sleep(int usec)
{
	struct timespec req, rem;

	req.tv_sec = usec / 1000000;
	req.tv_nsec = (long) (usec % 1000000) * 1000;

	do {
		rem.tv_sec = rem.tv_nsec = 0;
		if (nanosleep(&req, &rem) == 0)
			break;
		/* interrupted: retry with whatever time remains */
		if (!rem.tv_sec && !rem.tv_nsec)
			break;
		req = rem;
	} while (1);
}
/*
 * Pace a rate-limited job.  'time_spent' is how long the last io took
 * (usec); td->rate_usec_cycle is how long one io *should* take at the
 * configured rate.  Shortfalls accumulate in rate_pending_usleep and
 * are slept off in >= 100 msec chunks; overshoot is credited back.
 */
static void rate_throttle(struct thread_data *td, unsigned long time_spent)
	if (time_spent < td->rate_usec_cycle) {
		unsigned long s = td->rate_usec_cycle - time_spent;

		td->rate_pending_usleep += s;
		/* batch small debts to avoid tiny, inaccurate sleeps */
		if (td->rate_pending_usleep >= 100000) {
			usec_sleep(td->rate_pending_usleep);
			td->rate_pending_usleep = 0;
		/* io took longer than one cycle: reduce the sleep debt */
		long overtime = time_spent - td->rate_usec_cycle;

		td->rate_pending_usleep -= overtime;
/*
 * Verify the job is sustaining its configured minimum rate (KiB/sec).
 * Returns nonzero (elided) when the minimum was not met, which callers
 * use to abort the job.
 */
static int check_min_rate(struct thread_data *td, struct timeval *now)
	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)

	/*
	 * if rate blocks is set, sample is running
	 */
	if (td->rate_blocks) {
		spent = mtime_since(&td->lastrate, now);
		/* not enough wall time yet for a meaningful sample */
		if (spent < td->ratecycle)

		rate = ((td->io_blocks - td->rate_blocks) * td->bs) / spent;
		if (rate < td->ratemin) {
			printf("Client%d: min rate %d not met, got %ldKiB/sec\n", td->thread_number, td->ratemin, rate);

	/* restart the sampling window from 'now' */
	td->rate_blocks = td->io_blocks;
	memcpy(&td->lastrate, now, sizeof(*now));
/*
 * Nonzero once the job has been running for at least its configured
 * timeout (td->timeout is in seconds, mtime_since() returns msec).
 */
static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
	if (mtime_since(&td->start, t) >= td->timeout * 1000)
/*
 * Main loop for a synchronous (read(2)/write(2)) job: seek to the next
 * offset, optionally delay, do the io, periodically fsync, then feed
 * the latency into throttling/min-rate/stat bookkeeping.  Exits early
 * on error, rate failure, or timeout.
 */
static void do_sync_io(struct thread_data *td)
	unsigned long blocks, msec, usec;

	for (blocks = 0; blocks < td->blocks; blocks++) {
		off_t offset = get_next_offset(td);

		if (lseek(td->fd, offset, SEEK_SET) == -1) {

			/* optional per-io think time */
			usec_sleep(td->delay_sleep);

		gettimeofday(&s, NULL);

			ret = read(td->fd, td->buf, td->bs);
			ret = write(td->fd, td->buf, td->bs);

		/* a short transfer counts as failure */
		if (ret < (int) td->bs) {

		/* periodic fsync for buffered writes, every fsync_blocks ios */
		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks % td->fsync_blocks) == 0)

		gettimeofday(&e, NULL);

		usec = utime_since(&s, &e);

		rate_throttle(td, usec);

		if (check_min_rate(td, &e)) {

		add_stat_sample(td, msec);

		if (msec < td->min_latency)
			td->min_latency = msec;
		if (msec > td->max_latency)
			td->max_latency = msec;

		if (runtime_exceeded(td, &e))

	/* final flush so buffered writes reach the disk before teardown */
	if (should_fsync(td))
/*
 * Return an iocb to the free pool: compute its slot index from its
 * position inside the contiguous td->aio_iocbs array and clear the
 * in-use flag.
 */
static void aio_put_iocb(struct thread_data *td, struct iocb *iocb)
	long offset = ((long) iocb - (long) td->aio_iocbs)/ sizeof(struct iocb);

	td->aio_iocbs_status[offset] = 0;
/*
 * Grab a free iocb slot, prep it for the next read/write at the next
 * offset, and stamp it with the submit time 't' (smuggled through the
 * io_set_callback data pointer; read back via the iocb_time() macro).
 * Returns NULL (elided) when every slot is busy.
 */
static struct iocb *aio_get_iocb(struct thread_data *td, struct timeval *t)
	struct iocb *iocb = NULL;

	/* linear scan for a free slot; aio_depth is small */
	for (i = 0; i < td->aio_depth; i++) {
		if (td->aio_iocbs_status[i] == 0) {
			td->aio_iocbs_status[i] = 1;
			iocb = &td->aio_iocbs[i];

		off_t off = get_next_offset(td);
		/* each slot owns a bs-sized region of the shared buffer */
		char *p = td->buf + i * td->bs;

			io_prep_pread(iocb, td->fd, p, td->bs, off);
			io_prep_pwrite(iocb, td->fd, p, td->bs, off);

		/* abuse the callback field to carry the submit timestamp */
		io_set_callback(iocb, (io_callback_t) msec_now(t));
/*
 * Submit one iocb, retrying (elided) while the kernel reports EAGAIN
 * (queue momentarily full).  Returns 0 on success.
 */
static int aio_submit(struct thread_data *td, struct iocb *iocb)
	ret = io_submit(*td->aio_ctx, 1, &iocb);
	else if (errno == EAGAIN)
/* recover the submit timestamp stashed by io_set_callback() */
#define iocb_time(iocb) ((unsigned long) (iocb)->data)

/*
 * Main loop for a libaio job: keep up to aio_depth ios in flight,
 * reap completions (non-blocking while the queue has room, blocking
 * for at least one event when it is full -- the elided branch), and do
 * the same latency/rate bookkeeping as the sync path.
 */
static void do_async_io(struct thread_data *td)
	unsigned long blocks, msec, usec;

	for (blocks = 0; blocks < td->blocks; blocks++) {
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
		struct timespec *timeout;
		int ret, i, min_evts = 0;

			usec_sleep(td->delay_sleep);

		gettimeofday(&s, NULL);

		iocb = aio_get_iocb(td, &s);

		ret = aio_submit(td, iocb);

		/* room left in the queue: poll for events without blocking */
		if (td->aio_cur_depth < td->aio_depth) {

		ret = io_getevents(*td->aio_ctx, min_evts, td->aio_cur_depth, td->aio_events, timeout);

		gettimeofday(&e, NULL);

		for (i = 0; i < ret; i++) {
			struct io_event *ev = td->aio_events + i;

			/* completion latency = now - submit stamp */
			msec = msec_now(&e) - iocb_time(iocb);
			add_stat_sample(td, msec);

			if (msec < td->min_latency)
				td->min_latency = msec;
			if (msec > td->max_latency)
				td->max_latency = msec;

			aio_put_iocb(td, iocb);

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one which may look
		 * off because submit time is folded into that sample.
		 */
		usec = utime_since(&s, &e);

		rate_throttle(td, usec);

		if (check_min_rate(td, &e)) {

		if (runtime_exceeded(td, &e))
/*
 * Drain in-flight aio on teardown: harvest whatever already completed,
 * cancel what can be canceled, then block for anything left.
 */
static void cleanup_pending_aio(struct thread_data *td)
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};

	/*
	 * get immediately available events, if any
	 */
	r = io_getevents(*td->aio_ctx, 0, td->aio_cur_depth, td->aio_events, &ts);
	for (i = 0; i < r; i++)
		aio_put_iocb(td, &td->aio_iocbs[i]);

	/*
	 * now cancel remaining active events
	 */
	for (i = 0; i < td->aio_depth; i++) {
		/* slot already free, nothing to cancel */
		if (td->aio_iocbs_status[i] == 0)

		r = io_cancel(*td->aio_ctx, &td->aio_iocbs[i], td->aio_events);
			aio_put_iocb(td, &td->aio_iocbs[i]);

	/* anything uncancellable: wait for it to complete */
	if (td->aio_cur_depth)
		io_getevents(*td->aio_ctx, td->aio_cur_depth, td->aio_cur_depth, td->aio_events, NULL);
/*
 * Tear down the aio state for a job: drain pending ios, destroy the
 * io context, and free the per-job arrays allocated in init_aio().
 */
static void cleanup_aio(struct thread_data *td)
	if (td->aio_cur_depth)
		cleanup_pending_aio(td);

		io_destroy(*td->aio_ctx);

		free(td->aio_events);
	if (td->aio_iocbs_status)
		free(td->aio_iocbs_status);
/*
 * Allocate the aio context plus the per-slot iocb/event/status arrays
 * sized by aio_depth.  Returns nonzero on io_queue_init() failure.
 *
 * NOTE(review): none of the malloc() returns here are checked before
 * use -- an OOM would dereference NULL.  Worth hardening.
 */
static int init_aio(struct thread_data *td)
	td->aio_ctx = malloc(sizeof(*td->aio_ctx));

	if (io_queue_init(td->aio_depth, td->aio_ctx)) {

	td->aio_iocbs = malloc(td->aio_depth * sizeof(struct iocb));
	td->aio_events = malloc(td->aio_depth * sizeof(struct io_event));
	td->aio_iocbs_status = malloc(td->aio_depth * sizeof(char));
/*
 * Create the job's data file and, unless normal io will extend it
 * anyway, fill it with file_size bytes of zeroed bs-sized writes.
 * Requires an explicit size= setting.
 */
static int create_file(struct thread_data *td)
	if (!td->file_size) {
		fprintf(stderr, "Need size for create\n");

	/*
	 * unless specifically asked for overwrite, let normal io extend it
	 */
	if (!td_read(td) && !td->overwrite)

	td->fd = open(td->file_name, O_WRONLY | O_CREAT | O_TRUNC, 0644);

	td->blocks = td->file_size / td->bs;

	memset(b, 0, td->bs);

	for (i = 0; i < td->blocks; i++) {
		int r = write(td->fd, b, td->bs);
/*
 * Nonzero if the job's file exists.  A stat() failure other than
 * ENOENT is conservatively treated as "exists" so we don't clobber
 * something we merely can't inspect.
 */
static int file_exists(struct thread_data *td)
	if (stat(td->file_name, &st) != -1)

	return errno != ENOENT;
/*
 * Open (creating if permitted and necessary) the job's file and derive
 * td->blocks from its size, the configured file_size, and file_offset.
 * Fails if the resulting io region contains no whole blocks.
 */
static int setup_file(struct thread_data *td)
	if (!file_exists(td)) {
		/* file absent and creation not allowed -> error (elided) */
		if (!td->create_file) {

		/* readers open read-only ... */
		td->fd = open(td->file_name, flags | O_RDONLY);
		/* ... writers may create on the fly */
		td->fd = open(td->file_name, flags | O_WRONLY | O_CREAT, 0600);

	if (fstat(td->fd, &st) == -1) {

	/* an explicit size= overrides what is on disk */
	if (td->file_size > st.st_size)
		st.st_size = td->file_size;

		/* fallback size: 1 GiB -- TODO confirm the elided condition */
		td->file_size = 1024 * 1024 * 1024;

		st.st_size = td->file_size;

	td->blocks = (st.st_size - td->file_offset) / td->bs;
		fprintf(stderr, "Client%d: no io blocks\n", td->thread_number);
/*
 * Per-job entry point (runs in a forked child).  Attaches the shared
 * thread_data array, applies cpu affinity / ioprio, allocates the
 * aligned io buffer, then rendezvous with the parent via startup_sem /
 * td->mutex before running the io loop.  Exit state is published in
 * td->runstate for the reaper.
 */
static void *thread_main(int shm_id, int offset, char *argv[])
	struct thread_data *td;
	void *data, *ptr = NULL;

	/* locate our slot inside the shared segment */
	data = shmat(shm_id, NULL, 0);
	td = data + offset * sizeof(struct thread_data);

	if (sched_setaffinity(td->pid, sizeof(td->cpumask), &td->cpumask) == -1) {

	printf("Client%d (pid=%u) started\n", td->thread_number, td->pid);

	/* cosmetic: rename the process so ps shows one entry per job */
	sprintf(argv[0], "fio%d", offset);

	if (td->use_aio && init_aio(td))

	if (init_random_state(td))

		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {

	/* tell the parent we are ready, then wait for the start signal */
	sem_post(&startup_sem);
	sem_wait(&td->mutex);

	gettimeofday(&td->start, NULL);

		memcpy(&td->lastrate, &td->start, sizeof(td->start));

	memcpy(&td->stat_sample_time, &td->start, sizeof(td->start));

		/* sync io: one bs-sized buffer, aligned for O_DIRECT */
		ptr = malloc(td->bs + MASK);
		td->buf = ALIGN(ptr);

		/* aio: one region per queue slot */
		ptr = malloc(td->bs * td->aio_depth + MASK);
		td->buf = ALIGN(ptr);

	td->runtime = mtime_since_now(&td->start);

	/* error path rendezvous mirrors the success path above */
	sem_post(&startup_sem);
	sem_wait(&td->mutex);

	td->runstate = TD_EXITED;
/*
 * Release the shared-memory segment holding the thread_data array
 * (body not visible in this view -- presumably shmdt/shmctl).
 */
static void free_shm(void)
/*
 * Print the end-of-run summary line for one job: error code, total io,
 * bandwidth, and latency/bandwidth mean and deviation.
 *
 * NOTE(review): the sample-stddev expressions look wrong.  With
 * m = sum/n, the unbiased form is sqrt((sum_sq - n*m*m) / (n - 1));
 * the code computes (sum_sq - (m*m)/n) / (n - 1) instead, which leaves
 * dev_lat/dev_bw inflated.  Also prio/prio_class are computed but not
 * printed below -- possibly used in an elided line.
 */
static void show_thread_status(struct thread_data *td)
	int prio, prio_class;
	unsigned long bw = 0;
	double n_lat, n_bw, m_lat, m_bw, dev_lat, dev_bw;

	/* nothing ran and nothing failed: stay quiet */
	if (!td->io_blocks && !td->error)

		bw = (td->io_blocks * td->bs) / td->runtime;

	prio = td->ioprio & 0xff;
	prio_class = td->ioprio >> IOPRIO_CLASS_SHIFT;

	n_lat = (double) td->stat_time_samples;
	n_bw = (double) td->stat_bw_samples;

	m_lat = (double) td->stat_time / n_lat;
	dev_lat = sqrt(((double) td->stat_time_sq - (m_lat * m_lat) / n_lat) / (n_lat - 1));
	m_bw = (double) td->stat_bw / n_bw;
	dev_bw = sqrt(((double) td->stat_bw_sq - (m_bw * m_bw) / n_bw) / (n_bw - 1));

	printf("Client%d: err=%2d, io=%6luMiB, bw=%6luKiB/sec, latmax=%5lumsec, latavg=%5.02fmsec, latdev=%5.02fmsec, bwavg=%5.02fKiB/sec, bwdev=%5.02fKiB/sec\n", td->thread_number, td->error, td->io_blocks * td->bs >> 20, bw, td->max_latency, m_lat, dev_lat, m_bw, dev_bw);
/*
 * Translate the configured rate (KiB/sec) into the per-io time budget
 * rate_usec_cycle used by rate_throttle().
 *
 * NOTE(review): if td->rate * 1024 < td->bs, nr_reads_per_sec is 0 and
 * the division below traps (divide by zero).  Worth validating.
 */
static int setup_rate(struct thread_data *td)
	int nr_reads_per_sec;

	if (td->rate < td->ratemin) {
		fprintf(stderr, "min rate larger than nominal rate\n");

	nr_reads_per_sec = td->rate * 1024 / td->bs;
	td->rate_usec_cycle = 1000000 / nr_reads_per_sec;
	td->rate_pending_usleep = 0;
/*
 * Reserve the next slot in the shared threads[] array and seed it with
 * the defaults accumulated in def_thread.  Returns NULL (elided) when
 * max_jobs slots are already taken.
 */
static struct thread_data *get_new_job(int global)
	struct thread_data *td;

	if (thread_number >= max_jobs)

	td = &threads[thread_number++];
	memset(td, 0, sizeof(*td));

	/* thread_number is 1-based from the job's point of view */
	td->thread_number = thread_number;
	td->ddir = def_thread.ddir;
	td->bs = def_thread.bs;
	td->odirect = def_thread.odirect;
	td->ratecycle = def_thread.ratecycle;
	td->sequential = def_thread.sequential;
	td->timeout = def_thread.timeout;
	td->create_file = def_thread.create_file;
	td->overwrite = def_thread.overwrite;
	memcpy(&td->cpumask, &def_thread.cpumask, sizeof(td->cpumask));
/*
 * Undo get_new_job(): wipe the slot (td->thread_number is 1-based,
 * hence the -1).  Presumably also decrements thread_number -- elided.
 */
static void put_job(struct thread_data *td)
	memset(&threads[td->thread_number - 1], 0, sizeof(*td));
/*
 * Finalize a parsed job: record filename and ioprio, init the start
 * semaphore, and announce the configuration.  The def_thread template
 * itself must never be "added" as a runnable job.
 *
 * NOTE(review): strcpy into td->file_name is unbounded -- a long
 * filename overruns the field.  snprintf would be safer.
 */
static int add_job(struct thread_data *td, const char *filename, int prioclass,
	if (td == &def_thread)

	strcpy(td->file_name, filename);
	/* pshared=1: semaphore lives in shared memory, crossed by fork */
	sem_init(&td->mutex, 1, 0);
	td->min_latency = 10000000;
	td->ioprio = (prioclass << IOPRIO_CLASS_SHIFT) | prio;

	/* aio without a queue depth makes no sense */
	if (td->use_aio && !td->aio_depth)

	printf("Client%d: file=%s, rw=%d, prio=%d/%d, seq=%d, odir=%d, bs=%d, rate=%d, aio=%d, aio_depth=%d\n", td->thread_number, filename, td->ddir, prioclass, prio, td->sequential, td->odirect, td->bs, td->rate, td->use_aio, td->aio_depth);
/*
 * Populate a cpu mask from the user's bitmask-style 'cpu' argument.
 *
 * NOTE(review): cpumask is passed BY VALUE, so CPU_SET() below mutates
 * a local copy and the caller's mask is left unchanged.  The parameter
 * should be a cpu_set_t * for this to have any effect -- flagging
 * rather than fixing, since changing the signature touches callers.
 */
static void fill_cpu_mask(cpu_set_t cpumask, int cpu)
	for (i = 0; i < sizeof(int) * 8; i++) {
			CPU_SET(i, &cpumask);
/*
 * Copy the option value starting at 'input' into 'output', stopping at
 * the first ',' or '}' (job-description delimiters) or end of string,
 * and NUL-terminate the result.
 *
 * NOTE: no bound on 'output' -- callers pass 256-byte buffers while
 * input lines can be longer; kept as-is to preserve the interface, but
 * callers must guarantee capacity.
 */
static void fill_option(const char *input, char *output)
{
	int i = 0;

	while (input[i] != ',' && input[i] != '}' && input[i] != '\0') {
		output[i] = input[i];
		i++;
	}

	output[i] = '\0';
}
988 * convert string after '=' into decimal value, noting any size suffix
990 static int str_cnv(char *p, unsigned long long *val)
996 str = strstr(p, "=");
1004 switch (str[len - 2]) {
1015 mult = 1024 * 1024 * 1024;
1019 *val = strtoul(str, NULL, 10);
1020 if (*val == ULONG_MAX && errno == ERANGE)
/*
 * Parse jobs given directly on the command line, one argv entry per
 * job, in the form name{opt=val,opt=val,...}.  Each recognized option
 * is located with strstr() and its value copied out via fill_option()
 * (the pointer-advance past "opt=" happens in elided lines).
 *
 * NOTE(review): the two malloc(256) results are not checked, and long
 * option values can overflow these fixed buffers via fill_option().
 */
static void parse_jobs_cmd(int argc, char *argv[], int index)
	struct thread_data *td;
	unsigned int prio, prioclass, cpu;
	char *string, *filename, *p, *c;

	string = malloc(256);
	filename = malloc(256);

	for (i = index; i < argc; i++) {

		/* the option list starts at the '{' */
		c = strpbrk(p, "{");

		td = get_new_job(0);

		c = strstr(p, "rw=");
			td->ddir = DDIR_READ;
			td->ddir = DDIR_WRITE;

		c = strstr(p, "prio=");

		c = strstr(p, "prioclass=");
			/* single decimal digit expected */
			prioclass = *c - '0';

		c = strstr(p, "file=");
			fill_option(c, filename);

		c = strstr(p, "bs=");
			fill_option(c, string);
			td->bs = strtoul(string, NULL, 10);

		c = strstr(p, "direct=");

		c = strstr(p, "delay=");
			fill_option(c, string);
			td->delay_sleep = strtoul(string, NULL, 10);

		c = strstr(p, "rate=");
			fill_option(c, string);
			td->rate = strtoul(string, NULL, 10);

		c = strstr(p, "ratemin=");
			fill_option(c, string);
			td->ratemin = strtoul(string, NULL, 10);

		c = strstr(p, "ratecycle=");
			fill_option(c, string);
			td->ratecycle = strtoul(string, NULL, 10);

		c = strstr(p, "cpumask=");
			fill_option(c, string);
			cpu = strtoul(string, NULL, 10);
			fill_cpu_mask(td->cpumask, cpu);

		c = strstr(p, "fsync=");
			fill_option(c, string);
			td->fsync_blocks = strtoul(string, NULL, 10);

		c = strstr(p, "startdelay=");
			fill_option(c, string);
			td->start_delay = strtoul(string, NULL, 10);

		c = strstr(p, "timeout=");
			fill_option(c, string);
			td->timeout = strtoul(string, NULL, 10);

		/* size/offset take k/m/g suffixes, handled by str_cnv() */
		c = strstr(p, "size=");
			str_cnv(c, &td->file_size);

		c = strstr(p, "offset=");
			str_cnv(c, &td->file_offset);

		c = strstr(p, "aio_depth=");
			fill_option(c, string);
			td->aio_depth = strtoul(string, NULL, 10);

		/* boolean flags: mere presence enables them */
		c = strstr(p, "aio");

		c = strstr(p, "create");
			td->create_file = 1;

		c = strstr(p, "overwrite");

		c = strstr(p, "random");

		c = strstr(p, "sequential");

		if (add_job(td, filename, prioclass, prio))
/*
 * If ini line 'p' contains option 'name', parse its (possibly
 * suffixed) value into *val via str_cnv().  Returns nonzero when the
 * option is absent or the conversion fails.
 */
static int check_strcnv(char *p, char *name, unsigned long long *val)
	if (!strstr(p, name))

	return str_cnv(p, val);
/*
 * Try to parse "name=<int>" (and the spaced variant "name = <int>")
 * from ini line 'p' into *val.  Returns 0 on a successful match.
 *
 * NOTE(review): the scratch 'str' buffer's declaration is elided here;
 * the sprintf below assumes option names stay well under its size.
 */
static int check_int(char *p, char *name, unsigned int *val)
	sprintf(str, "%s=%%d", name);
	if (sscanf(p, str, val) == 1)

	/* tolerate whitespace around '=' */
	sprintf(str, "%s = %%d", name);
	if (sscanf(p, str, val) == 1)
/*
 * Nonzero if an ini line is blank, pure whitespace/control characters,
 * or a ';' comment line; such lines are skipped by the parser.
 *
 * Improvements over the fragment: hoist strlen() out of the loop
 * condition (it was re-scanned every iteration) and cast through
 * unsigned char before the <ctype.h> calls, which is required when
 * plain char is signed.
 */
static int is_empty_or_comment(char *line)
{
	size_t i, len = strlen(line);

	for (i = 0; i < len; i++) {
		unsigned char c = (unsigned char) line[i];

		if (c == ';')
			return 1;
		if (!isspace(c) && !iscntrl(c))
			return 0;
	}

	return 1;
}
/*
 * Parse an ini-style job file: each [name] section becomes a job (a
 * [global] section instead updates def_thread), with one option per
 * following line until the next section.
 *
 * NOTE(review): sscanf(p, "[%s]", name) -- %s stops at whitespace and
 * the ']' in the format never matches; the trailing ']' ends up inside
 * 'name' and is chopped off below via name[strlen(name)-1] = '\0'.
 * Fragile but apparently intentional.
 */
static int parse_jobs_ini(char *file)
	unsigned int prioclass, prio, cpu, global;
	struct thread_data *td;
	char *string, *name;

	f = fopen(file, "r");

	string = malloc(4096);

	/* outer loop: find the next [section] header */
	while ((p = fgets(string, 4096, f)) != NULL) {
		if (is_empty_or_comment(p))
		if (sscanf(p, "[%s]", name) != 1)

		global = !strncmp(name, "global", 6);

		/* strip the trailing ']' captured by %s (see NOTE above) */
		name[strlen(name) - 1] = '\0';

		td = get_new_job(global);

		/* inner loop: option lines until the next section starts */
		while ((p = fgets(string, 4096, f)) != NULL) {
			if (is_empty_or_comment(p))

			if (!check_int(p, "bs", &td->bs)) {
			if (!check_int(p, "rw", &td->ddir)) {
			if (!check_int(p, "prio", &prio)) {
			if (!check_int(p, "prioclass", &prioclass)) {
			if (!check_int(p, "direct", &td->odirect)) {
			if (!check_int(p, "rate", &td->rate)) {
			if (!check_int(p, "ratemin", &td->ratemin)) {
			if (!check_int(p, "ratecycle", &td->ratecycle)) {
			if (!check_int(p, "delay", &td->delay_sleep)) {
			if (!check_int(p, "cpumask", &cpu)) {
				fill_cpu_mask(td->cpumask, cpu);
			if (!check_int(p, "fsync", &td->fsync_blocks)) {
			if (!check_int(p, "startdelay", &td->start_delay)) {
			if (!check_int(p, "timeout", &td->timeout)) {
			if (!check_int(p, "aio_depth", &td->aio_depth)) {
			if (!check_strcnv(p, "size", &td->file_size)) {
			if (!check_strcnv(p, "offset", &td->file_offset)) {
			if (!strncmp(p, "sequential", 10)) {
			if (!strncmp(p, "random", 6)) {
			if (!strncmp(p, "aio", 3)) {
			if (!strncmp(p, "create", 6)) {
				td->create_file = 1;
			if (!strncmp(p, "overwrite", 9)) {
			printf("Client%d: bad option %s\n",td->thread_number,p);

		if (add_job(td, name, prioclass, prio))
/*
 * Parse command-line switches that set global defaults (def_thread)
 * before any jobs are parsed.  Returns the argv index where job
 * descriptions begin.  The switch-matching lines are elided; only the
 * value-handling bodies are visible here.
 */
static int parse_options(int argc, char *argv[])
	for (i = 1; i < argc; i++) {
		char *parm = argv[i];

			/* -s: sequential (1) vs random (0) */
			def_thread.sequential = !!atoi(parm);

			/* -b: block size, given in KiB */
			def_thread.bs = atoi(parm);
			def_thread.bs <<= 10;
			if (!def_thread.bs) {
				printf("bad block size\n");
				def_thread.bs = DEF_BS;

			/* -t: runtime limit in seconds */
			def_thread.timeout = atoi(parm);

			/* -r: repeatable random seed on/off */
			repeatable = !!atoi(parm);

			/* -R: abort all jobs if one misses its min rate */
			rate_quit = !!atoi(parm);

			/* -o: O_DIRECT on/off */
			def_thread.odirect = !!atoi(parm);

			if (i + 1 >= argc) {
				printf("-f needs file as arg\n");
			ini_file = strdup(argv[i+1]);

			printf("bad option %s\n", argv[i]);
/*
 * One-line progress report when a job starts or exits, plus the
 * current count of running threads and aggregate commit rate.
 */
static void print_thread_status(struct thread_data *td, int nr_running,
				int t_rate, int m_rate, int die)
	printf("Client%d: %s\n", td->thread_number, die ? "exited" : "spawned");

	printf("Threads now running: %d", nr_running);
	if (m_rate || t_rate)
		printf(", commitrate %d/%dKiB/sec", t_rate, m_rate);
/*
 * Collect finished children and keep the running-thread / aggregate
 * rate counters in sync.  Updates *nr_running, *t_rate, *m_rate.
 */
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		if (td->runstate != TD_EXITED)

		td->runstate = TD_REAPED;
		waitpid(td->pid, NULL, 0);

		/* remove this job's contribution from the aggregates */
		(*m_rate) -= td->ratemin;
		(*t_rate) -= td->rate;

		print_thread_status(td, *nr_running, *t_rate, *m_rate, 1);
/*
 * Orchestrate the whole run: fork one child per job (honoring
 * startdelay), release them in a second pass via their per-job
 * semaphores, then loop reaping until everything has exited.
 * State machine: TD_NOT_CREATED -> TD_CREATED -> TD_STARTED ->
 * TD_EXITED -> TD_REAPED.
 */
static void run_threads(char *argv[])
	struct timeval genesis;
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate;

	gettimeofday(&genesis, NULL);

	printf("Starting %d threads\n", thread_number);

	signal(SIGINT, sig_handler);

	todo = thread_number;
	m_rate = t_rate = 0;

		/*
		 * create threads (TD_NOT_CREATED -> TD_CREATED)
		 */
		for (i = 0; i < thread_number; i++) {

			if (td->runstate != TD_NOT_CREATED)

			/*
			 * never got a chance to start, killed by other
			 * thread for some reason
			 */
			if (td->terminate) {

			/* honor startdelay= relative to run start */
			if (td->start_delay) {
				spent = mtime_since_now(&genesis);

				if (td->start_delay * 1000 > spent)

			td->runstate = TD_CREATED;
			sem_init(&startup_sem, 1, 1);

			/* parent blocks here until the child checks in */
			sem_wait(&startup_sem);

				/* child path: run the job, never returns here */
				thread_main(shm_id, i, argv);

		/*
		 * start created threads (TD_CREATED -> TD_STARTED)
		 */
		for (i = 0; i < thread_number; i++) {
			struct thread_data *td = &threads[i];

			if (td->runstate != TD_CREATED)

			td->runstate = TD_STARTED;

			m_rate += td->ratemin;

			/* release the child blocked in thread_main() */
			sem_post(&td->mutex);

			print_thread_status(td, nr_running, t_rate, m_rate, 0);

		reap_threads(&nr_running, &t_rate, &m_rate);

	/* everything launched; wait for stragglers */
	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
/*
 * Allocate the System V shared-memory segment that holds the
 * thread_data array (shared between the parent and forked jobs), and
 * attach it as 'threads'.  Returns nonzero on failure.
 */
int setup_thread_area(void)
	/*
	 * 1024 is too much on some machines, scale max_jobs if
	 * we get a failure that looks like too large a shm segment
	 */
	int s = max_jobs * sizeof(struct thread_data);

	shm_id = shmget(0, s, IPC_CREAT | 0600);

		/* EINVAL means "segment too large" -> retry smaller (elided) */
		if (errno != EINVAL) {

	threads = shmat(shm_id, NULL, 0);
	if (threads == (void *) -1) {
/*
 * Entry point: set up shared memory and defaults, parse options and
 * jobs (ini file and/or command line), run everything, then aggregate
 * per-direction (read=0 / write=1) min/max runtime, bandwidth and
 * latency across all jobs for the final summary.
 */
int main(int argc, char *argv[])
	/* index [0] = reads, [1] = writes, matching DDIR_READ/DDIR_WRITE */
	static unsigned long max_run[2], min_run[2], total_blocks[2];
	static unsigned long max_bw[2], min_bw[2], maxl[2], minl[2];
	static unsigned long read_mb, write_mb, read_agg, write_agg;

	if (setup_thread_area())

	/* inherit our own affinity mask as the default for jobs */
	if (sched_getaffinity(getpid(), sizeof(cpu_set_t), &def_thread.cpumask) == -1) {
		perror("sched_getaffinity");

	def_thread.ddir = DDIR_READ;
	def_thread.bs = DEF_BS;
	def_thread.odirect = DEF_ODIRECT;
	def_thread.ratecycle = DEF_RATE_CYCLE;
	def_thread.sequential = DEF_SEQUENTIAL;
	def_thread.timeout = DEF_TIMEOUT;
	def_thread.create_file = DEF_CREATE;
	def_thread.overwrite = DEF_OVERWRITE;

	i = parse_options(argc, argv);

		if (parse_jobs_ini(ini_file))

		parse_jobs_cmd(argc, argv, i);

	if (!thread_number) {
		printf("Nothing to do\n");

	/* seed the minimums so the first job always wins the comparison */
	min_bw[0] = min_run[0] = ~0UL;
	min_bw[1] = min_run[1] = ~0UL;
	minl[0] = minl[1] = ~0UL;

	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];
		unsigned long bw = 0;

		if (td->runtime < min_run[td->ddir])
			min_run[td->ddir] = td->runtime;
		if (td->runtime > max_run[td->ddir])
			max_run[td->ddir] = td->runtime;

			bw = (td->io_blocks * td->bs) / td->runtime;
		if (bw < min_bw[td->ddir])
			min_bw[td->ddir] = bw;
		if (bw > max_bw[td->ddir])
			max_bw[td->ddir] = bw;
		if (td->max_latency < minl[td->ddir])
			minl[td->ddir] = td->max_latency;
		if (td->max_latency > maxl[td->ddir])
			maxl[td->ddir] = td->max_latency;

		total_blocks[td->ddir] += td->io_blocks;

			read_mb += (td->bs * td->io_blocks) >> 20;
			read_agg += (td->io_blocks * td->bs) / td->runtime;

			write_mb += (td->bs * td->io_blocks) >> 20;
			write_agg += (td->io_blocks * td->bs) / td->runtime;

		show_thread_status(td);

	printf("Run status:\n");
	if (max_run[DDIR_READ])
		printf("   READ: io=%luMiB, aggrb=%lu, minl=%lu, maxl=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", read_mb, read_agg, minl[0], maxl[0], min_bw[0], max_bw[0], min_run[0], max_run[0]);
	if (max_run[DDIR_WRITE])
		printf("  WRITE: io=%luMiB, aggrb=%lu, minl=%lu, maxl=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", write_mb, write_agg, minl[1], maxl[1], min_bw[1], max_bw[1], min_run[1], max_run[1]);