2 * fio - the flexible io tester
4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
35 #include <sys/types.h>
38 #include <semaphore.h>
41 #include <asm/unistd.h>
43 #define MAX_JOBS (1024)
/*
 * Per-architecture syscall numbers for ioprio_set/ioprio_get, which
 * glibc did not wrap at the time. NOTE(review): this extract is missing
 * the leading "#if defined(__i386__)"-style guard line; each #elif
 * below selects the numbers for one architecture, ending in #error for
 * unknown targets.
 */
46 * assume we don't have _get either, if _set isn't defined
48 #ifndef __NR_ioprio_set
50 #define __NR_ioprio_set 289
51 #define __NR_ioprio_get 290
52 #elif defined(__powerpc__) || defined(__powerpc64__)
53 #define __NR_ioprio_set 273
54 #define __NR_ioprio_get 274
55 #elif defined(__x86_64__)
56 #define __NR_ioprio_set 251
57 #define __NR_ioprio_get 252
58 #elif defined(__ia64__)
59 #define __NR_ioprio_set 1274
60 #define __NR_ioprio_get 1275
61 #elif defined(__alpha__)
62 #define __NR_ioprio_set 442
63 #define __NR_ioprio_get 443
64 #elif defined(__s390x__) || defined(__s390__)
65 #define __NR_ioprio_set 282
66 #define __NR_ioprio_get 283
68 #error "Unsupported arch"
/*
 * Same per-architecture table for the raw fadvise64 syscall, used as a
 * fallback when posix_fadvise() is not wanted/available. Same missing
 * leading guard as above.
 */
72 #ifndef __NR_fadvise64
74 #define __NR_fadvise64 250
75 #elif defined(__powerpc__) || defined(__powerpc64__)
76 #define __NR_fadvise64 233
77 #elif defined(__x86_64__)
78 #define __NR_fadvise64 221
79 #elif defined(__ia64__)
80 #define __NR_fadvise64 1234
81 #elif defined(__alpha__)
82 #define __NR_fadvise64 413
83 #elif defined(__s390x__) || defined(__s390__)
84 #define __NR_fadvise64 253
86 #error "Unsupported arch"
/*
 * Thin wrapper for the ioprio_set(2) syscall. glibc did not export a
 * wrapper when this was written, so it is invoked via syscall(2) using
 * the per-arch __NR_ioprio_set number defined above.
 *
 * Returns 0 on success, -1 with errno set on failure (same contract
 * as the raw syscall).
 */
static int ioprio_set(int which, int who, int ioprio)
{
	return syscall(__NR_ioprio_set, which, who, ioprio);
}
96 * we want fadvise64 really, but it's so tangled... later
98 static int fadvise(int fd, loff_t offset, size_t len, int advice)
101 return syscall(__NR_fadvise64, fd, offset, offset >> 32, len, advice);
103 return posix_fadvise(fd, (off_t) offset, len, advice);
/*
 * ioprio target selector -- fragment of an enum whose opening brace is
 * not in this extract (mirrors the kernel's IOPRIO_WHO_* values).
 */
108 IOPRIO_WHO_PROCESS = 1,
/* io priority class lives in the top bits of the ioprio word */
113 #define IOPRIO_CLASS_SHIFT 13
/*
 * Compile-time defaults for job parameters; each has a matching
 * option in the job file / command line parser below.
 */
117 #define DEF_BS (4096)
118 #define DEF_TIMEOUT (30)
119 #define DEF_RATE_CYCLE (1000)
120 #define DEF_ODIRECT (1)
121 #define DEF_SEQUENTIAL (1)
122 #define DEF_RAND_REPEAT (1)
123 #define DEF_OVERWRITE (0)
124 #define DEF_CREATE (1)
125 #define DEF_INVALIDATE (1)
/*
 * Round a malloc'ed buffer up to the next (MASK + 1) boundary --
 * needed for O_DIRECT io. NOTE(review): MASK's definition is not in
 * this extract; presumably an alignment mask like (blocksize - 1).
 */
127 #define ALIGN(buf) (char *) (((unsigned long) (buf) + MASK) & ~(MASK))
/* global option state, filled in by parse_options() */
129 static int repeatable = DEF_RAND_REPEAT;
130 static int rate_quit = 1;
132 static int thread_number;
133 static char *ini_file;
135 static int max_jobs = MAX_JOBS;
/* convenience predicates on a thread_data pointer */
155 #define td_read(td) ((td)->ddir == DDIR_READ)
/* fsync only matters for buffered (non-O_DIRECT) writes */
156 #define should_fsync(td) (!td_read(td) && !(td)->odirect)
/*
 * Fields of struct thread_data -- one instance per job, living in the
 * shared memory segment so parent and forked children see the same
 * state. NOTE(review): the struct's opening line and several fields
 * (fd, bs, rate, ddir, pid, mutex, cpumask, buf, file_name, error...)
 * referenced elsewhere are missing from this extract.
 */
/* set asynchronously (signal handler / sibling) to request shutdown */
165 volatile int terminate;
/* TD_NOT_CREATED -> TD_CREATED -> TD_STARTED -> TD_EXITED -> TD_REAPED */
166 volatile int runstate;
/* job options, copied from def_thread in get_new_job() */
169 unsigned int sequential;
171 unsigned int odirect;
172 unsigned int delay_sleep;
173 unsigned int fsync_blocks;
174 unsigned int start_delay;
175 unsigned int timeout;
176 unsigned int use_aio;
177 unsigned int create_file;
178 unsigned int overwrite;
179 unsigned int invalidate_cache;
180 unsigned long long file_size;
181 unsigned long long file_offset;
/* libaio state: context, iocb pool with in-use bitmap, event buffer */
184 io_context_t *aio_ctx;
185 struct iocb *aio_iocbs;
186 unsigned int aio_depth;
187 unsigned int aio_cur_depth;
188 struct io_event *aio_events;
189 char *aio_iocbs_status;
/* rate throttling state (KiB/sec targets, per-io usleep accounting) */
192 unsigned int ratemin;
193 unsigned int ratecycle;
194 unsigned long rate_usec_cycle;
195 long rate_pending_usleep;
196 unsigned long rate_blocks;
197 struct timeval lastrate;
/* per-io latency extremes and totals for the final report */
199 unsigned long max_latency; /* msec */
200 unsigned long min_latency; /* msec */
201 unsigned long runtime; /* sec */
202 unsigned long blocks;
203 unsigned long io_blocks;
204 unsigned long last_block;
/* per-thread PRNG so random jobs are independent and repeatable */
206 struct drand48_data random_state;
209 * bandwidth and latency stats
/* running sum / sum-of-squares / count for latency and bandwidth */
211 unsigned long stat_time;
212 unsigned long stat_time_sq;
213 unsigned long stat_time_samples;
214 unsigned long stat_io_blocks;
215 unsigned long stat_bw;
216 unsigned long stat_bw_sq;
217 unsigned long stat_bw_samples;
218 struct timeval stat_sample_time;
220 struct timeval start;
/* the shared-memory job array and the template job for defaults */
223 static struct thread_data *threads;
224 static struct thread_data def_thread;
/* process-shared semaphore used for the child startup handshake */
226 static sem_t startup_sem;
/*
 * SIGINT handler: walks every job and (presumably, the body line is
 * missing from this extract) sets td->terminate so the io loops wind
 * down cleanly -- TODO confirm against the full source.
 */
228 static void sig_handler(int sig)
232 for (i = 0; i < thread_number; i++) {
233 struct thread_data *td = &threads[i];
/*
 * Seed this job's private drand48 state. With "repeatable" set, a
 * fixed seed (123) makes random workloads reproducible; otherwise the
 * seed is read from /dev/random. NOTE(review): /dev/random can block
 * when the entropy pool is low -- /dev/urandom would be preferable.
 * The error paths and close(fd) are missing from this extract; verify
 * the fd is not leaked on the short-read path.
 */
240 static int init_random_state(struct thread_data *td)
242 unsigned long seed = 123;
248 int fd = open("/dev/random", O_RDONLY);
255 if (read(fd, &seed, sizeof(seed)) < (int) sizeof(seed)) {
264 srand48_r(seed, &td->random_state);
/*
 * Microseconds elapsed between two gettimeofday() samples s and e
 * (e is assumed to be the later one). Fragment reconstruction: the
 * declarations and the borrow in the rollover branch were missing
 * from the extract; the double arithmetic matches the visible code.
 */
static unsigned long utime_since(struct timeval *s, struct timeval *e)
{
	double sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	/* borrow a second when the usec delta went negative */
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= (double) 1000000;

	return sec + usec;
}
/*
 * Milliseconds elapsed between two gettimeofday() samples s and e.
 * Same shape as utime_since(); reconstruction restores the missing
 * declarations and the borrow branch body.
 */
static unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
	double sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	/* borrow a second when the usec delta went negative */
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= (double) 1000;
	usec /= (double) 1000;

	return sec + usec;
}
301 static unsigned long mtime_since_now(struct timeval *s)
305 gettimeofday(&t, NULL);
306 return mtime_since(s, &t);
/*
 * Collapse an absolute timeval into a single millisecond count
 * (used to timestamp iocbs so completion latency can be derived).
 */
static inline unsigned long msec_now(struct timeval *s)
{
	unsigned long msec;

	msec = s->tv_sec * 1000;
	msec += s->tv_usec / 1000;
	return msec;
}
/*
 * Pick the byte offset for the next io. Random jobs draw a block
 * number in [1, blocks] from the per-thread PRNG; the sequential
 * branch (presumably td->last_block++, missing from this extract --
 * TODO confirm) advances linearly.
 */
314 static unsigned long get_next_offset(struct thread_data *td)
319 if (!td->sequential) {
320 lrand48_r(&td->random_state, &r);
321 b = (1+(double) (td->blocks-1) * r / (RAND_MAX+1.0));
327 return b * td->bs + td->file_offset;
/*
 * Fold one completed io (latency 'msec') into the running stats:
 * sum/sum-of-squares/count for latency, and -- once per sample
 * window -- a bandwidth sample derived from blocks completed since
 * the last window. The window-length check is missing from this
 * extract; 'spent' is presumably compared against a window constant.
 */
330 static void add_stat_sample(struct thread_data *td, unsigned long msec)
334 td->stat_time += msec;
335 td->stat_time_sq += msec * msec;
336 td->stat_time_samples++;
338 spent = mtime_since_now(&td->stat_sample_time);
/* KiB/sec over the elapsed window */
340 unsigned long rate = ((td->io_blocks - td->stat_io_blocks) * td->bs) / spent;
343 td->stat_bw_sq += rate * rate;
344 gettimeofday(&td->stat_sample_time, NULL);
345 td->stat_io_blocks = td->io_blocks;
346 td->stat_bw_samples++;
/*
 * Sleep for 'usec' microseconds, restarting nanosleep() when it is
 * interrupted by a signal before the full interval has elapsed.
 *
 * Fix: the original stuffed usec * 1000 straight into tv_nsec, which
 * is invalid (nanosleep fails with EINVAL for tv_nsec >= 1e9) once
 * usec reaches one second -- possible here since rate_pending_usleep
 * is unbounded. Split the interval into whole seconds + nanoseconds.
 */
static void usec_sleep(int usec)
{
	struct timespec req = {
		.tv_sec = usec / 1000000,
		.tv_nsec = (usec % 1000000) * 1000,
	};
	struct timespec rem;

	do {
		rem.tv_sec = rem.tv_nsec = 0;
		if (nanosleep(&req, &rem) < 0)
			break;
		/* slept the full interval */
		if (!rem.tv_sec && !rem.tv_nsec)
			break;

		/* interrupted: sleep the remainder */
		req.tv_sec = rem.tv_sec;
		req.tv_nsec = rem.tv_nsec;
	} while (1);
}
/*
 * Keep this job at (or below) its requested rate. If the last io
 * completed faster than one rate cycle, accumulate the surplus and
 * sleep once it reaches 100ms (batching avoids very short sleeps);
 * a slower-than-cycle io instead pays down the accumulated credit.
 * The enclosing braces / else keyword are missing from this extract.
 */
365 static void rate_throttle(struct thread_data *td, unsigned long time_spent)
370 if (time_spent < td->rate_usec_cycle) {
371 unsigned long s = td->rate_usec_cycle - time_spent;
373 td->rate_pending_usleep += s;
/* only sleep in >= 100ms chunks to keep overhead down */
374 if (td->rate_pending_usleep >= 100000) {
375 usec_sleep(td->rate_pending_usleep);
376 td->rate_pending_usleep = 0;
379 long overtime = time_spent - td->rate_usec_cycle;
381 td->rate_pending_usleep -= overtime;
/*
 * Verify the job is keeping up with its minimum rate (ratemin,
 * KiB/sec). Returns non-zero when the minimum was missed so the
 * caller can abort the job (subject to rate_quit).
 */
385 static int check_min_rate(struct thread_data *td, struct timeval *now)
391 * allow a 2 second settle period in the beginning
393 if (mtime_since(&td->start, now) < 2000)
397 * if rate blocks is set, sample is running
399 if (td->rate_blocks) {
400 spent = mtime_since(&td->lastrate, now);
/* not enough time elapsed for a meaningful sample yet */
401 if (spent < td->ratecycle)
404 rate = ((td->io_blocks - td->rate_blocks) * td->bs) / spent;
405 if (rate < td->ratemin) {
406 printf("Client%d: min rate %d not met, got %ldKiB/sec\n", td->thread_number, td->ratemin, rate);
/* start the next sample window from here */
413 td->rate_blocks = td->io_blocks;
414 memcpy(&td->lastrate, now, sizeof(*now));
418 static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
420 if (mtime_since(&td->start, t) >= td->timeout * 1000)
/*
 * Main loop for a job using plain lseek + read(2)/write(2).
 * NOTE(review): this extract is missing several lines (timeval
 * declarations, error handling, fsync call bodies, loop braces);
 * the comments describe the visible flow only.
 */
426 static void do_sync_io(struct thread_data *td)
429 unsigned long blocks, msec, usec;
/* one td->bs-sized io per iteration until the job's blocks are done */
431 for (blocks = 0; blocks < td->blocks; blocks++) {
432 off_t offset = get_next_offset(td);
438 if (lseek(td->fd, offset, SEEK_SET) == -1) {
/* optional artificial per-io delay */
444 usec_sleep(td->delay_sleep);
446 gettimeofday(&s, NULL);
449 ret = read(td->fd, td->buf, td->bs);
451 ret = write(td->fd, td->buf, td->bs);
/* a short read/write is treated as failure */
453 if (ret < (int) td->bs) {
/* periodic fsync for buffered writes, every fsync_blocks ios */
461 if (should_fsync(td) && td->fsync_blocks &&
462 (td->io_blocks % td->fsync_blocks) == 0)
465 gettimeofday(&e, NULL);
467 usec = utime_since(&s, &e);
/* pace to the requested rate, then verify the minimum rate */
469 rate_throttle(td, usec);
471 if (check_min_rate(td, &e)) {
477 add_stat_sample(td, msec);
/* track per-io latency extremes for the final report */
479 if (msec < td->min_latency)
480 td->min_latency = msec;
481 if (msec > td->max_latency)
482 td->max_latency = msec;
484 if (runtime_exceeded(td, &e))
/* final fsync for buffered writes before returning */
488 if (should_fsync(td))
/*
 * Return an iocb to the free pool by clearing its slot in the in-use
 * bitmap. NOTE(review): the index computation would read more clearly
 * as plain pointer subtraction (iocb - td->aio_iocbs); also verify in
 * the full source whether aio_cur_depth is decremented here -- the
 * lines between 496 and 500 are missing from this extract.
 */
492 static void aio_put_iocb(struct thread_data *td, struct iocb *iocb)
494 long offset = ((long) iocb - (long) td->aio_iocbs)/ sizeof(struct iocb);
496 td->aio_iocbs_status[offset] = 0;
/*
 * Grab a free iocb (linear scan of the status array), prep it for a
 * read or write at the next offset, and stash the submit timestamp
 * (msec) in the callback field so completion latency can be computed.
 * Returns NULL when the queue is at full depth (no free slot).
 */
500 static struct iocb *aio_get_iocb(struct thread_data *td, struct timeval *t)
502 struct iocb *iocb = NULL;
505 for (i = 0; i < td->aio_depth; i++) {
506 if (td->aio_iocbs_status[i] == 0) {
507 td->aio_iocbs_status[i] = 1;
508 iocb = &td->aio_iocbs[i];
514 off_t off = get_next_offset(td);
/* each iocb owns a td->bs slice of the (depth * bs) buffer */
515 char *p = td->buf + i * td->bs;
518 io_prep_pread(iocb, td->fd, p, td->bs, off);
520 io_prep_pwrite(iocb, td->fd, p, td->bs, off);
/* abuse the callback pointer as a millisecond timestamp */
522 io_set_callback(iocb, (io_callback_t) msec_now(t));
/*
 * Submit one iocb; EAGAIN (queue momentarily full) is distinguished
 * from hard errors in the missing lines of this extract.
 */
528 static int aio_submit(struct thread_data *td, struct iocb *iocb)
533 ret = io_submit(*td->aio_ctx, 1, &iocb);
539 else if (errno == EAGAIN)
/* recover the submit timestamp stashed by io_set_callback() above */
548 #define iocb_time(iocb) ((unsigned long) (iocb)->data)
/*
 * Main loop for a libaio job: keep the queue filled up to aio_depth,
 * reap completions (non-blocking while below depth, blocking for at
 * least one when full), and account latency per completed event.
 * Many declarations/braces are missing from this extract.
 */
550 static void do_async_io(struct thread_data *td)
553 unsigned long blocks, msec, usec;
555 for (blocks = 0; blocks < td->blocks; blocks++) {
556 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
557 struct timespec *timeout;
558 int ret, i, min_evts = 0;
565 usec_sleep(td->delay_sleep);
567 gettimeofday(&s, NULL);
569 iocb = aio_get_iocb(td, &s);
571 ret = aio_submit(td, iocb);
/* below depth: poll with zero timeout; at depth: wait for >= 1 */
579 if (td->aio_cur_depth < td->aio_depth) {
587 ret = io_getevents(*td->aio_ctx, min_evts, td->aio_cur_depth, td->aio_events, timeout);
594 gettimeofday(&e, NULL);
596 for (i = 0; i < ret; i++) {
597 struct io_event *ev = td->aio_events + i;
/* latency = completion time - timestamp carried in the iocb */
603 msec = msec_now(&e) - iocb_time(iocb);
604 add_stat_sample(td, msec);
606 if (msec < td->min_latency)
607 td->min_latency = msec;
608 if (msec > td->max_latency)
609 td->max_latency = msec;
611 aio_put_iocb(td, iocb);
615 * the rate is batched for now, it should work for batches
616 * of completions except the very first one which may look
619 usec = utime_since(&s, &e);
621 rate_throttle(td, usec);
623 if (check_min_rate(td, &e)) {
628 if (runtime_exceeded(td, &e))
/*
 * Drain or cancel every in-flight iocb so io_destroy() is safe.
 * NOTE(review): the first loop frees td->aio_iocbs[i] by event index
 * rather than via the event's obj pointer -- verify against the full
 * source that index i actually corresponds to the completed iocb.
 */
633 static void cleanup_pending_aio(struct thread_data *td)
635 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
640 * get immediately available events, if any
642 r = io_getevents(*td->aio_ctx, 0, td->aio_cur_depth, td->aio_events, &ts);
644 for (i = 0; i < r; i++)
645 aio_put_iocb(td, &td->aio_iocbs[i]);
649 * now cancel remaining active events
651 for (i = 0; i < td->aio_depth; i++) {
/* slot free, nothing in flight here */
652 if (td->aio_iocbs_status[i] == 0)
655 r = io_cancel(*td->aio_ctx, &td->aio_iocbs[i], td->aio_events);
657 aio_put_iocb(td, &td->aio_iocbs[i]);
/* anything uncancellable: block until the kernel completes it */
660 if (td->aio_cur_depth)
661 io_getevents(*td->aio_ctx, td->aio_cur_depth, td->aio_cur_depth, td->aio_events, NULL);
/*
 * Tear down the libaio state for this job. NOTE(review): the
 * "if (ptr) free(ptr)" guards visible here are redundant --
 * free(NULL) is a no-op.
 */
664 static void cleanup_aio(struct thread_data *td)
666 if (td->aio_cur_depth)
667 cleanup_pending_aio(td);
670 io_destroy(*td->aio_ctx);
676 free(td->aio_events);
677 if (td->aio_iocbs_status)
678 free(td->aio_iocbs_status);
/*
 * Allocate the libaio context and per-depth iocb/event/status arrays.
 * NOTE(review): the malloc() results are not checked in the visible
 * lines -- a failed allocation here would crash later in the io loop.
 */
681 static int init_aio(struct thread_data *td)
683 td->aio_ctx = malloc(sizeof(*td->aio_ctx));
685 if (io_queue_init(td->aio_depth, td->aio_ctx)) {
690 td->aio_iocbs = malloc(td->aio_depth * sizeof(struct iocb));
691 td->aio_events = malloc(td->aio_depth * sizeof(struct io_event));
692 td->aio_iocbs_status = malloc(td->aio_depth * sizeof(char));
/*
 * Create and (when required) pre-fill the job's data file with zero
 * blocks. Reads need real data on disk, so they always pre-fill;
 * writes only do so when "overwrite" was requested. The write-loop
 * error handling, fsync/close and the buffer allocation for 'b' are
 * missing from this extract.
 */
696 static int create_file(struct thread_data *td)
/* cannot create without an explicit size= option */
701 if (!td->file_size) {
702 fprintf(stderr, "Need size for create\n");
708 * unless specifically asked for overwrite, let normal io extend it
710 if (!td_read(td) && !td->overwrite)
713 td->fd = open(td->file_name, O_WRONLY | O_CREAT | O_TRUNC, 0644);
719 td->blocks = td->file_size / td->bs;
721 memset(b, 0, td->bs);
723 for (i = 0; i < td->blocks; i++) {
724 int r = write(td->fd, b, td->bs);
745 static int file_exists(struct thread_data *td)
749 if (stat(td->file_name, &st) != -1)
752 return errno != ENOENT;
/*
 * Open (creating if allowed) the job file, size the job's block count
 * from the file size and offset, and optionally drop the page cache
 * for the range. Error-path lines, the O_DIRECT flag handling for
 * 'flags', and several branch bodies are missing from this extract.
 */
755 static int setup_file(struct thread_data *td)
760 if (!file_exists(td)) {
/* file absent and creation disabled: this job cannot run */
761 if (!td->create_file) {
773 td->fd = open(td->file_name, flags | O_RDONLY);
778 td->fd = open(td->file_name, flags | O_WRONLY | O_CREAT, 0600);
787 if (fstat(td->fd, &st) == -1) {
/* a size= option larger than the on-disk size wins */
792 if (td->file_size > st.st_size)
793 st.st_size = td->file_size;
/* presumably the no-size fallback default (1 GiB) -- TODO confirm */
796 td->file_size = 1024 * 1024 * 1024;
798 st.st_size = td->file_size;
801 td->blocks = (st.st_size - td->file_offset) / td->bs;
803 fprintf(stderr, "Client%d: no io blocks\n", td->thread_number);
/* drop cached pages so buffered runs start cold */
808 if (td->invalidate_cache) {
809 if (fadvise(td->fd, 0, st.st_size, POSIX_FADV_DONTNEED) < 0) {
/*
 * Body of one forked job process: attach the shared segment, pin to
 * the requested CPUs, set io priority, allocate the (aligned) io
 * buffer, handshake with the parent via startup_sem/td->mutex, then
 * run do_sync_io()/do_async_io() until done. Many error paths and
 * the cleanup tail are missing from this extract.
 */
818 static void *thread_main(int shm_id, int offset, char *argv[])
820 struct thread_data *td;
821 void *data, *ptr = NULL;
/* re-attach the shared job array in the child and find our slot */
826 data = shmat(shm_id, NULL, 0);
827 td = data + offset * sizeof(struct thread_data);
830 if (sched_setaffinity(td->pid, sizeof(td->cpumask), &td->cpumask) == -1) {
835 printf("Client%d (pid=%u) started\n", td->thread_number, td->pid);
/* cosmetic: rename the process so ps(1) shows which job it is */
837 sprintf(argv[0], "fio%d", offset);
839 if (td->use_aio && init_aio(td))
842 if (init_random_state(td))
846 if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
/* aio needs one bs-sized slice per in-flight iocb; +MASK for ALIGN */
856 ptr = malloc(td->bs * td->aio_depth + MASK);
858 ptr = malloc(td->bs + MASK);
860 td->buf = ALIGN(ptr);
/* tell the parent we are ready, then wait for the start signal */
862 sem_post(&startup_sem);
863 sem_wait(&td->mutex);
865 gettimeofday(&td->start, NULL);
868 memcpy(&td->lastrate, &td->start, sizeof(td->start));
870 memcpy(&td->stat_sample_time, &td->start, sizeof(td->start));
877 td->runtime = mtime_since_now(&td->start);
/* NOTE(review): second post/wait pair -- presumably the shutdown
 * handshake; confirm ordering against the full source */
887 sem_post(&startup_sem);
888 sem_wait(&td->mutex);
892 td->runstate = TD_EXITED;
/* detach/remove the shared memory segment (body not in this extract) */
897 static void free_shm(void)
/*
 * Print the per-job summary line: totals, bandwidth, latency extremes
 * and mean/stddev of the collected latency and bandwidth samples.
 * NOTE(review): the deviation formula uses (mean*mean)/n where the
 * standard sample-variance form is sum_sq - n*mean^2 (equivalently
 * sum_sq - sum^2/n) -- this looks like a transcription bug worth
 * fixing; also guard lines for runtime==0 / n<=1 are not visible in
 * this extract (division by zero / negative sqrt otherwise).
 */
902 static void show_thread_status(struct thread_data *td)
904 int prio, prio_class;
905 unsigned long bw = 0;
906 double n_lat, n_bw, m_lat, m_bw, dev_lat, dev_bw;
/* nothing happened and no error: stay quiet */
908 if (!td->io_blocks && !td->error)
912 bw = (td->io_blocks * td->bs) / td->runtime;
/* split the ioprio word back into class + level for display */
914 prio = td->ioprio & 0xff;
915 prio_class = td->ioprio >> IOPRIO_CLASS_SHIFT;
917 n_lat = (double) td->stat_time_samples;
918 n_bw = (double) td->stat_bw_samples;
920 m_lat = (double) td->stat_time / n_lat;
921 dev_lat = sqrt(((double) td->stat_time_sq - (m_lat * m_lat) / n_lat) / (n_lat - 1));
922 m_bw = (double) td->stat_bw / n_bw;
923 dev_bw = sqrt(((double) td->stat_bw_sq - (m_bw * m_bw) / n_bw) / (n_bw - 1));
925 printf("Client%d: err=%2d, io=%6luMiB, bw=%6luKiB/sec, latmax=%5lumsec, latavg=%5.02fmsec, latdev=%5.02fmsec, bwavg=%5.02fKiB/sec, bwdev=%5.02fKiB/sec\n", td->thread_number, td->error, td->io_blocks * td->bs >> 20, bw, td->max_latency, m_lat, dev_lat, m_bw, dev_bw);
/*
 * Derive the per-io pacing interval (rate_usec_cycle) from the
 * requested rate (KiB/sec) and block size. NOTE(review): if
 * td->rate * 1024 < td->bs, nr_reads_per_sec is 0 and the division
 * on the next line faults -- verify a guard exists in the missing
 * lines (e.g. an early return when !td->rate).
 */
928 static int setup_rate(struct thread_data *td)
930 int nr_reads_per_sec;
935 if (td->rate < td->ratemin) {
936 fprintf(stderr, "min rate larger than nominal rate\n");
940 nr_reads_per_sec = td->rate * 1024 / td->bs;
941 td->rate_usec_cycle = 1000000 / nr_reads_per_sec;
942 td->rate_pending_usleep = 0;
/*
 * Claim the next slot in the shared job array and seed it from the
 * def_thread template ("global" jobs presumably reuse def_thread
 * itself -- the branch is missing from this extract).
 */
946 static struct thread_data *get_new_job(int global)
948 struct thread_data *td;
952 if (thread_number >= max_jobs)
955 td = &threads[thread_number++];
956 memset(td, 0, sizeof(*td));
/* thread_number is 1-based for display purposes */
959 td->thread_number = thread_number;
960 td->ddir = def_thread.ddir;
961 td->bs = def_thread.bs;
962 td->odirect = def_thread.odirect;
963 td->ratecycle = def_thread.ratecycle;
964 td->sequential = def_thread.sequential;
965 td->timeout = def_thread.timeout;
966 td->create_file = def_thread.create_file;
967 td->overwrite = def_thread.overwrite;
968 td->invalidate_cache = def_thread.invalidate_cache;
969 memcpy(&td->cpumask, &def_thread.cpumask, sizeof(td->cpumask));
/*
 * Release a job slot on parse failure. NOTE(review): this only zeroes
 * the slot; it does not decrement thread_number in the visible lines
 * -- confirm the missing lines handle that.
 */
974 static void put_job(struct thread_data *td)
976 memset(&threads[td->thread_number - 1], 0, sizeof(*td));
/*
 * Finalize a parsed job: record file name and io priority, init the
 * start semaphore, and print the configuration line.
 * NOTE(review): strcpy() into td->file_name is unbounded -- a long
 * filename overflows the field; prefer snprintf with sizeof.
 */
980 static int add_job(struct thread_data *td, const char *filename, int prioclass,
/* the defaults template is never run as a job */
983 if (td == &def_thread)
986 strcpy(td->file_name, filename);
/* pshared=1: the semaphore lives in shared memory, used across fork */
987 sem_init(&td->mutex, 1, 0);
988 td->min_latency = 10000000;
989 td->ioprio = (prioclass << IOPRIO_CLASS_SHIFT) | prio;
/* aio without an explicit depth gets a default (missing line) */
991 if (td->use_aio && !td->aio_depth)
997 printf("Client%d: file=%s, rw=%d, prio=%d/%d, seq=%d, odir=%d, bs=%d, rate=%d, aio=%d, aio_depth=%d\n", td->thread_number, filename, td->ddir, prioclass, prio, td->sequential, td->odirect, td->bs, td->rate, td->use_aio, td->aio_depth);
/*
 * BUG(review): cpumask is passed BY VALUE, so CPU_SET() here mutates
 * a local copy and the caller's td->cpumask is never updated. The
 * parameter should be a cpu_set_t * for this to have any effect.
 */
1001 static void fill_cpu_mask(cpu_set_t cpumask, int cpu)
1007 for (i = 0; i < sizeof(int) * 8; i++) {
1009 CPU_SET(i, &cpumask);
/*
 * Copy an option value up to the next ',', '}' or NUL into output.
 * NOTE(review): the terminating output[i] = '\0' is not visible in
 * this extract -- confirm it exists, otherwise output is unterminated.
 */
1013 static void fill_option(const char *input, char *output)
1018 while (input[i] != ',' && input[i] != '}' && input[i] != '\0') {
1019 output[i] = input[i];
1027 * convert string after '=' into decimal value, noting any size suffix
1029 static int str_cnv(char *p, unsigned long long *val)
1035 str = strstr(p, "=");
/* look at the char before the trailing newline for a k/m/g suffix */
1043 switch (str[len - 2]) {
1054 mult = 1024 * 1024 * 1024;
1058 *val = strtoul(str, NULL, 10);
1059 if (*val == ULONG_MAX && errno == ERANGE)
/*
 * Parse jobs given on the command line as "name{opt=val,opt=val,...}"
 * arguments starting at argv[index]. Each recognized option is copied
 * out with fill_option()/str_cnv() and stored on the freshly acquired
 * job. Many closing braces, the '}'-terminator handling and the
 * option-default assignments are missing from this extract.
 */
1075 static void parse_jobs_cmd(int argc, char *argv[], int index)
1077 struct thread_data *td;
1078 unsigned int prio, prioclass, cpu;
1079 char *string, *filename, *p, *c;
/* scratch buffers for option values; NOTE(review): fixed 256 bytes,
 * fill_option() does not know this bound */
1082 string = malloc(256);
1083 filename = malloc(256);
1085 for (i = index; i < argc; i++) {
/* a job argument must contain a '{' option block */
1088 c = strpbrk(p, "{");
1094 td = get_new_job(0);
1101 c = strstr(p, "rw=");
1105 td->ddir = DDIR_READ;
1107 td->ddir = DDIR_WRITE;
1110 c = strstr(p, "prio=");
1116 c = strstr(p, "prioclass=");
/* single-digit class parsed directly from the character */
1119 prioclass = *c - '0';
1122 c = strstr(p, "file=");
1125 fill_option(c, filename);
1128 c = strstr(p, "bs=");
1131 fill_option(c, string);
1132 td->bs = strtoul(string, NULL, 10);
1136 c = strstr(p, "direct=");
1145 c = strstr(p, "delay=");
1148 fill_option(c, string);
1149 td->delay_sleep = strtoul(string, NULL, 10);
1152 c = strstr(p, "rate=");
1155 fill_option(c, string);
1156 td->rate = strtoul(string, NULL, 10);
1159 c = strstr(p, "ratemin=");
1162 fill_option(c, string);
1163 td->ratemin = strtoul(string, NULL, 10);
1166 c = strstr(p, "ratecycle=");
1169 fill_option(c, string);
1170 td->ratecycle = strtoul(string, NULL, 10);
1173 c = strstr(p, "cpumask=");
1176 fill_option(c, string);
1177 cpu = strtoul(string, NULL, 10);
/* see fill_cpu_mask(): pass-by-value means this has no effect */
1178 fill_cpu_mask(td->cpumask, cpu);
1181 c = strstr(p, "fsync=");
1184 fill_option(c, string);
1185 td->fsync_blocks = strtoul(string, NULL, 10);
1188 c = strstr(p, "startdelay=");
1191 fill_option(c, string);
1192 td->start_delay = strtoul(string, NULL, 10);
1195 c = strstr(p, "timeout=");
1198 fill_option(c, string);
1199 td->timeout = strtoul(string, NULL, 10);
1202 c = strstr(p, "invalidate=");
1206 td->invalidate_cache = 1;
1208 td->invalidate_cache = 0;
/* size= and offset= accept k/m/g suffixes via str_cnv() */
1211 c = strstr(p, "size=");
1214 str_cnv(c, &td->file_size);
1217 c = strstr(p, "offset=");
1220 str_cnv(c, &td->file_offset);
1223 c = strstr(p, "aio_depth=");
1226 fill_option(c, string);
1227 td->aio_depth = strtoul(string, NULL, 10);
/* bare flags (no '=' payload) */
1230 c = strstr(p, "aio");
1234 c = strstr(p, "create");
1236 td->create_file = 1;
1238 c = strstr(p, "overwrite");
1242 c = strstr(p, "random");
1245 c = strstr(p, "sequential");
/* register the job; on failure the slot is presumably put back */
1249 if (add_job(td, filename, prioclass, prio))
/*
 * If option 'name' occurs in line 'p', parse its "=value[kmg]"
 * payload with str_cnv(). Returns 0 on a successful match + parse,
 * non-zero when the option is absent or the value is malformed.
 * Reconstruction restores only the missing "return 1" for the
 * no-match case and the braces.
 */
static int check_strcnv(char *p, char *name, unsigned long long *val)
{
	if (!strstr(p, name))
		return 1;

	return str_cnv(p, val);
}
/*
 * Match "name=<int>" or "name = <int>" on an ini line and store the
 * value. NOTE(review): the declaration of the scratch 'str' buffer is
 * missing from this extract -- it is sprintf'ed into, so confirm it
 * is large enough for the longest option name plus "= %d".
 */
1265 static int check_int(char *p, char *name, unsigned int *val)
1269 sprintf(str, "%s=%%d", name);
1270 if (sscanf(p, str, val) == 1)
/* also accept the spaced "name = value" form */
1273 sprintf(str, "%s = %%d", name);
1274 if (sscanf(p, str, val) == 1)
/*
 * A line is skippable when it contains only whitespace/control chars
 * (the comment-character check is in the missing lines).
 * NOTE(review): strlen() is re-evaluated every iteration -- hoist it.
 */
1280 static int is_empty_or_comment(char *line)
1284 for (i = 0; i < strlen(line); i++) {
1287 if (!isspace(line[i]) && !iscntrl(line[i]))
/*
 * Parse the ini-style job file: "[name]" section headers start a job
 * ("[global]" updates the defaults template), followed by one option
 * per line handled by the check_int/check_strcnv/strncmp cascade.
 * NOTE(review): sscanf "[%s]" stops at whitespace and leaves the
 * trailing ']' in 'name' -- that is why strlen-1 is chopped below;
 * the allocation of 'name' itself is in the missing lines.
 */
1294 static int parse_jobs_ini(char *file)
1296 unsigned int prioclass, prio, cpu, global;
1297 struct thread_data *td;
1298 char *string, *name;
1303 f = fopen(file, "r");
1309 string = malloc(4096);
/* outer loop: find the next section header */
1312 while ((p = fgets(string, 4096, f)) != NULL) {
1313 if (is_empty_or_comment(p))
1315 if (sscanf(p, "[%s]", name) != 1)
1318 global = !strncmp(name, "global", 6);
/* chop the ']' (and/or newline) sscanf left on the name */
1320 name[strlen(name) - 1] = '\0';
1322 td = get_new_job(global);
/* inner loop: options for this section until the next header */
1330 while ((p = fgets(string, 4096, f)) != NULL) {
1331 if (is_empty_or_comment(p))
1335 if (!check_int(p, "bs", &td->bs)) {
1340 if (!check_int(p, "rw", &td->ddir)) {
1344 if (!check_int(p, "prio", &prio)) {
1348 if (!check_int(p, "prioclass", &prioclass)) {
1352 if (!check_int(p, "direct", &td->odirect)) {
1356 if (!check_int(p, "rate", &td->rate)) {
1360 if (!check_int(p, "ratemin", &td->ratemin)) {
1364 if (!check_int(p, "ratecycle", &td->ratecycle)) {
1368 if (!check_int(p, "delay", &td->delay_sleep)) {
1372 if (!check_int(p, "cpumask", &cpu)) {
/* see fill_cpu_mask(): pass-by-value means this has no effect */
1373 fill_cpu_mask(td->cpumask, cpu);
1377 if (!check_int(p, "fsync", &td->fsync_blocks)) {
1381 if (!check_int(p, "startdelay", &td->start_delay)) {
1385 if (!check_int(p, "timeout", &td->timeout)) {
1389 if (!check_int(p, "invalidate",&td->invalidate_cache)) {
1393 if (!check_int(p, "aio_depth", &td->aio_depth)) {
1397 if (!check_strcnv(p, "size", &td->file_size)) {
1401 if (!check_strcnv(p, "offset", &td->file_offset)) {
/* bare keyword options */
1405 if (!strncmp(p, "sequential", 10)) {
1410 if (!strncmp(p, "random", 6)) {
1415 if (!strncmp(p, "aio", 3)) {
1420 if (!strncmp(p, "create", 6)) {
1421 td->create_file = 1;
1425 if (!strncmp(p, "overwrite", 9)) {
1430 printf("Client%d: bad option %s\n",td->thread_number,p);
1434 if (add_job(td, name, prioclass, prio))
/*
 * Parse leading "-x<value>" style global options (the option-letter
 * switch lines are missing from this extract); returns the index of
 * the first non-option argument so parse_jobs_cmd() can take over.
 */
1444 static int parse_options(int argc, char *argv[])
1448 for (i = 1; i < argc; i++) {
1449 char *parm = argv[i];
1458 def_thread.sequential = !!atoi(parm);
/* block size option is given in KiB, hence the << 10 */
1462 def_thread.bs = atoi(parm);
1463 def_thread.bs <<= 10;
1464 if (!def_thread.bs) {
1465 printf("bad block size\n");
1466 def_thread.bs = DEF_BS;
1471 def_thread.timeout = atoi(parm);
1475 repeatable = !!atoi(parm);
1479 rate_quit = !!atoi(parm);
1483 def_thread.odirect = !!atoi(parm);
/* -f takes the job ini file as a separate argument */
1486 if (i + 1 >= argc) {
1487 printf("-f needs file as arg\n");
1490 ini_file = strdup(argv[i+1]);
1494 printf("bad option %s\n", argv[i]);
/*
 * One status line whenever a job starts or exits, plus the current
 * aggregate commit rate when rate targets are set.
 */
1502 static void print_thread_status(struct thread_data *td, int nr_running,
1503 int t_rate, int m_rate, int die)
1505 printf("Client%d: %s\n", td->thread_number, die ? "exited" : "spawned");
1507 printf("Threads now running: %d", nr_running);
1508 if (m_rate || t_rate)
1509 printf(", commitrate %d/%dKiB/sec", t_rate, m_rate);
/*
 * Reap exited children: waitpid() each TD_EXITED job, move it to
 * TD_REAPED, and subtract its rates from the running aggregates.
 */
1513 static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
1518 * reap exited threads (TD_EXITED -> TD_REAPED)
1520 for (i = 0; i < thread_number; i++) {
1521 struct thread_data *td = &threads[i];
1523 if (td->runstate != TD_EXITED)
1526 td->runstate = TD_REAPED;
1527 waitpid(td->pid, NULL, 0);
1529 (*m_rate) -= td->ratemin;
1530 (*t_rate) -= td->rate;
1535 print_thread_status(td, *nr_running, *t_rate, *m_rate, 1);
/*
 * Orchestrate the whole run: fork one child per job (honouring
 * start_delay), wait for each to signal readiness on startup_sem,
 * release them together via td->mutex, then reap until all exit.
 * The fork() call, loop braces and the outer while-loop around the
 * create phase are missing from this extract.
 */
1539 static void run_threads(char *argv[])
1541 struct timeval genesis;
1542 struct thread_data *td;
1543 unsigned long spent;
1544 int i, todo, nr_running, m_rate, t_rate;
1546 gettimeofday(&genesis, NULL);
1548 printf("Starting %d threads\n", thread_number);
1551 signal(SIGINT, sig_handler);
1553 todo = thread_number;
1555 m_rate = t_rate = 0;
1559 * create threads (TD_NOT_CREATED -> TD_CREATED)
1561 for (i = 0; i < thread_number; i++) {
1564 if (td->runstate != TD_NOT_CREATED)
1568 * never got a chance to start, killed by other
1569 * thread for some reason
1571 if (td->terminate) {
/* respect per-job startdelay before forking it */
1576 if (td->start_delay) {
1577 spent = mtime_since_now(&genesis);
1579 if (td->start_delay * 1000 > spent)
1583 td->runstate = TD_CREATED;
1584 sem_init(&startup_sem, 1, 1);
/* parent blocks here until the child posts readiness */
1588 sem_wait(&startup_sem);
/* in the child: run the job, never returns to this loop */
1590 thread_main(shm_id, i, argv);
1596 * start created threads (TD_CREATED -> TD_STARTED)
1598 for (i = 0; i < thread_number; i++) {
1599 struct thread_data *td = &threads[i];
1601 if (td->runstate != TD_CREATED)
1604 td->runstate = TD_STARTED;
1606 m_rate += td->ratemin;
/* release the child from its sem_wait(&td->mutex) */
1608 sem_post(&td->mutex);
1610 print_thread_status(td, nr_running, t_rate, m_rate, 0);
1613 reap_threads(&nr_running, &t_rate, &m_rate);
/* wait for stragglers */
1619 while (nr_running) {
1620 reap_threads(&nr_running, &t_rate, &m_rate);
/*
 * Allocate the shared job array via SysV shm so forked children see
 * the same thread_data structs; on EINVAL (segment too large for the
 * host) max_jobs is presumably halved and the shmget retried -- the
 * retry lines are missing from this extract.
 */
1625 int setup_thread_area(void)
1628 * 1024 is too much on some machines, scale max_jobs if
1629 * we get a failure that looks like too large a shm segment
1632 int s = max_jobs * sizeof(struct thread_data);
1634 shm_id = shmget(0, s, IPC_CREAT | 0600);
1637 if (errno != EINVAL) {
1648 threads = shmat(shm_id, NULL, 0);
1649 if (threads == (void *) -1) {
/*
 * Entry point: set up shared memory, load defaults, parse options and
 * jobs (ini file takes precedence over command-line jobs), run them,
 * then aggregate and print per-direction (read/write) summary stats.
 * The run_threads() call, several loop braces and the final return
 * are in lines missing from this extract.
 */
1658 int main(int argc, char *argv[])
/* [0] = read totals, [1] = write totals (indexed by td->ddir) */
1660 static unsigned long max_run[2], min_run[2], total_blocks[2];
1661 static unsigned long max_bw[2], min_bw[2], maxl[2], minl[2];
1662 static unsigned long read_mb, write_mb, read_agg, write_agg;
1665 if (setup_thread_area())
/* inherit the parent's CPU affinity as the default job mask */
1668 if (sched_getaffinity(getpid(), sizeof(cpu_set_t), &def_thread.cpumask) == -1) {
1669 perror("sched_getaffinity");
/* populate the defaults template used by get_new_job() */
1676 def_thread.ddir = DDIR_READ;
1677 def_thread.bs = DEF_BS;
1678 def_thread.odirect = DEF_ODIRECT;
1679 def_thread.ratecycle = DEF_RATE_CYCLE;
1680 def_thread.sequential = DEF_SEQUENTIAL;
1681 def_thread.timeout = DEF_TIMEOUT;
1682 def_thread.create_file = DEF_CREATE;
1683 def_thread.overwrite = DEF_OVERWRITE;
1684 def_thread.invalidate_cache = DEF_INVALIDATE;
1686 i = parse_options(argc, argv);
1689 if (parse_jobs_ini(ini_file))
1692 parse_jobs_cmd(argc, argv, i);
1694 if (!thread_number) {
1695 printf("Nothing to do\n");
/* seed the minimum trackers so the first sample always wins */
1701 min_bw[0] = min_run[0] = ~0UL;
1702 min_bw[1] = min_run[1] = ~0UL;
1703 minl[0] = minl[1] = ~0UL;
1704 for (i = 0; i < thread_number; i++) {
1705 struct thread_data *td = &threads[i];
1706 unsigned long bw = 0;
1711 if (td->runtime < min_run[td->ddir])
1712 min_run[td->ddir] = td->runtime;
1713 if (td->runtime > max_run[td->ddir])
1714 max_run[td->ddir] = td->runtime;
/* NOTE(review): guard for td->runtime == 0 is presumably in the
 * missing lines -- otherwise this divides by zero */
1717 bw = (td->io_blocks * td->bs) / td->runtime;
1718 if (bw < min_bw[td->ddir])
1719 min_bw[td->ddir] = bw;
1720 if (bw > max_bw[td->ddir])
1721 max_bw[td->ddir] = bw;
1722 if (td->max_latency < minl[td->ddir])
1723 minl[td->ddir] = td->max_latency;
1724 if (td->max_latency > maxl[td->ddir])
1725 maxl[td->ddir] = td->max_latency;
1727 total_blocks[td->ddir] += td->io_blocks;
1730 read_mb += (td->bs * td->io_blocks) >> 20;
1732 read_agg += (td->io_blocks * td->bs) / td->runtime;
1734 write_mb += (td->bs * td->io_blocks) >> 20;
1736 write_agg += (td->io_blocks * td->bs) / td->runtime;
1740 show_thread_status(td);
1743 printf("Run status:\n");
1744 if (max_run[DDIR_READ])
1745 printf(" READ: io=%luMiB, aggrb=%lu, minl=%lu, maxl=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", read_mb, read_agg, minl[0], maxl[0], min_bw[0], max_bw[0], min_run[0], max_run[0]);
1746 if (max_run[DDIR_WRITE])
1747 printf(" WRITE: io=%luMiB, aggrb=%lu, minl=%lu, maxl=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", write_mb, write_agg, minl[1], maxl[1], min_bw[1], max_bw[1], min_run[1], max_run[1]);