/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <sys/types.h>
#include <sys/ioctl.h>

#define ALIGN(buf)	(char *) (((unsigned long) (buf) + MASK) & ~(MASK))
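
/*
 * ALIGN rounds a buffer address up to the next MASK + 1 boundary by adding
 * MASK and clearing the low bits; MASK + 1 must be a power of two. A minimal
 * integer sketch of the same trick (align is assumed to be a power of two):
 */
static inline unsigned long align_up(unsigned long v, unsigned long align)
{
	/* e.g. align_up(0x1001, 0x1000) == 0x2000 */
	return (v + align - 1) & ~(align - 1);
}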
int thread_number = 0;
static char run_str[MAX_JOBS + 1];

static LIST_HEAD(disk_list);
static struct itimerval itimer;
static struct timeval genesis;

static void update_io_ticks(void);
static void disk_util_timer_arm(void);
static void print_thread_status(void);

#define should_fsync(td)	((td_write(td) || td_rw(td)) && (!(td)->odirect || (td)->override_sync))

static sem_t startup_sem;

#define TERMINATE_ALL		(-1)

static void terminate_threads(int group_id)
{
	int i;

	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		if (group_id == TERMINATE_ALL || group_id == td->groupid) {
			td->terminate = 1;
			td->start_delay = 0;
		}
	}
}

static void sig_handler(int sig)
{
	switch (sig) {
	case SIGALRM:
		update_io_ticks();
		disk_util_timer_arm();
		print_thread_status();
		break;
	default:
		printf("\nfio: terminating on signal\n");
		fflush(stdout);
		terminate_threads(TERMINATE_ALL);
		break;
	}
}
static unsigned long utime_since(struct timeval *s, struct timeval *e)
{
	double sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= (double) 1000000;

	return sec + usec;
}

static unsigned long utime_since_now(struct timeval *s)
{
	struct timeval t;

	gettimeofday(&t, NULL);
	return utime_since(s, &t);
}

static unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
	double sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= (double) 1000;
	usec /= (double) 1000;

	return sec + usec;
}

static unsigned long mtime_since_now(struct timeval *s)
{
	struct timeval t;

	gettimeofday(&t, NULL);
	return mtime_since(s, &t);
}

static inline unsigned long msec_now(struct timeval *s)
{
	return s->tv_sec * 1000 + s->tv_usec / 1000;
}

static unsigned long time_since_now(struct timeval *s)
{
	return mtime_since_now(s) / 1000;
}

static int random_map_free(struct thread_data *td, unsigned long long block)
{
	unsigned int idx = RAND_MAP_IDX(td, block);
	unsigned int bit = RAND_MAP_BIT(td, block);

	return (td->file_map[idx] & (1UL << bit)) == 0;
}
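
/*
 * The file map keeps one bit per min_bs-sized block. RAND_MAP_IDX and
 * RAND_MAP_BIT (defined elsewhere) split a block number into a map word
 * and a bit within it; a minimal sketch of that split, assuming
 * BLOCKS_PER_MAP bits per unsigned long:
 */
static inline unsigned int map_idx_sketch(unsigned long long block)
{
	return block / BLOCKS_PER_MAP;	/* which file_map[] word */
}

static inline unsigned int map_bit_sketch(unsigned long long block)
{
	return block % BLOCKS_PER_MAP;	/* which bit in that word */
}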
static int get_next_free_block(struct thread_data *td, unsigned long long *b)
{
	int i = 0;

	*b = 0;
	while ((*b) * td->min_bs < td->io_size) {
		if (td->file_map[i] != -1UL) {
			*b += ffz(td->file_map[i]);
			return 0;
		}

		*b += BLOCKS_PER_MAP;
		i++;
	}

	return 1;
}

static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned long block = io_u->offset / td->min_bs;
	unsigned int blocks = 0;

	while (blocks < (io_u->buflen / td->min_bs)) {
		unsigned int idx, bit;

		if (!random_map_free(td, block))
			break;

		idx = RAND_MAP_IDX(td, block);
		bit = RAND_MAP_BIT(td, block);

		assert(idx < td->num_maps);

		td->file_map[idx] |= (1UL << bit);
		block++;
		blocks++;
	}

	if ((blocks * td->min_bs) < io_u->buflen)
		io_u->buflen = blocks * td->min_bs;
}

static inline void add_stat_sample(struct io_stat *is, unsigned long val)
{
	if (val > is->max_val)
		is->max_val = val;
	if (val < is->min_val)
		is->min_val = val;

	is->val += val;
	is->val_sq += val * val;
	is->samples++;
}
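
/*
 * Keeping only the running sum (val) and sum of squares (val_sq) is enough
 * to derive the mean and sample deviation at reporting time without storing
 * individual samples; see calc_lat() further down. A minimal sketch of that
 * readout, assuming the fields above plus a samples counter:
 */
static void stat_mean_dev_sketch(struct io_stat *is, double *mean, double *dev)
{
	double n = (double) is->samples;

	*mean = (double) is->val / n;
	*dev = sqrt(((double) is->val_sq - n * (*mean) * (*mean)) / (n - 1.0));
}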
static void add_log_sample(struct thread_data *td, struct io_log *iolog,
			   unsigned long val, int ddir)
{
	if (iolog->nr_samples == iolog->max_samples) {
		int new_size = sizeof(struct io_sample) * iolog->max_samples * 2;

		iolog->log = realloc(iolog->log, new_size);
		iolog->max_samples <<= 1;
	}

	iolog->log[iolog->nr_samples].val = val;
	iolog->log[iolog->nr_samples].time = mtime_since_now(&td->epoch);
	iolog->log[iolog->nr_samples].ddir = ddir;
	iolog->nr_samples++;
}

static void add_clat_sample(struct thread_data *td, int ddir, unsigned long msec)
{
	add_stat_sample(&td->clat_stat[ddir], msec);

	if (td->clat_log)
		add_log_sample(td, td->clat_log, msec, ddir);
}

static void add_slat_sample(struct thread_data *td, int ddir, unsigned long msec)
{
	add_stat_sample(&td->slat_stat[ddir], msec);

	if (td->slat_log)
		add_log_sample(td, td->slat_log, msec, ddir);
}

static void add_bw_sample(struct thread_data *td, int ddir)
{
	unsigned long spent = mtime_since_now(&td->stat_sample_time[ddir]);
	unsigned long rate;

	if (spent < td->bw_avg_time)
		return;

	rate = (td->this_io_bytes[ddir] - td->stat_io_bytes[ddir]) / spent;
	add_stat_sample(&td->bw_stat[ddir], rate);

	if (td->bw_log)
		add_log_sample(td, td->bw_log, rate, ddir);

	gettimeofday(&td->stat_sample_time[ddir], NULL);
	td->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
}
static int get_next_offset(struct thread_data *td, unsigned long long *offset)
{
	unsigned long long b, rb;
	long r;

	if (!td->sequential) {
		unsigned long max_blocks = td->io_size / td->min_bs;
		int loops = 50;

		do {
			lrand48_r(&td->random_state, &r);
			b = ((max_blocks - 1) * r / (RAND_MAX + 1.0));
			rb = b + (td->file_offset / td->min_bs);
			loops--;
		} while (!random_map_free(td, rb) && loops);

		if (!loops) {
			if (get_next_free_block(td, &b))
				return 1;
		}
	} else
		b = td->last_pos / td->min_bs;

	*offset = (b * td->min_bs) + td->file_offset;
	if (*offset > td->real_file_size)
		return 1;

	return 0;
}

static unsigned int get_next_buflen(struct thread_data *td)
{
	unsigned int buflen;
	long r;

	if (td->min_bs == td->max_bs)
		buflen = td->min_bs;
	else {
		lrand48_r(&td->bsrange_state, &r);
		buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
		buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);
	}

	if (buflen > td->io_size - td->this_io_bytes[td->ddir])
		buflen = td->io_size - td->this_io_bytes[td->ddir];

	return buflen;
}
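
/*
 * The buffer length is drawn by scaling the lrand48_r() output, which lies
 * in [0, RAND_MAX], into [1, max_bs), then rounding up to a min_bs multiple
 * with the (x + m - 1) & ~(m - 1) trick (min_bs is assumed to be a power of
 * two). A minimal sketch of the scaling step:
 */
static unsigned int scale_rand_sketch(long r, unsigned int max)
{
	/* maps r in [0, RAND_MAX] uniformly into [1, max) */
	return 1 + (unsigned int) ((double) (max - 1) * r / (RAND_MAX + 1.0));
}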
/*
 * busy looping version for the last few usec
 */
static void __usec_sleep(unsigned int usec)
{
	struct timeval start;

	gettimeofday(&start, NULL);
	while (utime_since_now(&start) < usec)
		;	/* spin */
}

static void usec_sleep(struct thread_data *td, unsigned long usec)
{
	struct timespec req, rem;

	req.tv_sec = usec / 1000000;
	req.tv_nsec = (usec - req.tv_sec * 1000000) * 1000;

	do {
		rem.tv_sec = rem.tv_nsec = 0;
		if (nanosleep(&req, &rem) < 0)
			break;

		if ((rem.tv_sec + rem.tv_nsec) == 0)
			break;

		req.tv_nsec = rem.tv_nsec;
		req.tv_sec = rem.tv_sec;

		usec = rem.tv_sec * 1000000 + rem.tv_nsec / 1000;
	} while (!td->terminate);
}

static void rate_throttle(struct thread_data *td, unsigned long time_spent,
			  unsigned long bytes)
{
	unsigned long usec_cycle;

	usec_cycle = td->rate_usec_cycle * (bytes / td->min_bs);

	if (time_spent < usec_cycle) {
		unsigned long s = usec_cycle - time_spent;

		td->rate_pending_usleep += s;
		if (td->rate_pending_usleep >= 100000) {
			usec_sleep(td, td->rate_pending_usleep);
			td->rate_pending_usleep = 0;
		}
	} else {
		long overtime = time_spent - usec_cycle;

		td->rate_pending_usleep -= overtime;
	}
}
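
/*
 * Worked example of the bookkeeping above, with assumed numbers: at a
 * rate budget of rate_usec_cycle == 1000 usec per min_bs block, completing
 * 8 blocks in 5000 usec leaves usec_cycle - time_spent == 3000 usec of
 * surplus. The surplus is banked in rate_pending_usleep and only slept off
 * once at least 100 msec accumulates, so throttling happens in a few coarse
 * sleeps instead of many tiny ones; running slow instead drains the bank by
 * the overtime.
 */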
static int check_min_rate(struct thread_data *td, struct timeval *now)
{
	unsigned long spent;
	unsigned long rate;
	int ddir = td->ddir;

	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)
		return 0;

	/*
	 * if rate_bytes is set, a sample window is already running
	 */
	if (td->rate_bytes) {
		spent = mtime_since(&td->lastrate, now);
		if (spent < td->ratecycle)
			return 0;

		rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
		if (rate < td->ratemin) {
			printf("Client%d: min rate %d not met, got %luKiB/sec\n", td->thread_number, td->ratemin, rate);
			terminate_threads(td->groupid);
			return 1;
		}
	}

	td->rate_bytes = td->this_io_bytes[ddir];
	memcpy(&td->lastrate, now, sizeof(*now));

	return 0;
}

static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
	if (!td->timeout)
		return 0;
	if (mtime_since(&td->epoch, t) >= td->timeout * 1000)
		return 1;

	return 0;
}
static void fill_random_bytes(struct thread_data *td,
			      unsigned char *p, unsigned int len)
{
	unsigned int todo;
	double r;

	while (len) {
		drand48_r(&td->verify_state, &r);

		/*
		 * lrand48_r seems to be broken and only fills the bottom
		 * 32 bits, even on 64-bit archs with 64-bit longs
		 */
		todo = sizeof(r);
		if (todo > len)
			todo = len;

		memcpy(p, &r, todo);

		p += todo;
		len -= todo;
	}
}

static void hexdump(void *buffer, int len)
{
	unsigned char *p = buffer;
	int i;

	for (i = 0; i < len; i++)
		printf("%02x", p[i]);
	printf("\n");
}

static int verify_io_u_crc32(struct verify_header *hdr, struct io_u *io_u)
{
	unsigned char *p = (unsigned char *) io_u->buf;
	unsigned long c;
	int ret;

	p += sizeof(*hdr);
	c = crc32(p, hdr->len - sizeof(*hdr));
	ret = c != hdr->crc32;

	if (ret) {
		fprintf(stderr, "crc32: verify failed at %llu/%u\n", io_u->offset, io_u->buflen);
		fprintf(stderr, "crc32: wanted %lx, got %lx\n", hdr->crc32, c);
	}

	return ret;
}

static int verify_io_u_md5(struct verify_header *hdr, struct io_u *io_u)
{
	unsigned char *p = (unsigned char *) io_u->buf;
	struct md5_ctx md5_ctx;
	int ret;

	memset(&md5_ctx, 0, sizeof(md5_ctx));
	p += sizeof(*hdr);
	md5_update(&md5_ctx, p, hdr->len - sizeof(*hdr));

	ret = memcmp(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
	if (ret) {
		fprintf(stderr, "md5: verify failed at %llu/%u\n", io_u->offset, io_u->buflen);
		hexdump(hdr->md5_digest, sizeof(hdr->md5_digest));
		hexdump(md5_ctx.hash, sizeof(md5_ctx.hash));
	}

	return ret;
}

static int verify_io_u(struct io_u *io_u)
{
	struct verify_header *hdr = (struct verify_header *) io_u->buf;
	int ret;

	if (hdr->fio_magic != FIO_HDR_MAGIC)
		return 1;

	if (hdr->verify_type == VERIFY_MD5)
		ret = verify_io_u_md5(hdr, io_u);
	else if (hdr->verify_type == VERIFY_CRC32)
		ret = verify_io_u_crc32(hdr, io_u);
	else {
		fprintf(stderr, "Bad verify type %d\n", hdr->verify_type);
		ret = 1;
	}

	return ret;
}
static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
{
	hdr->crc32 = crc32(p, len);
}

static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
{
	struct md5_ctx md5_ctx;

	memset(&md5_ctx, 0, sizeof(md5_ctx));
	md5_update(&md5_ctx, p, len);
	memcpy(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
}

unsigned int hweight32(unsigned int w)
{
	unsigned int res = w - ((w >> 1) & 0x55555555);

	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
	res = (res + (res >> 4)) & 0x0F0F0F0F;
	res = res + (res >> 8);

	return (res + (res >> 16)) & 0x000000FF;
}

unsigned long hweight64(unsigned long long w)
{
#if __WORDSIZE == 32
	return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
#elif __WORDSIZE == 64
	unsigned long long v = w - ((w >> 1) & 0x5555555555555555ul);

	v = (v & 0x3333333333333333ul) + ((v >> 2) & 0x3333333333333333ul);
	v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
	v = v + (v >> 8);
	v = v + (v >> 16);

	return (v + (v >> 32)) & 0x00000000000000FFul;
#else
#error __WORDSIZE not defined
#endif
}
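
/*
 * Both routines are the classic SWAR population count: each step sums
 * neighbouring bit counts into progressively wider fields (2, 4, 8, ...
 * bits wide). A naive per-bit loop gives the same answer and makes a
 * handy cross-check:
 */
static unsigned int hweight32_slow(unsigned int w)
{
	unsigned int bits = 0;

	while (w) {
		bits += w & 1;		/* count the low bit */
		w >>= 1;
	}
	return bits;			/* e.g. hweight32_slow(0xf0) == 4 */
}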
static int get_rw_ddir(struct thread_data *td)
{
	/*
	 * perhaps cheesy, but use the hamming weight of the position
	 * as a randomizer for data direction.
	 */
	if (td_rw(td))
		return hweight64(td->last_pos) & 1;
	else if (td_read(td))
		return DDIR_READ;
	else
		return DDIR_WRITE;
}

/*
 * fill body of io_u->buf with random data and add a header with the
 * checksum (md5 or crc32) of that data.
 */
static void populate_io_u(struct thread_data *td, struct io_u *io_u)
{
	unsigned char *p = (unsigned char *) io_u->buf;
	struct verify_header hdr;

	hdr.fio_magic = FIO_HDR_MAGIC;
	hdr.len = io_u->buflen;
	p += sizeof(hdr);
	fill_random_bytes(td, p, io_u->buflen - sizeof(hdr));

	if (td->verify == VERIFY_MD5) {
		fill_md5(&hdr, p, io_u->buflen - sizeof(hdr));
		hdr.verify_type = VERIFY_MD5;
	} else {
		fill_crc32(&hdr, p, io_u->buflen - sizeof(hdr));
		hdr.verify_type = VERIFY_CRC32;
	}

	memcpy(io_u->buf, &hdr, sizeof(hdr));
}
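
/*
 * A verified block therefore ends up laid out as a small header followed
 * by random payload:
 *
 *	[ verify_header ][ io_u->buflen - sizeof(hdr) random bytes ]
 *
 * The stored checksum covers only the payload, so the read-side verifiers
 * skip sizeof(hdr) bytes, rehash the payload, and compare the result
 * against hdr->crc32 or hdr->md5_digest.
 */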
static int td_io_prep(struct thread_data *td, struct io_u *io_u, int read)
{
	if (read)
		io_u->ddir = DDIR_READ;
	else
		io_u->ddir = DDIR_WRITE;

	if (td->io_prep && td->io_prep(td, io_u))
		return 1;

	return 0;
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_freelist);
	td->cur_depth--;
}

#define queue_full(td)	(list_empty(&(td)->io_u_freelist))

struct io_u *__get_io_u(struct thread_data *td)
{
	struct io_u *io_u;

	if (queue_full(td))
		return NULL;

	io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_busylist);
	td->cur_depth++;

	return io_u;
}

static struct io_u *get_io_u(struct thread_data *td)
{
	struct io_u *io_u;

	io_u = __get_io_u(td);
	if (!io_u)
		return NULL;

	if (td->zone_bytes >= td->zone_size) {
		td->zone_bytes = 0;
		td->last_pos += td->zone_skip;
	}

	if (get_next_offset(td, &io_u->offset)) {
		put_io_u(td, io_u);
		return NULL;
	}

	io_u->buflen = get_next_buflen(td);
	if (!io_u->buflen) {
		put_io_u(td, io_u);
		return NULL;
	}

	if (io_u->buflen + io_u->offset > td->real_file_size)
		io_u->buflen = td->real_file_size - io_u->offset;

	if (!td->sequential)
		mark_random_map(td, io_u);

	td->last_pos += io_u->buflen;

	if (td->verify != VERIFY_NONE)
		populate_io_u(td, io_u);

	if (td_io_prep(td, io_u, get_rw_ddir(td))) {
		put_io_u(td, io_u);
		return NULL;
	}

	gettimeofday(&io_u->start_time, NULL);
	return io_u;
}

static inline void td_set_runstate(struct thread_data *td, int runstate)
{
	td->old_runstate = td->runstate;
	td->runstate = runstate;
}

static int get_next_verify(struct thread_data *td,
			   unsigned long long *offset, unsigned int *len)
{
	struct io_piece *ipo;

	if (list_empty(&td->io_hist_list))
		return 1;

	ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
	list_del(&ipo->list);

	*offset = ipo->offset;
	*len = ipo->len;
	free(ipo);
	return 0;
}

static void prune_io_piece_log(struct thread_data *td)
{
	struct io_piece *ipo;

	while (!list_empty(&td->io_hist_list)) {
		ipo = list_entry(td->io_hist_list.next, struct io_piece, list);

		list_del(&ipo->list);
		free(ipo);
	}
}

/*
 * log a successful write, so we can unwind the log for verify
 */
static void log_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo = malloc(sizeof(struct io_piece));
	struct list_head *entry;

	INIT_LIST_HEAD(&ipo->list);
	ipo->offset = io_u->offset;
	ipo->len = io_u->buflen;

	/*
	 * for random io where the writes extend the file, it will typically
	 * be laid out with the blocks scattered as written. it's faster to
	 * read them back in that order again, so don't sort.
	 */
	if (td->sequential || !td->overwrite) {
		list_add_tail(&ipo->list, &td->io_hist_list);
		return;
	}

	/*
	 * for random io, sort the list so verify will run faster
	 */
	entry = &td->io_hist_list;
	while ((entry = entry->prev) != &td->io_hist_list) {
		struct io_piece *__ipo = list_entry(entry, struct io_piece, list);

		if (__ipo->offset < ipo->offset)
			break;
	}

	list_add(&ipo->list, entry);
}
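
/*
 * The backwards walk above is a simple insertion sort: it finds the last
 * logged piece with a smaller offset and links the new piece right after
 * it. Writes that already complete in ascending offset order therefore
 * insert in O(1), since the scan stops at the list tail immediately.
 */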
static int sync_td(struct thread_data *td)
{
	if (td->io_sync)
		return td->io_sync(td);

	return 0;
}

static int io_u_getevents(struct thread_data *td, int min, int max,
			  struct timespec *t)
{
	return td->io_getevents(td, min, max, t);
}

static int io_u_queue(struct thread_data *td, struct io_u *io_u)
{
	gettimeofday(&io_u->issue_time, NULL);

	return td->io_queue(td, io_u);
}

#define iocb_time(iocb)	((unsigned long) (iocb)->data)

static void io_completed(struct thread_data *td, struct io_u *io_u,
			 struct io_completion_data *icd)
{
	struct timeval e;
	unsigned long msec;

	gettimeofday(&e, NULL);

	if (!io_u->error) {
		unsigned int bytes = io_u->buflen - io_u->resid;
		const int idx = io_u->ddir;

		td->io_blocks[idx]++;
		td->io_bytes[idx] += bytes;
		td->zone_bytes += bytes;
		td->this_io_bytes[idx] += bytes;

		msec = mtime_since(&io_u->issue_time, &e);

		add_clat_sample(td, idx, msec);
		add_bw_sample(td, idx);

		if ((td_rw(td) || td_write(td)) && idx == DDIR_WRITE)
			log_io_piece(td, io_u);

		icd->bytes_done[idx] += bytes;
	} else
		icd->error = io_u->error;
}

static void ios_completed(struct thread_data *td, struct io_completion_data *icd)
{
	struct io_u *io_u;
	int i;

	icd->error = 0;
	icd->bytes_done[0] = icd->bytes_done[1] = 0;

	for (i = 0; i < icd->nr; i++) {
		io_u = td->io_event(td, i);

		io_completed(td, io_u, icd);
		put_io_u(td, io_u);
	}
}
static void cleanup_pending_aio(struct thread_data *td)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
	struct list_head *entry, *n;
	struct io_completion_data icd;
	struct io_u *io_u;
	int r;

	/*
	 * get immediately available events, if any
	 */
	r = io_u_getevents(td, 0, td->cur_depth, &ts);
	if (r > 0) {
		icd.nr = r;
		ios_completed(td, &icd);
	}

	/*
	 * now cancel remaining active events
	 */
	list_for_each_safe(entry, n, &td->io_u_busylist) {
		io_u = list_entry(entry, struct io_u, list);

		r = td->io_cancel(td, io_u);
		if (!r)
			put_io_u(td, io_u);
	}

	if (td->cur_depth) {
		r = io_u_getevents(td, td->cur_depth, td->cur_depth, NULL);
		if (r > 0) {
			icd.nr = r;
			ios_completed(td, &icd);
		}
	}
}

static int do_io_u_verify(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *v_io_u = *io_u;
	int ret = 0;

	if (v_io_u) {
		ret = verify_io_u(v_io_u);
		put_io_u(td, v_io_u);
		*io_u = NULL;
	}

	return ret;
}

static void do_verify(struct thread_data *td)
{
	struct timeval t;
	struct io_u *io_u, *v_io_u = NULL;
	struct io_completion_data icd;
	int ret;

	td_set_runstate(td, TD_VERIFYING);

	do {
		gettimeofday(&t, NULL);
		if (runtime_exceeded(td, &t))
			break;

		io_u = __get_io_u(td);
		if (!io_u)
			break;

		if (get_next_verify(td, &io_u->offset, &io_u->buflen)) {
			put_io_u(td, io_u);
			break;
		}

		if (td_io_prep(td, io_u, 1)) {
			put_io_u(td, io_u);
			break;
		}

		ret = io_u_queue(td, io_u);
		if (ret) {
			td_verror(td, ret);
			put_io_u(td, io_u);
			break;
		}

		/*
		 * we have one pending to verify, do that while
		 * we are doing io on the next one
		 */
		if (do_io_u_verify(td, &v_io_u))
			break;

		ret = io_u_getevents(td, 1, 1, NULL);
		if (ret != 1) {
			if (ret < 0)
				td_verror(td, ret);
			break;
		}

		v_io_u = td->io_event(td, 0);
		icd.nr = 1;
		icd.error = 0;
		io_completed(td, v_io_u, &icd);

		if (icd.error) {
			td_verror(td, icd.error);
			put_io_u(td, v_io_u);
			v_io_u = NULL;
			break;
		}

		/*
		 * if we can't submit more io, we need to verify now
		 */
		if (queue_full(td) && do_io_u_verify(td, &v_io_u))
			break;

	} while (!td->terminate);

	do_io_u_verify(td, &v_io_u);

	if (td->cur_depth)
		cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);
}
static void do_io(struct thread_data *td)
{
	struct io_completion_data icd;
	struct timeval s, e;
	unsigned long usec;

	while (td->this_io_bytes[td->ddir] < td->io_size) {
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
		struct timespec *timeout;
		int ret, min_evts = 0;
		struct io_u *io_u;

		if (td->terminate)
			break;

		io_u = get_io_u(td);
		if (!io_u)
			break;

		memcpy(&s, &io_u->start_time, sizeof(s));

		ret = io_u_queue(td, io_u);
		if (ret) {
			put_io_u(td, io_u);
			td_verror(td, ret);
			break;
		}

		add_slat_sample(td, io_u->ddir, mtime_since(&io_u->start_time, &io_u->issue_time));

		if (td->cur_depth < td->iodepth) {
			timeout = &ts;
			min_evts = 0;
		} else {
			timeout = NULL;
			min_evts = 1;
		}

		ret = io_u_getevents(td, min_evts, td->cur_depth, timeout);
		if (ret < 0)
			break;
		else if (!ret)
			continue;

		icd.nr = ret;
		ios_completed(td, &icd);
		if (icd.error) {
			td_verror(td, icd.error);
			break;
		}

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one which may look
		 * a little worse.
		 */
		gettimeofday(&e, NULL);
		usec = utime_since(&s, &e);

		rate_throttle(td, usec, icd.bytes_done[td->ddir]);

		if (check_min_rate(td, &e)) {
			td_verror(td, ENOMEM);
			break;
		}

		if (runtime_exceeded(td, &e))
			break;

		if (td->thinktime)
			usec_sleep(td, td->thinktime);

		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks[DDIR_WRITE] % td->fsync_blocks) == 0)
			sync_td(td);
	}

	cleanup_pending_aio(td);

	if (should_fsync(td) && td->fsync_blocks)
		sync_td(td);
}
static void cleanup_io(struct thread_data *td)
{
	if (td->io_cleanup)
		td->io_cleanup(td);
}

static int init_io(struct thread_data *td)
{
	if (td->io_engine == FIO_SYNCIO)
		return fio_syncio_init(td);
	else if (td->io_engine == FIO_MMAPIO)
		return fio_mmapio_init(td);
	else if (td->io_engine == FIO_LIBAIO)
		return fio_libaio_init(td);
	else if (td->io_engine == FIO_POSIXAIO)
		return fio_posixaio_init(td);
	else if (td->io_engine == FIO_SGIO)
		return fio_sgio_init(td);
	else {
		fprintf(stderr, "bad io_engine %d\n", td->io_engine);
		return 1;
	}
}
static void cleanup_io_u(struct thread_data *td)
{
	struct list_head *entry, *n;
	struct io_u *io_u;

	list_for_each_safe(entry, n, &td->io_u_freelist) {
		io_u = list_entry(entry, struct io_u, list);

		list_del(&io_u->list);
		free(io_u);
	}

	if (td->mem_type == MEM_MALLOC)
		free(td->orig_buffer);
	else if (td->mem_type == MEM_SHM) {
		struct shmid_ds sbuf;

		shmdt(td->orig_buffer);
		shmctl(td->shm_id, IPC_RMID, &sbuf);
	} else if (td->mem_type == MEM_MMAP)
		munmap(td->orig_buffer, td->orig_buffer_size);
	else
		fprintf(stderr, "Bad memory type %d\n", td->mem_type);

	td->orig_buffer = NULL;
}

static int init_io_u(struct thread_data *td)
{
	struct io_u *io_u;
	int i, max_units;
	char *p;

	if (td->io_engine & FIO_SYNCIO)
		max_units = 1;
	else
		max_units = td->iodepth;

	td->orig_buffer_size = td->max_bs * max_units + MASK;

	if (td->mem_type == MEM_MALLOC)
		td->orig_buffer = malloc(td->orig_buffer_size);
	else if (td->mem_type == MEM_SHM) {
		td->shm_id = shmget(IPC_PRIVATE, td->orig_buffer_size, IPC_CREAT | 0600);
		if (td->shm_id < 0) {
			td_verror(td, errno);
			perror("shmget");
			return 1;
		}

		td->orig_buffer = shmat(td->shm_id, NULL, 0);
		if (td->orig_buffer == (void *) -1) {
			td_verror(td, errno);
			perror("shmat");
			td->orig_buffer = NULL;
			return 1;
		}
	} else if (td->mem_type == MEM_MMAP) {
		td->orig_buffer = mmap(NULL, td->orig_buffer_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | OS_MAP_ANON, 0, 0);
		if (td->orig_buffer == MAP_FAILED) {
			td_verror(td, errno);
			perror("mmap");
			td->orig_buffer = NULL;
			return 1;
		}
	}

	INIT_LIST_HEAD(&td->io_u_freelist);
	INIT_LIST_HEAD(&td->io_u_busylist);
	INIT_LIST_HEAD(&td->io_hist_list);

	p = ALIGN(td->orig_buffer);
	for (i = 0; i < max_units; i++) {
		io_u = malloc(sizeof(*io_u));
		memset(io_u, 0, sizeof(*io_u));
		INIT_LIST_HEAD(&io_u->list);

		io_u->buf = p + td->max_bs * i;
		list_add(&io_u->list, &td->io_u_freelist);
	}

	return 0;
}
static int create_file(struct thread_data *td, unsigned long long size,
		       int extend)
{
	unsigned long long left;
	unsigned int bs;
	char *b;
	int r, oflags;

	/*
	 * unless specifically asked for overwrite, let normal io extend it
	 */
	if (td_write(td) && !td->overwrite)
		return 0;

	if (!size) {
		fprintf(stderr, "Need size for create\n");
		td_verror(td, EINVAL);
		return 1;
	}

	if (!extend) {
		oflags = O_CREAT | O_TRUNC;
		printf("Client%d: Laying out IO file (%lluMiB)\n", td->thread_number, size >> 20);
	} else {
		oflags = O_APPEND;
		printf("Client%d: Extending IO file (%llu -> %lluMiB)\n", td->thread_number, (td->file_size - size) >> 20, td->file_size >> 20);
	}

	td->fd = open(td->file_name, O_WRONLY | oflags, 0644);
	if (td->fd < 0) {
		td_verror(td, errno);
		return 1;
	}

	if (!extend && ftruncate(td->fd, td->file_size) == -1) {
		td_verror(td, errno);
		return 1;
	}

	td->io_size = td->file_size;
	b = malloc(td->max_bs);
	memset(b, 0, td->max_bs);

	left = size;
	while (left && !td->terminate) {
		bs = td->max_bs;
		if (bs > left)
			bs = left;

		r = write(td->fd, b, bs);

		if (r == (int) bs) {
			left -= bs;
			continue;
		} else {
			if (r < 0)
				td_verror(td, errno);
			else
				td_verror(td, EIO);

			break;
		}
	}

	if (td->terminate)
		unlink(td->file_name);
	else if (td->create_fsync)
		fsync(td->fd);

	close(td->fd);
	td->fd = -1;
	free(b);
	return 0;
}

static int file_size(struct thread_data *td)
{
	struct stat st;

	if (fstat(td->fd, &st) == -1) {
		td_verror(td, errno);
		return 1;
	}

	td->real_file_size = st.st_size;

	if (!td->file_size || td->file_size > td->real_file_size)
		td->file_size = td->real_file_size;

	td->file_size -= td->file_offset;
	return 0;
}
static int bdev_size(struct thread_data *td)
{
	unsigned long long bytes;
	int r;

	r = blockdev_size(td->fd, &bytes);
	if (r) {
		td_verror(td, r);
		return 1;
	}

	td->real_file_size = bytes;

	/*
	 * no extend possibilities, so limit size to device size if too large
	 */
	if (!td->file_size || td->file_size > td->real_file_size)
		td->file_size = td->real_file_size;

	td->file_size -= td->file_offset;
	return 0;
}

static int get_file_size(struct thread_data *td)
{
	int ret = 0;

	if (td->filetype == FIO_TYPE_FILE)
		ret = file_size(td);
	else if (td->filetype == FIO_TYPE_BD)
		ret = bdev_size(td);
	else
		td->real_file_size = -1;

	if (ret)
		return ret;

	if (td->file_offset > td->real_file_size) {
		fprintf(stderr, "Client%d: offset extends end (%llu > %llu)\n", td->thread_number, td->file_offset, td->real_file_size);
		return 1;
	}

	td->io_size = td->file_size;
	if (td->io_size == 0) {
		fprintf(stderr, "Client%d: no io blocks\n", td->thread_number);
		td_verror(td, EINVAL);
		return 1;
	}

	if (!td->zone_size)
		td->zone_size = td->io_size;

	td->total_io_size = td->io_size * td->loops;
	return 0;
}
static int setup_file_mmap(struct thread_data *td)
{
	int flags;

	if (td_rw(td))
		flags = PROT_READ | PROT_WRITE;
	else if (td_write(td)) {
		flags = PROT_WRITE;

		if (td->verify != VERIFY_NONE)
			flags |= PROT_READ;
	} else
		flags = PROT_READ;

	td->mmap = mmap(NULL, td->file_size, flags, MAP_SHARED, td->fd, td->file_offset);
	if (td->mmap == MAP_FAILED) {
		td->mmap = NULL;
		td_verror(td, errno);
		return 1;
	}

	if (td->invalidate_cache) {
		if (madvise(td->mmap, td->file_size, MADV_DONTNEED) < 0) {
			td_verror(td, errno);
			return 1;
		}
	}

	if (td->sequential) {
		if (madvise(td->mmap, td->file_size, MADV_SEQUENTIAL) < 0) {
			td_verror(td, errno);
			return 1;
		}
	} else {
		if (madvise(td->mmap, td->file_size, MADV_RANDOM) < 0) {
			td_verror(td, errno);
			return 1;
		}
	}

	return 0;
}

static int setup_file_plain(struct thread_data *td)
{
	if (td->invalidate_cache) {
		if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_DONTNEED) < 0) {
			td_verror(td, errno);
			return 1;
		}
	}

	if (td->sequential) {
		if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_SEQUENTIAL) < 0) {
			td_verror(td, errno);
			return 1;
		}
	} else {
		if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_RANDOM) < 0) {
			td_verror(td, errno);
			return 1;
		}
	}

	return 0;
}
static int setup_file(struct thread_data *td)
{
	struct stat st;
	int flags = 0;

	if (stat(td->file_name, &st) == -1) {
		if (errno != ENOENT) {
			td_verror(td, errno);
			return 1;
		}
		if (!td->create_file) {
			td_verror(td, ENOENT);
			return 1;
		}
		if (create_file(td, td->file_size, 0))
			return 1;
	} else if (td->filetype == FIO_TYPE_FILE) {
		if (st.st_size < (off_t) td->file_size) {
			if (create_file(td, td->file_size - st.st_size, 1))
				return 1;
		}
	}

	if (td_write(td) || td_rw(td)) {
		if (td->filetype == FIO_TYPE_FILE) {
			if (!td->overwrite)
				flags |= O_TRUNC;

			flags |= O_CREAT;
		}

		flags |= O_RDWR;

		td->fd = open(td->file_name, flags, 0600);
	} else {
		if (td->filetype == FIO_TYPE_CHAR)
			flags |= O_RDWR;
		else
			flags |= O_RDONLY;

		td->fd = open(td->file_name, flags);
	}

	if (td->fd == -1) {
		td_verror(td, errno);
		return 1;
	}

	if (get_file_size(td))
		return 1;

	if (td->io_engine != FIO_MMAPIO)
		return setup_file_plain(td);

	return setup_file_mmap(td);
}
static int check_dev_match(dev_t dev, char *path)
{
	unsigned int major, minor;
	char line[256], *p;
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		perror("open path");
		return 1;
	}

	p = fgets(line, sizeof(line), f);
	if (!p) {
		fclose(f);
		return 1;
	}

	if (sscanf(p, "%u:%u", &major, &minor) != 2) {
		fclose(f);
		return 1;
	}

	if (((major << 8) | minor) == dev) {
		fclose(f);
		return 0;
	}

	fclose(f);
	return 1;
}

static int find_block_dir(dev_t dev, char *path)
{
	struct dirent *dir;
	struct stat st;
	int found = 0;
	DIR *D;

	D = opendir(path);
	if (!D)
		return 0;

	while ((dir = readdir(D)) != NULL) {
		char full_path[256];

		if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
			continue;
		if (!strcmp(dir->d_name, "device"))
			continue;

		sprintf(full_path, "%s/%s", path, dir->d_name);

		if (!strcmp(dir->d_name, "dev")) {
			if (!check_dev_match(dev, full_path)) {
				found = 1;
				break;
			}
		}

		if (stat(full_path, &st) == -1) {
			perror("stat");
			break;
		}

		if (!S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode))
			continue;

		found = find_block_dir(dev, full_path);
		if (found) {
			strcpy(path, full_path);
			break;
		}
	}

	closedir(D);
	return found;
}

static int get_io_ticks(struct disk_util *du, struct disk_util_stat *dus)
{
	unsigned int in_flight;
	char line[256];
	FILE *f;
	char *p;

	f = fopen(du->path, "r");
	if (!f)
		return 1;

	p = fgets(line, sizeof(line), f);
	if (!p) {
		fclose(f);
		return 1;
	}

	if (sscanf(p, "%u %u %llu %u %u %u %llu %u %u %u %u\n", &dus->ios[0], &dus->merges[0], &dus->sectors[0], &dus->ticks[0], &dus->ios[1], &dus->merges[1], &dus->sectors[1], &dus->ticks[1], &in_flight, &dus->io_ticks, &dus->time_in_queue) != 11) {
		fclose(f);
		return 1;
	}

	fclose(f);
	return 0;
}
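
/*
 * The 11 fields scanned above follow the kernel's per-device stat file
 * (Documentation/block/stat.txt): read I/Os, read merges, sectors read,
 * read ticks (msec), write I/Os, write merges, sectors written, write
 * ticks (msec), I/Os currently in flight, io_ticks (msec the device was
 * busy), and time_in_queue (msec weighted by outstanding I/Os).
 */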
static void update_io_tick_disk(struct disk_util *du)
{
	struct disk_util_stat __dus, *dus, *ldus;
	struct timeval t;

	if (get_io_ticks(du, &__dus))
		return;

	dus = &du->dus;
	ldus = &du->last_dus;

	dus->sectors[0] += (__dus.sectors[0] - ldus->sectors[0]);
	dus->sectors[1] += (__dus.sectors[1] - ldus->sectors[1]);
	dus->ios[0] += (__dus.ios[0] - ldus->ios[0]);
	dus->ios[1] += (__dus.ios[1] - ldus->ios[1]);
	dus->merges[0] += (__dus.merges[0] - ldus->merges[0]);
	dus->merges[1] += (__dus.merges[1] - ldus->merges[1]);
	dus->ticks[0] += (__dus.ticks[0] - ldus->ticks[0]);
	dus->ticks[1] += (__dus.ticks[1] - ldus->ticks[1]);
	dus->io_ticks += (__dus.io_ticks - ldus->io_ticks);
	dus->time_in_queue += (__dus.time_in_queue - ldus->time_in_queue);

	gettimeofday(&t, NULL);
	du->msec += mtime_since(&du->time, &t);
	memcpy(&du->time, &t, sizeof(t));
	memcpy(ldus, &__dus, sizeof(__dus));
}

static void update_io_ticks(void)
{
	struct list_head *entry;
	struct disk_util *du;

	list_for_each(entry, &disk_list) {
		du = list_entry(entry, struct disk_util, list);
		update_io_tick_disk(du);
	}
}

static int disk_util_exists(dev_t dev)
{
	struct list_head *entry;
	struct disk_util *du;

	list_for_each(entry, &disk_list) {
		du = list_entry(entry, struct disk_util, list);

		if (du->dev == dev)
			return 1;
	}

	return 0;
}

static void disk_util_add(dev_t dev, char *path)
{
	struct disk_util *du = malloc(sizeof(*du));

	memset(du, 0, sizeof(*du));
	INIT_LIST_HEAD(&du->list);
	sprintf(du->path, "%s/stat", path);
	du->name = strdup(basename(path));
	du->dev = dev;

	gettimeofday(&du->time, NULL);
	get_io_ticks(du, &du->last_dus);

	list_add_tail(&du->list, &disk_list);
}
static void init_disk_util(struct thread_data *td)
{
	struct stat st;
	char foo[256], tmp[256];
	dev_t dev;
	char *p;

	if (!td->do_disk_util)
		return;

	if (!stat(td->file_name, &st)) {
		if (S_ISBLK(st.st_mode))
			dev = st.st_rdev;
		else
			dev = st.st_dev;
	} else {
		/*
		 * must be a file, open "." in that path
		 */
		strcpy(foo, td->file_name);
		p = dirname(foo);
		if (stat(p, &st)) {
			perror("disk util stat");
			return;
		}

		dev = st.st_dev;
	}

	if (disk_util_exists(dev))
		return;

	sprintf(foo, "/sys/block");
	if (!find_block_dir(dev, foo))
		return;

	/*
	 * for md/dm, there's no queue dir. we already have the right place
	 */
	sprintf(tmp, "%s/stat", foo);
	if (stat(tmp, &st)) {
		/*
		 * if this is inside a partition dir, jump back to parent
		 */
		sprintf(tmp, "%s/queue", foo);
		if (stat(tmp, &st)) {
			p = dirname(foo);
			sprintf(tmp, "%s/queue", p);
			if (stat(tmp, &st)) {
				fprintf(stderr, "unknown sysfs layout\n");
				return;
			}
			sprintf(foo, "%s", p);
		}
	}

	disk_util_add(dev, foo);
}

static void disk_util_timer_arm(void)
{
	itimer.it_value.tv_sec = 0;
	itimer.it_value.tv_usec = DISK_UTIL_MSEC * 1000;
	setitimer(ITIMER_REAL, &itimer, NULL);
}
static void clear_io_state(struct thread_data *td)
{
	if (td->io_engine == FIO_SYNCIO)
		lseek(td->fd, 0, SEEK_SET);

	td->last_pos = 0;
	td->stat_io_bytes[0] = td->stat_io_bytes[1] = 0;
	td->this_io_bytes[0] = td->this_io_bytes[1] = 0;

	if (td->file_map)
		memset(td->file_map, 0, td->num_maps * sizeof(long));
}

static void update_rusage_stat(struct thread_data *td)
{
	if (!(td->runtime[0] + td->runtime[1]))
		return;

	getrusage(RUSAGE_SELF, &td->ru_end);

	td->usr_time += mtime_since(&td->ru_start.ru_utime, &td->ru_end.ru_utime);
	td->sys_time += mtime_since(&td->ru_start.ru_stime, &td->ru_end.ru_stime);
	td->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw - (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);

	memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
}
static void *thread_main(void *data)
{
	struct thread_data *td = data;

	if (!td->use_thread)
		setsid();

	td->pid = getpid();

	if (init_io_u(td))
		goto err;

	if (fio_setaffinity(td) == -1) {
		td_verror(td, errno);
		goto err;
	}

	if (init_io(td))
		goto err;

	if (td->ioprio) {
		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
			td_verror(td, errno);
			goto err;
		}
	}

	sem_post(&startup_sem);
	sem_wait(&td->mutex);

	if (!td->create_serialize && setup_file(td))
		goto err;

	if (init_random_state(td))
		goto err;

	gettimeofday(&td->epoch, NULL);

	while (td->loops--) {
		getrusage(RUSAGE_SELF, &td->ru_start);
		gettimeofday(&td->start, NULL);
		memcpy(&td->stat_sample_time, &td->start, sizeof(td->start));

		if (td->ratemin)
			memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));

		clear_io_state(td);
		prune_io_piece_log(td);

		do_io(td);

		td->runtime[td->ddir] += mtime_since_now(&td->start);
		if (td_rw(td))
			td->runtime[td->ddir ^ 1] = td->runtime[td->ddir];

		update_rusage_stat(td);

		if (td->error || td->terminate)
			break;

		if (td->verify == VERIFY_NONE)
			continue;

		clear_io_state(td);
		gettimeofday(&td->start, NULL);

		do_verify(td);

		td->runtime[DDIR_READ] += mtime_since_now(&td->start);

		if (td->error || td->terminate)
			break;
	}

	if (td->bw_log)
		finish_log(td, td->bw_log, "bw");
	if (td->slat_log)
		finish_log(td, td->slat_log, "slat");
	if (td->clat_log)
		finish_log(td, td->clat_log, "clat");

	if (exitall_on_terminate)
		terminate_threads(td->groupid);

err:
	if (td->fd != -1) {
		close(td->fd);
		td->fd = -1;
	}
	if (td->mmap)
		munmap(td->mmap, td->file_size);
	cleanup_io(td);
	cleanup_io_u(td);

	sem_post(&startup_sem);
	sem_wait(&td->mutex);

	td_set_runstate(td, TD_EXITED);
	return NULL;
}

static void *fork_main(int shmid, int offset)
{
	struct thread_data *td;
	void *data;

	data = shmat(shmid, NULL, 0);
	if (data == (void *) -1) {
		perror("shmat");
		return NULL;
	}

	td = data + offset * sizeof(struct thread_data);
	thread_main(td);

	return NULL;
}
static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
		    double *mean, double *dev)
{
	double n;

	if (is->samples == 0)
		return 0;

	*min = is->min_val;
	*max = is->max_val;

	n = (double) is->samples;
	*mean = (double) is->val / n;
	*dev = sqrt(((double) is->val_sq - n * (*mean) * (*mean)) / (n - 1));
	if (!(*min + *max) && !(*mean + *dev))
		return 0;

	return 1;
}
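
/*
 * The deviation uses the usual single-pass shortcut for the sample
 * variance, S^2 = (sum(x^2) - n * mean^2) / (n - 1). For example,
 * samples {2, 4, 6} give val_sq = 56 and mean = 4, so
 * S^2 = (56 - 48) / 2 = 4 and the deviation is 2.
 */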
static void show_ddir_status(struct thread_data *td, struct group_run_stats *rs,
			     int ddir)
{
	char *ddir_str[] = { "read ", "write" };
	unsigned long min, max, bw;
	double mean, dev;

	if (!td->runtime[ddir])
		return;

	bw = td->io_bytes[ddir] / td->runtime[ddir];
	printf("  %s: io=%6lluMiB, bw=%6luKiB/s, runt=%6lumsec\n", ddir_str[ddir], td->io_bytes[ddir] >> 20, bw, td->runtime[ddir]);

	if (calc_lat(&td->slat_stat[ddir], &min, &max, &mean, &dev))
		printf("    slat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);

	if (calc_lat(&td->clat_stat[ddir], &min, &max, &mean, &dev))
		printf("    clat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);

	if (calc_lat(&td->bw_stat[ddir], &min, &max, &mean, &dev)) {
		double p_of_agg;

		p_of_agg = mean * 100 / (double) rs->agg[ddir];
		printf("    bw (KiB/s) : min=%5lu, max=%5lu, per=%3.2f%%, avg=%5.02f, dev=%5.02f\n", min, max, p_of_agg, mean, dev);
	}
}

static void show_thread_status(struct thread_data *td,
			       struct group_run_stats *rs)
{
	double usr_cpu, sys_cpu;

	if (!(td->io_bytes[0] + td->io_bytes[1]) && !td->error)
		return;

	printf("Client%d (groupid=%d): err=%2d:\n", td->thread_number, td->groupid, td->error);

	show_ddir_status(td, rs, td->ddir);
	show_ddir_status(td, rs, td->ddir ^ 1);

	if (td->runtime[0] + td->runtime[1]) {
		double runt = td->runtime[0] + td->runtime[1];

		usr_cpu = (double) td->usr_time * 100 / runt;
		sys_cpu = (double) td->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	printf("  cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, td->ctx);
}
static void check_str_update(struct thread_data *td)
{
	char c = run_str[td->thread_number - 1];

	if (td->runstate == td->old_runstate)
		return;

	switch (td->runstate) {
	case TD_RUNNING:
		if (td_rw(td)) {
			c = 'M';
		} else if (td_read(td)) {
			c = 'R';
		} else
			c = 'W';
		break;
	case TD_NOT_CREATED:
		c = 'P';
		break;
	default:
		printf("state %d\n", td->runstate);
	}

	run_str[td->thread_number - 1] = c;
	td->old_runstate = td->runstate;
}

static void eta_to_str(char *str, int eta_sec)
{
	unsigned int d, h, m, s;
	static int always_d, always_h;

	s = eta_sec % 60;
	eta_sec /= 60;
	m = eta_sec % 60;
	eta_sec /= 60;
	h = eta_sec % 24;
	eta_sec /= 24;
	d = eta_sec;

	if (d || always_d) {
		always_d = 1;
		str += sprintf(str, "%02dd:", d);
	}
	if (h || always_h) {
		always_h = 1;
		str += sprintf(str, "%02dh:", h);
	}

	str += sprintf(str, "%02dm:", m);
	str += sprintf(str, "%02ds", s);
}
static int thread_eta(struct thread_data *td, unsigned long elapsed)
{
	unsigned long long bytes_total, bytes_done;
	unsigned int eta_sec = 0;

	bytes_total = td->total_io_size;

	/*
	 * if writing, bytes_total will be twice the size. If mixing,
	 * assume a 50/50 split and thus bytes_total will be 50% larger.
	 */
	if (td->verify) {
		if (td_rw(td))
			bytes_total = bytes_total * 3 / 2;
		else if (td_write(td))
			bytes_total <<= 1;
	}
	if (td->zone_size && td->zone_skip)
		bytes_total /= (td->zone_skip / td->zone_size);

	if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
		double perc;

		bytes_done = td->io_bytes[DDIR_READ] + td->io_bytes[DDIR_WRITE];
		perc = (double) bytes_done / (double) bytes_total;
		if (perc > 1.0)
			perc = 1.0;

		eta_sec = (elapsed * (1.0 / perc)) - elapsed;

		if (td->timeout && eta_sec > (td->timeout - elapsed))
			eta_sec = td->timeout - elapsed;
	} else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED) {
		int t_eta = 0, r_eta = 0;

		/*
		 * We can only guess - assume it'll run the full timeout
		 * if given, otherwise assume it'll run at the specified rate.
		 */
		if (td->timeout)
			t_eta = td->timeout + td->start_delay - elapsed;
		if (td->rate) {
			r_eta = (bytes_total / 1024) / td->rate;
			r_eta += td->start_delay - elapsed;
		}

		if (r_eta && t_eta)
			eta_sec = min(r_eta, t_eta);
		else if (r_eta)
			eta_sec = r_eta;
		else if (t_eta)
			eta_sec = t_eta;
		else
			eta_sec = INT_MAX;
	} else {
		/*
		 * thread is already done
		 */
		eta_sec = 0;
	}

	return eta_sec;
}
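
/*
 * Worked example of the running estimate in thread_eta(): a job that is
 * 25% done after 60 seconds yields eta = 60 * (1 / 0.25) - 60 = 180
 * seconds remaining, clamped to the configured timeout when one is set.
 */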
static void print_thread_status(void)
{
	unsigned long elapsed = time_since_now(&genesis);
	int i, nr_running, t_rate, m_rate, *eta_secs, eta_sec;
	char eta_str[32];
	double perc = 0.0;

	eta_secs = malloc(thread_number * sizeof(int));
	memset(eta_secs, 0, thread_number * sizeof(int));

	nr_running = t_rate = m_rate = 0;
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
			nr_running++;
			t_rate += td->rate;
			m_rate += td->ratemin;
		}

		if (td->runstate != TD_REAPED)
			eta_secs[i] = thread_eta(td, elapsed);
		else
			eta_secs[i] = INT_MAX;

		check_str_update(td);
	}

	if (exitall_on_terminate)
		eta_sec = INT_MAX;
	else
		eta_sec = 0;

	for (i = 0; i < thread_number; i++) {
		if (exitall_on_terminate) {
			if (eta_secs[i] < eta_sec)
				eta_sec = eta_secs[i];
		} else {
			if (eta_secs[i] > eta_sec)
				eta_sec = eta_secs[i];
		}
	}

	if (eta_sec != INT_MAX && elapsed) {
		perc = (double) elapsed / (double) (elapsed + eta_sec);
		eta_to_str(eta_str, eta_sec);
	}

	printf("Threads now running (%d)", nr_running);
	if (m_rate || t_rate)
		printf(", commitrate %d/%dKiB/sec", t_rate, m_rate);
	if (eta_sec != INT_MAX) {
		perc *= 100.0;
		printf(": [%s] [%3.2f%% done] [eta %s]", run_str, perc, eta_str);
	}

	printf("\r");
	fflush(stdout);
	free(eta_secs);
}
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
{
	int i;

	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		if (td->runstate != TD_EXITED)
			continue;

		td_set_runstate(td, TD_REAPED);

		if (td->use_thread) {
			long ret;

			if (pthread_join(td->thread, (void *) &ret))
				perror("thread_join");
		} else
			waitpid(td->pid, NULL, 0);

		(*nr_running)--;
		(*m_rate) -= td->ratemin;
		(*t_rate) -= td->rate;
	}
}
static void run_threads(void)
{
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate, nr_started;

	printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");
	fflush(stdout);

	signal(SIGINT, sig_handler);
	signal(SIGALRM, sig_handler);

	todo = thread_number;
	nr_running = 0;
	nr_started = 0;
	m_rate = t_rate = 0;

	for (i = 0; i < thread_number; i++) {
		td = &threads[i];

		run_str[td->thread_number - 1] = 'P';

		init_disk_util(td);

		if (!td->create_serialize)
			continue;

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_file(td)) {
			td_set_runstate(td, TD_REAPED);
			todo--;
		}
	}

	gettimeofday(&genesis, NULL);

	while (todo) {
		/*
		 * create threads (TD_NOT_CREATED -> TD_CREATED)
		 */
		for (i = 0; i < thread_number; i++) {
			td = &threads[i];

			if (td->runstate != TD_NOT_CREATED)
				continue;

			/*
			 * never got a chance to start, killed by other
			 * thread for some reason
			 */
			if (td->terminate) {
				todo--;
				continue;
			}

			if (td->start_delay) {
				spent = mtime_since_now(&genesis);

				if (td->start_delay * 1000 > spent)
					continue;
			}

			if (td->stonewall && (nr_started || nr_running))
				break;

			td_set_runstate(td, TD_CREATED);
			sem_init(&startup_sem, 0, 1);
			todo--;
			nr_started++;

			if (td->use_thread) {
				if (pthread_create(&td->thread, NULL, thread_main, td)) {
					perror("thread_create");
					nr_started--;
				}
			} else {
				if (fork())
					sem_wait(&startup_sem);
				else {
					fork_main(shm_id, i);
					exit(0);
				}
			}
		}

		/*
		 * start created threads (TD_CREATED -> TD_RUNNING)
		 */
		for (i = 0; i < thread_number; i++) {
			td = &threads[i];

			if (td->runstate != TD_CREATED)
				continue;

			td_set_runstate(td, TD_RUNNING);
			nr_running++;
			nr_started--;
			m_rate += td->ratemin;
			t_rate += td->rate;
			sem_post(&td->mutex);
		}

		reap_threads(&nr_running, &t_rate, &m_rate);
	}

	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
	}
}
static void show_group_stats(struct group_run_stats *rs, int id)
{
	printf("\nRun status group %d (all jobs):\n", id);

	if (rs->max_run[DDIR_READ])
		printf("   READ: io=%lluMiB, aggrb=%llu, minb=%llu, maxb=%llu, mint=%llumsec, maxt=%llumsec\n", rs->io_mb[0], rs->agg[0], rs->min_bw[0], rs->max_bw[0], rs->min_run[0], rs->max_run[0]);
	if (rs->max_run[DDIR_WRITE])
		printf("  WRITE: io=%lluMiB, aggrb=%llu, minb=%llu, maxb=%llu, mint=%llumsec, maxt=%llumsec\n", rs->io_mb[1], rs->agg[1], rs->min_bw[1], rs->max_bw[1], rs->min_run[1], rs->max_run[1]);
}

static void show_disk_util(void)
{
	struct disk_util_stat *dus;
	struct list_head *entry;
	struct disk_util *du;
	double util;

	printf("\nDisk stats (read/write):\n");

	list_for_each(entry, &disk_list) {
		du = list_entry(entry, struct disk_util, list);
		dus = &du->dus;

		util = (double) 100 * du->dus.io_ticks / (double) du->msec;
		if (util > 100.0)
			util = 100.0;

		printf("  %s: ios=%u/%u, merge=%u/%u, ticks=%u/%u, in_queue=%u, util=%3.2f%%\n", du->name, dus->ios[0], dus->ios[1], dus->merges[0], dus->merges[1], dus->ticks[0], dus->ticks[1], dus->time_in_queue, util);
	}
}
static void show_run_stats(void)
{
	struct group_run_stats *runstats, *rs;
	struct thread_data *td;
	int i;

	runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));

	for (i = 0; i < groupid + 1; i++) {
		rs = &runstats[i];

		memset(rs, 0, sizeof(*rs));
		rs->min_bw[0] = rs->min_run[0] = ~0UL;
		rs->min_bw[1] = rs->min_run[1] = ~0UL;
	}

	for (i = 0; i < thread_number; i++) {
		unsigned long rbw, wbw;

		td = &threads[i];

		if (td->error) {
			printf("Client%d: %s\n", td->thread_number, td->verror);
			continue;
		}

		rs = &runstats[td->groupid];

		if (td->runtime[0] < rs->min_run[0] || !rs->min_run[0])
			rs->min_run[0] = td->runtime[0];
		if (td->runtime[0] > rs->max_run[0])
			rs->max_run[0] = td->runtime[0];
		if (td->runtime[1] < rs->min_run[1] || !rs->min_run[1])
			rs->min_run[1] = td->runtime[1];
		if (td->runtime[1] > rs->max_run[1])
			rs->max_run[1] = td->runtime[1];

		rbw = wbw = 0;
		if (td->runtime[0])
			rbw = td->io_bytes[0] / td->runtime[0];
		if (td->runtime[1])
			wbw = td->io_bytes[1] / td->runtime[1];

		if (rbw < rs->min_bw[0])
			rs->min_bw[0] = rbw;
		if (wbw < rs->min_bw[1])
			rs->min_bw[1] = wbw;
		if (rbw > rs->max_bw[0])
			rs->max_bw[0] = rbw;
		if (wbw > rs->max_bw[1])
			rs->max_bw[1] = wbw;

		rs->io_mb[0] += td->io_bytes[0] >> 20;
		rs->io_mb[1] += td->io_bytes[1] >> 20;
	}

	for (i = 0; i < groupid + 1; i++) {
		rs = &runstats[i];

		if (rs->max_run[0])
			rs->agg[0] = (rs->io_mb[0] * 1024 * 1000) / rs->max_run[0];
		if (rs->max_run[1])
			rs->agg[1] = (rs->io_mb[1] * 1024 * 1000) / rs->max_run[1];
	}
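
	/*
	 * Unit check for the aggregate figures above: io_mb is MiB and
	 * max_run is msec, so MiB * 1024 == KiB and KiB * 1000 / msec ==
	 * KiB/sec, which is the aggrb value show_group_stats() prints.
	 */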
	/*
	 * don't overwrite last signal output
	 */
	printf("\n");

	for (i = 0; i < thread_number; i++) {
		td = &threads[i];
		rs = &runstats[td->groupid];

		show_thread_status(td, rs);
	}

	for (i = 0; i < groupid + 1; i++)
		show_group_stats(&runstats[i], i);

	show_disk_util();
}

int main(int argc, char *argv[])
{
	if (parse_options(argc, argv))
		return 1;

	if (!thread_number) {
		printf("Nothing to do\n");
		return 1;
	}

	disk_util_timer_arm();