/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <sys/types.h>
#include <sys/ioctl.h>

#define ALIGN(buf)	(char *) (((unsigned long) (buf) + MASK) & ~(MASK))

int thread_number = 0;
static char run_str[MAX_JOBS + 1];

static LIST_HEAD(disk_list);
static struct itimerval itimer;

static void update_io_ticks(void);
static void disk_util_timer_arm(void);
static void print_thread_status(void);

#define should_fsync(td)	(td_write(td) && (!(td)->odirect || (td)->override_sync))

static sem_t startup_sem;

#define TERMINATE_ALL		(-1)
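/*
 * ask every thread in the given group (or all of them, for TERMINATE_ALL)
 * to terminate
 */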
static void terminate_threads(int group_id)
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		if (group_id == TERMINATE_ALL || group_id == td->groupid) {

static void sig_handler(int sig)
	disk_util_timer_arm();
	print_thread_status();

	printf("\nfio: terminating on signal\n");
	terminate_threads(TERMINATE_ALL);
static unsigned long utime_since(struct timeval *s, struct timeval *e)
	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {

	sec *= (double) 1000000;

static unsigned long utime_since_now(struct timeval *s)
	gettimeofday(&t, NULL);
	return utime_since(s, &t);

static unsigned long mtime_since(struct timeval *s, struct timeval *e)
	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {

	sec *= (double) 1000;
	usec /= (double) 1000;

static unsigned long mtime_since_now(struct timeval *s)
	gettimeofday(&t, NULL);
	return mtime_since(s, &t);

static inline unsigned long msec_now(struct timeval *s)
	return s->tv_sec * 1000 + s->tv_usec / 1000;
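/*
 * returns non-zero if the given block has not yet been marked as used
 * in the random io map
 */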
static int random_map_free(struct thread_data *td, unsigned long long block)
	unsigned int idx = RAND_MAP_IDX(td, block);
	unsigned int bit = RAND_MAP_BIT(td, block);

	return (td->file_map[idx] & (1UL << bit)) == 0;
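/*
 * scan the file map for the first block that has not been issued yet
 */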
static int get_next_free_block(struct thread_data *td, unsigned long long *b)
	while ((*b) * td->min_bs < td->io_size) {
		if (td->file_map[i] != -1UL) {
			*b += ffz(td->file_map[i]);

		*b += BLOCKS_PER_MAP;
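/*
 * mark the blocks covered by this io_u as used in the random map,
 * trimming buflen if we run into an already used block
 */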
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
	unsigned long block = io_u->offset / td->min_bs;
	unsigned int blocks = 0;

	while (blocks < (io_u->buflen / td->min_bs)) {
		unsigned int idx, bit;

		if (!random_map_free(td, block))

		idx = RAND_MAP_IDX(td, block);
		bit = RAND_MAP_BIT(td, block);

		assert(idx < td->num_maps);

		td->file_map[idx] |= (1UL << bit);

	if ((blocks * td->min_bs) < io_u->buflen)
		io_u->buflen = blocks * td->min_bs;
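/*
 * pick the next io offset: a random block that is still free in the
 * map (falling back to a linear scan of the map), or the next
 * sequential block
 */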
static int get_next_offset(struct thread_data *td, unsigned long long *offset)
	unsigned long long b, rb;

	if (!td->sequential) {
		unsigned long max_blocks = td->io_size / td->min_bs;

			lrand48_r(&td->random_state, &r);
			b = ((max_blocks - 1) * r / (RAND_MAX + 1.0));
			rb = b + (td->file_offset / td->min_bs);
		} while (!random_map_free(td, rb) && loops);

		if (get_next_free_block(td, &b))

	b = td->last_bytes / td->min_bs;

	*offset = (b * td->min_bs) + td->file_offset;
	if (*offset > td->file_size)
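/*
 * pick a buffer length between min_bs and max_bs, rounded to a
 * multiple of min_bs and clamped to the io that is left
 */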
static unsigned int get_next_buflen(struct thread_data *td)
	if (td->min_bs == td->max_bs)

	lrand48_r(&td->bsrange_state, &r);
	buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
	buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);

	if (buflen > td->io_size - td->this_io_bytes[td->ddir])
		buflen = td->io_size - td->this_io_bytes[td->ddir];
static inline void add_stat_sample(struct io_stat *is, unsigned long val)
	if (val > is->max_val)
	if (val < is->min_val)

	is->val_sq += val * val;

static void add_log_sample(struct thread_data *td, struct io_log *iolog,
			   unsigned long val, int ddir)
	if (iolog->nr_samples == iolog->max_samples) {
		int new_size = sizeof(struct io_sample) * iolog->max_samples * 2;

		iolog->log = realloc(iolog->log, new_size);
		iolog->max_samples <<= 1;

	iolog->log[iolog->nr_samples].val = val;
	iolog->log[iolog->nr_samples].time = mtime_since_now(&td->epoch);
	iolog->log[iolog->nr_samples].ddir = ddir;

static void add_clat_sample(struct thread_data *td, int ddir, unsigned long msec)
	add_stat_sample(&td->clat_stat[ddir], msec);

	add_log_sample(td, td->clat_log, msec, ddir);

static void add_slat_sample(struct thread_data *td, int ddir, unsigned long msec)
	add_stat_sample(&td->slat_stat[ddir], msec);

	add_log_sample(td, td->slat_log, msec, ddir);

static void add_bw_sample(struct thread_data *td, int ddir)
	unsigned long spent = mtime_since_now(&td->stat_sample_time[ddir]);

	if (spent < td->bw_avg_time)

	rate = (td->this_io_bytes[ddir] - td->stat_io_bytes[ddir]) / spent;
	add_stat_sample(&td->bw_stat[ddir], rate);

	add_log_sample(td, td->bw_log, rate, ddir);

	gettimeofday(&td->stat_sample_time[ddir], NULL);
	td->stat_io_bytes[ddir] = td->this_io_bytes[ddir];

/*
 * busy looping version for the last few usec
 */
static void __usec_sleep(unsigned int usec)
	struct timeval start;

	gettimeofday(&start, NULL);
	while (utime_since_now(&start) < usec)
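/*
 * sleep with nanosleep(), retrying on the remainder until the full
 * period has passed or the thread is asked to terminate
 */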
static void usec_sleep(struct thread_data *td, unsigned long usec)
	struct timespec req, rem;

	req.tv_sec = usec / 1000000;
	req.tv_nsec = (usec % 1000000) * 1000;

		rem.tv_sec = rem.tv_nsec = 0;
		if (nanosleep(&req, &rem) < 0)

		if ((rem.tv_sec + rem.tv_nsec) == 0)

		req.tv_nsec = rem.tv_nsec;
		req.tv_sec = rem.tv_sec;

		usec = rem.tv_sec * 1000000 + rem.tv_nsec / 1000;
	} while (!td->terminate);
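/*
 * if io completed faster than the requested rate allows, accumulate
 * the surplus and sleep it off once at least 100 msec is pending
 */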
static void rate_throttle(struct thread_data *td, unsigned long time_spent,
	unsigned long usec_cycle;

	usec_cycle = td->rate_usec_cycle * (bytes / td->min_bs);

	if (time_spent < usec_cycle) {
		unsigned long s = usec_cycle - time_spent;

		td->rate_pending_usleep += s;
		if (td->rate_pending_usleep >= 100000) {
			usec_sleep(td, td->rate_pending_usleep);
			td->rate_pending_usleep = 0;

		long overtime = time_spent - usec_cycle;

		td->rate_pending_usleep -= overtime;
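/*
 * check whether the job is keeping up with the requested minimum
 * rate; if not, the whole group is terminated
 */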
static int check_min_rate(struct thread_data *td, struct timeval *now)

	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)

	/*
	 * if rate_bytes is set, we have already taken a baseline sample
	 */
	if (td->rate_bytes) {
		spent = mtime_since(&td->lastrate, now);
		if (spent < td->ratecycle)

		rate = (td->this_io_bytes[ddir] - td->rate_bytes) / spent;
		if (rate < td->ratemin) {
			printf("Client%d: min rate %d not met, got %ldKiB/sec\n", td->thread_number, td->ratemin, rate);
			terminate_threads(td->groupid);

	td->rate_bytes = td->this_io_bytes[ddir];
	memcpy(&td->lastrate, now, sizeof(*now));

static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
	if (mtime_since(&td->epoch, t) >= td->timeout * 1000)

static void fill_random_bytes(struct thread_data *td,
			      unsigned char *p, unsigned int len)
		drand48_r(&td->verify_state, &r);

		/*
		 * lrand48_r seems to be broken and only fills the bottom
		 * 32 bits, even on 64-bit archs with 64-bit longs
		 */
static void hexdump(void *buffer, int len)
	unsigned char *p = buffer;

	for (i = 0; i < len; i++)
		printf("%02x", p[i]);

static int verify_io_u_crc32(struct verify_header *hdr, struct io_u *io_u)
	unsigned char *p = (unsigned char *) io_u->buf;

	c = crc32(p, hdr->len - sizeof(*hdr));
	ret = c != hdr->crc32;

		fprintf(stderr, "crc32: verify failed at %llu/%u\n", io_u->offset, io_u->buflen);
		fprintf(stderr, "crc32: wanted %lx, got %lx\n", hdr->crc32, c);

static int verify_io_u_md5(struct verify_header *hdr, struct io_u *io_u)
	unsigned char *p = (unsigned char *) io_u->buf;
	struct md5_ctx md5_ctx;

	memset(&md5_ctx, 0, sizeof(md5_ctx));

	md5_update(&md5_ctx, p, hdr->len - sizeof(*hdr));

	ret = memcmp(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));

		fprintf(stderr, "md5: verify failed at %llu/%u\n", io_u->offset, io_u->buflen);
		hexdump(hdr->md5_digest, sizeof(hdr->md5_digest));
		hexdump(md5_ctx.hash, sizeof(md5_ctx.hash));
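/*
 * dispatch verification according to the verify type stored in the
 * header that was written along with the data
 */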
static int verify_io_u(struct io_u *io_u)
	struct verify_header *hdr = (struct verify_header *) io_u->buf;

	if (hdr->fio_magic != FIO_HDR_MAGIC)

	if (hdr->verify_type == VERIFY_MD5)
		ret = verify_io_u_md5(hdr, io_u);
	else if (hdr->verify_type == VERIFY_CRC32)
		ret = verify_io_u_crc32(hdr, io_u);

		fprintf(stderr, "Bad verify type %d\n", hdr->verify_type);

static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
	hdr->crc32 = crc32(p, len);

static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
	struct md5_ctx md5_ctx;

	memset(&md5_ctx, 0, sizeof(md5_ctx));
	md5_update(&md5_ctx, p, len);
	memcpy(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));
/*
 * fill the body of io_u->buf with random data and add a header with the
 * checksum (md5 or crc32) of that data.
 */
static void populate_io_u(struct thread_data *td, struct io_u *io_u)
	unsigned char *p = (unsigned char *) io_u->buf;
	struct verify_header hdr;

	hdr.fio_magic = FIO_HDR_MAGIC;
	hdr.len = io_u->buflen;

	fill_random_bytes(td, p, io_u->buflen - sizeof(hdr));

	if (td->verify == VERIFY_MD5) {
		fill_md5(&hdr, p, io_u->buflen - sizeof(hdr));
		hdr.verify_type = VERIFY_MD5;

		fill_crc32(&hdr, p, io_u->buflen - sizeof(hdr));
		hdr.verify_type = VERIFY_CRC32;

	memcpy(io_u->buf, &hdr, sizeof(hdr));

static void put_io_u(struct thread_data *td, struct io_u *io_u)
	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_freelist);

#define queue_full(td)	(list_empty(&(td)->io_u_freelist))

static struct io_u *__get_io_u(struct thread_data *td)
	io_u = list_entry(td->io_u_freelist.next, struct io_u, list);

	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_busylist);

static int td_io_prep(struct thread_data *td, struct io_u *io_u, int read)
		io_u->ddir = DDIR_READ;

		io_u->ddir = DDIR_WRITE;

	if (td->io_prep && td->io_prep(td, io_u))
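/*
 * return a fully set up io_u: offset, buffer length, verify pattern
 * (if enabled) and engine specific preparation
 */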
static struct io_u *get_io_u(struct thread_data *td)
	io_u = __get_io_u(td);

	if (get_next_offset(td, &io_u->offset)) {

	io_u->buflen = get_next_buflen(td);

	if (io_u->buflen + io_u->offset > td->file_size)
		io_u->buflen = td->file_size - io_u->offset;

	mark_random_map(td, io_u);

	td->last_bytes += io_u->buflen;

	if (td->verify != VERIFY_NONE)
		populate_io_u(td, io_u);

	if (td_io_prep(td, io_u, td_read(td))) {

	gettimeofday(&io_u->start_time, NULL);

static inline void td_set_runstate(struct thread_data *td, int runstate)
	td->old_runstate = td->runstate;
	td->runstate = runstate;

static int get_next_verify(struct thread_data *td,
			   unsigned long long *offset, unsigned int *len)
	struct io_piece *ipo;

	if (list_empty(&td->io_hist_list))

	ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
	list_del(&ipo->list);

	*offset = ipo->offset;

static void prune_io_piece_log(struct thread_data *td)
	struct io_piece *ipo;

	while (!list_empty(&td->io_hist_list)) {
		ipo = list_entry(td->io_hist_list.next, struct io_piece, list);

		list_del(&ipo->list);
/*
 * log a successful write, so we can unwind the log for verify
 */
static void log_io_piece(struct thread_data *td, struct io_u *io_u)
	struct io_piece *ipo = malloc(sizeof(struct io_piece));
	struct list_head *entry;

	INIT_LIST_HEAD(&ipo->list);
	ipo->offset = io_u->offset;
	ipo->len = io_u->buflen;

	/*
	 * for random io where the writes extend the file, it will typically
	 * be laid out with the blocks scattered as written. it's faster to
	 * read them back in that order again, so don't sort
	 */
	if (td->sequential || !td->overwrite) {
		list_add_tail(&ipo->list, &td->io_hist_list);

	/*
	 * for random io, sort the list so verify will run faster
	 */
	entry = &td->io_hist_list;
	while ((entry = entry->prev) != &td->io_hist_list) {
		struct io_piece *__ipo = list_entry(entry, struct io_piece, list);

		if (__ipo->offset < ipo->offset)

	list_add(&ipo->list, entry);
static int sync_td(struct thread_data *td)
		return td->io_sync(td);

static int io_u_getevents(struct thread_data *td, int min, int max,
	return td->io_getevents(td, min, max, t);

static int io_u_queue(struct thread_data *td, struct io_u *io_u)
	gettimeofday(&io_u->issue_time, NULL);

	return td->io_queue(td, io_u);

#define iocb_time(iocb)	((unsigned long) (iocb)->data)

static void io_completed(struct thread_data *td, struct io_u *io_u,
			 struct io_completion_data *icd)
	gettimeofday(&e, NULL);

	int idx = io_u->ddir;

	td->io_blocks[idx]++;
	td->io_bytes[idx] += (io_u->buflen - io_u->resid);
	td->this_io_bytes[idx] += (io_u->buflen - io_u->resid);

	msec = mtime_since(&io_u->issue_time, &e);

	add_clat_sample(td, io_u->ddir, msec);
	add_bw_sample(td, io_u->ddir);

	if (td_write(td) && io_u->ddir == DDIR_WRITE)
		log_io_piece(td, io_u);

	icd->bytes_done[idx] += (io_u->buflen - io_u->resid);

	icd->error = io_u->error;

static void ios_completed(struct thread_data *td, struct io_completion_data *icd)
	icd->bytes_done[0] = icd->bytes_done[1] = 0;

	for (i = 0; i < icd->nr; i++) {
		io_u = td->io_event(td, i);

		io_completed(td, io_u, icd);

static void cleanup_pending_aio(struct thread_data *td)
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
	struct list_head *entry, *n;
	struct io_completion_data icd;

	/*
	 * get immediately available events, if any
	 */
	r = io_u_getevents(td, 0, td->cur_depth, &ts);

	ios_completed(td, &icd);

	/*
	 * now cancel remaining active events
	 */
	list_for_each_safe(entry, n, &td->io_u_busylist) {
		io_u = list_entry(entry, struct io_u, list);

		r = td->io_cancel(td, io_u);

	r = io_u_getevents(td, td->cur_depth, td->cur_depth, NULL);

	ios_completed(td, &icd);

static int do_io_u_verify(struct thread_data *td, struct io_u **io_u)
	struct io_u *v_io_u = *io_u;

	ret = verify_io_u(v_io_u);
	put_io_u(td, v_io_u);
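/*
 * verify phase: read back the blocks recorded in the io history and
 * check their contents, overlapping verification of the previous io_u
 * with submission of the next one
 */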
static void do_verify(struct thread_data *td)
	struct io_u *io_u, *v_io_u = NULL;
	struct io_completion_data icd;

	td_set_runstate(td, TD_VERIFYING);

		gettimeofday(&t, NULL);
		if (runtime_exceeded(td, &t))

		io_u = __get_io_u(td);

		if (get_next_verify(td, &io_u->offset, &io_u->buflen)) {

		if (td_io_prep(td, io_u, 1)) {

		ret = io_u_queue(td, io_u);

		/*
		 * we have one pending to verify, do that while
		 * we are doing io on the next one
		 */
		if (do_io_u_verify(td, &v_io_u))

		ret = io_u_getevents(td, 1, 1, NULL);

		v_io_u = td->io_event(td, 0);

		io_completed(td, v_io_u, &icd);

			td_verror(td, icd.error);
			put_io_u(td, v_io_u);

		/*
		 * if we can't submit more io, we need to verify now
		 */
		if (queue_full(td) && do_io_u_verify(td, &v_io_u))

	do_io_u_verify(td, &v_io_u);

	cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);
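/*
 * main io loop: issue io until the requested amount has been
 * transferred, reaping completions and applying rate limits on the way
 */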
static void do_io(struct thread_data *td)
	struct io_completion_data icd;

	while (td->this_io_bytes[td->ddir] < td->io_size) {
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
		struct timespec *timeout;
		int ret, min_evts = 0;

		memcpy(&s, &io_u->start_time, sizeof(s));

		ret = io_u_queue(td, io_u);

		add_slat_sample(td, io_u->ddir, mtime_since(&io_u->start_time, &io_u->issue_time));

		if (td->cur_depth < td->iodepth) {

		ret = io_u_getevents(td, min_evts, td->cur_depth, timeout);

		ios_completed(td, &icd);

			td_verror(td, icd.error);

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one which may look
		 */
		gettimeofday(&e, NULL);
		usec = utime_since(&s, &e);

		rate_throttle(td, usec, icd.bytes_done[td->ddir]);

		if (check_min_rate(td, &e)) {
			td_verror(td, ENOMEM);

		if (runtime_exceeded(td, &e))

		usec_sleep(td, td->thinktime);

		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks[DDIR_WRITE] % td->fsync_blocks) == 0)

	cleanup_pending_aio(td);

	if (should_fsync(td))
static void cleanup_io(struct thread_data *td)

static int init_io(struct thread_data *td)
	if (td->io_engine == FIO_SYNCIO)
		return fio_syncio_init(td);
	else if (td->io_engine == FIO_MMAPIO)
		return fio_mmapio_init(td);
	else if (td->io_engine == FIO_LIBAIO)
		return fio_libaio_init(td);
	else if (td->io_engine == FIO_POSIXAIO)
		return fio_posixaio_init(td);
	else if (td->io_engine == FIO_SGIO)
		return fio_sgio_init(td);

	fprintf(stderr, "bad io_engine %d\n", td->io_engine);

static void cleanup_io_u(struct thread_data *td)
	struct list_head *entry, *n;

	list_for_each_safe(entry, n, &td->io_u_freelist) {
		io_u = list_entry(entry, struct io_u, list);

		list_del(&io_u->list);

	if (td->mem_type == MEM_MALLOC)
		free(td->orig_buffer);
	else if (td->mem_type == MEM_SHM) {
		struct shmid_ds sbuf;

		shmdt(td->orig_buffer);
		shmctl(td->shm_id, IPC_RMID, &sbuf);
	} else if (td->mem_type == MEM_MMAP)
		munmap(td->orig_buffer, td->orig_buffer_size);

		fprintf(stderr, "Bad memory type %d\n", td->mem_type);

	td->orig_buffer = NULL;
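/*
 * allocate the io units and the data buffer (malloc, shm or mmap
 * backed, depending on mem_type) and carve the buffer up between them
 */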
static int init_io_u(struct thread_data *td)
	if (td->io_engine & FIO_SYNCIO)

	max_units = td->iodepth;

	td->orig_buffer_size = td->max_bs * max_units + MASK;

	if (td->mem_type == MEM_MALLOC)
		td->orig_buffer = malloc(td->orig_buffer_size);
	else if (td->mem_type == MEM_SHM) {
		td->shm_id = shmget(IPC_PRIVATE, td->orig_buffer_size, IPC_CREAT | 0600);
		if (td->shm_id < 0) {
			td_verror(td, errno);

		td->orig_buffer = shmat(td->shm_id, NULL, 0);
		if (td->orig_buffer == (void *) -1) {
			td_verror(td, errno);

			td->orig_buffer = NULL;

	} else if (td->mem_type == MEM_MMAP) {
		td->orig_buffer = mmap(NULL, td->orig_buffer_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | OS_MAP_ANON, 0, 0);
		if (td->orig_buffer == MAP_FAILED) {
			td_verror(td, errno);

			td->orig_buffer = NULL;

	INIT_LIST_HEAD(&td->io_u_freelist);
	INIT_LIST_HEAD(&td->io_u_busylist);
	INIT_LIST_HEAD(&td->io_hist_list);

	p = ALIGN(td->orig_buffer);
	for (i = 0; i < max_units; i++) {
		io_u = malloc(sizeof(*io_u));
		memset(io_u, 0, sizeof(*io_u));
		INIT_LIST_HEAD(&io_u->list);

		io_u->buf = p + td->max_bs * i;
		list_add(&io_u->list, &td->io_u_freelist);
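/*
 * lay out (or extend) the job file by writing it full of zeroed
 * max_bs sized blocks
 */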
static int create_file(struct thread_data *td, unsigned long long size,
	unsigned long long left;

	/*
	 * unless specifically asked for overwrite, let normal io extend it
	 */
	if (td_write(td) && !td->overwrite)

		fprintf(stderr, "Need size for create\n");
		td_verror(td, EINVAL);

		oflags = O_CREAT | O_TRUNC;
		printf("Client%d: Laying out IO file (%LuMiB)\n", td->thread_number, size >> 20);

		printf("Client%d: Extending IO file (%Lu -> %LuMiB)\n", td->thread_number, (td->file_size - size) >> 20, td->file_size >> 20);

	td->fd = open(td->file_name, O_WRONLY | oflags, 0644);

		td_verror(td, errno);

	if (!extend && ftruncate(td->fd, td->file_size) == -1) {
		td_verror(td, errno);

	td->io_size = td->file_size;
	b = malloc(td->max_bs);
	memset(b, 0, td->max_bs);

	while (left && !td->terminate) {

		r = write(td->fd, b, bs);

		if (r == (int) bs) {

			td_verror(td, errno);

		unlink(td->file_name);
	else if (td->create_fsync)

static int file_size(struct thread_data *td)
	if (fstat(td->fd, &st) == -1) {
		td_verror(td, errno);

	td->file_size = st.st_size;

static int bdev_size(struct thread_data *td)
	r = blockdev_size(td->fd, &bytes);

	/*
	 * no extend possibilities, so limit size to device size if too large
	 */
	if (!td->file_size || td->file_size > bytes)
		td->file_size = bytes;

static int get_file_size(struct thread_data *td)
	if (td->filetype == FIO_TYPE_FILE)
		ret = file_size(td);

	ret = bdev_size(td);

	if (td->file_offset > td->file_size) {
		fprintf(stderr, "Client%d: offset larger than length (%Lu > %Lu)\n", td->thread_number, td->file_offset, td->file_size);

	td->io_size = td->file_size - td->file_offset;
	if (td->io_size == 0) {
		fprintf(stderr, "Client%d: no io blocks\n", td->thread_number);
		td_verror(td, EINVAL);

	td->total_io_size = td->io_size * td->loops;
static int setup_file_mmap(struct thread_data *td)
	if (td->verify != VERIFY_NONE)

	td->mmap = mmap(NULL, td->file_size, flags, MAP_SHARED, td->fd, td->file_offset);
	if (td->mmap == MAP_FAILED) {
		td_verror(td, errno);

	if (td->invalidate_cache) {
		if (madvise(td->mmap, td->file_size, MADV_DONTNEED) < 0) {
			td_verror(td, errno);

	if (td->sequential) {
		if (madvise(td->mmap, td->file_size, MADV_SEQUENTIAL) < 0) {
			td_verror(td, errno);

		if (madvise(td->mmap, td->file_size, MADV_RANDOM) < 0) {
			td_verror(td, errno);

static int setup_file_plain(struct thread_data *td)
	if (td->invalidate_cache) {
		if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_DONTNEED) < 0) {
			td_verror(td, errno);

	if (td->sequential) {
		if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_SEQUENTIAL) < 0) {
			td_verror(td, errno);

		if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_RANDOM) < 0) {
			td_verror(td, errno);

static int setup_file(struct thread_data *td)
	if (stat(td->file_name, &st) == -1) {
		if (errno != ENOENT) {
			td_verror(td, errno);

		if (!td->create_file) {
			td_verror(td, ENOENT);

		if (create_file(td, td->file_size, 0))
	} else if (td->filetype == FIO_TYPE_FILE) {
		if (st.st_size < td->file_size) {
			if (create_file(td, td->file_size - st.st_size, 1))

		td->fd = open(td->file_name, flags | O_RDONLY);

	if (td->filetype == FIO_TYPE_FILE) {

		td->fd = open(td->file_name, flags, 0600);

		td_verror(td, errno);

	if (get_file_size(td))

	if (td->io_engine != FIO_MMAPIO)
		return setup_file_plain(td);

	return setup_file_mmap(td);

static int check_dev_match(dev_t dev, char *path)
	unsigned int major, minor;

	f = fopen(path, "r");

		perror("open path");

	p = fgets(line, sizeof(line), f);

	if (sscanf(p, "%u:%u", &major, &minor) != 2) {

	if (((major << 8) | minor) == dev) {
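/*
 * recurse through the sysfs tree looking for the directory whose
 * "dev" file matches the device we are doing io against
 */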
static int find_block_dir(dev_t dev, char *path)
	while ((dir = readdir(D)) != NULL) {
		char full_path[256];

		if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
		if (!strcmp(dir->d_name, "device"))

		sprintf(full_path, "%s/%s", path, dir->d_name);

		if (!strcmp(dir->d_name, "dev")) {
			if (!check_dev_match(dev, full_path)) {

		if (stat(full_path, &st) == -1) {

		if (!S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode))

		found = find_block_dir(dev, full_path);

			strcpy(path, full_path);

static int get_io_ticks(struct disk_util *du, struct disk_util_stat *dus)
	f = fopen(du->path, "r");

	p = fgets(line, sizeof(line), f);

	if (sscanf(p, "%u %u %llu %u %u %u %llu %u %u %u %u\n", &dus->ios[0], &dus->merges[0], &dus->sectors[0], &dus->ticks[0], &dus->ios[1], &dus->merges[1], &dus->sectors[1], &dus->ticks[1], &in_flight, &dus->io_ticks, &dus->time_in_queue) != 11) {
static void update_io_tick_disk(struct disk_util *du)
	struct disk_util_stat __dus, *dus, *ldus;

	if (get_io_ticks(du, &__dus))

	ldus = &du->last_dus;

	dus->sectors[0] += (__dus.sectors[0] - ldus->sectors[0]);
	dus->sectors[1] += (__dus.sectors[1] - ldus->sectors[1]);
	dus->ios[0] += (__dus.ios[0] - ldus->ios[0]);
	dus->ios[1] += (__dus.ios[1] - ldus->ios[1]);
	dus->merges[0] += (__dus.merges[0] - ldus->merges[0]);
	dus->merges[1] += (__dus.merges[1] - ldus->merges[1]);
	dus->ticks[0] += (__dus.ticks[0] - ldus->ticks[0]);
	dus->ticks[1] += (__dus.ticks[1] - ldus->ticks[1]);
	dus->io_ticks += (__dus.io_ticks - ldus->io_ticks);
	dus->time_in_queue += (__dus.time_in_queue - ldus->time_in_queue);

	gettimeofday(&t, NULL);
	du->msec += mtime_since(&du->time, &t);
	memcpy(&du->time, &t, sizeof(t));
	memcpy(ldus, &__dus, sizeof(__dus));

static void update_io_ticks(void)
	struct list_head *entry;
	struct disk_util *du;

	list_for_each(entry, &disk_list) {
		du = list_entry(entry, struct disk_util, list);
		update_io_tick_disk(du);

static int disk_util_exists(dev_t dev)
	struct list_head *entry;
	struct disk_util *du;

	list_for_each(entry, &disk_list) {
		du = list_entry(entry, struct disk_util, list);

static void disk_util_add(dev_t dev, char *path)
	struct disk_util *du = malloc(sizeof(*du));

	memset(du, 0, sizeof(*du));
	INIT_LIST_HEAD(&du->list);
	sprintf(du->path, "%s/stat", path);
	du->name = strdup(basename(path));

	gettimeofday(&du->time, NULL);
	get_io_ticks(du, &du->last_dus);

	list_add_tail(&du->list, &disk_list);
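/*
 * map the job file (or block device) to its sysfs disk directory and
 * add it to the list of disks we collect utilization stats for
 */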
static void init_disk_util(struct thread_data *td)
	char foo[256], tmp[256];

	if (!td->do_disk_util)

	if (!stat(td->file_name, &st)) {
		if (S_ISBLK(st.st_mode))

	/*
	 * must be a file, open "." in that path
	 */
	strcpy(foo, td->file_name);

		perror("disk util stat");

	if (disk_util_exists(dev))

	sprintf(foo, "/sys/block");
	if (!find_block_dir(dev, foo))

	/*
	 * if this is inside a partition dir, jump back to parent
	 */
	sprintf(tmp, "%s/queue", foo);
	if (stat(tmp, &st)) {
		sprintf(tmp, "%s/queue", p);
		if (stat(tmp, &st)) {
			fprintf(stderr, "unknown sysfs layout\n");

		sprintf(foo, "%s", p);

	disk_util_add(dev, foo);

static void disk_util_timer_arm(void)
	itimer.it_value.tv_sec = 0;
	itimer.it_value.tv_usec = DISK_UTIL_MSEC * 1000;
	setitimer(ITIMER_REAL, &itimer, NULL);
static void clear_io_state(struct thread_data *td)
	if (td->io_engine == FIO_SYNCIO)
		lseek(td->fd, 0, SEEK_SET);

	td->stat_io_bytes[0] = td->stat_io_bytes[1] = 0;
	td->this_io_bytes[0] = td->this_io_bytes[1] = 0;

	memset(td->file_map, 0, td->num_maps * sizeof(long));

static void update_rusage_stat(struct thread_data *td)
	if (!(td->runtime[0] + td->runtime[1]))

	getrusage(RUSAGE_SELF, &td->ru_end);

	td->usr_time += mtime_since(&td->ru_start.ru_utime, &td->ru_end.ru_utime);
	td->sys_time += mtime_since(&td->ru_start.ru_stime, &td->ru_end.ru_stime);
	td->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw - (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);

	memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
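/*
 * per-job entry point: set up the file and io engine, then run the io
 * (and optional verify) loop for the configured number of loops
 */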
static void *thread_main(void *data)
	struct thread_data *td = data;

	if (!td->use_thread)

	if (fio_setaffinity(td) == -1) {
		td_verror(td, errno);

	if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
		td_verror(td, errno);

	sem_post(&startup_sem);
	sem_wait(&td->mutex);

	if (!td->create_serialize && setup_file(td))

	if (init_random_state(td))

	gettimeofday(&td->epoch, NULL);

	while (td->loops--) {
		getrusage(RUSAGE_SELF, &td->ru_start);
		gettimeofday(&td->start, NULL);
		memcpy(&td->stat_sample_time, &td->start, sizeof(td->start));

		memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));

		prune_io_piece_log(td);

		td->runtime[td->ddir] += mtime_since_now(&td->start);
		update_rusage_stat(td);

		if (td->error || td->terminate)

		if (td->verify == VERIFY_NONE)

		gettimeofday(&td->start, NULL);

		td->runtime[DDIR_READ] += mtime_since_now(&td->start);

		if (td->error || td->terminate)

	finish_log(td, td->bw_log, "bw");

	finish_log(td, td->slat_log, "slat");

	finish_log(td, td->clat_log, "clat");

	if (exitall_on_terminate)
		terminate_threads(td->groupid);

	munmap(td->mmap, td->file_size);

	sem_post(&startup_sem);
	sem_wait(&td->mutex);

	td_set_runstate(td, TD_EXITED);

static void *fork_main(int shmid, int offset)
	struct thread_data *td;

	data = shmat(shmid, NULL, 0);
	if (data == (void *) -1) {

	td = data + offset * sizeof(struct thread_data);
static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
		    double *mean, double *dev)
	if (is->samples == 0)

	n = (double) is->samples;
	*mean = (double) is->val / n;
	*dev = sqrt(((double) is->val_sq - ((double) is->val * is->val) / n) / (n - 1));
	if (!(*min + *max) && !(*mean + *dev))

static void show_ddir_status(struct thread_data *td, struct group_run_stats *rs,
	char *ddir_str[] = { "read ", "write" };
	unsigned long min, max, bw;

	if (!td->runtime[ddir])

	bw = td->io_bytes[ddir] / td->runtime[ddir];
	printf(" %s: io=%6luMiB, bw=%6luKiB/s, runt=%6lumsec\n", ddir_str[ddir], td->io_bytes[ddir] >> 20, bw, td->runtime[ddir]);

	if (calc_lat(&td->slat_stat[ddir], &min, &max, &mean, &dev))
		printf(" slat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);

	if (calc_lat(&td->clat_stat[ddir], &min, &max, &mean, &dev))
		printf(" clat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);

	if (calc_lat(&td->bw_stat[ddir], &min, &max, &mean, &dev)) {
		p_of_agg = mean * 100 / (double) rs->agg[ddir];
		printf(" bw (KiB/s) : min=%5lu, max=%5lu, per=%3.2f%%, avg=%5.02f, dev=%5.02f\n", min, max, p_of_agg, mean, dev);
static void show_thread_status(struct thread_data *td,
			       struct group_run_stats *rs)
	double usr_cpu, sys_cpu;

	if (!(td->io_bytes[0] + td->io_bytes[1]) && !td->error)

	printf("Client%d (groupid=%d): err=%2d:\n", td->thread_number, td->groupid, td->error);

	show_ddir_status(td, rs, td->ddir);
	show_ddir_status(td, rs, td->ddir ^ 1);

	if (td->runtime[0] + td->runtime[1]) {
		double runt = td->runtime[0] + td->runtime[1];

		usr_cpu = (double) td->usr_time * 100 / runt;
		sys_cpu = (double) td->sys_time * 100 / runt;

	printf(" cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, td->ctx);

static void check_str_update(struct thread_data *td)
	char c = run_str[td->thread_number - 1];

	if (td->runstate == td->old_runstate)

	switch (td->runstate) {
		case TD_NOT_CREATED:
			printf("state %d\n", td->runstate);

	run_str[td->thread_number - 1] = c;
	td->old_runstate = td->runstate;

static void print_thread_status(void)
	unsigned long long bytes_done, bytes_total;
	int i, nr_running, t_rate, m_rate;

	bytes_done = bytes_total = 0;
	nr_running = t_rate = m_rate = 0;
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
			m_rate += td->ratemin;

			bytes_total += td->total_io_size;

			bytes_total += td->total_io_size;

		bytes_done += td->io_bytes[DDIR_READ] + td->io_bytes[DDIR_WRITE];

		check_str_update(td);

	if (bytes_total && bytes_done) {
		perc = (double) 100 * bytes_done / (double) bytes_total;

	printf("Threads now running: %d", nr_running);
	if (m_rate || t_rate)
		printf(", commitrate %d/%dKiB/sec", t_rate, m_rate);
	printf(" : [%s] [%3.2f%% done]\r", run_str, perc);
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		if (td->runstate != TD_EXITED)

		td_set_runstate(td, TD_REAPED);

		if (td->use_thread) {
			if (pthread_join(td->thread, (void *) &ret))
				perror("thread_join");

			waitpid(td->pid, NULL, 0);

		(*m_rate) -= td->ratemin;
		(*t_rate) -= td->rate;
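/*
 * main scheduler: serialize file setup, then create, start and reap
 * the job threads/processes
 */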
static void run_threads(void)
	struct timeval genesis;
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate, nr_started;

	printf("Starting %d thread%s\n", thread_number, thread_number > 1 ? "s" : "");

	signal(SIGINT, sig_handler);
	signal(SIGALRM, sig_handler);

	todo = thread_number;

	m_rate = t_rate = 0;

	for (i = 0; i < thread_number; i++) {

		run_str[td->thread_number - 1] = 'P';

		if (!td->create_serialize)

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_file(td)) {
			td_set_runstate(td, TD_REAPED);

	gettimeofday(&genesis, NULL);

	/*
	 * create threads (TD_NOT_CREATED -> TD_CREATED)
	 */
	for (i = 0; i < thread_number; i++) {

		if (td->runstate != TD_NOT_CREATED)

		/*
		 * never got a chance to start, killed by other
		 * thread for some reason
		 */
		if (td->terminate) {

		if (td->start_delay) {
			spent = mtime_since_now(&genesis);

			if (td->start_delay * 1000 > spent)

		if (td->stonewall && (nr_started || nr_running))

		td_set_runstate(td, TD_CREATED);
		sem_init(&startup_sem, 0, 1);

		if (td->use_thread) {
			if (pthread_create(&td->thread, NULL, thread_main, td)) {
				perror("thread_create");

			sem_wait(&startup_sem);

			fork_main(shm_id, i);

	/*
	 * start created threads (TD_CREATED -> TD_RUNNING)
	 */
	for (i = 0; i < thread_number; i++) {

		if (td->runstate != TD_CREATED)

		td_set_runstate(td, TD_RUNNING);

		m_rate += td->ratemin;

		sem_post(&td->mutex);

	reap_threads(&nr_running, &t_rate, &m_rate);

	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
static void show_group_stats(struct group_run_stats *rs, int id)
	printf("\nRun status group %d (all jobs):\n", id);

	if (rs->max_run[DDIR_READ])
		printf(" READ: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", rs->io_mb[0], rs->agg[0], rs->min_bw[0], rs->max_bw[0], rs->min_run[0], rs->max_run[0]);
	if (rs->max_run[DDIR_WRITE])
		printf(" WRITE: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", rs->io_mb[1], rs->agg[1], rs->min_bw[1], rs->max_bw[1], rs->min_run[1], rs->max_run[1]);

static void show_disk_util(void)
	struct disk_util_stat *dus;
	struct list_head *entry;
	struct disk_util *du;

	printf("\nDisk stats (read/write):\n");

	list_for_each(entry, &disk_list) {
		du = list_entry(entry, struct disk_util, list);

		util = (double) 100 * du->dus.io_ticks / (double) du->msec;

		printf(" %s: ios=%u/%u, merge=%u/%u, ticks=%u/%u, in_queue=%u, util=%3.2f%%\n", du->name, dus->ios[0], dus->ios[1], dus->merges[0], dus->merges[1], dus->ticks[0], dus->ticks[1], dus->time_in_queue, util);
static void show_run_stats(void)
	struct group_run_stats *runstats, *rs;
	struct thread_data *td;

	runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));

	for (i = 0; i < groupid + 1; i++) {

		memset(rs, 0, sizeof(*rs));
		rs->min_bw[0] = rs->min_run[0] = ~0UL;
		rs->min_bw[1] = rs->min_run[1] = ~0UL;

	for (i = 0; i < thread_number; i++) {
		unsigned long rbw, wbw;

			printf("Client%d: %s\n", td->thread_number, td->verror);

		rs = &runstats[td->groupid];

		if (td->runtime[0] < rs->min_run[0] || !rs->min_run[0])
			rs->min_run[0] = td->runtime[0];
		if (td->runtime[0] > rs->max_run[0])
			rs->max_run[0] = td->runtime[0];
		if (td->runtime[1] < rs->min_run[1] || !rs->min_run[1])
			rs->min_run[1] = td->runtime[1];
		if (td->runtime[1] > rs->max_run[1])
			rs->max_run[1] = td->runtime[1];

			rbw = td->io_bytes[0] / td->runtime[0];

			wbw = td->io_bytes[1] / td->runtime[1];

		if (rbw < rs->min_bw[0])
			rs->min_bw[0] = rbw;
		if (wbw < rs->min_bw[1])
			rs->min_bw[1] = wbw;
		if (rbw > rs->max_bw[0])
			rs->max_bw[0] = rbw;
		if (wbw > rs->max_bw[1])
			rs->max_bw[1] = wbw;

		rs->io_mb[0] += td->io_bytes[0] >> 20;
		rs->io_mb[1] += td->io_bytes[1] >> 20;

	for (i = 0; i < groupid + 1; i++) {

		rs->agg[0] = (rs->io_mb[0] * 1024 * 1000) / rs->max_run[0];

		rs->agg[1] = (rs->io_mb[1] * 1024 * 1000) / rs->max_run[1];

	/*
	 * don't overwrite last signal output
	 */

	for (i = 0; i < thread_number; i++) {

		rs = &runstats[td->groupid];

		show_thread_status(td, rs);

	for (i = 0; i < groupid + 1; i++)
		show_group_stats(&runstats[i], i);
int main(int argc, char *argv[])
	if (parse_options(argc, argv))

	if (!thread_number) {
		printf("Nothing to do\n");

	disk_util_timer_arm();