/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <asm/unistd.h>
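/*
 * Round a buffer address up to the next alignment boundary. MASK is
 * assumed to be one less than a power-of-2 alignment (e.g. 511 for
 * 512-byte alignment): with MASK == 511, an address of 0x10001 rounds
 * up to 0x10200, while an already-aligned address is left untouched.
 */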
#define ALIGN(buf)	(char *) (((unsigned long) (buf) + MASK) & ~(MASK))

int thread_number = 0;
char run_str[MAX_JOBS + 1];
	struct timeval start_time;
	struct timeval issue_time;

	unsigned long long offset;

	struct list_head list;
#define should_fsync(td)	(td_write(td) && !(td)->odirect)

static sem_t startup_sem;

#define TERMINATE_ALL		(-1)
static void terminate_threads(int groupid)
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		if (groupid == TERMINATE_ALL || groupid == td->groupid) {

static void sig_handler(int sig)
	terminate_threads(TERMINATE_ALL);
static unsigned long utime_since(struct timeval *s, struct timeval *e)
	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {

	sec *= (double) 1000000;

static unsigned long utime_since_now(struct timeval *s)
	gettimeofday(&t, NULL);
	return utime_since(s, &t);
static unsigned long mtime_since(struct timeval *s, struct timeval *e)
	sec = e->tv_sec - s->tv_sec;
	usec = e->tv_usec - s->tv_usec;
	if (sec > 0 && usec < 0) {

	sec *= (double) 1000;
	usec /= (double) 1000;

static unsigned long mtime_since_now(struct timeval *s)
	gettimeofday(&t, NULL);
	return mtime_since(s, &t);

static inline unsigned long msec_now(struct timeval *s)
	return s->tv_sec * 1000 + s->tv_usec / 1000;
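/*
 * The file_map is a bitmap with one bit per min_bs-sized block of the
 * io area; a set bit means that block has already been issued. This
 * lets random io touch each block exactly once per pass.
 */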
static int random_map_free(struct thread_data *td, unsigned long long block)
	unsigned int idx = RAND_MAP_IDX(td, block);
	unsigned int bit = RAND_MAP_BIT(td, block);

	return (td->file_map[idx] & (1UL << bit)) == 0;

static int get_next_free_block(struct thread_data *td, unsigned long long *b)
	while ((*b) * td->min_bs < td->io_size) {
		if (td->file_map[i] != -1UL) {
			*b += ffz(td->file_map[i]);

		*b += BLOCKS_PER_MAP;
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
	unsigned long block = io_u->offset / td->min_bs;
	unsigned int blocks = 0;

	while (blocks < (io_u->buflen / td->min_bs)) {
		if (!random_map_free(td, block))

		idx = RAND_MAP_IDX(td, block);
		bit = RAND_MAP_BIT(td, block);

		assert(idx < td->num_maps);

		td->file_map[idx] |= (1UL << bit);

	if ((blocks * td->min_bs) < io_u->buflen)
		io_u->buflen = blocks * td->min_bs;
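/*
 * For random io, draw a random block and retry a bounded number of
 * times if it is already in the map; failing that, fall back to a
 * linear scan for the next free block. Sequential io simply continues
 * from last_bytes.
 */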
static int get_next_offset(struct thread_data *td, unsigned long long *offset)
	unsigned long long b, rb;

	if (!td->sequential) {
		unsigned long max_blocks = td->io_size / td->min_bs;

			lrand48_r(&td->random_state, &r);
			b = ((max_blocks - 1) * r / (RAND_MAX + 1.0));
			rb = b + (td->file_offset / td->min_bs);
		} while (!random_map_free(td, rb) && loops);

		if (get_next_free_block(td, &b))

		b = td->last_bytes / td->min_bs;

	*offset = (b * td->min_bs) + td->file_offset;
	if (*offset > td->file_size)
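/*
 * Pick the next buffer length from the [min_bs, max_bs] range. The
 * rounding below relies on min_bs being a power of 2, and the result
 * is capped so it never exceeds what is left of io_size.
 */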
static unsigned int get_next_buflen(struct thread_data *td)
	if (td->min_bs == td->max_bs)

	lrand48_r(&td->bsrange_state, &r);
	buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
	buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);

	if (buflen > td->io_size - td->this_io_bytes)
		buflen = td->io_size - td->this_io_bytes;
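/*
 * Samples are tracked as running min/max plus the sum and sum of
 * squares, which is all calc_lat() needs to produce the mean and
 * standard deviation at the end of the run.
 */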
static inline void add_stat_sample(struct thread_data *td, struct io_stat *is,
				   unsigned long val)
	if (val > is->max_val)
	if (val < is->min_val)

	is->val_sq += val * val;

static void add_log_sample(struct thread_data *td, struct io_log *log,
			   unsigned long val)
	if (log->nr_samples == log->max_samples) {
		int new_size = sizeof(struct io_sample) * log->max_samples * 2;

		log->log = realloc(log->log, new_size);
		log->max_samples <<= 1;

	log->log[log->nr_samples].val = val;
	log->log[log->nr_samples].time = mtime_since_now(&td->start);
static void add_clat_sample(struct thread_data *td, unsigned long msec)
	add_stat_sample(td, &td->clat_stat, msec);

	add_log_sample(td, td->lat_log, msec);

static void add_slat_sample(struct thread_data *td, unsigned long msec)
	add_stat_sample(td, &td->slat_stat, msec);

static void add_bw_sample(struct thread_data *td)
	unsigned long spent = mtime_since_now(&td->stat_sample_time);

	if (spent < td->bw_avg_time)

	rate = (td->this_io_bytes - td->stat_io_bytes) / spent;
	add_stat_sample(td, &td->bw_stat, rate);

	add_log_sample(td, td->bw_log, rate);

	gettimeofday(&td->stat_sample_time, NULL);
	td->stat_io_bytes = td->this_io_bytes;
/*
 * busy looping version for the last few usec
 */
static void __usec_sleep(int usec)
	struct timeval start;

	gettimeofday(&start, NULL);
	while (utime_since_now(&start) < usec)

static void usec_sleep(int usec)
	struct timespec req = { .tv_sec = 0, .tv_nsec = usec * 1000 };

		rem.tv_sec = rem.tv_nsec = 0;
		nanosleep(&req, &rem);

		req.tv_nsec = rem.tv_nsec;
		usec = rem.tv_nsec / 1000;
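/*
 * rate_usec_cycle is the time one min_bs-sized block should take at
 * the requested rate. If this io completed faster than that, bank the
 * difference and sleep once at least 100msec is pending; if it ran
 * over, credit the overtime against future sleeps instead.
 */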
static void rate_throttle(struct thread_data *td, unsigned long time_spent,
			  unsigned int bytes)
	unsigned long usec_cycle;

	usec_cycle = td->rate_usec_cycle * (bytes / td->min_bs);

	if (time_spent < usec_cycle) {
		unsigned long s = usec_cycle - time_spent;

		td->rate_pending_usleep += s;
		if (td->rate_pending_usleep >= 100000) {
			usec_sleep(td->rate_pending_usleep);
			td->rate_pending_usleep = 0;

		long overtime = time_spent - usec_cycle;

		td->rate_pending_usleep -= overtime;
static int check_min_rate(struct thread_data *td, struct timeval *now)
	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)

	/*
	 * if rate_bytes is set, we have sampled before: check the
	 * rate over the elapsed cycle
	 */
	if (td->rate_bytes) {
		spent = mtime_since(&td->lastrate, now);
		if (spent < td->ratecycle)

		rate = (td->this_io_bytes - td->rate_bytes) / spent;
		if (rate < td->ratemin) {
			printf("Client%d: min rate %d not met, got %ldKiB/sec\n", td->thread_number, td->ratemin, rate);

			terminate_threads(td->groupid);

	td->rate_bytes = td->this_io_bytes;
	memcpy(&td->lastrate, now, sizeof(*now));

static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
	if (mtime_since(&td->start, t) >= td->timeout * 1000)
static void fill_random_bytes(struct thread_data *td,
			      unsigned char *p, unsigned int len)
	drand48_r(&td->verify_state, &r);

	/*
	 * lrand48_r seems to be broken and only fills the bottom
	 * 32 bits, even on 64-bit archs with 64-bit longs
	 */

static void hexdump(void *buffer, int len)
	unsigned char *p = buffer;

	for (i = 0; i < len; i++)
		printf("%02x", p[i]);
static int verify_io_u_crc32(struct verify_header *hdr, struct io_u *io_u)
	unsigned char *p = (unsigned char *) io_u->buf;

	c = crc32(p, hdr->len - sizeof(*hdr));

	return c != hdr->crc32;

static int verify_io_u_md5(struct verify_header *hdr, struct io_u *io_u)
	unsigned char *p = (unsigned char *) io_u->buf;
	struct md5_ctx md5_ctx;

	memset(&md5_ctx, 0, sizeof(md5_ctx));

	md5_update(&md5_ctx, p, hdr->len - sizeof(*hdr));

	ret = memcmp(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));

		hexdump(hdr->md5_digest, sizeof(hdr->md5_digest));
		hexdump(md5_ctx.hash, sizeof(md5_ctx.hash));

static int verify_io_u(struct io_u *io_u)
	struct verify_header *hdr = (struct verify_header *) io_u->buf;

	if (hdr->fio_magic != FIO_HDR_MAGIC)

	if (hdr->verify_type == VERIFY_MD5)
		ret = verify_io_u_md5(hdr, io_u);
	else if (hdr->verify_type == VERIFY_CRC32)
		ret = verify_io_u_crc32(hdr, io_u);

		fprintf(stderr, "Bad verify type %d\n", hdr->verify_type);
static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
	hdr->crc32 = crc32(p, len);

static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
	struct md5_ctx md5_ctx;

	memset(&md5_ctx, 0, sizeof(md5_ctx));
	md5_update(&md5_ctx, p, len);
	memcpy(hdr->md5_digest, md5_ctx.hash, sizeof(md5_ctx.hash));

/*
 * fill body of io_u->buf with random data and add a header with the
 * crc32 or md5 sum of that data.
 */
static void populate_io_u(struct thread_data *td, struct io_u *io_u)
	unsigned char *p = (unsigned char *) io_u->buf;
	struct verify_header hdr;

	hdr.fio_magic = FIO_HDR_MAGIC;
	hdr.len = io_u->buflen;

	fill_random_bytes(td, p, io_u->buflen - sizeof(hdr));

	if (td->verify == VERIFY_MD5) {
		fill_md5(&hdr, p, io_u->buflen - sizeof(hdr));
		hdr.verify_type = VERIFY_MD5;

		fill_crc32(&hdr, p, io_u->buflen - sizeof(hdr));
		hdr.verify_type = VERIFY_CRC32;

	memcpy(io_u->buf, &hdr, sizeof(hdr));

static void put_io_u(struct thread_data *td, struct io_u *io_u)
	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_freelist);

#define queue_full(td)	(list_empty(&(td)->io_u_freelist))

static struct io_u *__get_io_u(struct thread_data *td)
	io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_busylist);
static struct io_u *get_io_u(struct thread_data *td)
	io_u = __get_io_u(td);

	if (get_next_offset(td, &io_u->offset)) {

	io_u->buflen = get_next_buflen(td);

	if (io_u->buflen + io_u->offset > td->file_size)
		io_u->buflen = td->file_size - io_u->offset;

	mark_random_map(td, io_u);

	td->last_bytes += io_u->buflen;

	if (td->verify != VERIFY_NONE)
		populate_io_u(td, io_u);

	io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);

	io_prep_pwrite(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);

	gettimeofday(&io_u->start_time, NULL);

static inline void td_set_runstate(struct thread_data *td, int runstate)
	td->old_runstate = td->runstate;
	td->runstate = runstate;
static int get_next_verify(struct thread_data *td,
			   unsigned long long *offset, unsigned int *len)
	struct io_piece *ipo;

	if (list_empty(&td->io_hist_list))

	ipo = list_entry(td->io_hist_list.next, struct io_piece, list);
	list_del(&ipo->list);

	*offset = ipo->offset;

static void prune_io_piece_log(struct thread_data *td)
	struct io_piece *ipo;

	while (!list_empty(&td->io_hist_list)) {
		ipo = list_entry(td->io_hist_list.next, struct io_piece, list);

		list_del(&ipo->list);

/*
 * log a successful write, so we can unwind the log for verify
 */
static void log_io_piece(struct thread_data *td, struct io_u *io_u)
	struct io_piece *ipo = malloc(sizeof(struct io_piece));
	struct list_head *entry;

	INIT_LIST_HEAD(&ipo->list);
	ipo->offset = io_u->offset;
	ipo->len = io_u->buflen;

	/*
	 * for random io where the writes extend the file, it will typically
	 * be laid out with the blocks scattered as written. it's faster to
	 * read them back in that order again, so don't sort
	 */
	if (td->sequential || !td->overwrite) {
		list_add_tail(&ipo->list, &td->io_hist_list);

	/*
	 * for random io, sort the list so verify will run faster
	 */
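	/*
	 * walk backwards from the tail and insert after the first entry
	 * with a smaller offset, keeping the history sorted by ascending
	 * offset
	 */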
	entry = &td->io_hist_list;
	while ((entry = entry->prev) != &td->io_hist_list) {
		struct io_piece *__ipo = list_entry(entry, struct io_piece, list);

		if (__ipo->offset < ipo->offset)

	list_add(&ipo->list, entry);
static void do_sync_verify(struct thread_data *td)
	struct io_u *io_u = NULL;

	td_set_runstate(td, TD_VERIFYING);

	io_u = __get_io_u(td);

	if (fadvise(td->fd, td->file_offset, td->io_size, POSIX_FADV_DONTNEED) < 0) {

	if (madvise(td->mmap, td->io_size, MADV_DONTNEED)) {

	gettimeofday(&t, NULL);
	if (runtime_exceeded(td, &t))

	if (get_next_verify(td, &io_u->offset, &io_u->buflen))

	if (td->cur_off != io_u->offset) {
		if (lseek(td->fd, io_u->offset, SEEK_SET) == -1) {

	ret = read(td->fd, io_u->buf, io_u->buflen);
	if (ret < (int) io_u->buflen) {

	if (verify_io_u(io_u))

	td->cur_off = io_u->offset + io_u->buflen;

	td_set_runstate(td, TD_RUNNING);
static int __do_sync_mmap(struct thread_data *td, struct io_u *io_u)
	unsigned long long real_off = io_u->offset - td->file_offset;

	memcpy(io_u->buf, td->mmap + real_off, io_u->buflen);

	memcpy(td->mmap + real_off, io_u->buf, io_u->buflen);

	/*
	 * not really direct, but should drop the pages from the cache
	 */
	msync(td->mmap + real_off, io_u->buflen, MS_SYNC);
	madvise(td->mmap + real_off, io_u->buflen, MADV_DONTNEED);

static int __do_sync_rw(struct thread_data *td, struct io_u *io_u)
	if (td->cur_off != io_u->offset) {
		if (lseek(td->fd, io_u->offset, SEEK_SET) == -1) {

	return read(td->fd, io_u->buf, io_u->buflen);

	return write(td->fd, io_u->buf, io_u->buflen);

static void sync_td(struct thread_data *td)
	msync(td->mmap, td->file_size, MS_SYNC);
static void do_sync_io(struct thread_data *td)
	unsigned long msec, usec;
	struct io_u *io_u = NULL;

	while (td->this_io_bytes < td->io_size) {
		ret = __do_sync_rw(td, io_u);

		ret = __do_sync_mmap(td, io_u);

		if (ret < (int) io_u->buflen) {

		log_io_piece(td, io_u);

		td->io_bytes += io_u->buflen;
		td->this_io_bytes += io_u->buflen;
		td->cur_off = io_u->offset + io_u->buflen;

		gettimeofday(&e, NULL);

		usec = utime_since(&io_u->start_time, &e);

		rate_throttle(td, usec, io_u->buflen);

		if (check_min_rate(td, &e)) {

		add_clat_sample(td, msec);

		if (runtime_exceeded(td, &e))

		usec_sleep(td->thinktime);

		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks % td->fsync_blocks) == 0)

	if (should_fsync(td))
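/*
 * Wrapper around io_getevents() that retries the wait on -EAGAIN and
 * -EINTR, so transient failures and signals don't abort event reaping.
 */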
static int io_u_getevents(struct thread_data *td, int min, int max,
			  struct timespec *t)
	r = io_getevents(td->aio_ctx, min, max, td->aio_events, t);
	if (r != -EAGAIN && r != -EINTR)

static int io_u_queue(struct thread_data *td, struct io_u *io_u)
	struct iocb *iocb = &io_u->iocb;

	ret = io_submit(td->aio_ctx, 1, &iocb);

	else if (ret == -EAGAIN)

	else if (ret == -EINTR)

#define iocb_time(iocb)	((unsigned long) (iocb)->data)
#define ev_to_iou(ev)	(struct io_u *) ((unsigned long) (ev)->obj)
static int ios_completed(struct thread_data *td, int nr)
	gettimeofday(&e, NULL);

	for (i = 0, bytes_done = 0; i < nr; i++) {
		io_u = ev_to_iou(td->aio_events + i);

		td->io_bytes += io_u->buflen;
		td->this_io_bytes += io_u->buflen;

		msec = mtime_since(&io_u->issue_time, &e);

		add_clat_sample(td, msec);

		log_io_piece(td, io_u);

		bytes_done += io_u->buflen;

static void cleanup_pending_aio(struct thread_data *td)
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
	struct list_head *entry, *n;

	/*
	 * get immediately available events, if any
	 */
	r = io_u_getevents(td, 0, td->cur_depth, &ts);

	ios_completed(td, r);

	/*
	 * now cancel remaining active events
	 */
	list_for_each_safe(entry, n, &td->io_u_busylist) {
		io_u = list_entry(entry, struct io_u, list);

		r = io_cancel(td->aio_ctx, &io_u->iocb, td->aio_events);

	r = io_u_getevents(td, td->cur_depth, td->cur_depth, NULL);

	ios_completed(td, r);
static int async_do_verify(struct thread_data *td, struct io_u **io_u)
	struct io_u *v_io_u = *io_u;

	ret = verify_io_u(v_io_u);
	put_io_u(td, v_io_u);

static void do_async_verify(struct thread_data *td)
	struct io_u *io_u, *v_io_u = NULL;

	td_set_runstate(td, TD_VERIFYING);

	gettimeofday(&t, NULL);
	if (runtime_exceeded(td, &t))

	io_u = __get_io_u(td);

	if (get_next_verify(td, &io_u->offset, &io_u->buflen)) {

	io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
	ret = io_u_queue(td, io_u);

	/*
	 * we have one io_u pending to verify, do that while we are
	 * doing io on the next one
	 */
	if (async_do_verify(td, &v_io_u))

	ret = io_u_getevents(td, 1, 1, NULL);

	v_io_u = ev_to_iou(td->aio_events);

	td->cur_off = v_io_u->offset + v_io_u->buflen;

	/*
	 * if we can't submit more io, we need to verify now
	 */
	if (queue_full(td) && async_do_verify(td, &v_io_u))

	async_do_verify(td, &v_io_u);

	cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);
static void do_async_io(struct thread_data *td)
	struct timeval s, e;

	while (td->this_io_bytes < td->io_size) {
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 };
		struct timespec *timeout;
		int ret, min_evts = 0;
		unsigned int bytes_done;

		io_u = get_io_u(td);

		memcpy(&s, &io_u->start_time, sizeof(s));

		ret = io_u_queue(td, io_u);

		gettimeofday(&io_u->issue_time, NULL);
		add_slat_sample(td, mtime_since(&io_u->start_time, &io_u->issue_time));
		if (td->cur_depth < td->aio_depth) {

		ret = io_u_getevents(td, min_evts, td->cur_depth, timeout);

		bytes_done = ios_completed(td, ret);

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one which may look
		 */
		gettimeofday(&e, NULL);
		usec = utime_since(&s, &e);

		rate_throttle(td, usec, bytes_done);

		if (check_min_rate(td, &e)) {
			td->error = ENODATA;

		if (runtime_exceeded(td, &e))

		usec_sleep(td->thinktime);

		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks % td->fsync_blocks) == 0)

	cleanup_pending_aio(td);

	if (should_fsync(td))
static void cleanup_aio(struct thread_data *td)
	io_destroy(td->aio_ctx);

	free(td->aio_events);

static int init_aio(struct thread_data *td)
	if (io_queue_init(td->aio_depth, &td->aio_ctx)) {

	td->aio_events = malloc(td->aio_depth * sizeof(struct io_event));
static void cleanup_io_u(struct thread_data *td)
	struct list_head *entry, *n;

	list_for_each_safe(entry, n, &td->io_u_freelist) {
		io_u = list_entry(entry, struct io_u, list);

		list_del(&io_u->list);

	if (td->mem_type == MEM_MALLOC)
		free(td->orig_buffer);
	else if (td->mem_type == MEM_SHM) {
		struct shmid_ds sbuf;

		shmdt(td->orig_buffer);
		shmctl(td->shm_id, IPC_RMID, &sbuf);
	} else if (td->mem_type == MEM_MMAP)
		munmap(td->orig_buffer, td->orig_buffer_size);

		fprintf(stderr, "Bad memory type %d\n", td->mem_type);

	td->orig_buffer = NULL;
static int init_io_u(struct thread_data *td)
	max_units = td->aio_depth;

	td->orig_buffer_size = td->max_bs * max_units + MASK;

	if (td->mem_type == MEM_MALLOC)
		td->orig_buffer = malloc(td->orig_buffer_size);
	else if (td->mem_type == MEM_SHM) {
		td->shm_id = shmget(IPC_PRIVATE, td->orig_buffer_size, IPC_CREAT | 0600);
		if (td->shm_id < 0) {

		td->orig_buffer = shmat(td->shm_id, NULL, 0);
		if (td->orig_buffer == (void *) -1) {

			td->orig_buffer = NULL;

	} else if (td->mem_type == MEM_MMAP) {
		td->orig_buffer = mmap(NULL, td->orig_buffer_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (td->orig_buffer == MAP_FAILED) {

			td->orig_buffer = NULL;

	INIT_LIST_HEAD(&td->io_u_freelist);
	INIT_LIST_HEAD(&td->io_u_busylist);
	INIT_LIST_HEAD(&td->io_hist_list);

	p = ALIGN(td->orig_buffer);
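	/*
	 * carve the aligned allocation into max_bs-sized pieces, one per
	 * io_u; each buffer stays aligned as long as max_bs is a multiple
	 * of the alignment
	 */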
	for (i = 0; i < max_units; i++) {
		io_u = malloc(sizeof(*io_u));
		memset(io_u, 0, sizeof(*io_u));
		INIT_LIST_HEAD(&io_u->list);

		io_u->buf = p + td->max_bs * i;
		list_add(&io_u->list, &td->io_u_freelist);
static int create_file(struct thread_data *td)
	unsigned long long left;

	/*
	 * unless specifically asked for overwrite, let normal io extend it
	 */
	if (td_write(td) && !td->overwrite)

	if (!td->file_size) {
		fprintf(stderr, "Need size for create\n");

	printf("Client%d: Laying out IO file\n", td->thread_number);

	td->fd = open(td->file_name, O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (ftruncate(td->fd, td->file_size) == -1) {

	td->io_size = td->file_size;
	b = malloc(td->max_bs);
	memset(b, 0, td->max_bs);

	left = td->file_size;

		r = write(td->fd, b, bs);

		if (r == (int) bs) {

	if (td->create_fsync)

static int file_exists(struct thread_data *td)
	if (stat(td->file_name, &st) != -1)

	return errno != ENOENT;
static int file_size(struct thread_data *td)
	if (fstat(td->fd, &st) == -1) {

	if (!td->file_size || td->file_size > st.st_size)
		td->file_size = st.st_size;

		td->file_size = 1024 * 1024 * 1024;

static int bdev_size(struct thread_data *td)
	if (ioctl(td->fd, BLKGETSIZE64, &bytes) < 0) {

	if (!td->file_size || (td->file_size > bytes))
		td->file_size = bytes;

static int get_file_size(struct thread_data *td)
	if (td->filetype == FIO_TYPE_FILE)
		ret = file_size(td);

		ret = bdev_size(td);

	if (td->file_offset > td->file_size) {
		fprintf(stderr, "Client%d: offset larger than length\n", td->thread_number);

	td->io_size = td->file_size - td->file_offset;
	if (td->io_size == 0) {
		fprintf(stderr, "Client%d: no io blocks\n", td->thread_number);
static int setup_file_mmap(struct thread_data *td)
	if (td->verify != VERIFY_NONE)

	td->mmap = mmap(NULL, td->file_size, flags, MAP_SHARED, td->fd, td->file_offset);
	if (td->mmap == MAP_FAILED) {

	if (td->invalidate_cache) {
		if (madvise(td->mmap, td->file_size, MADV_DONTNEED) < 0) {

	if (td->sequential) {
		if (madvise(td->mmap, td->file_size, MADV_SEQUENTIAL) < 0) {

		if (madvise(td->mmap, td->file_size, MADV_RANDOM) < 0) {

static int setup_file_plain(struct thread_data *td)
	if (td->invalidate_cache) {
		if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_DONTNEED) < 0) {

	if (td->sequential) {
		if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_SEQUENTIAL) < 0) {

		if (fadvise(td->fd, td->file_offset, td->file_size, POSIX_FADV_RANDOM) < 0) {

static int setup_file(struct thread_data *td)
	if (!file_exists(td)) {
		if (!td->create_file) {

		if (create_file(td))

		td->fd = open(td->file_name, flags | O_RDONLY);

		td->fd = open(td->file_name, flags | O_CREAT, 0600);

	if (get_file_size(td))

	if (td_write(td) && ftruncate(td->fd, td->file_size) == -1) {

		return setup_file_plain(td);

	return setup_file_mmap(td);
static void clear_io_state(struct thread_data *td)
	lseek(td->fd, 0, SEEK_SET);

	td->stat_io_bytes = 0;
	td->this_io_bytes = 0;

	memset(td->file_map, 0, td->num_maps * sizeof(long));

static void update_rusage_stat(struct thread_data *td)
	getrusage(RUSAGE_SELF, &td->ru_end);

	td->usr_time += mtime_since(&td->ru_start.ru_utime, &td->ru_end.ru_utime);
	td->sys_time += mtime_since(&td->ru_start.ru_stime, &td->ru_end.ru_stime);
	td->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw - (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);

	memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
static void *thread_main(void *data)
	struct thread_data *td = data;

	if (sched_setaffinity(td->pid, sizeof(td->cpumask), &td->cpumask) == -1) {

	if (td->use_aio && init_aio(td))

	if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {

	sem_post(&startup_sem);
	sem_wait(&td->mutex);

	if (!td->create_serialize && setup_file(td))

	if (init_random_state(td))

	while (td->loops--) {
		getrusage(RUSAGE_SELF, &td->ru_start);
		gettimeofday(&td->start, NULL);
		memcpy(&td->stat_sample_time, &td->start, sizeof(td->start));

		memcpy(&td->lastrate, &td->stat_sample_time, sizeof(td->lastrate));

		prune_io_piece_log(td);

		td->runtime += mtime_since_now(&td->start);
		update_rusage_stat(td);

		if (td->verify == VERIFY_NONE)

		do_async_verify(td);

	finish_log(td, td->bw_log, "bw");

	finish_log(td, td->lat_log, "lat");

	if (exitall_on_terminate)
		terminate_threads(td->groupid);

	munmap(td->mmap, td->file_size);

	sem_post(&startup_sem);
	sem_wait(&td->mutex);

	td_set_runstate(td, TD_EXITED);
static void *fork_main(int shm_id, int offset)
	struct thread_data *td;

	data = shmat(shm_id, NULL, 0);
	if (data == (void *) -1) {

	td = data + offset * sizeof(struct thread_data);
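/*
 * Derive the mean and sample standard deviation from the running sum
 * (val) and sum of squares (val_sq): mean = val / n and
 * dev = sqrt((val_sq - n * mean^2) / (n - 1)).
 */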
static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
		    double *mean, double *dev)
	if (is->samples == 0)

	n = (double) is->samples;
	*mean = (double) is->val / n;
	*dev = sqrt(((double) is->val_sq - n * (*mean) * (*mean)) / (n - 1));
static void show_thread_status(struct thread_data *td,
			       struct group_run_stats *rs)
	int prio, prio_class;
	unsigned long min, max, bw = 0;
	double mean, dev, usr_cpu, sys_cpu;

	if (!td->io_bytes && !td->error)

	bw = td->io_bytes / td->runtime;

	prio = td->ioprio & 0xff;
	prio_class = td->ioprio >> IOPRIO_CLASS_SHIFT;

	printf("Client%d (g=%d): err=%2d, io=%6luMiB, bw=%6luKiB/s, runt=%6lumsec\n", td->thread_number, td->groupid, td->error, td->io_bytes >> 20, bw, td->runtime);

	if (calc_lat(&td->slat_stat, &min, &max, &mean, &dev))
		printf(" slat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
	if (calc_lat(&td->clat_stat, &min, &max, &mean, &dev))
		printf(" clat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
	if (calc_lat(&td->bw_stat, &min, &max, &mean, &dev)) {

		p_of_agg = mean * 100 / (double) rs->agg[td->ddir];
		printf(" bw (KiB/s) : min=%5lu, max=%5lu, per=%3.2f%%, avg=%5.02f, dev=%5.02f\n", min, max, p_of_agg, mean, dev);

	usr_cpu = (double) td->usr_time * 100 / (double) td->runtime;
	sys_cpu = (double) td->sys_time * 100 / (double) td->runtime;

	printf(" cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, td->ctx);
static void print_thread_status(struct thread_data *td, int nr_running,
				int t_rate, int m_rate)
	printf("Threads now running: %d", nr_running);
	if (m_rate || t_rate)
		printf(", commitrate %d/%dKiB/sec", t_rate, m_rate);
	printf(" : [%s]\r", run_str);

static void check_str_update(struct thread_data *td, int n, int t, int m)
	char c = run_str[td->thread_number - 1];

	if (td->runstate == td->old_runstate)

	switch (td->runstate) {
	case TD_NOT_CREATED:

		printf("state %d\n", td->runstate);

	run_str[td->thread_number - 1] = c;
	print_thread_status(td, n, t, m);
	td->old_runstate = td->runstate;
static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		check_str_update(td, *nr_running, *t_rate, *m_rate);

		if (td->runstate != TD_EXITED)

		td_set_runstate(td, TD_REAPED);

		if (td->use_thread) {
			if (pthread_join(td->thread, (void **) &ret))
				perror("thread_join");

			waitpid(td->pid, NULL, 0);

		(*m_rate) -= td->ratemin;
		(*t_rate) -= td->rate;
		check_str_update(td, *nr_running, *t_rate, *m_rate);
static void run_threads(char *argv[])
	struct timeval genesis;
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate, nr_started;

	printf("Starting %d threads\n", thread_number);

	signal(SIGINT, sig_handler);

	todo = thread_number;

	m_rate = t_rate = 0;

	for (i = 0; i < thread_number; i++) {
		if (!td->create_serialize)

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_file(td)) {
			td_set_runstate(td, TD_REAPED);

	gettimeofday(&genesis, NULL);

	/*
	 * create threads (TD_NOT_CREATED -> TD_CREATED)
	 */
	for (i = 0; i < thread_number; i++) {
		if (td->runstate != TD_NOT_CREATED)

		/*
		 * never got a chance to start, killed by other
		 * thread for some reason
		 */
		if (td->terminate) {

		if (td->start_delay) {
			spent = mtime_since_now(&genesis);

			if (td->start_delay * 1000 > spent)

		if (td->stonewall && (nr_started || nr_running))

		td_set_runstate(td, TD_CREATED);
		check_str_update(td, nr_running, t_rate, m_rate);
		sem_init(&startup_sem, 1, 1);

		if (td->use_thread) {
			if (pthread_create(&td->thread, NULL, thread_main, td)) {
				perror("thread_create");

		sem_wait(&startup_sem);

			fork_main(shm_id, i);
	/*
	 * start created threads (TD_CREATED -> TD_RUNNING)
	 */
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		if (td->runstate != TD_CREATED)

		td_set_runstate(td, TD_RUNNING);

		m_rate += td->ratemin;

		check_str_update(td, nr_running, t_rate, m_rate);
		sem_post(&td->mutex);

	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];

		if (td->runstate != TD_RUNNING &&
		    td->runstate != TD_VERIFYING)

		check_str_update(td, nr_running, t_rate, m_rate);

	reap_threads(&nr_running, &t_rate, &m_rate);

	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
static void show_group_stats(struct group_run_stats *rs, int id)
	printf("\nRun status group %d:\n", id);

	if (rs->max_run[DDIR_READ])
		printf(" READ: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", rs->io_mb[0], rs->agg[0], rs->min_bw[0], rs->max_bw[0], rs->min_run[0], rs->max_run[0]);
	if (rs->max_run[DDIR_WRITE])
		printf(" WRITE: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", rs->io_mb[1], rs->agg[1], rs->min_bw[1], rs->max_bw[1], rs->min_run[1], rs->max_run[1]);
static void show_run_stats(void)
	struct group_run_stats *runstats, *rs;
	struct thread_data *td;

	runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));

	for (i = 0; i < groupid + 1; i++) {
		memset(rs, 0, sizeof(*rs));
		rs->min_bw[0] = rs->min_run[0] = ~0UL;
		rs->min_bw[1] = rs->min_run[1] = ~0UL;

	for (i = 0; i < thread_number; i++) {
		unsigned long bw = 0;

		rs = &runstats[td->groupid];

		if (td->runtime < rs->min_run[td->ddir])
			rs->min_run[td->ddir] = td->runtime;
		if (td->runtime > rs->max_run[td->ddir])
			rs->max_run[td->ddir] = td->runtime;

		bw = td->io_bytes / td->runtime;
		if (bw < rs->min_bw[td->ddir])
			rs->min_bw[td->ddir] = bw;
		if (bw > rs->max_bw[td->ddir])
			rs->max_bw[td->ddir] = bw;

		rs->io_mb[td->ddir] += td->io_bytes >> 20;

	for (i = 0; i < groupid + 1; i++) {
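		/*
		 * aggregate bandwidth in KiB/sec: io_mb is in MiB, so *1024
		 * converts to KiB and *1000 converts the msec runtime to
		 * seconds
		 */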
		rs->agg[0] = (rs->io_mb[0] * 1024 * 1000) / rs->max_run[0];

		rs->agg[1] = (rs->io_mb[1] * 1024 * 1000) / rs->max_run[1];

	for (i = 0; i < thread_number; i++) {
		rs = &runstats[td->groupid];

		show_thread_status(td, rs);

	for (i = 0; i < groupid + 1; i++)
		show_group_stats(&runstats[i], i);
int main(int argc, char *argv[])
	memset(run_str, 0, sizeof(run_str));

	if (parse_options(argc, argv))

	if (!thread_number) {
		printf("Nothing to do\n");