[PATCH] fio: correct latency graph
[disktools.git] / fio.c
... / ...
CommitLineData
1/*
2 * fio - the flexible io tester
3 *
4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#include <stdio.h>
22#include <stdlib.h>
23#include <unistd.h>
24#include <fcntl.h>
25#include <string.h>
26#include <errno.h>
27#include <signal.h>
28#include <time.h>
29#include <ctype.h>
30#include <sched.h>
31#include <libaio.h>
32#include <math.h>
33#include <limits.h>
34#include <sys/time.h>
35#include <sys/types.h>
36#include <sys/stat.h>
37#include <sys/wait.h>
38#include <semaphore.h>
39#include <sys/ipc.h>
40#include <sys/shm.h>
41#include <asm/unistd.h>
42
43#include "list.h"
44
45#define MAX_JOBS (1024)
46
47/*
48 * assume we don't have _get either, if _set isn't defined
49 */
50#ifndef __NR_ioprio_set
51#if defined(__i386__)
52#define __NR_ioprio_set 289
53#define __NR_ioprio_get 290
54#elif defined(__powerpc__) || defined(__powerpc64__)
55#define __NR_ioprio_set 273
56#define __NR_ioprio_get 274
57#elif defined(__x86_64__)
58#define __NR_ioprio_set 251
59#define __NR_ioprio_get 252
60#elif defined(__ia64__)
61#define __NR_ioprio_set 1274
62#define __NR_ioprio_get 1275
63#elif defined(__alpha__)
64#define __NR_ioprio_set 442
65#define __NR_ioprio_get 443
66#elif defined(__s390x__) || defined(__s390__)
67#define __NR_ioprio_set 282
68#define __NR_ioprio_get 283
69#else
70#error "Unsupported arch"
71#endif
72#endif
73
74#ifndef __NR_fadvise64
75#if defined(__i386__)
76#define __NR_fadvise64 250
77#elif defined(__powerpc__) || defined(__powerpc64__)
78#define __NR_fadvise64 233
79#elif defined(__x86_64__)
80#define __NR_fadvise64 221
81#elif defined(__ia64__)
82#define __NR_fadvise64 1234
83#elif defined(__alpha__)
84#define __NR_fadvise64 413
85#elif defined(__s390x__) || defined(__s390__)
86#define __NR_fadvise64 253
87#else
88#error "Unsupported arch"
89#endif
90#endif
91
92static int ioprio_set(int which, int who, int ioprio)
93{
94 return syscall(__NR_ioprio_set, which, who, ioprio);
95}
96
97/*
98 * we want fadvise64 really, but it's so tangled... later
99 */
100static int fadvise(int fd, loff_t offset, size_t len, int advice)
101{
102#if 0
103 return syscall(__NR_fadvise64, fd, offset, offset >> 32, len, advice);
104#else
105 return posix_fadvise(fd, (off_t) offset, len, advice);
106#endif
107}
108
/* 'who' values for ioprio_set(2) */
enum {
	IOPRIO_WHO_PROCESS = 1,
	IOPRIO_WHO_PGRP,
	IOPRIO_WHO_USER,
};

/* the priority class lives in the top bits of the ioprio value */
#define IOPRIO_CLASS_SHIFT	13

/* alignment mask for io buffers (4 KiB - 1) */
#define MASK	(4095)

/* built-in option defaults */
#define DEF_BS		(4096)
#define DEF_TIMEOUT	(30)
#define DEF_RATE_CYCLE	(1000)
#define DEF_ODIRECT	(1)
#define DEF_SEQUENTIAL	(1)
#define DEF_RAND_REPEAT	(1)
#define DEF_OVERWRITE	(0)
#define DEF_CREATE	(1)
#define DEF_INVALIDATE	(1)
#define DEF_SYNCIO	(0)
#define DEF_RANDSEED	(0xb1899bedUL)
#define DEF_BWAVGTIME	(500)

/* round buf up to the next (MASK + 1) boundary */
#define ALIGN(buf)	(char *) (((unsigned long) (buf) + MASK) & ~(MASK))
133
/* global option state, filled in by command line / ini parsing */
static int repeatable = DEF_RAND_REPEAT;	/* reuse the fixed random seed */
static int rate_quit = 1;			/* kill all jobs when min rate fails */
static int write_lat_log;
static int write_bw_log;
static int exitall_on_terminate;

static int thread_number;			/* jobs allocated so far */
static char *ini_file;

static int max_jobs = MAX_JOBS;

static char run_str[MAX_JOBS + 1];		/* one status char per job */

static int shm_id;				/* segment holding the threads array */
148
/* io direction */
enum {
	DDIR_READ = 0,
	DDIR_WRITE,
};

/*
 * thread life cycle
 */
enum {
	TD_NOT_CREATED = 0,
	TD_CREATED,
	TD_STARTED,
	TD_EXITED,
	TD_REAPED,
};

/* backing store for the io buffers */
enum {
	MEM_MALLOC,
	MEM_SHM,
};
169
170/*
171 * The io unit
172 */
/*
 * The io unit
 */
struct io_u {
	struct iocb iocb;		/* aio control block */
	struct timeval start_time;	/* when the io_u was prepared */
	struct timeval issue_time;	/* when io_submit returned */

	char *buf;			/* data buffer (inside td->orig_buffer) */
	unsigned int buflen;		/* transfer length in bytes */
	unsigned long long offset;	/* file offset in bytes */

	struct list_head list;		/* on the free or busy list */
};
184
/* running aggregate for min/max/mean/stddev, reduced by calc_lat() */
struct io_stat {
	unsigned long val;	/* sum of samples */
	unsigned long val_sq;	/* sum of squared samples */
	unsigned long max_val;
	unsigned long min_val;
	unsigned long samples;
};

/* one logged data point: msec since job start, and the value */
struct io_sample {
	unsigned long time;
	unsigned long val;
};

/* growable sample array, written to disk by finish_log() */
struct io_log {
	unsigned long nr_samples;
	unsigned long max_samples;
	struct io_sample *log;
};
203
/* direction helper; fsync only matters for buffered (non-O_DIRECT) writes */
#define td_read(td) ((td)->ddir == DDIR_READ)
#define should_fsync(td) (!td_read(td) && !(td)->odirect)
206
/*
 * Per-job state.  One instance per job lives in the shared memory
 * segment so the parent can read stats after the child exits.
 */
struct thread_data {
	char file_name[256];
	int thread_number;
	int error;			/* errno-style error, 0 == ok */
	int fd;
	pid_t pid;
	char *orig_buffer;		/* unaligned base of the io buffer area */
	volatile int terminate;		/* set by the signal handler */
	volatile int runstate;		/* TD_* life cycle state */
	unsigned int ddir;		/* DDIR_READ or DDIR_WRITE */
	unsigned int ioprio;
	unsigned int sequential;
	unsigned int bs;
	unsigned int min_bs;
	unsigned int max_bs;
	unsigned int odirect;
	unsigned int thinktime;		/* usec pause between ios */
	unsigned int fsync_blocks;	/* fsync every n completed blocks */
	unsigned int start_delay;
	unsigned int timeout;		/* run time limit, seconds */
	unsigned int use_aio;
	unsigned int create_file;
	unsigned int overwrite;
	unsigned int invalidate_cache;
	unsigned int bw_avg_time;	/* min msec between bw samples */
	unsigned long long file_size;
	unsigned long long file_offset;
	unsigned int sync_io;
	unsigned int mem_type;		/* MEM_MALLOC or MEM_SHM */
	cpu_set_t cpumask;

	struct drand48_data bsrange_state;

	int shm_id;

	off_t cur_off;			/* sync engine: current fd position */

	io_context_t aio_ctx;
	unsigned int aio_depth;
	struct io_event *aio_events;

	unsigned int cur_depth;		/* ios currently in flight */
	struct list_head io_u_freelist;
	struct list_head io_u_busylist;

	unsigned int rate;
	unsigned int ratemin;
	unsigned int ratecycle;
	unsigned long rate_usec_cycle;
	long rate_pending_usleep;
	unsigned long rate_kb;
	struct timeval lastrate;

	unsigned long runtime;		/* msec (set from mtime_since_now()) */
	unsigned long kb;		/* total KiB to transfer */
	unsigned long io_blocks;
	unsigned long io_kb;		/* KiB completed so far */
	unsigned long last_kb;		/* sequential position in KiB */
	sem_t mutex;			/* parent releases child through this */
	struct drand48_data random_state;

	/*
	 * bandwidth and latency stats
	 */
	struct io_stat clat_stat;		/* completion latency */
	struct io_stat slat_stat;		/* submission latency */

	struct io_stat bw_stat;			/* bandwidth stats */
	unsigned long stat_io_kb;
	struct timeval stat_sample_time;

	struct io_log *lat_log;
	struct io_log *bw_log;

	struct timeval start;		/* when this job started running io */
};
283
static struct thread_data *threads;	/* job array, lives in shared memory */
static struct thread_data def_thread;	/* holds the global/default options */

static sem_t startup_sem;		/* children post here once initialized */
288
289static void sig_handler(int sig)
290{
291 int i;
292
293 for (i = 0; i < thread_number; i++) {
294 struct thread_data *td = &threads[i];
295
296 td->terminate = 1;
297 td->start_delay = 0;
298 }
299}
300
301static int init_random_state(struct thread_data *td)
302{
303 unsigned long seed = DEF_RANDSEED;
304
305 srand48_r(seed, &td->bsrange_state);
306
307 if (td->sequential)
308 return 0;
309
310 if (!repeatable) {
311 int fd = open("/dev/random", O_RDONLY);
312
313 if (fd == -1) {
314 td->error = errno;
315 return 1;
316 }
317
318 if (read(fd, &seed, sizeof(seed)) < (int) sizeof(seed)) {
319 td->error = EIO;
320 close(fd);
321 return 1;
322 }
323
324 close(fd);
325 }
326
327 srand48_r(seed, &td->random_state);
328 return 0;
329}
330
/*
 * Microseconds elapsed between two timevals (assumes e >= s).
 */
static unsigned long utime_since(struct timeval *s, struct timeval *e)
{
	double sec = e->tv_sec - s->tv_sec;
	double usec = e->tv_usec - s->tv_usec;

	/* borrow a second when the usec delta went negative */
	if (sec > 0 && usec < 0) {
		sec -= 1;
		usec += 1000000;
	}

	return sec * 1000000.0 + usec;
}
346
/*
 * Milliseconds elapsed between two timevals (assumes e >= s).
 */
static unsigned long mtime_since(struct timeval *s, struct timeval *e)
{
	double sec = e->tv_sec - s->tv_sec;
	double usec = e->tv_usec - s->tv_usec;

	/* borrow a second when the usec delta went negative */
	if (sec > 0 && usec < 0) {
		sec -= 1;
		usec += 1000000;
	}

	return sec * 1000.0 + usec / 1000.0;
}
363
364static unsigned long mtime_since_now(struct timeval *s)
365{
366 struct timeval t;
367
368 gettimeofday(&t, NULL);
369 return mtime_since(s, &t);
370}
371
/*
 * Convert a timeval to whole milliseconds since the epoch.
 */
static inline unsigned long msec_now(struct timeval *s)
{
	unsigned long ms = s->tv_usec / 1000;

	return ms + s->tv_sec * 1000;
}
376
377static unsigned long long get_next_offset(struct thread_data *td)
378{
379 unsigned long long kb;
380 long r;
381
382 if (!td->sequential) {
383 lrand48_r(&td->random_state, &r);
384 kb = (1+(double) (td->kb-1) * r / (RAND_MAX+1.0));
385 } else
386 kb = td->last_kb;
387
388 return (kb << 10) + td->file_offset;
389}
390
391static unsigned int get_next_buflen(struct thread_data *td)
392{
393 unsigned int buflen;
394 long r;
395
396 if (td->min_bs == td->max_bs)
397 buflen = td->min_bs;
398 else {
399 lrand48_r(&td->bsrange_state, &r);
400 buflen = (1 + (double) (td->max_bs - 1) * r / (RAND_MAX + 1.0));
401 buflen = (buflen + td->min_bs - 1) & ~(td->min_bs - 1);
402 }
403
404 if (buflen > ((td->kb - td->io_kb) << 10))
405 buflen = (td->kb - td->io_kb) << 10;
406
407 td->last_kb += buflen >> 10;
408 return buflen;
409}
410
411static inline void add_stat_sample(struct thread_data *td, struct io_stat *is,
412 unsigned long val)
413{
414 if (val > is->max_val)
415 is->max_val = val;
416 if (val < is->min_val)
417 is->min_val = val;
418
419 is->val += val;
420 is->val_sq += val * val;
421 is->samples++;
422}
423
424static void add_log_sample(struct thread_data *td, struct io_log *log,
425 unsigned long val)
426{
427 if (log->nr_samples == log->max_samples) {
428 int new_size = sizeof(struct io_sample) * log->max_samples * 2;
429
430 log->log = realloc(log->log, new_size);
431 log->max_samples <<= 1;
432 }
433
434 log->log[log->nr_samples].val = val;
435 log->log[log->nr_samples].time = mtime_since_now(&td->start);
436 log->nr_samples++;
437}
438
439static void add_clat_sample(struct thread_data *td, unsigned long msec)
440{
441 add_stat_sample(td, &td->clat_stat, msec);
442
443 if (td->lat_log)
444 add_log_sample(td, td->lat_log, msec);
445}
446
/* record a submission latency sample (aio path only); stats-only, not logged */
static void add_slat_sample(struct thread_data *td, unsigned long msec)
{
	add_stat_sample(td, &td->slat_stat, msec);
}
451
452static void add_bw_sample(struct thread_data *td, unsigned long msec)
453{
454 unsigned long spent = mtime_since_now(&td->stat_sample_time);
455 unsigned long rate;
456
457 if (spent < td->bw_avg_time)
458 return;
459
460 rate = ((td->io_kb - td->stat_io_kb) * 1024) / spent;
461 add_stat_sample(td, &td->bw_stat, rate);
462
463 if (td->bw_log)
464 add_log_sample(td, td->bw_log, rate);
465
466 gettimeofday(&td->stat_sample_time, NULL);
467 td->stat_io_kb = td->io_kb;
468}
469
/*
 * Sleep for usec microseconds, restarting nanosleep after signal
 * interruption until the full interval has elapsed.  The interval is
 * split into seconds and nanoseconds: the original stuffed the whole
 * value into tv_nsec, which exceeds the valid range (and fails with
 * EINVAL) for sleeps of one second or more.  The retry loop also has
 * to consider rem.tv_sec, not just rem.tv_nsec.
 */
static void usec_sleep(int usec)
{
	struct timespec req = {
		.tv_sec = usec / 1000000,
		.tv_nsec = (usec % 1000000) * 1000,
	};
	struct timespec rem;

	do {
		rem.tv_sec = rem.tv_nsec = 0;
		if (nanosleep(&req, &rem) == 0)
			break;
		if (!rem.tv_sec && !rem.tv_nsec)
			break;

		req = rem;
	} while (1);
}
484
485static void rate_throttle(struct thread_data *td, unsigned long time_spent,
486 unsigned int bytes)
487{
488 unsigned long usec_cycle;
489
490 if (!td->rate)
491 return;
492
493 usec_cycle = td->rate_usec_cycle * (bytes / td->min_bs);
494
495 if (time_spent < usec_cycle) {
496 unsigned long s = usec_cycle - time_spent;
497
498 td->rate_pending_usleep += s;
499 if (td->rate_pending_usleep >= 100000) {
500 usec_sleep(td->rate_pending_usleep);
501 td->rate_pending_usleep = 0;
502 }
503 } else {
504 long overtime = time_spent - usec_cycle;
505
506 td->rate_pending_usleep -= overtime;
507 }
508}
509
/*
 * Verify the configured minimum rate is being sustained.  Returns 1
 * (after optionally terminating all jobs) when the check fails, else 0.
 */
static int check_min_rate(struct thread_data *td, struct timeval *now)
{
	unsigned long spent;
	unsigned long rate;

	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)
		return 0;

	/*
	 * if rate blocks is set, sample is running
	 */
	if (td->rate_kb) {
		spent = mtime_since(&td->lastrate, now);
		if (spent < td->ratecycle)
			return 0;

		/* KiB completed since the last sample window, as KiB/sec */
		rate = ((td->io_kb - td->rate_kb) * 1024) / spent;
		if (rate < td->ratemin) {
			printf("Client%d: min rate %d not met, got %ldKiB/sec\n", td->thread_number, td->ratemin, rate);
			if (rate_quit)
				sig_handler(0);
			return 1;
		}
	}

	/* start the next sample window from here */
	td->rate_kb = td->io_kb;
	memcpy(&td->lastrate, now, sizeof(*now));
	return 0;
}
542
543static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
544{
545 if (mtime_since(&td->start, t) >= td->timeout * 1000)
546 return 1;
547
548 return 0;
549}
550
/*
 * Return a finished io_u from the busy list to the free list and drop
 * the in-flight depth count.
 */
static void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_freelist);
	td->cur_depth--;
}
557
/*
 * Grab a free io_u, assign it the next offset/length, and (for aio)
 * prep the iocb.  Returns NULL if the free list is empty or no blocks
 * remain.  Order matters: get_next_offset() must run before
 * get_next_buflen(), since the latter advances td->last_kb which the
 * sequential offset is derived from.
 */
static struct io_u *get_io_u(struct thread_data *td)
{
	struct io_u *io_u;
	unsigned int len;
	unsigned long long off;

	if (list_empty(&td->io_u_freelist))
		return NULL;

	off = get_next_offset(td);
	len = get_next_buflen(td);
	if (!len)
		return NULL;

	io_u = list_entry(td->io_u_freelist.next, struct io_u, list);
	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_busylist);

	io_u->offset = off;
	io_u->buflen = len;

	if (td->use_aio) {
		if (td_read(td))
			io_prep_pread(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
		else
			io_prep_pwrite(&io_u->iocb, td->fd, io_u->buf, io_u->buflen, io_u->offset);
	}

	gettimeofday(&io_u->start_time, NULL);
	td->cur_depth++;
	return io_u;
}
590
/*
 * Synchronous engine: issue one io_u at a time with plain read(2)/
 * write(2), seeking only when the next offset isn't where the fd
 * already sits.  Runs until td->kb KiB are done, an error occurs, the
 * timeout expires, or termination is requested.
 */
static void do_sync_io(struct thread_data *td)
{
	unsigned long msec, usec;
	struct timeval e;

	td->cur_off = 0;

	for (td->io_kb = 0; td->io_kb < td->kb;) {
		struct io_u *io_u;
		int ret;

		if (td->terminate)
			break;

		io_u = get_io_u(td);
		if (!io_u)
			break;

		/* skip the lseek when the fd is already positioned right */
		if (td->cur_off != io_u->offset) {
			if (lseek(td->fd, io_u->offset, SEEK_SET) == -1) {
				td->error = errno;
				break;
			}
		}

		if (td_read(td))
			ret = read(td->fd, io_u->buf, io_u->buflen);
		else
			ret = write(td->fd, io_u->buf, io_u->buflen);

		/* a short transfer or error ends the run */
		if (ret < (int) io_u->buflen) {
			if (ret == -1)
				td->error = errno;
			break;
		}

		td->io_blocks++;
		td->io_kb += io_u->buflen >> 10;
		td->cur_off = io_u->offset + io_u->buflen;

		gettimeofday(&e, NULL);

		usec = utime_since(&io_u->start_time, &e);

		rate_throttle(td, usec, io_u->buflen);

		if (check_min_rate(td, &e)) {
			td->error = ENODATA;
			break;
		}

		msec = usec / 1000;
		add_clat_sample(td, msec);
		add_bw_sample(td, msec);

		if (runtime_exceeded(td, &e))
			break;

		put_io_u(td, io_u);

		if (td->thinktime)
			usec_sleep(td->thinktime);

		/* periodic fsync for buffered writes */
		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks % td->fsync_blocks) == 0)
			fsync(td->fd);
	}

	if (should_fsync(td))
		fsync(td->fd);
}
662
663static int io_u_queue(struct thread_data *td, struct io_u *io_u)
664{
665 struct iocb *iocb = &io_u->iocb;
666 int ret;
667
668 do {
669 ret = io_submit(td->aio_ctx, 1, &iocb);
670 if (ret == 1)
671 return 0;
672 else if (ret == EAGAIN)
673 usleep(100);
674 else if (ret == EINTR)
675 continue;
676 else
677 break;
678 } while (1);
679
680 return ret;
681}
682
/* iocb->data carries an issue timestamp; ev->obj points back at the io_u */
#define iocb_time(iocb) ((unsigned long) (iocb)->data)
#define ev_to_iou(ev) (struct io_u *) ((unsigned long) (ev)->obj)
685
/*
 * Account nr completed aio events: bump block/KiB counters, record the
 * completion latency (issue time -> now) and bandwidth, and return each
 * io_u to the free list.  Returns the total bytes completed.
 */
static int ios_completed(struct thread_data *td, int nr)
{
	unsigned long msec;
	struct io_u *io_u;
	struct timeval e;
	int i, bytes_done;

	gettimeofday(&e, NULL);

	for (i = 0, bytes_done = 0; i < nr; i++) {
		io_u = ev_to_iou(td->aio_events + i);

		td->io_blocks++;
		td->io_kb += io_u->buflen >> 10;
		bytes_done += io_u->buflen;

		msec = mtime_since(&io_u->issue_time, &e);

		add_clat_sample(td, msec);
		add_bw_sample(td, msec);

		put_io_u(td, io_u);
	}

	return bytes_done;
}
712
/*
 * Drain the aio context on shutdown: reap anything already complete,
 * cancel what can be cancelled, then block for whatever remains so no
 * in-flight io outlives the buffers it targets.
 */
static void cleanup_pending_aio(struct thread_data *td)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
	struct list_head *entry, *n;
	struct io_u *io_u;
	int r;

	/*
	 * get immediately available events, if any
	 */
	r = io_getevents(td->aio_ctx, 0, td->cur_depth, td->aio_events, &ts);
	if (r > 0)
		ios_completed(td, r);

	/*
	 * now cancel remaining active events
	 */
	list_for_each_safe(entry, n, &td->io_u_busylist) {
		io_u = list_entry(entry, struct io_u, list);

		r = io_cancel(td->aio_ctx, &io_u->iocb, td->aio_events);
		if (!r)
			put_io_u(td, io_u);
	}

	/* wait out anything that could not be cancelled */
	if (td->cur_depth) {
		r = io_getevents(td->aio_ctx, td->cur_depth, td->cur_depth, td->aio_events, NULL);
		if (r > 0)
			ios_completed(td, r);
	}
}
744
/*
 * Async engine: keep up to aio_depth ios in flight.  While below the
 * depth limit, events are polled without blocking; at the limit we
 * block for at least one completion.  Rate throttling and runtime
 * checks run on each batch of completions.
 */
static void do_async_io(struct thread_data *td)
{
	struct timeval s, e;
	unsigned long usec;

	for (td->io_kb = 0; td->io_kb < td->kb;) {
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 0};
		struct timespec *timeout;
		int ret, min_evts = 0;
		struct io_u *io_u;
		unsigned int bytes_done;

		if (td->terminate)
			break;

		io_u = get_io_u(td);
		if (!io_u)
			break;

		memcpy(&s, &io_u->start_time, sizeof(s));

		ret = io_u_queue(td, io_u);
		if (ret) {
			put_io_u(td, io_u);
			td->error = ret;
			break;
		}

		/* submission latency: prep time -> submit returned */
		gettimeofday(&io_u->issue_time, NULL);
		add_slat_sample(td, mtime_since(&io_u->start_time, &io_u->issue_time));
		if (td->cur_depth < td->aio_depth) {
			/* room for more: just poll for completions */
			timeout = &ts;
			min_evts = 0;
		} else {
			/* queue full: block until at least one finishes */
			timeout = NULL;
			min_evts = 1;
		}

		ret = io_getevents(td->aio_ctx, min_evts, td->cur_depth, td->aio_events, timeout);
		if (ret < 0) {
			td->error = errno;
			break;
		} else if (!ret)
			continue;

		bytes_done = ios_completed(td, ret);

		/*
		 * the rate is batched for now, it should work for batches
		 * of completions except the very first one which may look
		 * a little bursty
		 */
		gettimeofday(&e, NULL);
		usec = utime_since(&s, &e);

		rate_throttle(td, usec, bytes_done);

		if (check_min_rate(td, &e)) {
			td->error = ENODATA;
			break;
		}

		if (runtime_exceeded(td, &e))
			break;

		if (td->thinktime)
			usec_sleep(td->thinktime);

		/* periodic fsync for buffered writes */
		if (should_fsync(td) && td->fsync_blocks &&
		    (td->io_blocks % td->fsync_blocks) == 0)
			fsync(td->fd);
	}

	if (td->cur_depth)
		cleanup_pending_aio(td);

	if (should_fsync(td))
		fsync(td->fd);
}
824
825static void cleanup_aio(struct thread_data *td)
826{
827 io_destroy(td->aio_ctx);
828
829 if (td->aio_events)
830 free(td->aio_events);
831}
832
833static int init_aio(struct thread_data *td)
834{
835 if (io_queue_init(td->aio_depth, &td->aio_ctx)) {
836 td->error = errno;
837 return 1;
838 }
839
840 td->aio_events = malloc(td->aio_depth * sizeof(struct io_event));
841 return 0;
842}
843
844static void cleanup_io_u(struct thread_data *td)
845{
846 struct list_head *entry, *n;
847 struct io_u *io_u;
848
849 list_for_each_safe(entry, n, &td->io_u_freelist) {
850 io_u = list_entry(entry, struct io_u, list);
851
852 list_del(&io_u->list);
853 free(io_u);
854 }
855
856 if (td->mem_type == MEM_MALLOC)
857 free(td->orig_buffer);
858 else if (td->mem_type == MEM_SHM) {
859 struct shmid_ds sbuf;
860
861 shmdt(td->orig_buffer);
862 shmctl(td->shm_id, IPC_RMID, &sbuf);
863 }
864}
865
866static int init_io_u(struct thread_data *td)
867{
868 struct io_u *io_u;
869 int i, max_units, mem_size;
870 char *p;
871
872 if (!td->use_aio)
873 max_units = 1;
874 else
875 max_units = td->aio_depth;
876
877 mem_size = td->max_bs * max_units + MASK;
878
879 if (td->mem_type == MEM_MALLOC)
880 td->orig_buffer = malloc(mem_size);
881 else if (td->mem_type == MEM_SHM) {
882 td->shm_id = shmget(IPC_PRIVATE, mem_size, IPC_CREAT | 0600);
883 if (td->shm_id < 0) {
884 td->error = errno;
885 perror("shmget");
886 return 1;
887 }
888
889 td->orig_buffer = shmat(td->shm_id, NULL, 0);
890 if (td->orig_buffer == (void *) -1) {
891 td->error = errno;
892 perror("shmat");
893 return 1;
894 }
895 }
896
897 INIT_LIST_HEAD(&td->io_u_freelist);
898 INIT_LIST_HEAD(&td->io_u_busylist);
899
900 p = ALIGN(td->orig_buffer);
901 for (i = 0; i < max_units; i++) {
902 io_u = malloc(sizeof(*io_u));
903 memset(io_u, 0, sizeof(*io_u));
904 INIT_LIST_HEAD(&io_u->list);
905
906 io_u->buf = p + td->max_bs * i;
907 list_add(&io_u->list, &td->io_u_freelist);
908 }
909
910 return 0;
911}
912
913static void setup_log(struct io_log **log)
914{
915 struct io_log *l = malloc(sizeof(*l));
916
917 l->nr_samples = 0;
918 l->max_samples = 1024;
919 l->log = malloc(l->max_samples * sizeof(struct io_sample));
920 *log = l;
921}
922
923static void finish_log(struct thread_data *td, struct io_log *log, char *name)
924{
925 char file_name[128];
926 FILE *f;
927 int i;
928
929 sprintf(file_name, "client%d_%s.log", td->thread_number, name);
930 f = fopen(file_name, "w");
931 if (!f) {
932 perror("fopen log");
933 return;
934 }
935
936 for (i = 0; i < log->nr_samples; i++)
937 fprintf(f, "%lu, %lu\n", log->log[i].time, log->log[i].val);
938
939 fclose(f);
940 free(log->log);
941 free(log);
942}
943
944static int create_file(struct thread_data *td)
945{
946 unsigned long long left;
947 char *b;
948 int r, bs;
949
950 /*
951 * unless specifically asked for overwrite, let normal io extend it
952 */
953 if (!td_read(td) && !td->overwrite)
954 return 0;
955
956 if (!td->file_size) {
957 fprintf(stderr, "Need size for create\n");
958 td->error = EINVAL;
959 return 1;
960 }
961
962 printf("Client%d: Laying out IO file\n", td->thread_number);
963
964 td->fd = open(td->file_name, O_WRONLY | O_CREAT | O_TRUNC, 0644);
965 if (td->fd < 0) {
966 td->error = errno;
967 return 1;
968 }
969
970 td->kb = td->file_size >> 10;
971 b = malloc(td->max_bs);
972 memset(b, 0, td->max_bs);
973
974 left = td->file_size;
975 while (left) {
976 bs = td->max_bs;
977 if (bs > left)
978 bs = left;
979
980 r = write(td->fd, b, bs);
981
982 if (r == bs) {
983 left -= bs;
984 continue;
985 } else {
986 if (r < 0)
987 td->error = errno;
988 else
989 td->error = EIO;
990
991 break;
992 }
993 }
994
995 fsync(td->fd);
996 close(td->fd);
997 td->fd = -1;
998 free(b);
999 return 0;
1000}
1001
1002static int file_exists(struct thread_data *td)
1003{
1004 struct stat st;
1005
1006 if (stat(td->file_name, &st) != -1)
1007 return 1;
1008
1009 return errno != ENOENT;
1010}
1011
/*
 * Open (creating/laying out if needed) the job's file, compute the run
 * size in KiB, and optionally drop the page cache for it.  Returns 0
 * on success, 1 with td->error set otherwise.
 */
static int setup_file(struct thread_data *td)
{
	struct stat st;
	int flags = 0;

	if (!file_exists(td)) {
		if (!td->create_file) {
			td->error = ENOENT;
			return 1;
		}
		if (create_file(td))
			return 1;
	}

	if (td->odirect)
		flags |= O_DIRECT;

	if (td_read(td))
		td->fd = open(td->file_name, flags | O_RDONLY);
	else {
		/* writes: truncate unless overwriting in place */
		if (!td->overwrite)
			flags |= O_TRUNC;
		if (td->sync_io)
			flags |= O_SYNC;

		td->fd = open(td->file_name, flags | O_WRONLY | O_CREAT, 0600);
	}

	if (td->fd == -1) {
		td->error = errno;
		return 1;
	}

	if (td_read(td)) {
		if (fstat(td->fd, &st) == -1) {
			td->error = errno;
			return 1;
		}

		/* file_size= may enlarge the range beyond the on-disk size */
		if (td->file_size > st.st_size)
			st.st_size = td->file_size;
	} else {
		/* writes default to a 1 GiB working size */
		if (!td->file_size)
			td->file_size = 1024 * 1024 * 1024;

		st.st_size = td->file_size;
	}

	td->kb = (st.st_size - td->file_offset) / 1024;
	if (!td->kb) {
		fprintf(stderr, "Client%d: no io blocks\n", td->thread_number);
		td->error = EINVAL;
		return 1;
	}

	/* push this file's pages out of the cache for a cold-cache run */
	if (td->invalidate_cache) {
		if (fadvise(td->fd, 0, st.st_size, POSIX_FADV_DONTNEED) < 0) {
			td->error = errno;
			return 1;
		}
	}

	return 0;
}
1076
/*
 * Per-job child process body.  Attaches the shared thread_data
 * segment, sets up buffers/aio/priority, then handshakes with the
 * parent (post startup_sem, wait on td->mutex) before running the
 * chosen io engine and recording the runtime.
 */
static void *thread_main(int shm_id, int offset, char *argv[])
{
	struct thread_data *td;
	int ret = 1;
	void *data;

	setsid();

	data = shmat(shm_id, NULL, 0);
	if (data == (void *) -1) {
		perror("shmat");
		return NULL;
	}

	td = data + offset * sizeof(struct thread_data);
	td->pid = getpid();

	if (init_io_u(td))
		goto err;

	if (sched_setaffinity(td->pid, sizeof(td->cpumask), &td->cpumask) == -1) {
		td->error = errno;
		goto err;
	}

	/* rename the process so ps shows which job this is */
	sprintf(argv[0], "fio%d", offset);

	if (td->use_aio && init_aio(td))
		goto err;

	if (init_random_state(td))
		goto err;

	if (td->ioprio) {
		if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) {
			td->error = errno;
			goto err;
		}
	}

	/* tell the parent we are ready, then wait for the start signal */
	sem_post(&startup_sem);
	sem_wait(&td->mutex);

	gettimeofday(&td->start, NULL);

	if (td->ratemin)
		memcpy(&td->lastrate, &td->start, sizeof(td->start));

	memcpy(&td->stat_sample_time, &td->start, sizeof(td->start));

	if (!td->use_aio)
		do_sync_io(td);
	else
		do_async_io(td);

	td->runtime = mtime_since_now(&td->start);	/* msec */
	ret = 0;

	if (td->bw_log)
		finish_log(td, td->bw_log, "bw");
	if (td->lat_log)
		finish_log(td, td->lat_log, "lat");

	if (exitall_on_terminate)
		sig_handler(0);

err:
	if (td->fd != -1) {
		close(td->fd);
		td->fd = -1;
	}
	if (td->use_aio)
		cleanup_aio(td);
	cleanup_io_u(td);
	/* on early failure, still handshake so the parent doesn't hang */
	if (ret) {
		sem_post(&startup_sem);
		sem_wait(&td->mutex);
	}
	td->runstate = TD_EXITED;
	shmdt(data);
	return NULL;
}
1159
/*
 * Detach and remove the shared memory segment holding the job array.
 */
static void free_shm(void)
{
	struct shmid_ds sbuf;

	if (threads) {
		shmdt(threads);
		threads = NULL;
		shmctl(shm_id, IPC_RMID, &sbuf);
	}
}
1170
1171static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
1172 double *mean, double *dev)
1173{
1174 double n;
1175
1176 if (is->samples == 0)
1177 return 0;
1178
1179 *min = is->min_val;
1180 *max = is->max_val;
1181
1182 n = (double) is->samples;
1183 *mean = (double) is->val / n;
1184 *dev = sqrt(((double) is->val_sq - (*mean * *mean) / n) / (n - 1));
1185 return 1;
1186}
1187
/*
 * Print the end-of-run summary for one job: totals, bandwidth, and the
 * latency/bandwidth distributions gathered during the run.  Jobs that
 * did no io and had no error are silent.
 */
static void show_thread_status(struct thread_data *td)
{
	int prio, prio_class;
	unsigned long min, max, bw = 0;
	double mean, dev;

	if (!td->io_kb && !td->error)
		return;

	/* runtime is in msec, so io_kb * 1024 / msec == KiB/sec */
	if (td->runtime)
		bw = td->io_kb * 1024 / td->runtime;

	prio = td->ioprio & 0xff;
	prio_class = td->ioprio >> IOPRIO_CLASS_SHIFT;

	printf("Client%d: err=%2d, io=%6luMiB, bw=%6luKiB/s, runt=%6lumsec\n", td->thread_number, td->error, td->io_kb >> 10, bw, td->runtime);

	if (calc_lat(&td->slat_stat, &min, &max, &mean, &dev))
		printf("  slat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
	if (calc_lat(&td->clat_stat, &min, &max, &mean, &dev))
		printf("  clat (msec): min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
	if (calc_lat(&td->bw_stat, &min, &max, &mean, &dev))
		printf("  bw (KiB/s) : min=%5lu, max=%5lu, avg=%5.02f, dev=%5.02f\n", min, max, mean, dev);
}
1212
1213static int setup_rate(struct thread_data *td)
1214{
1215 int nr_reads_per_sec;
1216
1217 if (!td->rate)
1218 return 0;
1219
1220 if (td->rate < td->ratemin) {
1221 fprintf(stderr, "min rate larger than nominal rate\n");
1222 return -1;
1223 }
1224
1225 nr_reads_per_sec = td->rate * 1024 / td->min_bs;
1226 td->rate_usec_cycle = 1000000 / nr_reads_per_sec;
1227 td->rate_pending_usleep = 0;
1228 return 0;
1229}
1230
/*
 * Allocate the next job slot and seed it with the current defaults
 * from def_thread.  Options parsed in the [global] section land in
 * def_thread itself (global != 0).  Returns NULL when all MAX_JOBS
 * slots are taken.
 */
static struct thread_data *get_new_job(int global)
{
	struct thread_data *td;

	if (global)
		return &def_thread;
	if (thread_number >= max_jobs)
		return NULL;

	td = &threads[thread_number++];
	memset(td, 0, sizeof(*td));

	td->fd = -1;
	td->thread_number = thread_number;

	/* inherit every tunable from the global defaults */
	td->ddir = def_thread.ddir;
	td->ioprio = def_thread.ioprio;
	td->sequential = def_thread.sequential;
	td->bs = def_thread.bs;
	td->min_bs = def_thread.min_bs;
	td->max_bs = def_thread.max_bs;
	td->odirect = def_thread.odirect;
	td->thinktime = def_thread.thinktime;
	td->fsync_blocks = def_thread.fsync_blocks;
	td->start_delay = def_thread.start_delay;
	td->timeout = def_thread.timeout;
	td->use_aio = def_thread.use_aio;
	td->create_file = def_thread.create_file;
	td->overwrite = def_thread.overwrite;
	td->invalidate_cache = def_thread.invalidate_cache;
	td->file_size = def_thread.file_size;
	td->file_offset = def_thread.file_offset;
	td->rate = def_thread.rate;
	td->ratemin = def_thread.ratemin;
	td->ratecycle = def_thread.ratecycle;
	td->aio_depth = def_thread.aio_depth;
	td->sync_io = def_thread.sync_io;
	td->mem_type = def_thread.mem_type;
	td->bw_avg_time = def_thread.bw_avg_time;
	memcpy(&td->cpumask, &def_thread.cpumask, sizeof(td->cpumask));

	return td;
}
1274
/*
 * Release the most recently allocated job slot (used when a job fails
 * to parse or set up).
 */
static void put_job(struct thread_data *td)
{
	memset(&threads[td->thread_number - 1], 0, sizeof(*td));
	thread_number--;
}
1280
1281static int add_job(struct thread_data *td, const char *filename, int prioclass,
1282 int prio)
1283{
1284 if (td == &def_thread)
1285 return 0;
1286
1287 strcpy(td->file_name, filename);
1288 sem_init(&td->mutex, 1, 0);
1289 td->ioprio = (prioclass << IOPRIO_CLASS_SHIFT) | prio;
1290
1291 td->clat_stat.min_val = ULONG_MAX;
1292 td->slat_stat.min_val = ULONG_MAX;
1293 td->bw_stat.min_val = ULONG_MAX;
1294
1295 run_str[td->thread_number - 1] = 'P';
1296
1297 if (td->use_aio && !td->aio_depth)
1298 td->aio_depth = 1;
1299
1300 if (td->min_bs == -1)
1301 td->min_bs = td->bs;
1302 if (td->max_bs == -1)
1303 td->max_bs = td->bs;
1304
1305 if (setup_rate(td))
1306 return -1;
1307
1308 if (write_lat_log)
1309 setup_log(&td->lat_log);
1310 if (write_bw_log)
1311 setup_log(&td->bw_log);
1312
1313 printf("Client%d: file=%s, rw=%d, prio=%d/%d, seq=%d, odir=%d, bs=%d-%d, rate=%d, aio=%d, aio_depth=%d\n", td->thread_number, filename, td->ddir, prioclass, prio, td->sequential, td->odirect, td->min_bs, td->max_bs, td->rate, td->use_aio, td->aio_depth);
1314 return 0;
1315}
1316
/*
 * Build a cpu_set_t from a bitmask of cpu numbers.
 *
 * NOTE(review): cpumask is passed BY VALUE, so CPU_ZERO/CPU_SET here
 * operate on a local copy and the caller's mask (td->cpumask from the
 * cpumask= option) is never actually updated.  Fixing this requires
 * changing the parameter to cpu_set_t * together with its caller.
 */
static void fill_cpu_mask(cpu_set_t cpumask, int cpu)
{
	unsigned int i;

	CPU_ZERO(&cpumask);

	for (i = 0; i < sizeof(int) * 8; i++) {
		if ((1 << i) & cpu)
			CPU_SET(i, &cpumask);
	}
}
1328
/*
 * Copy characters from input into output up to (but not including) the
 * first ',' or '}' or the end of the string, then NUL-terminate.
 * Used to pull one option value out of a {a=x,b=y} job description.
 */
static void fill_option(const char *input, char *output)
{
	size_t n = 0;

	while (input[n] != '\0' && input[n] != ',' && input[n] != '}') {
		output[n] = input[n];
		n++;
	}

	output[n] = '\0';
}
1341
/*
 * Size-suffix multiplier: k/K -> KiB, m/M -> MiB, g/G -> GiB, anything
 * else -> 1.
 */
unsigned long get_mult(char c)
{
	if (c == 'k' || c == 'K')
		return 1024;
	if (c == 'm' || c == 'M')
		return 1024 * 1024;
	if (c == 'g' || c == 'G')
		return 1024 * 1024 * 1024;

	return 1;
}
1358
/*
 * convert string after '=' into decimal value, noting any size suffix
 *
 * NOTE(review): the multiplier is read from str[len - 2], the
 * next-to-last character -- this assumes the value string carries one
 * trailing character (presumably a newline from the ini reader) after
 * the k/m/g suffix; verify against the callers before changing.
 * Also, errno is not reset to 0 before strtoul, so the ERANGE test may
 * act on a stale errno from an earlier call.
 */
static int str_cnv(char *p, unsigned long long *val)
{
	char *str;
	int len;

	str = strstr(p, "=");
	if (!str)
		return 1;

	str++;
	len = strlen(str);

	*val = strtoul(str, NULL, 10);
	if (*val == ULONG_MAX && errno == ERANGE)
		return 1;

	*val *= get_mult(str[len - 2]);
	return 0;
}
1381
1382/*
1383 * job key words:
1384 *
1385 * file=
1386 * bs=
1387 * rw=
1388 * direct=
1389 */
1390static void parse_jobs_cmd(int argc, char *argv[], int index)
1391{
1392 struct thread_data *td;
1393 unsigned int prio, prioclass, cpu;
1394 char *string, *filename, *p, *c;
1395 int i;
1396
1397 string = malloc(256);
1398 filename = malloc(256);
1399
1400 for (i = index; i < argc; i++) {
1401 p = argv[i];
1402
1403 c = strpbrk(p, "{");
1404 if (!c)
1405 break;
1406
1407 filename[0] = 0;
1408
1409 td = get_new_job(0);
1410 if (!td)
1411 break;
1412
1413 prioclass = 2;
1414 prio = 4;
1415
1416 c = strstr(p, "rw=");
1417 if (c) {
1418 c += 3;
1419 if (*c == '0')
1420 td->ddir = DDIR_READ;
1421 else
1422 td->ddir = DDIR_WRITE;
1423 }
1424
1425 c = strstr(p, "prio=");
1426 if (c) {
1427 c += 5;
1428 prio = *c - '0';
1429 }
1430
1431 c = strstr(p, "prioclass=");
1432 if (c) {
1433 c += 10;
1434 prioclass = *c - '0';
1435 }
1436
1437 c = strstr(p, "file=");
1438 if (c) {
1439 c += 5;
1440 fill_option(c, filename);
1441 }
1442
1443 c = strstr(p, "direct=");
1444 if (c) {
1445 c += 7;
1446 if (*c != '0')
1447 td->odirect = 1;
1448 else
1449 td->odirect = 0;
1450 }
1451
1452 c = strstr(p, "sync=");
1453 if (c) {
1454 c += 5;
1455 if (*c != '0')
1456 td->sync_io = 1;
1457 else
1458 td->sync_io = 0;
1459 }
1460
1461 c = strstr(p, "thinktime=");
1462 if (c) {
1463 c += 10;
1464 fill_option(c, string);
1465 td->thinktime = strtoul(string, NULL, 10);
1466 }
1467
1468 c = strstr(p, "rate=");
1469 if (c) {
1470 c += 5;
1471 fill_option(c, string);
1472 td->rate = strtoul(string, NULL, 10);
1473 }
1474
1475 c = strstr(p, "ratemin=");
1476 if (c) {
1477 c += 8;
1478 fill_option(c, string);
1479 td->ratemin = strtoul(string, NULL, 10);
1480 }
1481
1482 c = strstr(p, "ratecycle=");
1483 if (c) {
1484 c += 10;
1485 fill_option(c, string);
1486 td->ratecycle = strtoul(string, NULL, 10);
1487 }
1488
1489 c = strstr(p, "cpumask=");
1490 if (c) {
1491 c += 8;
1492 fill_option(c, string);
1493 cpu = strtoul(string, NULL, 10);
1494 fill_cpu_mask(td->cpumask, cpu);
1495 }
1496
1497 c = strstr(p, "fsync=");
1498 if (c) {
1499 c += 6;
1500 fill_option(c, string);
1501 td->fsync_blocks = strtoul(string, NULL, 10);
1502 }
1503
1504 c = strstr(p, "startdelay=");
1505 if (c) {
1506 c += 11;
1507 fill_option(c, string);
1508 td->start_delay = strtoul(string, NULL, 10);
1509 }
1510
1511 c = strstr(p, "timeout=");
1512 if (c) {
1513 c += 8;
1514 fill_option(c, string);
1515 td->timeout = strtoul(string, NULL, 10);
1516 }
1517
1518 c = strstr(p, "invalidate=");
1519 if (c) {
1520 c += 11;
1521 if (*c != '0')
1522 td->invalidate_cache = 1;
1523 else
1524 td->invalidate_cache = 0;
1525 }
1526
1527 c = strstr(p, "bs=");
1528 if (c) {
1529 unsigned long long bs;
1530
1531 c += 3;
1532 str_cnv(c, &bs);
1533 td->bs = bs;
1534 }
1535
1536 c = strstr(p, "size=");
1537 if (c) {
1538 c += 5;
1539 str_cnv(c, &td->file_size);
1540 }
1541
1542 c = strstr(p, "offset=");
1543 if (c) {
1544 c += 7;
1545 str_cnv(c, &td->file_offset);
1546 }
1547
1548 c = strstr(p, "aio_depth=");
1549 if (c) {
1550 c += 10;
1551 fill_option(c, string);
1552 td->aio_depth = strtoul(string, NULL, 10);
1553 }
1554
1555 c = strstr(p, "mem=");
1556 if (c) {
1557 c += 4;
1558 if (!strncmp(c, "malloc", 6))
1559 td->mem_type = MEM_MALLOC;
1560 else if (!strncmp(c, "shm", 3))
1561 td->mem_type = MEM_SHM;
1562 else
1563 printf("bad mem type %s\n", c);
1564 }
1565
1566 c = strstr(p, "aio");
1567 if (c)
1568 td->use_aio = 1;
1569
1570 c = strstr(p, "create");
1571 if (c)
1572 td->create_file = 1;
1573
1574 c = strstr(p, "overwrite");
1575 if (c)
1576 td->overwrite = 1;
1577
1578 c = strstr(p, "random");
1579 if (c)
1580 td->sequential = 0;
1581 c = strstr(p, "sequential");
1582 if (c)
1583 td->sequential = 1;
1584
1585 if (add_job(td, filename, prioclass, prio))
1586 put_job(td);
1587 }
1588
1589 free(string);
1590 free(filename);
1591}
1592
/*
 * If 'name' occurs anywhere in line 'p', convert the value after '='
 * via str_cnv() into *val and return its result; return 1 (no match)
 * when 'name' is absent.
 */
static int check_strcnv(char *p, char *name, unsigned long long *val)
{
	return strstr(p, name) ? str_cnv(p, val) : 1;
}
1600
/*
 * Check whether line 'p' contains "name" followed (anywhere after it)
 * by "option". Returns 0 on a match, 1 otherwise.
 */
static int check_str(char *p, char *name, char *option)
{
	char *found = strstr(p, name);

	if (found && strstr(found + strlen(name), option))
		return 0;

	return 1;
}
1614
/*
 * Parse "name=<num>[kmg]-<num>[kmg]" (with or without spaces around
 * the '=') from line 'p' into *s and *e, applying any size suffixes.
 * Suffixed forms are tried first, then plain numeric ranges, in the
 * same order as before. Returns 0 when a range was parsed, 1 otherwise.
 */
static int check_range(char *p, char *name, unsigned long *s, unsigned long *e)
{
	static const char *suffixed[] = { "%s=%%lu%%c-%%lu%%c",
					  "%s = %%lu%%c-%%lu%%c" };
	static const char *plain[] = { "%s=%%lu-%%lu", "%s = %%lu-%%lu" };
	char fmt[128];
	char c1, c2;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		sprintf(fmt, suffixed[i], name);
		if (sscanf(p, fmt, s, &c1, e, &c2) == 4) {
			*s *= get_mult(c1);
			*e *= get_mult(c2);
			return 0;
		}
	}

	for (i = 0; i < 2; i++) {
		sprintf(fmt, plain[i], name);
		if (sscanf(p, fmt, s, e) == 2)
			return 0;
	}

	return 1;
}
1645
/*
 * Parse "name=<int>" or "name = <int>" from line 'p' into *val.
 * Returns 0 if a value was found, 1 otherwise.
 */
static int check_int(char *p, char *name, unsigned int *val)
{
	static const char *templates[] = { "%s=%%d", "%s = %%d" };
	char fmt[128];
	unsigned int i;

	for (i = 0; i < sizeof(templates) / sizeof(templates[0]); i++) {
		sprintf(fmt, templates[i], name);
		if (sscanf(p, fmt, val) == 1)
			return 0;
	}

	return 1;
}
1660
/*
 * Return 1 if 'line' is blank (only whitespace/control characters) or
 * is a comment -- a ';' anywhere marks the line as a comment. Return 0
 * as soon as any other printable character is seen.
 */
static int is_empty_or_comment(char *line)
{
	unsigned int i;

	/*
	 * Iterate to the terminator directly instead of calling
	 * strlen() in the loop condition (which made this O(n^2)),
	 * and cast to unsigned char: passing a negative plain char
	 * to the <ctype.h> classifiers is undefined behavior.
	 */
	for (i = 0; line[i] != '\0'; i++) {
		if (line[i] == ';')
			return 1;
		if (!isspace((unsigned char) line[i]) &&
		    !iscntrl((unsigned char) line[i]))
			return 0;
	}

	return 1;
}
1674
/*
 * Parse the ini-style job file: each "[name]" line starts a job whose
 * option lines are consumed until the next section header. Returns 0
 * on success, 1 if the file cannot be opened.
 */
static int parse_jobs_ini(char *file)
{
	unsigned int prioclass, prio, cpu, global;
	unsigned long long ull;
	unsigned long ul1, ul2;
	struct thread_data *td;
	fpos_t off;
	char *string, *name;
	FILE *f;
	char *p;

	f = fopen(file, "r");
	if (!f) {
		perror("fopen");
		return 1;
	}

	string = malloc(4096);
	name = malloc(256);

	while ((p = fgets(string, 4096, f)) != NULL) {
		if (is_empty_or_comment(p))
			continue;
		/*
		 * Section header. %s stops at whitespace, so 'name'
		 * comes back holding "name]" -- the ']' is stripped
		 * below. NOTE(review): no field width on %s, so a very
		 * long section name can overflow the 256-byte buffer.
		 */
		if (sscanf(p, "[%s]", name) != 1)
			continue;

		/* compared before the ']' is stripped, hence strncmp */
		global = !strncmp(name, "global", 6);

		name[strlen(name) - 1] = '\0';

		td = get_new_job(global);
		if (!td)
			break;

		/* io priority defaults: best-effort class, level 4 */
		prioclass = 2;
		prio = 4;

		/*
		 * 'off' records the position just past the last line
		 * that belonged to this section; when the inner loop
		 * stops at the next "[section]" header, fsetpos()
		 * rewinds so the outer loop re-reads that header.
		 * Note that "bsrange" must be checked before "bs",
		 * because the latter is a substring of the former.
		 */
		fgetpos(f, &off);
		while ((p = fgets(string, 4096, f)) != NULL) {
			if (is_empty_or_comment(p))
				continue;
			if (strstr(p, "["))
				break;
			if (!check_int(p, "rw", &td->ddir)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "prio", &prio)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "prioclass", &prioclass)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "direct", &td->odirect)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "rate", &td->rate)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "ratemin", &td->ratemin)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "ratecycle", &td->ratecycle)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "thinktime", &td->thinktime)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "cpumask", &cpu)) {
				fill_cpu_mask(td->cpumask, cpu);
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "fsync", &td->fsync_blocks)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "startdelay", &td->start_delay)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "timeout", &td->timeout)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "invalidate",&td->invalidate_cache)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "aio_depth", &td->aio_depth)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "sync", &td->sync_io)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_int(p, "bwavgtime", &td->bw_avg_time)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_range(p, "bsrange", &ul1, &ul2)) {
				td->min_bs = ul1;
				td->max_bs = ul2;
				fgetpos(f, &off);
				continue;
			}
			if (!check_strcnv(p, "bs", &ull)) {
				td->bs = ull;
				fgetpos(f, &off);
				continue;
			}
			if (!check_strcnv(p, "size", &td->file_size)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_strcnv(p, "offset", &td->file_offset)) {
				fgetpos(f, &off);
				continue;
			}
			if (!check_str(p, "mem", "malloc")) {
				td->mem_type = MEM_MALLOC;
				fgetpos(f, &off);
				continue;
			}
			if (!check_str(p, "mem", "shm")) {
				td->mem_type = MEM_SHM;
				fgetpos(f, &off);
				continue;
			}
			if (!strncmp(p, "sequential", 10)) {
				td->sequential = 1;
				fgetpos(f, &off);
				continue;
			}
			if (!strncmp(p, "random", 6)) {
				td->sequential = 0;
				fgetpos(f, &off);
				continue;
			}
			if (!strncmp(p, "aio", 3)) {
				td->use_aio = 1;
				fgetpos(f, &off);
				continue;
			}
			if (!strncmp(p, "create", 6)) {
				td->create_file = 1;
				fgetpos(f, &off);
				continue;
			}
			if (!strncmp(p, "overwrite", 9)) {
				td->overwrite = 1;
				fgetpos(f, &off);
				continue;
			}
			if (!strncmp(p, "exitall", 7)) {
				exitall_on_terminate = 1;
				fgetpos(f, &off);
				continue;
			}
			printf("Client%d: bad option %s\n",td->thread_number,p);
		}
		/* rewind to just after the last recognized line */
		fsetpos(f, &off);

		/* add_job() failure means the slot must be returned */
		if (add_job(td, name, prioclass, prio))
			put_job(td);
	}

	free(string);
	free(name);
	fclose(f);
	return 0;
}
1855
1856static int parse_options(int argc, char *argv[])
1857{
1858 int i;
1859
1860 for (i = 1; i < argc; i++) {
1861 char *parm = argv[i];
1862
1863 if (parm[0] != '-')
1864 break;
1865
1866 parm++;
1867 switch (*parm) {
1868 case 's':
1869 parm++;
1870 def_thread.sequential = !!atoi(parm);
1871 break;
1872 case 'b':
1873 parm++;
1874 def_thread.bs = atoi(parm);
1875 def_thread.bs <<= 10;
1876 if (!def_thread.bs) {
1877 printf("bad block size\n");
1878 def_thread.bs = DEF_BS;
1879 }
1880 break;
1881 case 't':
1882 parm++;
1883 def_thread.timeout = atoi(parm);
1884 break;
1885 case 'r':
1886 parm++;
1887 repeatable = !!atoi(parm);
1888 break;
1889 case 'R':
1890 parm++;
1891 rate_quit = !!atoi(parm);
1892 break;
1893 case 'o':
1894 parm++;
1895 def_thread.odirect = !!atoi(parm);
1896 break;
1897 case 'f':
1898 if (i + 1 >= argc) {
1899 printf("-f needs file as arg\n");
1900 break;
1901 }
1902 ini_file = strdup(argv[i+1]);
1903 i++;
1904 break;
1905 case 'l':
1906 write_lat_log = 1;
1907 break;
1908 case 'w':
1909 write_bw_log = 1;
1910 break;
1911 default:
1912 printf("bad option %s\n", argv[i]);
1913 break;
1914 }
1915 }
1916
1917 return i;
1918}
1919
1920static void print_thread_status(struct thread_data *td, int nr_running,
1921 int t_rate, int m_rate)
1922{
1923 printf("Threads now running: %d", nr_running);
1924 if (m_rate || t_rate)
1925 printf(", commitrate %d/%dKiB/sec", t_rate, m_rate);
1926 printf(" : [%s]\r", run_str);
1927 fflush(stdout);
1928}
1929
1930static void reap_threads(int *nr_running, int *t_rate, int *m_rate)
1931{
1932 int i;
1933
1934 /*
1935 * reap exited threads (TD_EXITED -> TD_REAPED)
1936 */
1937 for (i = 0; i < thread_number; i++) {
1938 struct thread_data *td = &threads[i];
1939
1940 if (td->runstate != TD_EXITED)
1941 continue;
1942
1943 td->runstate = TD_REAPED;
1944 run_str[td->thread_number - 1] = '_';
1945 waitpid(td->pid, NULL, 0);
1946 (*nr_running)--;
1947 (*m_rate) -= td->ratemin;
1948 (*t_rate) -= td->rate;
1949
1950 if (td->terminate)
1951 continue;
1952
1953 print_thread_status(td, *nr_running, *t_rate, *m_rate);
1954 }
1955}
1956
/*
 * Job supervisor: sets up files sequentially, then forks one process
 * per job, handshakes startup through 'startup_sem', releases each
 * child via its per-thread mutex, and reaps children until all have
 * finished. State flow: TD_NOT_CREATED -> TD_CREATED -> TD_STARTED,
 * then TD_EXITED -> TD_REAPED in reap_threads().
 */
static void run_threads(char *argv[])
{
	struct timeval genesis;
	struct thread_data *td;
	unsigned long spent;
	int i, todo, nr_running, m_rate, t_rate;

	printf("Starting %d threads\n", thread_number);
	fflush(stdout);

	signal(SIGINT, sig_handler);

	todo = thread_number;
	nr_running = 0;
	m_rate = t_rate = 0;

	for (i = 0; i < thread_number; i++) {
		td = &threads[i];

		/*
		 * do file setup here so it happens sequentially,
		 * we don't want X number of threads getting their
		 * client data interspersed on disk
		 */
		if (setup_file(td)) {
			td->runstate = TD_REAPED;
			todo--;
		}
	}

	gettimeofday(&genesis, NULL);

	while (todo) {
		/*
		 * create threads (TD_NOT_CREATED -> TD_CREATED)
		 */
		for (i = 0; i < thread_number; i++) {
			td = &threads[i];

			if (td->runstate != TD_NOT_CREATED)
				continue;

			/*
			 * never got a chance to start, killed by other
			 * thread for some reason
			 */
			if (td->terminate) {
				todo--;
				continue;
			}

			/* honor start_delay (seconds) against elapsed msec */
			if (td->start_delay) {
				spent = mtime_since_now(&genesis);

				if (td->start_delay * 1000 > spent)
					continue;
			}

			td->runstate = TD_CREATED;
			run_str[td->thread_number - 1] = 'C';
			sem_init(&startup_sem, 1, 1);
			todo--;

			/*
			 * parent blocks on startup_sem until the child
			 * signals it has completed its setup; the child
			 * runs the job body and exits.
			 */
			if (fork())
				sem_wait(&startup_sem);
			else {
				thread_main(shm_id, i, argv);
				exit(0);
			}
		}

		/*
		 * start created threads (TD_CREATED -> TD_STARTED)
		 */
		for (i = 0; i < thread_number; i++) {
			struct thread_data *td = &threads[i];

			if (td->runstate != TD_CREATED)
				continue;

			td->runstate = TD_STARTED;
			run_str[td->thread_number - 1] = '+';
			nr_running++;
			m_rate += td->ratemin;
			t_rate += td->rate;
			/* releases the child to start issuing io */
			sem_post(&td->mutex);

			print_thread_status(td, nr_running, t_rate, m_rate);
		}

		reap_threads(&nr_running, &t_rate, &m_rate);

		if (todo)
			usleep(100000);
	}

	/* everything dispatched; wait for the stragglers to finish */
	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
		usleep(10000);
	}
}
2058
2059int setup_thread_area(void)
2060{
2061 /*
2062 * 1024 is too much on some machines, scale max_jobs if
2063 * we get a failure that looks like too large a shm segment
2064 */
2065 do {
2066 int s = max_jobs * sizeof(struct thread_data);
2067
2068 shm_id = shmget(0, s, IPC_CREAT | 0600);
2069 if (shm_id != -1)
2070 break;
2071 if (errno != EINVAL) {
2072 perror("shmget");
2073 break;
2074 }
2075
2076 max_jobs >>= 1;
2077 } while (max_jobs);
2078
2079 if (shm_id == -1)
2080 return 1;
2081
2082 threads = shmat(shm_id, NULL, 0);
2083 if (threads == (void *) -1) {
2084 perror("shmat");
2085 return 1;
2086 }
2087
2088 atexit(free_shm);
2089 return 0;
2090}
2091
/*
 * Entry point: set up the shared thread area, fill in the defaults all
 * jobs inherit, parse options and jobs (ini file or command line), run
 * them, and print per-direction aggregate bandwidth statistics.
 */
int main(int argc, char *argv[])
{
	/* per-direction stats, indexed by DDIR_READ(0)/DDIR_WRITE(1);
	 * static so the maxes start zeroed */
	static unsigned long max_run[2], min_run[2];
	static unsigned long max_bw[2], min_bw[2];
	static unsigned long io_mb[2], agg[2];
	int i;

	if (setup_thread_area())
		return 1;

	/* record the starting cpu mask as the default for all jobs */
	if (sched_getaffinity(getpid(), sizeof(cpu_set_t), &def_thread.cpumask) == -1) {
		perror("sched_getaffinity");
		return 1;
	}

	/*
	 * fill globals
	 */
	def_thread.ddir = DDIR_READ;
	def_thread.bs = DEF_BS;
	def_thread.min_bs = -1;
	def_thread.max_bs = -1;
	def_thread.odirect = DEF_ODIRECT;
	def_thread.ratecycle = DEF_RATE_CYCLE;
	def_thread.sequential = DEF_SEQUENTIAL;
	def_thread.timeout = DEF_TIMEOUT;
	def_thread.create_file = DEF_CREATE;
	def_thread.overwrite = DEF_OVERWRITE;
	def_thread.invalidate_cache = DEF_INVALIDATE;
	def_thread.sync_io = DEF_SYNCIO;
	def_thread.mem_type = MEM_MALLOC;
	def_thread.bw_avg_time = DEF_BWAVGTIME;

	i = parse_options(argc, argv);

	/* jobs come either from an ini file or from the command line */
	if (ini_file) {
		if (parse_jobs_ini(ini_file))
			return 1;
	} else
		parse_jobs_cmd(argc, argv, i);

	if (!thread_number) {
		printf("Nothing to do\n");
		return 1;
	}

	run_threads(argv);

	/* minimums start at "infinity"; maximums keep their zero init */
	min_bw[0] = min_run[0] = ~0UL;
	min_bw[1] = min_run[1] = ~0UL;
	io_mb[0] = io_mb[1] = 0;
	agg[0] = agg[1] = 0;
	for (i = 0; i < thread_number; i++) {
		struct thread_data *td = &threads[i];
		unsigned long bw = 0;

		if (!td->error) {
			if (td->runtime < min_run[td->ddir])
				min_run[td->ddir] = td->runtime;
			if (td->runtime > max_run[td->ddir])
				max_run[td->ddir] = td->runtime;

			/* io_kb is KiB, runtime msec: bytes per msec,
			 * roughly KiB/sec */
			if (td->runtime)
				bw = td->io_kb * 1024 / td->runtime;
			if (bw < min_bw[td->ddir])
				min_bw[td->ddir] = bw;
			if (bw > max_bw[td->ddir])
				max_bw[td->ddir] = bw;

			io_mb[td->ddir] += td->io_kb >> 10;
		}

		show_thread_status(td);
	}

	/* aggregate: total io over the longest runtime per direction */
	if (max_run[0])
		agg[0] = io_mb[0] * 1024 * 1000 / max_run[0];
	if (max_run[1])
		agg[1] = io_mb[1] * 1024 * 1000 / max_run[1];

	printf("\nRun status:\n");
	if (max_run[DDIR_READ])
		printf(" READ: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", io_mb[0], agg[0], min_bw[0], max_bw[0], min_run[0], max_run[0]);
	if (max_run[DDIR_WRITE])
		printf(" WRITE: io=%luMiB, aggrb=%lu, minb=%lu, maxb=%lu, mint=%lumsec, maxt=%lumsec\n", io_mb[1], agg[1], min_bw[1], max_bw[1], min_run[1], max_run[1]);

	return 0;
}