8 #include <sys/resource.h>
18 #include "compiler/compiler.h"
22 #include "arch/arch.h"
34 #ifdef FIO_HAVE_SOLARISAIO
35 #include <sys/asynch.h>
39 * Used for maintaining statistics
42 unsigned long max_val;
43 unsigned long min_val;
44 unsigned long samples;
51 * A single data sample
61 * Dynamically growing data sample log
64 unsigned long nr_samples;
65 unsigned long max_samples;
66 struct io_sample *log;
70 * When logging io actions, this matches a single sent io_u
74 struct rb_node rb_node;
75 struct flist_head list;
79 struct fio_file *file;
81 unsigned long long offset;
86 unsigned int file_action;
92 IO_U_F_FLIGHT = 1 << 1,
102 #ifdef FIO_HAVE_LIBAIO
105 #ifdef FIO_HAVE_POSIXAIO
109 struct sg_io_hdr hdr;
111 #ifdef FIO_HAVE_GUASI
114 #ifdef FIO_HAVE_SOLARISAIO
115 aio_result_t resultp;
119 struct timeval start_time;
120 struct timeval issue_time;
123 * Allocated/set buffer and length
126 unsigned long buflen;
127 unsigned long long offset;
130 * IO engine state, may be different from above when we get
131 * partial transfers / residual data counts
134 unsigned long xfer_buflen;
142 * io engine private data
152 struct fio_file *file;
154 struct flist_head list;
157 * Callback for io completion
159 int (*end_io)(struct thread_data *, struct io_u *);
163 * io_ops->queue() return values
166 FIO_Q_COMPLETED = 0, /* completed sync */
167 FIO_Q_QUEUED = 1, /* queued, will complete async */
168 FIO_Q_BUSY = 2, /* no more room, call ->commit() */
171 #define FIO_HDR_MAGIC 0xf00baaef
174 VERIFY_NONE = 0, /* no verification */
175 VERIFY_MD5, /* md5 sum data blocks */
176 VERIFY_CRC64, /* crc64 sum data blocks */
177 VERIFY_CRC32, /* crc32 sum data blocks */
178 VERIFY_CRC32C, /* crc32c sum data blocks */
179 VERIFY_CRC32C_INTEL, /* crc32c sum data blocks with hw */
180 VERIFY_CRC16, /* crc16 sum data blocks */
181 VERIFY_CRC7, /* crc7 sum data blocks */
182 VERIFY_SHA256, /* sha256 sum data blocks */
183 VERIFY_SHA512, /* sha512 sum data blocks */
184 VERIFY_META, /* block_num, timestamp etc. */
185 VERIFY_NULL, /* pretend to verify */
189 * A header structure associated with each checksummed data block. It is
190 * followed by a checksum specific header that contains the verification
193 struct verify_header {
194 unsigned int fio_magic;
196 unsigned int verify_type;
200 uint32_t md5_digest[16];
222 unsigned char thread;
223 unsigned short numberio;
224 unsigned long time_sec;
225 unsigned long time_usec;
228 struct group_run_stats {
229 unsigned long long max_run[2], min_run[2];
230 unsigned long long max_bw[2], min_bw[2];
231 unsigned long long io_kb[2];
232 unsigned long long agg[2];
236 * What type of allocation to use for io buffers
239 MEM_MALLOC = 0, /* ordinary malloc */
240 MEM_SHM, /* use shared memory segments */
241 MEM_SHMHUGE, /* use shared memory segments with huge pages */
242 MEM_MMAP, /* use anonymous mmap */
243 MEM_MMAPHUGE, /* memory mapped huge file */
246 enum fio_ioengine_flags {
247 FIO_SYNCIO = 1 << 0, /* io engine has synchronous ->queue */
248 FIO_RAWIO = 1 << 1, /* some sort of direct/raw io */
249 FIO_DISKLESSIO = 1 << 2, /* no disk involved */
250 FIO_NOEXTEND = 1 << 3, /* engine can't extend file */
251 FIO_NODISKUTIL = 1 << 4, /* diskutil can't handle filename */
252 FIO_UNIDIR = 1 << 5, /* engine is uni-directional */
253 FIO_NOIO = 1 << 6, /* thread does only pseudo IO */
254 FIO_SIGQUIT = 1 << 7, /* needs SIGQUIT to exit */
258 * How many depth levels to log
260 #define FIO_IO_U_MAP_NR 8
261 #define FIO_IO_U_LAT_U_NR 10
262 #define FIO_IO_U_LAT_M_NR 12
273 struct io_log *slat_log;
274 struct io_log *clat_log;
275 struct io_log *bw_log;
278 * bandwidth and latency stats
280 struct io_stat clat_stat[2]; /* completion latency */
281 struct io_stat slat_stat[2]; /* submission latency */
282 struct io_stat bw_stat[2]; /* bandwidth stats */
284 unsigned long long stat_io_bytes[2];
285 struct timeval stat_sample_time[2];
288 * fio system usage accounting
290 struct rusage ru_start;
291 struct rusage ru_end;
292 unsigned long usr_time;
293 unsigned long sys_time;
295 unsigned long minf, majf;
298 * IO depth and latency stats
300 unsigned int io_u_map[FIO_IO_U_MAP_NR];
301 unsigned int io_u_submit[FIO_IO_U_MAP_NR];
302 unsigned int io_u_complete[FIO_IO_U_MAP_NR];
303 unsigned int io_u_lat_u[FIO_IO_U_LAT_U_NR];
304 unsigned int io_u_lat_m[FIO_IO_U_LAT_M_NR];
305 unsigned long total_io_u[2];
306 unsigned long short_io_u[2];
307 unsigned long total_submit;
308 unsigned long total_complete;
310 unsigned long long io_bytes[2];
311 unsigned long runtime[2];
312 unsigned long total_run_time;
320 struct thread_options {
328 enum td_ddir td_ddir;
329 unsigned int ddir_nr;
330 unsigned int iodepth;
331 unsigned int iodepth_low;
332 unsigned int iodepth_batch;
333 unsigned int iodepth_batch_complete;
335 unsigned long long size;
336 unsigned int fill_device;
337 unsigned long long file_size_low;
338 unsigned long long file_size_high;
339 unsigned long long start_offset;
343 unsigned int min_bs[2];
344 unsigned int max_bs[2];
345 struct bssplit *bssplit[2];
346 unsigned int bssplit_nr[2];
348 unsigned int nr_files;
349 unsigned int open_files;
350 enum file_lock_mode file_lock_mode;
351 unsigned int lockfile_batch;
353 unsigned int odirect;
354 unsigned int invalidate_cache;
355 unsigned int create_serialize;
356 unsigned int create_fsync;
357 unsigned int create_on_open;
358 unsigned int end_fsync;
359 unsigned int pre_read;
360 unsigned int sync_io;
362 unsigned int do_verify;
363 unsigned int verifysort;
364 unsigned int verify_interval;
365 unsigned int verify_offset;
366 unsigned int verify_pattern;
367 unsigned int verify_pattern_bytes;
368 unsigned int verify_fatal;
369 unsigned int use_thread;
371 unsigned int do_disk_util;
372 unsigned int override_sync;
373 unsigned int rand_repeatable;
374 unsigned int write_lat_log;
375 unsigned int write_bw_log;
376 unsigned int norandommap;
377 unsigned int softrandommap;
378 unsigned int bs_unaligned;
379 unsigned int fsync_on_close;
381 unsigned int hugepage_size;
382 unsigned int rw_min_bs;
383 unsigned int thinktime;
384 unsigned int thinktime_spin;
385 unsigned int thinktime_blocks;
386 unsigned int fsync_blocks;
387 unsigned int start_delay;
388 unsigned long long timeout;
389 unsigned long long ramp_time;
390 unsigned int overwrite;
391 unsigned int bw_avg_time;
393 unsigned long long zone_size;
394 unsigned long long zone_skip;
395 enum fio_memtype mem_type;
397 unsigned int stonewall;
398 unsigned int new_group;
399 unsigned int numjobs;
400 os_cpu_mask_t cpumask;
401 unsigned int cpumask_set;
403 unsigned int rwmixcycle;
404 unsigned int rwmix[2];
406 unsigned int file_service_type;
407 unsigned int group_reporting;
408 unsigned int fadvise_hint;
409 unsigned int zero_buffers;
410 unsigned int refill_buffers;
411 unsigned int time_based;
412 unsigned int disable_clat;
413 unsigned int disable_slat;
414 unsigned int disable_bw;
415 unsigned int gtod_reduce;
416 unsigned int gtod_cpu;
417 unsigned int gtod_offload;
419 char *read_iolog_file;
420 char *write_iolog_file;
425 * Pre-run and post-run shell
431 unsigned int ratemin;
432 unsigned int ratecycle;
433 unsigned int rate_iops;
434 unsigned int rate_iops_min;
439 * CPU "io" cycle burner
441 unsigned int cpuload;
442 unsigned int cpucycle;
445 #define FIO_VERROR_SIZE 128
448 * This describes a single thread/process executing a fio job.
451 struct thread_options o;
452 char verror[FIO_VERROR_SIZE];
456 struct thread_stat ts;
457 struct fio_file **files;
458 unsigned int files_size;
459 unsigned int files_index;
460 unsigned int nr_open_files;
461 unsigned int nr_done_files;
462 unsigned int nr_normal_files;
464 unsigned int next_file;
465 os_random_state_t next_file_state;
471 size_t orig_buffer_size;
472 volatile int terminate;
473 volatile int runstate;
475 unsigned int ioprio_set;
476 unsigned int last_was_sync;
486 unsigned long rand_seeds[6];
488 os_random_state_t bsrange_state;
489 os_random_state_t verify_state;
494 * IO engine hooks, contains everything needed to submit an io_u
495 * to any of the available IO engines.
497 struct ioengine_ops *io_ops;
500 * Current IO depth and list of free and busy io_u's.
502 unsigned int cur_depth;
503 unsigned int io_u_queued;
504 struct flist_head io_u_freelist;
505 struct flist_head io_u_busylist;
506 struct flist_head io_u_requeues;
511 unsigned long rate_usec_cycle;
512 long rate_pending_usleep;
513 unsigned long rate_bytes;
514 unsigned long rate_blocks;
515 struct timeval lastrate;
517 unsigned long long total_io_size;
519 unsigned long io_issues[2];
520 unsigned long long io_blocks[2];
521 unsigned long long io_bytes[2];
522 unsigned long long io_skip_bytes;
523 unsigned long long this_io_bytes[2];
524 unsigned long long zone_bytes;
525 struct fio_mutex *mutex;
528 * State for random io, a bitmap of blocks done vs not done
530 os_random_state_t random_state;
532 struct timeval start; /* start of this loop */
533 struct timeval epoch; /* time job was started */
534 struct timeval rw_end[2];
535 struct timeval last_issue;
536 struct timeval tv_cache;
537 unsigned int tv_cache_nr;
538 unsigned int tv_cache_mask;
539 unsigned int rw_end_set[2];
540 unsigned int ramp_time_over;
543 * read/write mixed workload state
545 os_random_state_t rwmix_state;
546 unsigned long rwmix_issues;
547 enum fio_ddir rwmix_ddir;
548 unsigned int ddir_nr;
551 * IO history logs for verification. We use a tree for sorting,
552 * if we are overwriting. Otherwise just use a fifo.
554 struct rb_root io_hist_tree;
555 struct flist_head io_hist_list;
560 struct flist_head io_log_list;
563 * for fileservice, how often to switch to a new file
565 unsigned int file_service_nr;
566 unsigned int file_service_left;
567 struct fio_file *file_service_file;
570 * For generating file sizes
572 os_random_state_t file_size_state;
576 * roundrobin available files, or choose one at random, or do each one
580 FIO_FSERVICE_RANDOM = 1,
582 FIO_FSERVICE_SEQ = 3,
586 * when should interactive ETA output be generated
/*
 * Record an error message in td->verror, tagged with file/line and the
 * failing function, for later reporting.
 * NOTE(review): snprintf() already NUL-terminates within the given size,
 * so the "sizeof(td->verror) - 1" bound looks merely conservative —
 * confirm before changing.
 */
594 #define __td_verror(td, err, msg, func) \
600 snprintf(td->verror, sizeof(td->verror) - 1, "file:%s:%d, func=%s, error=%s", __FILE__, __LINE__, (func), (msg)); \
/* Variant that derives the message from errno-style error codes. */
604 #define td_verror(td, err, func) \
605 __td_verror((td), (err), strerror((err)), (func))
/* Variant that takes an explicit caller-supplied message. */
606 #define td_vmsg(td, err, msg, func) \
607 __td_verror((td), (err), (msg), (func))
609 extern int exitall_on_terminate;
610 extern int thread_number;
611 extern int nr_process, nr_thread;
614 extern int terse_output;
615 extern int temp_stall_ts;
616 extern unsigned long long mlock_size;
617 extern unsigned long page_mask, page_size;
618 extern int read_only;
619 extern int eta_print;
620 extern unsigned long done_secs;
621 extern char *job_section;
622 extern int fio_gtod_offload;
623 extern int fio_gtod_cpu;
625 extern struct thread_data *threads;
627 #define td_read(td) ((td)->o.td_ddir & TD_DDIR_READ)
628 #define td_write(td) ((td)->o.td_ddir & TD_DDIR_WRITE)
629 #define td_rw(td) (((td)->o.td_ddir & TD_DDIR_RW) == TD_DDIR_RW)
630 #define td_random(td) ((td)->o.td_ddir & TD_DDIR_RAND)
631 #define file_randommap(td, f) (!(td)->o.norandommap && (f)->file_map)
/*
 * Sanity check guarding read-only operation: assert that a WRITE io_u is
 * never seen on a job whose data direction does not include writes.
 */
633 static inline void fio_ro_check(struct thread_data *td, struct io_u *io_u)
635 assert(!(io_u->ddir == DDIR_WRITE && !td_write(td)));
638 #define BLOCKS_PER_MAP (8 * sizeof(int))
639 #define TO_MAP_BLOCK(f, b) (b)
640 #define RAND_MAP_IDX(f, b) (TO_MAP_BLOCK(f, b) / BLOCKS_PER_MAP)
641 #define RAND_MAP_BIT(f, b) (TO_MAP_BLOCK(f, b) & (BLOCKS_PER_MAP - 1))
643 #define MAX_JOBS (1024)
/*
 * Whether this job should issue fsyncs.
 * NOTE(review): the return statements fall outside this view — presumably
 * "no" when the previous IO was already a sync, otherwise "yes" for
 * write-capable jobs or when override_sync is set; confirm in full source.
 */
645 static inline int should_fsync(struct thread_data *td)
647 if (td->last_was_sync)
651 if (td_write(td) || td_rw(td) || td->o.override_sync)
658 * Disk utils as read in /sys/block/<dev>/stat
660 struct disk_util_stat {
663 unsigned long long sectors[2];
666 unsigned time_in_queue;
670 * Per-device disk util management
673 struct flist_head list;
674 /* If this disk is a slave, hook it into the master's
675 * list using this head.
677 struct flist_head slavelist;
684 struct disk_util_stat dus;
685 struct disk_util_stat last_dus;
687 /* For software raids, this entry maintains pointers to the
688 * entries for the slave devices. The disk_util entries for
689 * the slaves devices should primarily be maintained through
690 * the disk_list list, i.e. for memory allocation and
691 * de-allocation, etc. Whereas this list should be used only
692 * for aggregating a software RAID's disk util figures.
694 struct flist_head slaves;
699 struct fio_mutex *lock;
703 static inline void disk_util_inc(struct disk_util *du)
706 fio_mutex_down(du->lock);
708 fio_mutex_up(du->lock);
712 static inline void disk_util_dec(struct disk_util *du)
715 fio_mutex_down(du->lock);
717 fio_mutex_up(du->lock);
721 #define DISK_UTIL_MSEC (250)
733 extern int __must_check read_iolog_get(struct thread_data *, struct io_u *);
734 extern void log_io_u(struct thread_data *, struct io_u *);
735 extern void log_file(struct thread_data *, struct fio_file *, enum file_log_act);
736 extern int __must_check init_iolog(struct thread_data *td);
737 extern void log_io_piece(struct thread_data *, struct io_u *);
738 extern void queue_io_piece(struct thread_data *, struct io_piece *);
739 extern void prune_io_piece_log(struct thread_data *);
740 extern void write_iolog_close(struct thread_data *);
745 extern void add_clat_sample(struct thread_data *, enum fio_ddir, unsigned long,
747 extern void add_slat_sample(struct thread_data *, enum fio_ddir, unsigned long,
749 extern void add_bw_sample(struct thread_data *, enum fio_ddir, unsigned int,
751 extern void show_run_stats(void);
752 extern void init_disk_util(struct thread_data *);
753 extern void update_rusage_stat(struct thread_data *);
754 extern void update_io_ticks(void);
755 extern void setup_log(struct io_log **);
756 extern void finish_log(struct thread_data *, struct io_log *, const char *);
757 extern void finish_log_named(struct thread_data *, struct io_log *, const char *, const char *);
758 extern void __finish_log(struct io_log *, const char *);
759 extern struct io_log *agg_io_log[2];
760 extern int write_bw_log;
761 extern void add_agg_sample(unsigned long, enum fio_ddir, unsigned int);
766 extern unsigned long long utime_since(struct timeval *, struct timeval *);
767 extern unsigned long long utime_since_now(struct timeval *);
768 extern unsigned long mtime_since(struct timeval *, struct timeval *);
769 extern unsigned long mtime_since_now(struct timeval *);
770 extern unsigned long time_since_now(struct timeval *);
771 extern unsigned long mtime_since_genesis(void);
772 extern void usec_spin(unsigned int);
773 extern void usec_sleep(struct thread_data *, unsigned long);
774 extern void rate_throttle(struct thread_data *, unsigned long, unsigned int);
775 extern void fill_start_time(struct timeval *);
776 extern void fio_gettime(struct timeval *, void *);
777 extern void fio_gtod_init(void);
778 extern void fio_gtod_update(void);
779 extern void set_genesis_time(void);
780 extern int ramp_time_over(struct thread_data *);
781 extern int in_ramp_time(struct thread_data *);
784 * Init/option functions
786 extern int __must_check parse_options(int, char **);
787 extern int fio_options_parse(struct thread_data *, char **, int);
788 extern int fio_cmd_option_parse(struct thread_data *, const char *, char *);
789 extern void fio_fill_default_options(struct thread_data *);
790 extern int fio_show_option_help(const char *);
791 extern void fio_options_dup_and_init(struct option *);
792 extern void options_mem_dupe(struct thread_data *);
793 extern void options_mem_free(struct thread_data *);
794 extern void td_fill_rand_seeds(struct thread_data *);
795 #define FIO_GETOPT_JOB 0x89988998
796 #define FIO_NR_OPTIONS 128
799 * File setup/shutdown
801 extern void close_files(struct thread_data *);
802 extern void close_and_free_files(struct thread_data *);
803 extern int __must_check setup_files(struct thread_data *);
804 extern int __must_check file_invalidate_cache(struct thread_data *, struct fio_file *);
805 extern int __must_check generic_open_file(struct thread_data *, struct fio_file *);
806 extern int __must_check generic_close_file(struct thread_data *, struct fio_file *);
807 extern int __must_check generic_get_file_size(struct thread_data *, struct fio_file *);
808 extern int __must_check pre_read_files(struct thread_data *);
809 extern int add_file(struct thread_data *, const char *);
810 extern void get_file(struct fio_file *);
811 extern int __must_check put_file(struct thread_data *, struct fio_file *);
812 extern void lock_file(struct thread_data *, struct fio_file *, enum fio_ddir);
813 extern void unlock_file(struct thread_data *, struct fio_file *);
814 extern void unlock_file_all(struct thread_data *, struct fio_file *);
815 extern int add_dir_files(struct thread_data *, const char *);
816 extern int init_random_map(struct thread_data *);
817 extern void dup_files(struct thread_data *, struct thread_data *);
818 extern int get_fileno(struct thread_data *, const char *);
819 extern void free_release_files(struct thread_data *);
/*
 * ETA/status display entry points.
 */
824 extern void print_thread_status(void);
825 extern void print_status_init(int);
/*
 * Disk utilization reporting: real hooks when the platform exposes
 * per-device stats (FIO_HAVE_DISK_UTIL), no-op macro stubs otherwise.
 * NOTE(review): init_disk_util() and update_io_ticks() are also declared
 * unconditionally in the stats section earlier in this file — consider
 * consolidating the duplicate declarations.
 */
830 #ifdef FIO_HAVE_DISK_UTIL
831 extern void show_disk_util(void);
832 extern void init_disk_util(struct thread_data *);
833 extern void update_io_ticks(void);
835 #define show_disk_util()
836 #define init_disk_util(td)
837 #define update_io_ticks()
841 * Thread life cycle. Once a thread has a runstate beyond TD_INITIALIZED, it
842 * will never go back again. It may cycle between running/verifying/fsyncing.
843 * Once the thread reaches TD_EXITED, it is just waiting for the core to
859 extern void td_set_runstate(struct thread_data *, int);
864 extern void populate_verify_io_u(struct thread_data *, struct io_u *);
865 extern int __must_check get_next_verify(struct thread_data *td, struct io_u *);
866 extern int __must_check verify_io_u(struct thread_data *, struct io_u *);
871 extern int __must_check fio_pin_memory(void);
872 extern void fio_unpin_memory(void);
873 extern int __must_check allocate_io_mem(struct thread_data *);
874 extern void free_io_mem(struct thread_data *);
879 #define queue_full(td) flist_empty(&(td)->io_u_freelist)
880 extern struct io_u *__get_io_u(struct thread_data *);
881 extern struct io_u *get_io_u(struct thread_data *);
882 extern void put_io_u(struct thread_data *, struct io_u *);
883 extern void requeue_io_u(struct thread_data *, struct io_u **);
884 extern long __must_check io_u_sync_complete(struct thread_data *, struct io_u *);
885 extern long __must_check io_u_queued_complete(struct thread_data *, int);
886 extern void io_u_queued(struct thread_data *, struct io_u *);
887 extern void io_u_log_error(struct thread_data *, struct io_u *);
888 extern void io_u_mark_depth(struct thread_data *, unsigned int);
889 extern void io_u_fill_buffer(struct thread_data *td, struct io_u *, unsigned int);
890 void io_u_mark_complete(struct thread_data *, unsigned int);
891 void io_u_mark_submit(struct thread_data *, unsigned int);
894 * Reset stats after ramp time completes
896 extern void reset_all_stats(struct thread_data *);
899 * io engine entry points
901 extern int __must_check td_io_init(struct thread_data *);
902 extern int __must_check td_io_prep(struct thread_data *, struct io_u *);
903 extern int __must_check td_io_queue(struct thread_data *, struct io_u *);
904 extern int __must_check td_io_sync(struct thread_data *, struct fio_file *);
905 extern int __must_check td_io_getevents(struct thread_data *, unsigned int, unsigned int, struct timespec *);
906 extern int __must_check td_io_commit(struct thread_data *);
907 extern int __must_check td_io_open_file(struct thread_data *, struct fio_file *);
908 extern int td_io_close_file(struct thread_data *, struct fio_file *);
909 extern int __must_check td_io_get_file_size(struct thread_data *, struct fio_file *);
914 #ifdef FIO_HAVE_BLKTRACE
915 extern int is_blktrace(const char *);
916 extern int load_blktrace(struct thread_data *, const char *);
919 struct ioengine_ops {
920 struct flist_head list;
924 int (*setup)(struct thread_data *);
925 int (*init)(struct thread_data *);
926 int (*prep)(struct thread_data *, struct io_u *);
927 int (*queue)(struct thread_data *, struct io_u *);
928 int (*commit)(struct thread_data *);
929 int (*getevents)(struct thread_data *, unsigned int, unsigned int, struct timespec *);
930 struct io_u *(*event)(struct thread_data *, int);
931 int (*cancel)(struct thread_data *, struct io_u *);
932 void (*cleanup)(struct thread_data *);
933 int (*open_file)(struct thread_data *, struct fio_file *);
934 int (*close_file)(struct thread_data *, struct fio_file *);
935 int (*get_file_size)(struct thread_data *, struct fio_file *);
940 #define FIO_IOOPS_VERSION 10
942 extern struct ioengine_ops *load_ioengine(struct thread_data *, const char *);
943 extern void register_ioengine(struct ioengine_ops *);
944 extern void unregister_ioengine(struct ioengine_ops *);
945 extern void close_ioengine(struct thread_data *);
/*
 * Mark unused variables passed to ops functions as unused, to silence gcc.
 * Use the canonical "__attribute__" spelling (with trailing underscores),
 * consistent with the fio_init/fio_exit definitions below.
 */
#define fio_unused	__attribute__((__unused__))
951 #define fio_init __attribute__((constructor))
952 #define fio_exit __attribute__((destructor))
954 #define for_each_td(td, i) \
955 for ((i) = 0, (td) = &threads[0]; (i) < (int) thread_number; (i)++, (td)++)
956 #define for_each_file(td, f, i) \
957 if ((td)->files_index) \
958 for ((i) = 0, (f) = (td)->files[0]; \
959 (i) < (td)->o.nr_files && ((f) = (td)->files[i]) != NULL; \
962 #define fio_assert(td, cond) do { \
965 fprintf(stderr, "file:%s:%d, assert %s failed\n", __FILE__, __LINE__, #cond); \
966 (td)->runstate = TD_EXITED; \
967 (td)->error = EFAULT; \
972 static inline void fio_file_reset(struct fio_file *f)
974 f->last_free_lookup = 0;
975 f->last_pos = f->file_offset;
977 memset(f->file_map, 0, f->num_maps * sizeof(int));
980 static inline void clear_error(struct thread_data *td)
983 td->verror[0] = '\0';
987 static inline void dprint_io_u(struct io_u *io_u, const char *p)
989 struct fio_file *f = io_u->file;
991 dprint(FD_IO, "%s: io_u %p: off=%llu/len=%lu/ddir=%d", p, io_u,
992 (unsigned long long) io_u->offset,
993 io_u->buflen, io_u->ddir);
994 if (fio_debug & (1 << FD_IO)) {
996 log_info("/%s", f->file_name);
1002 #define dprint_io_u(io_u, p)
1005 static inline int fio_fill_issue_time(struct thread_data *td)
1007 if (td->o.read_iolog_file ||
1008 !td->o.disable_clat || !td->o.disable_slat || !td->o.disable_bw)