8 #include <sys/resource.h>
18 #include "compiler/compiler.h"
22 #include "arch/arch.h"
34 #ifdef FIO_HAVE_SOLARISAIO
35 #include <sys/asynch.h>
39 * Use for maintaining statistics
42 unsigned long max_val;
43 unsigned long min_val;
44 unsigned long samples;
51 * A single data sample
61 * Dynamically growing data sample log
64 unsigned long nr_samples;
65 unsigned long max_samples;
66 struct io_sample *log;
70 * When logging io actions, this matches a single sent io_u
74 struct rb_node rb_node;
75 struct flist_head list;
79 struct fio_file *file;
81 unsigned long long offset;
86 unsigned int file_action;
92 IO_U_F_FLIGHT = 1 << 1,
102 #ifdef FIO_HAVE_LIBAIO
105 #ifdef FIO_HAVE_POSIXAIO
109 struct sg_io_hdr hdr;
111 #ifdef FIO_HAVE_GUASI
114 #ifdef FIO_HAVE_SOLARISAIO
115 aio_result_t resultp;
119 struct timeval start_time;
120 struct timeval issue_time;
123 * Allocated/set buffer and length
126 unsigned long buflen;
127 unsigned long long offset;
130 * IO engine state, may be different from above when we get
131 * partial transfers / residual data counts
134 unsigned long xfer_buflen;
142 * io engine private data
152 struct fio_file *file;
154 struct flist_head list;
157 * Callback for io completion
159 int (*end_io)(struct thread_data *, struct io_u *);
163 * io_ops->queue() return values
166 FIO_Q_COMPLETED = 0, /* completed sync */
167 FIO_Q_QUEUED = 1, /* queued, will complete async */
168 FIO_Q_BUSY = 2, /* no more room, call ->commit() */
171 struct group_run_stats {
172 unsigned long long max_run[2], min_run[2];
173 unsigned long long max_bw[2], min_bw[2];
174 unsigned long long io_kb[2];
175 unsigned long long agg[2];
179 * What type of allocation to use for io buffers
182 MEM_MALLOC = 0, /* ordinary malloc */
183 MEM_SHM, /* use shared memory segments */
184 MEM_SHMHUGE, /* use shared memory segments with huge pages */
185 MEM_MMAP, /* use anonymous mmap */
186 MEM_MMAPHUGE, /* memory mapped huge file */
189 enum fio_ioengine_flags {
190 FIO_SYNCIO = 1 << 0, /* io engine has synchronous ->queue */
191 FIO_RAWIO = 1 << 1, /* some sort of direct/raw io */
192 FIO_DISKLESSIO = 1 << 2, /* no disk involved */
193 FIO_NOEXTEND = 1 << 3, /* engine can't extend file */
194 FIO_NODISKUTIL = 1 << 4, /* diskutil can't handle filename */
195 FIO_UNIDIR = 1 << 5, /* engine is uni-directional */
196 FIO_NOIO = 1 << 6, /* thread does only pseudo IO */
197 FIO_SIGQUIT = 1 << 7, /* needs SIGQUIT to exit */
201 * How many depth levels to log
203 #define FIO_IO_U_MAP_NR 8
204 #define FIO_IO_U_LAT_U_NR 10
205 #define FIO_IO_U_LAT_M_NR 12
216 struct io_log *slat_log;
217 struct io_log *clat_log;
218 struct io_log *bw_log;
221 * bandwidth and latency stats
223 struct io_stat clat_stat[2]; /* completion latency */
224 struct io_stat slat_stat[2]; /* submission latency */
225 struct io_stat bw_stat[2]; /* bandwidth stats */
227 unsigned long long stat_io_bytes[2];
228 struct timeval stat_sample_time[2];
231 * fio system usage accounting
233 struct rusage ru_start;
234 struct rusage ru_end;
235 unsigned long usr_time;
236 unsigned long sys_time;
238 unsigned long minf, majf;
241 * IO depth and latency stats
243 unsigned int io_u_map[FIO_IO_U_MAP_NR];
244 unsigned int io_u_submit[FIO_IO_U_MAP_NR];
245 unsigned int io_u_complete[FIO_IO_U_MAP_NR];
246 unsigned int io_u_lat_u[FIO_IO_U_LAT_U_NR];
247 unsigned int io_u_lat_m[FIO_IO_U_LAT_M_NR];
248 unsigned long total_io_u[2];
249 unsigned long short_io_u[2];
250 unsigned long total_submit;
251 unsigned long total_complete;
253 unsigned long long io_bytes[2];
254 unsigned long runtime[2];
255 unsigned long total_run_time;
263 struct thread_options {
271 enum td_ddir td_ddir;
272 unsigned int ddir_nr;
273 unsigned int iodepth;
274 unsigned int iodepth_low;
275 unsigned int iodepth_batch;
276 unsigned int iodepth_batch_complete;
278 unsigned long long size;
279 unsigned int fill_device;
280 unsigned long long file_size_low;
281 unsigned long long file_size_high;
282 unsigned long long start_offset;
286 unsigned int min_bs[2];
287 unsigned int max_bs[2];
288 struct bssplit *bssplit[2];
289 unsigned int bssplit_nr[2];
291 unsigned int nr_files;
292 unsigned int open_files;
293 enum file_lock_mode file_lock_mode;
294 unsigned int lockfile_batch;
296 unsigned int odirect;
297 unsigned int invalidate_cache;
298 unsigned int create_serialize;
299 unsigned int create_fsync;
300 unsigned int create_on_open;
301 unsigned int end_fsync;
302 unsigned int pre_read;
303 unsigned int sync_io;
305 unsigned int do_verify;
306 unsigned int verifysort;
307 unsigned int verify_interval;
308 unsigned int verify_offset;
309 unsigned int verify_pattern;
310 unsigned int verify_pattern_bytes;
311 unsigned int verify_fatal;
312 unsigned int use_thread;
314 unsigned int do_disk_util;
315 unsigned int override_sync;
316 unsigned int rand_repeatable;
317 unsigned int write_lat_log;
318 unsigned int write_bw_log;
319 unsigned int norandommap;
320 unsigned int softrandommap;
321 unsigned int bs_unaligned;
322 unsigned int fsync_on_close;
324 unsigned int hugepage_size;
325 unsigned int rw_min_bs;
326 unsigned int thinktime;
327 unsigned int thinktime_spin;
328 unsigned int thinktime_blocks;
329 unsigned int fsync_blocks;
330 unsigned int start_delay;
331 unsigned long long timeout;
332 unsigned long long ramp_time;
333 unsigned int overwrite;
334 unsigned int bw_avg_time;
336 unsigned long long zone_size;
337 unsigned long long zone_skip;
338 enum fio_memtype mem_type;
340 unsigned int stonewall;
341 unsigned int new_group;
342 unsigned int numjobs;
343 os_cpu_mask_t cpumask;
344 unsigned int cpumask_set;
346 unsigned int rwmixcycle;
347 unsigned int rwmix[2];
349 unsigned int file_service_type;
350 unsigned int group_reporting;
351 unsigned int fadvise_hint;
352 unsigned int zero_buffers;
353 unsigned int refill_buffers;
354 unsigned int time_based;
355 unsigned int disable_clat;
356 unsigned int disable_slat;
357 unsigned int disable_bw;
358 unsigned int gtod_reduce;
359 unsigned int gtod_cpu;
360 unsigned int gtod_offload;
362 char *read_iolog_file;
363 char *write_iolog_file;
368 * Pre-run and post-run shell
374 unsigned int ratemin;
375 unsigned int ratecycle;
376 unsigned int rate_iops;
377 unsigned int rate_iops_min;
382 * CPU "io" cycle burner
384 unsigned int cpuload;
385 unsigned int cpucycle;
388 #define FIO_VERROR_SIZE 128
391 * This describes a single thread/process executing a fio job.
394 struct thread_options o;
395 char verror[FIO_VERROR_SIZE];
399 struct thread_stat ts;
400 struct fio_file **files;
401 unsigned int files_size;
402 unsigned int files_index;
403 unsigned int nr_open_files;
404 unsigned int nr_done_files;
405 unsigned int nr_normal_files;
407 unsigned int next_file;
408 os_random_state_t next_file_state;
414 size_t orig_buffer_size;
415 volatile int terminate;
416 volatile int runstate;
418 unsigned int ioprio_set;
419 unsigned int last_was_sync;
429 unsigned long rand_seeds[6];
431 os_random_state_t bsrange_state;
432 os_random_state_t verify_state;
437 * IO engine hooks, contains everything needed to submit an io_u
438 * to any of the available IO engines.
440 struct ioengine_ops *io_ops;
443 * Current IO depth and list of free and busy io_u's.
445 unsigned int cur_depth;
446 unsigned int io_u_queued;
447 struct flist_head io_u_freelist;
448 struct flist_head io_u_busylist;
449 struct flist_head io_u_requeues;
454 unsigned long rate_usec_cycle;
455 long rate_pending_usleep;
456 unsigned long rate_bytes;
457 unsigned long rate_blocks;
458 struct timeval lastrate;
460 unsigned long long total_io_size;
462 unsigned long io_issues[2];
463 unsigned long long io_blocks[2];
464 unsigned long long io_bytes[2];
465 unsigned long long io_skip_bytes;
466 unsigned long long this_io_bytes[2];
467 unsigned long long zone_bytes;
468 struct fio_mutex *mutex;
471 * State for random io, a bitmap of blocks done vs not done
473 os_random_state_t random_state;
475 struct timeval start; /* start of this loop */
476 struct timeval epoch; /* time job was started */
477 struct timeval rw_end[2];
478 struct timeval last_issue;
479 struct timeval tv_cache;
480 unsigned int tv_cache_nr;
481 unsigned int tv_cache_mask;
482 unsigned int rw_end_set[2];
483 unsigned int ramp_time_over;
486 * read/write mixed workload state
488 os_random_state_t rwmix_state;
489 unsigned long rwmix_issues;
490 enum fio_ddir rwmix_ddir;
491 unsigned int ddir_nr;
494 * IO history logs for verification. We use a tree for sorting,
495 * if we are overwriting. Otherwise just use a fifo.
497 struct rb_root io_hist_tree;
498 struct flist_head io_hist_list;
503 struct flist_head io_log_list;
506 * for fileservice, how often to switch to a new file
508 unsigned int file_service_nr;
509 unsigned int file_service_left;
510 struct fio_file *file_service_file;
513 * For generating file sizes
515 os_random_state_t file_size_state;
519 * roundrobin available files, or choose one at random, or do each one
523 FIO_FSERVICE_RANDOM = 1,
525 FIO_FSERVICE_SEQ = 3,
529 * when should interactive ETA output be generated
537 #define __td_verror(td, err, msg, func) \
543 snprintf(td->verror, sizeof(td->verror) - 1, "file:%s:%d, func=%s, error=%s", __FILE__, __LINE__, (func), (msg)); \
547 #define td_verror(td, err, func) \
548 __td_verror((td), (err), strerror((err)), (func))
549 #define td_vmsg(td, err, msg, func) \
550 __td_verror((td), (err), (msg), (func))
552 extern int exitall_on_terminate;
553 extern int thread_number;
554 extern int nr_process, nr_thread;
557 extern int terse_output;
558 extern int temp_stall_ts;
559 extern unsigned long long mlock_size;
560 extern unsigned long page_mask, page_size;
561 extern int read_only;
562 extern int eta_print;
563 extern unsigned long done_secs;
564 extern char *job_section;
565 extern int fio_gtod_offload;
566 extern int fio_gtod_cpu;
568 extern struct thread_data *threads;
570 #define td_read(td) ((td)->o.td_ddir & TD_DDIR_READ)
571 #define td_write(td) ((td)->o.td_ddir & TD_DDIR_WRITE)
572 #define td_rw(td) (((td)->o.td_ddir & TD_DDIR_RW) == TD_DDIR_RW)
573 #define td_random(td) ((td)->o.td_ddir & TD_DDIR_RAND)
574 #define file_randommap(td, f) (!(td)->o.norandommap && (f)->file_map)
/*
 * NOTE(review): fragmentary view — the interior lines (braces) of this
 * function are missing from this dump; code bytes below are untouched.
 * Sanity check for read-only operation: asserts that a WRITE io_u is
 * never issued by a job whose data direction does not include writes
 * (td_write() tests o.td_ddir & TD_DDIR_WRITE, per the macro above).
 */
576 static inline void fio_ro_check(struct thread_data *td, struct io_u *io_u)
578 assert(!(io_u->ddir == DDIR_WRITE && !td_write(td)));
581 #define BLOCKS_PER_MAP (8 * sizeof(int))
582 #define TO_MAP_BLOCK(f, b) (b)
583 #define RAND_MAP_IDX(f, b) (TO_MAP_BLOCK(f, b) / BLOCKS_PER_MAP)
584 #define RAND_MAP_BIT(f, b) (TO_MAP_BLOCK(f, b) & (BLOCKS_PER_MAP - 1))
586 #define MAX_JOBS (1024)
/*
 * NOTE(review): fragmentary view — the return statements and braces of
 * this function are missing from this dump; code bytes below are untouched.
 * Decides whether an fsync should be issued: skipped if the previous op
 * was already a sync; otherwise presumably allowed for write/rw jobs or
 * when o.override_sync is set — TODO confirm against the full source.
 */
588 static inline int should_fsync(struct thread_data *td)
590 if (td->last_was_sync)
594 if (td_write(td) || td_rw(td) || td->o.override_sync)
601 * Disk utils as read in /sys/block/<dev>/stat
603 struct disk_util_stat {
606 unsigned long long sectors[2];
609 unsigned time_in_queue;
613 * Per-device disk util management
616 struct flist_head list;
617 /* If this disk is a slave, hook it into the master's
618 * list using this head.
620 struct flist_head slavelist;
627 struct disk_util_stat dus;
628 struct disk_util_stat last_dus;
630 /* For software raids, this entry maintains pointers to the
631 * entries for the slave devices. The disk_util entries for
632 * the slaves devices should primarily be maintained through
633 * the disk_list list, i.e. for memory allocation and
634 * de-allocation, etc. Whereas this list should be used only
635 * for aggregating a software RAID's disk util figures.
637 struct flist_head slaves;
642 struct fio_mutex *lock;
/*
 * NOTE(review): fragmentary view — the line(s) between the mutex down/up
 * (presumably a reference-count increment) are missing from this dump;
 * code bytes below are untouched.
 * Mutex-protected bump of a disk_util user count — verify against the
 * full source.
 */
646 static inline void disk_util_inc(struct disk_util *du)
649 fio_mutex_down(du->lock);
651 fio_mutex_up(du->lock);
/*
 * NOTE(review): fragmentary view — the decrement between the mutex
 * down/up is missing from this dump; code bytes below are untouched.
 * Counterpart to disk_util_inc(): mutex-protected drop of the user
 * count — verify against the full source.
 */
655 static inline void disk_util_dec(struct disk_util *du)
658 fio_mutex_down(du->lock);
660 fio_mutex_up(du->lock);
664 #define DISK_UTIL_MSEC (250)
676 extern int __must_check read_iolog_get(struct thread_data *, struct io_u *);
677 extern void log_io_u(struct thread_data *, struct io_u *);
678 extern void log_file(struct thread_data *, struct fio_file *, enum file_log_act);
679 extern int __must_check init_iolog(struct thread_data *td);
680 extern void log_io_piece(struct thread_data *, struct io_u *);
681 extern void queue_io_piece(struct thread_data *, struct io_piece *);
682 extern void prune_io_piece_log(struct thread_data *);
683 extern void write_iolog_close(struct thread_data *);
688 extern void add_clat_sample(struct thread_data *, enum fio_ddir, unsigned long,
690 extern void add_slat_sample(struct thread_data *, enum fio_ddir, unsigned long,
692 extern void add_bw_sample(struct thread_data *, enum fio_ddir, unsigned int,
694 extern void show_run_stats(void);
695 extern void init_disk_util(struct thread_data *);
696 extern void update_rusage_stat(struct thread_data *);
697 extern void update_io_ticks(void);
698 extern void setup_log(struct io_log **);
699 extern void finish_log(struct thread_data *, struct io_log *, const char *);
700 extern void finish_log_named(struct thread_data *, struct io_log *, const char *, const char *);
701 extern void __finish_log(struct io_log *, const char *);
702 extern struct io_log *agg_io_log[2];
703 extern int write_bw_log;
704 extern void add_agg_sample(unsigned long, enum fio_ddir, unsigned int);
709 extern unsigned long long utime_since(struct timeval *, struct timeval *);
710 extern unsigned long long utime_since_now(struct timeval *);
711 extern unsigned long mtime_since(struct timeval *, struct timeval *);
712 extern unsigned long mtime_since_now(struct timeval *);
713 extern unsigned long time_since_now(struct timeval *);
714 extern unsigned long mtime_since_genesis(void);
715 extern void usec_spin(unsigned int);
716 extern void usec_sleep(struct thread_data *, unsigned long);
717 extern void rate_throttle(struct thread_data *, unsigned long, unsigned int);
718 extern void fill_start_time(struct timeval *);
719 extern void fio_gettime(struct timeval *, void *);
720 extern void fio_gtod_init(void);
721 extern void fio_gtod_update(void);
722 extern void set_genesis_time(void);
723 extern int ramp_time_over(struct thread_data *);
724 extern int in_ramp_time(struct thread_data *);
727 * Init/option functions
729 extern int __must_check parse_options(int, char **);
730 extern int fio_options_parse(struct thread_data *, char **, int);
731 extern int fio_cmd_option_parse(struct thread_data *, const char *, char *);
732 extern void fio_fill_default_options(struct thread_data *);
733 extern int fio_show_option_help(const char *);
734 extern void fio_options_dup_and_init(struct option *);
735 extern void options_mem_dupe(struct thread_data *);
736 extern void options_mem_free(struct thread_data *);
737 extern void td_fill_rand_seeds(struct thread_data *);
738 #define FIO_GETOPT_JOB 0x89988998
739 #define FIO_NR_OPTIONS 128
744 extern void print_thread_status(void);
745 extern void print_status_init(int);
750 #ifdef FIO_HAVE_DISK_UTIL
751 extern void show_disk_util(void);
752 extern void init_disk_util(struct thread_data *);
753 extern void update_io_ticks(void);
755 #define show_disk_util()
756 #define init_disk_util(td)
757 #define update_io_ticks()
761 * Thread life cycle. Once a thread has a runstate beyond TD_INITIALIZED, it
762 * will never go back again. It may cycle between running/verifying/fsyncing.
763 * Once the thread reaches TD_EXITED, it is just waiting for the core to
779 extern void td_set_runstate(struct thread_data *, int);
784 extern int __must_check fio_pin_memory(void);
785 extern void fio_unpin_memory(void);
786 extern int __must_check allocate_io_mem(struct thread_data *);
787 extern void free_io_mem(struct thread_data *);
792 #define queue_full(td) flist_empty(&(td)->io_u_freelist)
793 extern struct io_u *__get_io_u(struct thread_data *);
794 extern struct io_u *get_io_u(struct thread_data *);
795 extern void put_io_u(struct thread_data *, struct io_u *);
796 extern void requeue_io_u(struct thread_data *, struct io_u **);
797 extern long __must_check io_u_sync_complete(struct thread_data *, struct io_u *);
798 extern long __must_check io_u_queued_complete(struct thread_data *, int);
799 extern void io_u_queued(struct thread_data *, struct io_u *);
800 extern void io_u_log_error(struct thread_data *, struct io_u *);
801 extern void io_u_mark_depth(struct thread_data *, unsigned int);
802 extern void io_u_fill_buffer(struct thread_data *td, struct io_u *, unsigned int);
803 void io_u_mark_complete(struct thread_data *, unsigned int);
804 void io_u_mark_submit(struct thread_data *, unsigned int);
807 * Reset stats after ramp time completes
809 extern void reset_all_stats(struct thread_data *);
812 * io engine entry points
814 extern int __must_check td_io_init(struct thread_data *);
815 extern int __must_check td_io_prep(struct thread_data *, struct io_u *);
816 extern int __must_check td_io_queue(struct thread_data *, struct io_u *);
817 extern int __must_check td_io_sync(struct thread_data *, struct fio_file *);
818 extern int __must_check td_io_getevents(struct thread_data *, unsigned int, unsigned int, struct timespec *);
819 extern int __must_check td_io_commit(struct thread_data *);
820 extern int __must_check td_io_open_file(struct thread_data *, struct fio_file *);
821 extern int td_io_close_file(struct thread_data *, struct fio_file *);
822 extern int __must_check td_io_get_file_size(struct thread_data *, struct fio_file *);
827 #ifdef FIO_HAVE_BLKTRACE
828 extern int is_blktrace(const char *);
829 extern int load_blktrace(struct thread_data *, const char *);
832 struct ioengine_ops {
833 struct flist_head list;
837 int (*setup)(struct thread_data *);
838 int (*init)(struct thread_data *);
839 int (*prep)(struct thread_data *, struct io_u *);
840 int (*queue)(struct thread_data *, struct io_u *);
841 int (*commit)(struct thread_data *);
842 int (*getevents)(struct thread_data *, unsigned int, unsigned int, struct timespec *);
843 struct io_u *(*event)(struct thread_data *, int);
844 int (*cancel)(struct thread_data *, struct io_u *);
845 void (*cleanup)(struct thread_data *);
846 int (*open_file)(struct thread_data *, struct fio_file *);
847 int (*close_file)(struct thread_data *, struct fio_file *);
848 int (*get_file_size)(struct thread_data *, struct fio_file *);
853 #define FIO_IOOPS_VERSION 10
855 extern struct ioengine_ops *load_ioengine(struct thread_data *, const char *);
856 extern void register_ioengine(struct ioengine_ops *);
857 extern void unregister_ioengine(struct ioengine_ops *);
858 extern void close_ioengine(struct thread_data *);
861 * Mark unused variables passed to ops functions as unused, to silence gcc
863 #define fio_unused __attribute((__unused__))
864 #define fio_init __attribute__((constructor))
865 #define fio_exit __attribute__((destructor))
867 #define for_each_td(td, i) \
868 for ((i) = 0, (td) = &threads[0]; (i) < (int) thread_number; (i)++, (td)++)
869 #define for_each_file(td, f, i) \
870 if ((td)->files_index) \
871 for ((i) = 0, (f) = (td)->files[0]; \
872 (i) < (td)->o.nr_files && ((f) = (td)->files[i]) != NULL; \
875 #define fio_assert(td, cond) do { \
878 fprintf(stderr, "file:%s:%d, assert %s failed\n", __FILE__, __LINE__, #cond); \
879 (td)->runstate = TD_EXITED; \
880 (td)->error = EFAULT; \
/*
 * NOTE(review): fragmentary view — braces and any guard around the
 * memset (e.g. a file_map NULL check) are missing from this dump; code
 * bytes below are untouched.
 * Resets per-file io state for a new loop: free-block lookup cursor back
 * to 0, position back to the file's starting offset, and the random-map
 * bitmap cleared (num_maps ints worth).
 */
885 static inline void fio_file_reset(struct fio_file *f)
887 f->last_free_lookup = 0;
888 f->last_pos = f->file_offset;
890 memset(f->file_map, 0, f->num_maps * sizeof(int));
/*
 * NOTE(review): fragmentary view — braces and the likely td->error reset
 * are missing from this dump; code bytes below are untouched.
 * Clears the thread's stored error string (verror is the buffer filled
 * by the __td_verror() macro above).
 */
893 static inline void clear_error(struct thread_data *td)
896 td->verror[0] = '\0';
/*
 * NOTE(review): fragmentary view — closing braces, the trailing newline
 * log call, and any NULL check on f are missing from this dump; code
 * bytes below are untouched.
 * Debug helper: logs an io_u's offset/length/direction under the FD_IO
 * debug class, appending "/<filename>" when FD_IO debugging is enabled.
 */
900 static inline void dprint_io_u(struct io_u *io_u, const char *p)
902 struct fio_file *f = io_u->file;
904 dprint(FD_IO, "%s: io_u %p: off=%llu/len=%lu/ddir=%d", p, io_u,
905 (unsigned long long) io_u->offset,
906 io_u->buflen, io_u->ddir);
907 if (fio_debug & (1 << FD_IO)) {
909 log_info("/%s", f->file_name);
915 #define dprint_io_u(io_u, p)
918 static inline int fio_fill_issue_time(struct thread_data *td)
920 if (td->o.read_iolog_file ||
921 !td->o.disable_clat || !td->o.disable_slat || !td->o.disable_bw)