mutex error handling
[fio.git] / fio.h
#ifndef FIO_H
#define FIO_H

#include <sched.h>
#include <limits.h>
#include <pthread.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>

#include "list.h"
#include "md5.h"
#include "crc32.h"
#include "arch.h"
#include "os.h"
#include "mutex.h"

#ifdef FIO_HAVE_SYSLET
#include "syslet.h"
#endif

enum fio_ddir {
        DDIR_READ = 0,
        DDIR_WRITE,
        DDIR_SYNC,
};

enum td_ddir {
        TD_DDIR_READ = 1 << 0,
        TD_DDIR_WRITE = 1 << 1,
        TD_DDIR_RAND = 1 << 2,
        TD_DDIR_RW = TD_DDIR_READ | TD_DDIR_WRITE,
        TD_DDIR_RANDREAD = TD_DDIR_READ | TD_DDIR_RAND,
        TD_DDIR_RANDWRITE = TD_DDIR_WRITE | TD_DDIR_RAND,
        TD_DDIR_RANDRW = TD_DDIR_RW | TD_DDIR_RAND,
};

/*
 * Use for maintaining statistics
 */
struct io_stat {
        unsigned long max_val;
        unsigned long min_val;
        unsigned long samples;

        double mean;
        double S;
};

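/*
 * Illustrative sketch (not part of fio): the kind of Welford-style running
 * update that a mean/S pair like the above supports. fio's real per-sample
 * accounting lives in its stats code; this only shows why S is kept next to
 * the mean (variance ~= S / (samples - 1)).
 */
#if 0
static void io_stat_add_sketch(struct io_stat *is, unsigned long val)
{
        double delta;

        if (!is->samples || val > is->max_val)
                is->max_val = val;
        if (!is->samples || val < is->min_val)
                is->min_val = val;

        is->samples++;
        delta = (double) val - is->mean;
        is->mean += delta / is->samples;
        is->S += delta * ((double) val - is->mean);
}
#endif
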
/*
 * A single data sample
 */
struct io_sample {
        unsigned long time;
        unsigned long val;
        enum fio_ddir ddir;
};

/*
 * Dynamically growing data sample log
 */
struct io_log {
        unsigned long nr_samples;
        unsigned long max_samples;
        struct io_sample *log;
};

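/*
 * Illustrative sketch (not part of fio): one way a log like this can grow on
 * demand. The doubling policy and the helper name are assumptions for the
 * example; fio's actual sample logging is implemented elsewhere.
 */
#if 0
static void io_log_add_sketch(struct io_log *log, unsigned long t,
                              unsigned long val, enum fio_ddir ddir)
{
        struct io_sample *s;

        if (log->nr_samples == log->max_samples) {
                unsigned long new_max = log->max_samples ? log->max_samples * 2 : 1024;
                void *p = realloc(log->log, new_max * sizeof(struct io_sample));

                if (!p)
                        return;

                log->log = p;
                log->max_samples = new_max;
        }

        s = &log->log[log->nr_samples++];
        s->time = t;
        s->val = val;
        s->ddir = ddir;
}
#endif
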
/*
 * When logging io actions, this matches a single sent io_u
 */
struct io_piece {
        struct list_head list;
        struct fio_file *file;
        unsigned long long offset;
        unsigned long len;
        enum fio_ddir ddir;
};

#ifdef FIO_HAVE_SYSLET
struct syslet_req {
        struct syslet_uatom atom;       /* the atom to submit */
        struct syslet_uatom *head;      /* head of the sequence */
        long ret;                       /* syscall return value */
};
#endif

enum {
        IO_U_F_FREE     = 1 << 0,
        IO_U_F_FLIGHT   = 1 << 1,
};

/*
 * The io unit
 */
struct io_u {
        union {
#ifdef FIO_HAVE_LIBAIO
                struct iocb iocb;
#endif
#ifdef FIO_HAVE_POSIXAIO
                struct aiocb aiocb;
#endif
#ifdef FIO_HAVE_SGIO
                struct sg_io_hdr hdr;
#endif
#ifdef FIO_HAVE_SYSLET
                struct syslet_req req;
#endif
        };
        struct timeval start_time;
        struct timeval issue_time;

        /*
         * Allocated/set buffer and length
         */
        void *buf;
        unsigned long buflen;
        unsigned long long offset;

        /*
         * IO engine state, may be different from above when we get
         * partial transfers / residual data counts
         */
        void *xfer_buf;
        unsigned long xfer_buflen;

        unsigned int resid;
        unsigned int error;

        enum fio_ddir ddir;

        /*
         * io engine private data
         */
        union {
                unsigned int index;
                unsigned int seen;
        };

        unsigned int flags;

        struct fio_file *file;

        struct list_head list;

        /*
         * Callback for io completion
         */
        int (*end_io)(struct io_u *);
};

/*
 * io_ops->queue() return values
 */
enum {
        FIO_Q_COMPLETED = 0,    /* completed sync */
        FIO_Q_QUEUED    = 1,    /* queued, will complete async */
        FIO_Q_BUSY      = 2,    /* no more room, call ->commit() */
};

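/*
 * Illustrative sketch (not part of fio): how a caller might act on these
 * return values using the td_io_*() and io_u helpers declared later in this
 * header. Error handling and accounting are deliberately elided.
 */
#if 0
static int queue_one_sketch(struct thread_data *td, struct io_u *io_u)
{
        int ret = td_io_queue(td, io_u);

        switch (ret) {
        case FIO_Q_COMPLETED:
                /* done synchronously, reap the completion right away */
                return io_u_sync_complete(td, io_u) < 0;
        case FIO_Q_QUEUED:
                /* will show up later via td_io_getevents() */
                return 0;
        case FIO_Q_BUSY:
                /* engine has no room: requeue and flush with ->commit() */
                requeue_io_u(td, &io_u);
                return td_io_commit(td);
        default:
                return 1;
        }
}
#endif
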
#define FIO_HDR_MAGIC 0xf00baaef

enum {
        VERIFY_NONE = 0,        /* no verification */
        VERIFY_MD5,             /* md5 sum data blocks */
        VERIFY_CRC32,           /* crc32 sum data blocks */
};

/*
 * A header structure associated with each checksummed data block
 */
struct verify_header {
        unsigned int fio_magic;
        unsigned int len;
        unsigned int verify_type;
        union {
                char md5_digest[MD5_HASH_WORDS * 4];
                unsigned long crc32;
        };
};

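/*
 * Illustrative sketch (not part of fio): the basic shape of checking such a
 * header before the per-block checksum is recomputed. The actual checking is
 * handled by verify_io_u(), declared further down.
 */
#if 0
static int check_verify_header_sketch(struct verify_header *hdr)
{
        if (hdr->fio_magic != FIO_HDR_MAGIC)
                return EIO;

        switch (hdr->verify_type) {
        case VERIFY_MD5:
                /* recompute md5 over the block, compare with hdr->md5_digest */
                break;
        case VERIFY_CRC32:
                /* recompute crc32 over the block, compare with hdr->crc32 */
                break;
        case VERIFY_NONE:
        default:
                break;
        }

        return 0;
}
#endif
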
struct group_run_stats {
        unsigned long long max_run[2], min_run[2];
        unsigned long long max_bw[2], min_bw[2];
        unsigned long long io_kb[2];
        unsigned long long agg[2];
};

/*
 * What type of allocation to use for io buffers
 */
enum fio_memtype {
        MEM_MALLOC = 0,         /* ordinary malloc */
        MEM_SHM,                /* use shared memory segments */
        MEM_SHMHUGE,            /* use shared memory segments with huge pages */
        MEM_MMAP,               /* use anonymous mmap */
        MEM_MMAPHUGE,           /* memory mapped huge file */
};

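/*
 * Illustrative sketch (not part of fio, assumes <sys/mman.h>): roughly what
 * each memory type implies for the io buffer allocation. The real logic,
 * including huge page handling and teardown, lives behind allocate_io_mem()
 * and free_io_mem(), declared under the memory helpers below.
 */
#if 0
static void *alloc_io_buffer_sketch(enum fio_memtype type, size_t size)
{
        switch (type) {
        case MEM_MALLOC:
                return malloc(size);
        case MEM_MMAP:
        case MEM_MMAPHUGE:
                /* anonymous or (huge) file backed mapping; flags elided */
                return mmap(NULL, size, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANON, -1, 0);
        case MEM_SHM:
        case MEM_SHMHUGE:
                /* shmget() + shmat(), with SHM_HUGETLB for the huge variant */
                return NULL;
        }

        return NULL;
}
#endif
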
/*
 * The type of object we are working on
 */
enum fio_filetype {
        FIO_TYPE_FILE = 1,      /* plain file */
        FIO_TYPE_BD,            /* block device */
        FIO_TYPE_CHAR,          /* character device */
};

enum fio_ioengine_flags {
        FIO_SYNCIO      = 1 << 0,       /* io engine has synchronous ->queue */
        FIO_CPUIO       = 1 << 1,       /* cpu burner, doesn't do real io */
        FIO_RAWIO       = 1 << 2,       /* some sort of direct/raw io */
        FIO_DISKLESSIO  = 1 << 3,       /* no disk involved */
};

/*
 * Each thread_data structure has a number of files associated with it,
 * this structure holds state information for a single file.
 */
struct fio_file {
        /*
         * A file may not be a file descriptor, let the io engine decide
         */
        union {
                unsigned long file_data;
                int fd;
        };
        char *file_name;
        void *mmap;
        unsigned long long file_size;
        unsigned long long real_file_size;
        unsigned long long file_offset;
        unsigned long long last_pos;
        unsigned long long last_completed_pos;

        /*
         * block map for random io
         */
        unsigned long *file_map;
        unsigned int num_maps;
        unsigned int last_free_lookup;

        unsigned int unlink;
        unsigned int open;
};

/*
 * How many depth levels to log
 */
#define FIO_IO_U_MAP_NR 8
#define FIO_IO_U_LAT_NR 12

struct thread_stat {
        char *name;
        char *verror;
        int error;
        int groupid;
        pid_t pid;
        char *description;
        int members;

        struct io_log *slat_log;
        struct io_log *clat_log;
        struct io_log *bw_log;

        /*
         * bandwidth and latency stats
         */
        struct io_stat clat_stat[2];    /* completion latency */
        struct io_stat slat_stat[2];    /* submission latency */
        struct io_stat bw_stat[2];      /* bandwidth stats */

        unsigned long long stat_io_bytes[2];
        struct timeval stat_sample_time[2];

        /*
         * fio system usage accounting
         */
        struct rusage ru_start;
        struct rusage ru_end;
        unsigned long usr_time;
        unsigned long sys_time;
        unsigned long ctx;

        /*
         * IO depth and latency stats
         */
        unsigned int io_u_map[FIO_IO_U_MAP_NR];
        unsigned int io_u_lat[FIO_IO_U_LAT_NR];
        unsigned long total_io_u;

        unsigned long long io_bytes[2];
        unsigned long runtime[2];
        unsigned long total_run_time;
};

/*
 * This describes a single thread/process executing a fio job.
 */
struct thread_data {
        int pad;
        char *description;
        char *name;
        char *directory;
        char *filename;
        char verror[128];
        pthread_t thread;
        int thread_number;
        int groupid;
        struct thread_stat ts;
        enum fio_filetype filetype;
        struct fio_file *files;
        unsigned int nr_files;
        unsigned int nr_open_files;
        unsigned int nr_uniq_files;
        union {
                unsigned int next_file;
                os_random_state_t next_file_state;
        };
        int error;
        pid_t pid;
        char *orig_buffer;
        size_t orig_buffer_size;
        volatile int terminate;
        volatile int runstate;
        enum td_ddir td_ddir;
        unsigned int ioprio;
        unsigned int last_was_sync;

        unsigned int odirect;
        unsigned int invalidate_cache;
        unsigned int create_serialize;
        unsigned int create_fsync;
        unsigned int end_fsync;
        unsigned int sync_io;
        unsigned int verify;
        unsigned int use_thread;
        unsigned int unlink;
        unsigned int do_disk_util;
        unsigned int override_sync;
        unsigned int rand_repeatable;
        unsigned int write_lat_log;
        unsigned int write_bw_log;
        unsigned int norandommap;
        unsigned int bs_unaligned;

        unsigned int bs[2];
        unsigned int min_bs[2];
        unsigned int max_bs[2];
        unsigned int hugepage_size;
        unsigned int rw_min_bs;
        unsigned int thinktime;
        unsigned int thinktime_spin;
        unsigned int thinktime_blocks;
        unsigned int fsync_blocks;
        unsigned int start_delay;
        unsigned long timeout;
        unsigned int overwrite;
        unsigned int bw_avg_time;
        unsigned int loops;
        unsigned long long zone_size;
        unsigned long long zone_skip;
        enum fio_memtype mem_type;
        char *mmapfile;
        int mmapfd;
        unsigned int stonewall;
        unsigned int numjobs;
        unsigned int iodepth;
        unsigned int iodepth_low;
        unsigned int iodepth_batch;
        os_cpu_mask_t cpumask;
        unsigned int iolog;
        unsigned int read_iolog;
        unsigned int rwmixcycle;
        unsigned int rwmixread;
        unsigned int rwmixwrite;
        unsigned int nice;
        unsigned int file_service_type;
        unsigned int group_reporting;
        unsigned int open_files;

        char *read_iolog_file;
        char *write_iolog_file;
        void *iolog_buf;
        FILE *iolog_f;

        char *sysfs_root;
        char *ioscheduler;

        os_random_state_t bsrange_state;
        os_random_state_t verify_state;

        int shm_id;

        /*
         * IO engine hooks, contains everything needed to submit an io_u
         * to any of the available IO engines.
         */
        struct ioengine_ops *io_ops;

        /*
         * Current IO depth and list of free and busy io_u's.
         */
        unsigned int cur_depth;
        struct list_head io_u_freelist;
        struct list_head io_u_busylist;
        struct list_head io_u_requeues;
        unsigned int io_u_queued;

        /*
         * Rate state
         */
        unsigned int rate;
        unsigned int ratemin;
        unsigned int ratecycle;
        unsigned long rate_usec_cycle;
        long rate_pending_usleep;
        unsigned long rate_bytes;
        struct timeval lastrate;

        unsigned long long io_size;
        unsigned long long total_file_size;
        unsigned long long start_offset;
        unsigned long long total_io_size;

        unsigned long io_issues[2];
        unsigned long long io_blocks[2];
        unsigned long long io_bytes[2];
        unsigned long long this_io_bytes[2];
        unsigned long long zone_bytes;
        struct fio_sem *mutex;

        /*
         * State for random io, a bitmap of blocks done vs not done
         */
        os_random_state_t random_state;

        /*
         * CPU "io" cycle burner
         */
        unsigned int cpuload;
        unsigned int cpucycle;

        struct timeval start;   /* start of this loop */
        struct timeval epoch;   /* time job was started */

        /*
         * read/write mixed workload state
         */
        os_random_state_t rwmix_state;
        struct timeval rwmix_switch;
        enum fio_ddir rwmix_ddir;

        /*
         * Pre-run and post-run shell
         */
        char *exec_prerun;
        char *exec_postrun;

        /*
         * IO historic logs
         */
        struct list_head io_hist_list;
        struct list_head io_log_list;

        /*
         * timeout handling
         */
        struct timeval timeout_end;
        struct itimerval timer;
};

/*
 * Round-robin over the available files, or choose one at random.
 */
enum {
        FIO_FSERVICE_RANDOM     = 1,
        FIO_FSERVICE_RR         = 2,
};

/*
 * 30 second per-io_u timeout, with 5 second intervals to avoid resetting
 * the timer on each queue operation.
 */
#define IO_U_TIMEOUT_INC 5
#define IO_U_TIMEOUT 30

#define __td_verror(td, err, msg, func)         \
        do {                                    \
                if ((td)->error)                \
                        break;                  \
                int e = (err);                  \
                (td)->error = e;                \
                snprintf(td->verror, sizeof(td->verror) - 1, "file:%s:%d, func=%s, error=%s", __FILE__, __LINE__, (func), (msg)); \
        } while (0)

#define td_verror(td, err, func)        \
        __td_verror((td), (err), strerror((err)), (func))
#define td_vmsg(td, err, msg, func)     \
        __td_verror((td), (err), (msg), (func))

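/*
 * Example (illustrative only): typical use after a failing syscall, in a
 * function that has a thread_data *td and fio_file *f in scope, so the
 * stored error string records the file, line and function that failed.
 */
#if 0
        if (fsync(f->fd) < 0) {
                td_verror(td, errno, "fsync");
                return 1;
        }
#endif
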
extern int exitall_on_terminate;
extern int thread_number;
extern int shm_id;
extern int groupid;
extern int terse_output;
extern FILE *f_out;
extern FILE *f_err;
extern int temp_stall_ts;
extern unsigned long long mlock_size;

extern struct thread_data *threads;

#define td_read(td)     ((td)->td_ddir & TD_DDIR_READ)
#define td_write(td)    ((td)->td_ddir & TD_DDIR_WRITE)
#define td_rw(td)       (((td)->td_ddir & TD_DDIR_RW) == TD_DDIR_RW)
#define td_random(td)   ((td)->td_ddir & TD_DDIR_RAND)

#define BLOCKS_PER_MAP          (8 * sizeof(long))
#define TO_MAP_BLOCK(td, f, b)  ((b) - ((f)->file_offset / (td)->rw_min_bs))
#define RAND_MAP_IDX(td, f, b)  (TO_MAP_BLOCK(td, f, b) / BLOCKS_PER_MAP)
#define RAND_MAP_BIT(td, f, b)  (TO_MAP_BLOCK(td, f, b) & (BLOCKS_PER_MAP - 1))

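/*
 * Illustrative sketch (not part of fio): how the macros above locate a block
 * in a fio_file's random io bitmap. The real map maintenance (and the
 * norandommap case) is handled elsewhere in fio.
 */
#if 0
static void mark_random_block_sketch(struct thread_data *td,
                                     struct fio_file *f,
                                     unsigned long long block)
{
        unsigned long idx = RAND_MAP_IDX(td, f, block);
        unsigned int bit = RAND_MAP_BIT(td, f, block);

        f->file_map[idx] |= 1UL << bit;
}
#endif
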
#define MAX_JOBS (1024)

static inline int should_fsync(struct thread_data *td)
{
        if (td->last_was_sync)
                return 0;
        if (td->odirect)
                return 0;
        if (td_write(td) || td_rw(td) || td->override_sync)
                return 1;

        return 0;
}

/*
 * Disk utilization stats, as read from /sys/block/<dev>/stat
 */
struct disk_util_stat {
        unsigned ios[2];
        unsigned merges[2];
        unsigned long long sectors[2];
        unsigned ticks[2];
        unsigned io_ticks;
        unsigned time_in_queue;
};

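/*
 * Illustrative sketch (not part of fio): /sys/block/<dev>/stat is a single
 * line whose fields map onto the structure above in the order given in the
 * kernel's Documentation/iostats.txt (read ios/merges/sectors/ticks, write
 * ios/merges/sectors/ticks, in-flight, io_ticks, time_in_queue).
 */
#if 0
static int read_disk_util_stat_sketch(FILE *f, struct disk_util_stat *dus)
{
        unsigned int in_flight;

        return fscanf(f, "%u %u %llu %u %u %u %llu %u %u %u %u",
                      &dus->ios[0], &dus->merges[0], &dus->sectors[0],
                      &dus->ticks[0], &dus->ios[1], &dus->merges[1],
                      &dus->sectors[1], &dus->ticks[1], &in_flight,
                      &dus->io_ticks, &dus->time_in_queue) == 11;
}
#endif
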
/*
 * Per-device disk util management
 */
struct disk_util {
        struct list_head list;

        char *name;
        char path[256];
        dev_t dev;

        struct disk_util_stat dus;
        struct disk_util_stat last_dus;

        unsigned long msec;
        struct timeval time;
};

#define DISK_UTIL_MSEC (250)

#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
#ifndef max
#define max(a, b) ((a) > (b) ? (a) : (b))
#endif

/*
 * Log exports
 */
extern int __must_check read_iolog_get(struct thread_data *, struct io_u *);
extern void write_iolog_put(struct thread_data *, struct io_u *);
extern int __must_check init_iolog(struct thread_data *td);
extern void log_io_piece(struct thread_data *, struct io_u *);
extern void prune_io_piece_log(struct thread_data *);
extern void write_iolog_close(struct thread_data *);

/*
 * Logging
 */
extern void add_clat_sample(struct thread_data *, enum fio_ddir, unsigned long);
extern void add_slat_sample(struct thread_data *, enum fio_ddir, unsigned long);
extern void add_bw_sample(struct thread_data *, enum fio_ddir, struct timeval *);
extern void show_run_stats(void);
extern void init_disk_util(struct thread_data *);
extern void update_rusage_stat(struct thread_data *);
extern void update_io_ticks(void);
extern void disk_util_timer_arm(void);
extern void setup_log(struct io_log **);
extern void finish_log(struct thread_data *, struct io_log *, const char *);
extern void __finish_log(struct io_log *, const char *);
extern int setup_rate(struct thread_data *);
extern struct io_log *agg_io_log[2];
extern int write_bw_log;
extern void add_agg_sample(unsigned long, enum fio_ddir);

/*
 * Time functions
 */
extern unsigned long utime_since(struct timeval *, struct timeval *);
extern unsigned long utime_since_now(struct timeval *);
extern unsigned long mtime_since(struct timeval *, struct timeval *);
extern unsigned long mtime_since_now(struct timeval *);
extern unsigned long time_since_now(struct timeval *);
extern unsigned long mtime_since_genesis(void);
extern void __usec_sleep(unsigned int);
extern void usec_sleep(struct thread_data *, unsigned long);
extern void rate_throttle(struct thread_data *, unsigned long, unsigned int);
extern void fill_start_time(struct timeval *);
extern void fio_gettime(struct timeval *, void *);
extern void set_genesis_time(void);

/*
 * Init functions
 */
extern int __must_check parse_options(int, char **);
extern int __must_check init_random_state(struct thread_data *);

/*
 * File setup/shutdown
 */
extern void close_files(struct thread_data *);
extern int __must_check setup_files(struct thread_data *);
extern int __must_check open_files(struct thread_data *);
extern int __must_check file_invalidate_cache(struct thread_data *, struct fio_file *);
extern int __must_check generic_open_file(struct thread_data *, struct fio_file *);
extern void generic_close_file(struct thread_data *, struct fio_file *);

/*
 * ETA/status stuff
 */
extern void print_thread_status(void);
extern void print_status_init(int);

/*
 * Thread life cycle. Once a thread has a runstate beyond TD_INITIALIZED, it
 * will never go back. It may cycle between running/verifying/fsyncing.
 * Once the thread reaches TD_EXITED, it is just waiting for the core to
 * reap it.
 */
enum {
        TD_NOT_CREATED = 0,
        TD_CREATED,
        TD_INITIALIZED,
        TD_RUNNING,
        TD_VERIFYING,
        TD_FSYNCING,
        TD_EXITED,
        TD_REAPED,
};

/*
 * Verify helpers
 */
extern void populate_verify_io_u(struct thread_data *, struct io_u *);
extern int __must_check get_next_verify(struct thread_data *td, struct io_u *);
extern int __must_check verify_io_u(struct io_u *);

/*
 * Memory helpers
 */
extern int __must_check fio_pin_memory(void);
extern void fio_unpin_memory(void);
extern int __must_check allocate_io_mem(struct thread_data *);
extern void free_io_mem(struct thread_data *);

/*
 * io unit handling
 */
#define queue_full(td)  list_empty(&(td)->io_u_freelist)
extern struct io_u *__get_io_u(struct thread_data *);
extern struct io_u *get_io_u(struct thread_data *);
extern void put_io_u(struct thread_data *, struct io_u *);
extern void requeue_io_u(struct thread_data *, struct io_u **);
extern long __must_check io_u_sync_complete(struct thread_data *, struct io_u *);
extern long __must_check io_u_queued_complete(struct thread_data *, int);
extern void io_u_queued(struct thread_data *, struct io_u *);
extern void io_u_log_error(struct thread_data *, struct io_u *);
extern void io_u_init_timeout(void);
extern void io_u_set_timeout(struct thread_data *);

/*
 * io engine entry points
 */
extern int __must_check td_io_init(struct thread_data *);
extern int __must_check td_io_prep(struct thread_data *, struct io_u *);
extern int __must_check td_io_queue(struct thread_data *, struct io_u *);
extern int __must_check td_io_sync(struct thread_data *, struct fio_file *);
extern int __must_check td_io_getevents(struct thread_data *, int, int, struct timespec *);
extern int __must_check td_io_commit(struct thread_data *);
extern int __must_check td_io_open_file(struct thread_data *, struct fio_file *);
extern void td_io_close_file(struct thread_data *, struct fio_file *);

/*
 * If logging output to a file, error messages should go to both stderr
 * and f_err
 */
#define log_err(args...) do {                   \
        fprintf(f_err, ##args);                 \
        if (f_err != stderr)                    \
                fprintf(stderr, ##args);        \
        } while (0)

FILE *get_f_out(void);
FILE *get_f_err(void);

struct ioengine_ops {
        struct list_head list;
        char name[16];
        int version;
        int flags;
        int (*setup)(struct thread_data *);
        int (*init)(struct thread_data *);
        int (*prep)(struct thread_data *, struct io_u *);
        int (*queue)(struct thread_data *, struct io_u *);
        int (*commit)(struct thread_data *);
        int (*getevents)(struct thread_data *, int, int, struct timespec *);
        struct io_u *(*event)(struct thread_data *, int);
        int (*cancel)(struct thread_data *, struct io_u *);
        void (*cleanup)(struct thread_data *);
        int (*open_file)(struct thread_data *, struct fio_file *);
        void (*close_file)(struct thread_data *, struct fio_file *);
        void *data;
        void *dlhandle;
};

#define FIO_IOOPS_VERSION 6

extern struct ioengine_ops *load_ioengine(struct thread_data *, const char *);
extern void register_ioengine(struct ioengine_ops *);
extern void unregister_ioengine(struct ioengine_ops *);
extern void close_ioengine(struct thread_data *);

/*
 * Mark unused variables passed to ops functions as unused, to silence gcc
 */
#define fio_unused      __attribute((__unused__))
#define fio_init        __attribute__((constructor))
#define fio_exit        __attribute__((destructor))

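/*
 * Illustrative skeleton (not a real fio engine): the minimum a synchronous,
 * diskless engine might fill in, registering itself through the fio_init/
 * fio_exit constructor helpers above. All names here are made up for the
 * example.
 */
#if 0
static int sketch_queue(struct thread_data fio_unused *td, struct io_u *io_u)
{
        io_u->error = 0;
        return FIO_Q_COMPLETED;
}

static struct ioengine_ops sketch_ioengine = {
        .name           = "sketch-null",
        .version        = FIO_IOOPS_VERSION,
        .flags          = FIO_SYNCIO | FIO_DISKLESSIO,
        .queue          = sketch_queue,
        .open_file      = generic_open_file,
        .close_file     = generic_close_file,
};

static void fio_init sketch_register(void)
{
        register_ioengine(&sketch_ioengine);
}

static void fio_exit sketch_unregister(void)
{
        unregister_ioengine(&sketch_ioengine);
}
#endif
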
#define for_each_td(td, i)      \
        for ((i) = 0, (td) = &threads[0]; (i) < (int) thread_number; (i)++, (td)++)
#define for_each_file(td, f, i) \
        for ((i) = 0, (f) = &(td)->files[0]; (i) < (int) (td)->open_files; (i)++, (f)++)

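/*
 * Example (illustrative only): the intended usage pattern for the two
 * iterators above.
 */
#if 0
        struct thread_data *td;
        struct fio_file *f;
        int i, j;

        for_each_td(td, i)
                for_each_file(td, f, j)
                        log_err("job %d: file %s\n", td->thread_number, f->file_name);
#endif
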
#define fio_assert(td, cond)    do {    \
        if (!(cond)) {                  \
                int *__foo = NULL;      \
                fprintf(stderr, "file:%s:%d, assert %s failed\n", __FILE__, __LINE__, #cond); \
                (td)->runstate = TD_EXITED;     \
                (td)->error = EFAULT;           \
                *__foo = 0;                     \
        }                               \
} while (0)

#endif