Differentiate between bool error return and real error value
[fio.git] / fio.h
#ifndef FIO_H
#define FIO_H

#include <sched.h>
#include <limits.h>
#include <pthread.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>

#include "list.h"
#include "md5.h"
#include "crc32.h"
#include "arch.h"
#include "os.h"

#ifdef FIO_HAVE_SYSLET
#include "syslet.h"
#endif

enum fio_ddir {
	DDIR_READ = 0,
	DDIR_WRITE,
	DDIR_SYNC,
};

/*
 * Used for maintaining statistics
 */
struct io_stat {
	unsigned long max_val;
	unsigned long min_val;
	unsigned long samples;

	double mean;
	double S;
};
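
/*
 * Illustrative sketch only (not part of the original header): stats like the
 * above are what Welford's online method maintains, with S accumulating the
 * sum of squared deviations from the running mean.  The helper name below is
 * hypothetical; fio's real update code lives elsewhere in the source.
 */
static inline void io_stat_add_sample_example(struct io_stat *is,
					      unsigned long val)
{
	double delta;

	if (!is->samples || val < is->min_val)
		is->min_val = val;
	if (val > is->max_val)
		is->max_val = val;

	is->samples++;
	delta = (double) val - is->mean;
	is->mean += delta / (double) is->samples;
	is->S += delta * ((double) val - is->mean);
	/* sample variance so far would then be S / (samples - 1) */
}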

/*
 * A single data sample
 */
struct io_sample {
	unsigned long time;
	unsigned long val;
	enum fio_ddir ddir;
};

/*
 * Dynamically growing data sample log
 */
struct io_log {
	unsigned long nr_samples;
	unsigned long max_samples;
	struct io_sample *log;
};
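
/*
 * Illustrative sketch only: growing the sample log on demand.  The helper
 * name and the doubling policy are assumptions and error handling is
 * omitted; fio's real logging code lives elsewhere in the source.
 */
static inline void io_log_add_sample_example(struct io_log *iolog,
					     unsigned long time,
					     unsigned long val,
					     enum fio_ddir ddir)
{
	struct io_sample *s;

	if (iolog->nr_samples == iolog->max_samples) {
		iolog->max_samples = iolog->max_samples ? iolog->max_samples * 2 : 1024;
		iolog->log = realloc(iolog->log, iolog->max_samples * sizeof(*s));
	}

	s = &iolog->log[iolog->nr_samples++];
	s->time = time;
	s->val = val;
	s->ddir = ddir;
}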

/*
 * When logging io actions, this matches a single sent io_u
 */
struct io_piece {
	struct list_head list;
	struct fio_file *file;
	unsigned long long offset;
	unsigned long len;
	enum fio_ddir ddir;
};

#ifdef FIO_HAVE_SYSLET
struct syslet_req {
	struct syslet_uatom atom;
	long ret;
};
#endif

/*
 * The io unit
 */
struct io_u {
	union {
#ifdef FIO_HAVE_LIBAIO
		struct iocb iocb;
#endif
#ifdef FIO_HAVE_POSIXAIO
		struct aiocb aiocb;
#endif
#ifdef FIO_HAVE_SGIO
		struct sg_io_hdr hdr;
#endif
#ifdef FIO_HAVE_SYSLET
		struct syslet_req req;
#endif
	};
	struct timeval start_time;
	struct timeval issue_time;

	/*
	 * Allocated/set buffer and length
	 */
	void *buf;
	unsigned long buflen;
	unsigned long long offset;

	/*
	 * IO engine state, may be different from above when we get
	 * partial transfers / residual data counts
	 */
	void *xfer_buf;
	unsigned long xfer_buflen;

	unsigned int resid;
	unsigned int error;

	enum fio_ddir ddir;

	/*
	 * io engine private data
	 */
	union {
		unsigned int index;
		unsigned int seen;
	};

	struct fio_file *file;

	struct list_head list;
};

/*
 * io_ops->queue() return values
 */
enum {
	FIO_Q_COMPLETED	= 0,	/* completed sync */
	FIO_Q_QUEUED	= 1,	/* queued, will complete async */
};

#define FIO_HDR_MAGIC	0xf00baaef

enum {
	VERIFY_NONE = 0,	/* no verification */
	VERIFY_MD5,		/* md5 sum data blocks */
	VERIFY_CRC32,		/* crc32 sum data blocks */
};

/*
 * A header structure associated with each checksummed data block
 */
struct verify_header {
	unsigned int fio_magic;
	unsigned int len;
	unsigned int verify_type;
	union {
		char md5_digest[MD5_HASH_WORDS * 4];
		unsigned long crc32;
	};
};
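
/*
 * Illustrative sketch only: the minimal sanity checks a reader would apply
 * to a block's verify_header before recomputing the per-type checksum.  The
 * helper name is hypothetical; fio's real checking is done by verify_io_u()
 * and friends, declared further down.
 */
static inline int verify_header_sane_example(struct verify_header *hdr,
					     unsigned int max_len)
{
	if (hdr->fio_magic != FIO_HDR_MAGIC)
		return 0;
	if (!hdr->len || hdr->len > max_len)
		return 0;

	return hdr->verify_type == VERIFY_MD5 || hdr->verify_type == VERIFY_CRC32;
}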

struct group_run_stats {
	unsigned long long max_run[2], min_run[2];
	unsigned long long max_bw[2], min_bw[2];
	unsigned long long io_kb[2];
	unsigned long long agg[2];
};

/*
 * What type of allocation to use for io buffers
 */
enum fio_memtype {
	MEM_MALLOC = 0,	/* ordinary malloc */
	MEM_SHM,	/* use shared memory segments */
	MEM_SHMHUGE,	/* use shared memory segments with huge pages */
	MEM_MMAP,	/* use anonymous mmap */
	MEM_MMAPHUGE,	/* memory mapped huge file */
};

/*
 * The type of object we are working on
 */
enum fio_filetype {
	FIO_TYPE_FILE = 1,	/* plain file */
	FIO_TYPE_BD,		/* block device */
	FIO_TYPE_CHAR,		/* character device */
};

enum fio_ioengine_flags {
	FIO_SYNCIO	= 1 << 0,	/* io engine has synchronous ->queue */
	FIO_CPUIO	= 1 << 1,	/* cpu burner, doesn't do real io */
	FIO_MMAPIO	= 1 << 2,	/* uses memory mapped io */
	FIO_RAWIO	= 1 << 3,	/* some sort of direct/raw io */
	FIO_NETIO	= 1 << 4,	/* networked io */
	FIO_NULLIO	= 1 << 5,	/* no real data transfer (cpu/null) */
};

/*
 * Each thread_data structure has a number of files associated with it;
 * this structure holds the state for a single file.
 */
struct fio_file {
	/*
	 * A file may not be backed by a file descriptor; let the io engine decide
	 */
	union {
		unsigned long file_data;
		int fd;
	};
	char *file_name;
	void *mmap;
	unsigned long long file_size;
	unsigned long long real_file_size;
	unsigned long long file_offset;
	unsigned long long last_pos;
	unsigned long long last_completed_pos;

	/*
	 * block map for random io
	 */
	unsigned long *file_map;
	unsigned int num_maps;
	unsigned int last_free_lookup;

	unsigned int unlink;
};

/*
 * How many depth levels to log
 */
#define FIO_IO_U_MAP_NR	8
#define FIO_IO_U_LAT_NR	12

/*
 * This describes a single thread/process executing a fio job.
 */
struct thread_data {
	char *description;
	char *name;
	char *directory;
	char *filename;
	char verror[80];
	pthread_t thread;
	int thread_number;
	int groupid;
	enum fio_filetype filetype;
	struct fio_file *files;
	unsigned int nr_files;
	unsigned int nr_uniq_files;
	unsigned int next_file;
	int error;
	pid_t pid;
	char *orig_buffer;
	size_t orig_buffer_size;
	volatile int terminate;
	volatile int runstate;
	enum fio_ddir ddir;
	unsigned int iomix;
	unsigned int ioprio;
	unsigned int last_was_sync;

	unsigned int sequential;
	unsigned int odirect;
	unsigned int invalidate_cache;
	unsigned int create_serialize;
	unsigned int create_fsync;
	unsigned int end_fsync;
	unsigned int sync_io;
	unsigned int verify;
	unsigned int use_thread;
	unsigned int unlink;
	unsigned int do_disk_util;
	unsigned int override_sync;
	unsigned int rand_repeatable;
	unsigned int write_lat_log;
	unsigned int write_bw_log;
	unsigned int norandommap;
	unsigned int bs_unaligned;

	unsigned int bs[2];
	unsigned int min_bs[2];
	unsigned int max_bs[2];
	unsigned int hugepage_size;
	unsigned int rw_min_bs;
	unsigned int thinktime;
	unsigned int thinktime_spin;
	unsigned int thinktime_blocks;
	unsigned int fsync_blocks;
	unsigned int start_delay;
	unsigned long timeout;
	unsigned int overwrite;
	unsigned int bw_avg_time;
	unsigned int loops;
	unsigned long long zone_size;
	unsigned long long zone_skip;
	enum fio_memtype mem_type;
	char *mmapfile;
	int mmapfd;
	unsigned int stonewall;
	unsigned int numjobs;
	unsigned int iodepth;
	os_cpu_mask_t cpumask;
	unsigned int iolog;
	unsigned int read_iolog;
	unsigned int rwmixcycle;
	unsigned int rwmixread;
	unsigned int rwmixwrite;
	unsigned int nice;

	char *read_iolog_file;
	char *write_iolog_file;
	void *iolog_buf;
	FILE *iolog_f;

	char *sysfs_root;
	char *ioscheduler;

	os_random_state_t bsrange_state;
	os_random_state_t verify_state;

	int shm_id;

	/*
	 * IO engine hooks, contains everything needed to submit an io_u
	 * to any of the available IO engines.
	 */
	struct ioengine_ops *io_ops;

	/*
	 * Current IO depth and list of free and busy io_u's.
	 */
	unsigned int cur_depth;
	unsigned int io_u_map[FIO_IO_U_MAP_NR];
	unsigned int io_u_lat[FIO_IO_U_LAT_NR];
	unsigned long total_io_u;
	struct list_head io_u_freelist;
	struct list_head io_u_busylist;

	/*
	 * Rate state
	 */
	unsigned int rate;
	unsigned int ratemin;
	unsigned int ratecycle;
	unsigned long rate_usec_cycle;
	long rate_pending_usleep;
	unsigned long rate_bytes;
	struct timeval lastrate;

	unsigned long runtime[2];	/* msec */
	unsigned long long io_size;
	unsigned long long total_file_size;
	unsigned long long start_offset;
	unsigned long long total_io_size;

	unsigned long long io_blocks[2];
	unsigned long long io_bytes[2];
	unsigned long long zone_bytes;
	unsigned long long this_io_bytes[2];
	volatile int mutex;

	/*
	 * State for random io, a bitmap of blocks done vs not done
	 */
	os_random_state_t random_state;

	/*
	 * CPU "io" cycle burner
	 */
	unsigned int cpuload;
	unsigned int cpucycle;

	/*
	 * bandwidth and latency stats
	 */
	struct io_stat clat_stat[2];	/* completion latency */
	struct io_stat slat_stat[2];	/* submission latency */
	struct io_stat bw_stat[2];	/* bandwidth stats */

	unsigned long long stat_io_bytes[2];
	struct timeval stat_sample_time[2];

	struct io_log *slat_log;
	struct io_log *clat_log;
	struct io_log *bw_log;

	struct timeval start;	/* start of this loop */
	struct timeval epoch;	/* time job was started */
	struct timeval end_time;/* time job ended */

	/*
	 * fio system usage accounting
	 */
	struct rusage ru_start;
	struct rusage ru_end;
	unsigned long usr_time;
	unsigned long sys_time;
	unsigned long ctx;

	/*
	 * read/write mixed workload state
	 */
	os_random_state_t rwmix_state;
	struct timeval rwmix_switch;
	enum fio_ddir rwmix_ddir;

	/*
	 * Pre-run and post-run shell
	 */
	char *exec_prerun;
	char *exec_postrun;

	/*
	 * IO historic logs
	 */
	struct list_head io_hist_list;
	struct list_head io_log_list;
};

#define __td_verror(td, err, msg)	\
	do {	\
		int e = (err);	\
		(td)->error = e;	\
		snprintf(td->verror, sizeof(td->verror) - 1, "file:%s:%d, error=%s", __FILE__, __LINE__, (msg));	\
	} while (0)


#define td_verror(td, err)	__td_verror((td), (err), strerror((err)))
#define td_vmsg(td, err, msg)	__td_verror((td), (err), (msg))
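
/*
 * Usage sketch (hypothetical call site): the macros above record the real
 * error value plus a location string in the thread_data, so the caller only
 * needs to return a plain success/failure indication:
 *
 *	if (fsync(f->fd) < 0) {
 *		td_verror(td, errno);
 *		return 1;
 *	}
 */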

extern int exitall_on_terminate;
extern int thread_number;
extern int shm_id;
extern int groupid;
extern int terse_output;
extern FILE *f_out;
extern FILE *f_err;
extern int temp_stall_ts;
extern unsigned long long mlock_size;

extern struct thread_data *threads;

#define td_read(td)	((td)->ddir == DDIR_READ)
#define td_write(td)	((td)->ddir == DDIR_WRITE)
#define td_rw(td)	((td)->iomix != 0)

#define BLOCKS_PER_MAP		(8 * sizeof(long))
#define TO_MAP_BLOCK(td, f, b)	((b) - ((f)->file_offset / (td)->rw_min_bs))
#define RAND_MAP_IDX(td, f, b)	(TO_MAP_BLOCK(td, f, b) / BLOCKS_PER_MAP)
#define RAND_MAP_BIT(td, f, b)	(TO_MAP_BLOCK(td, f, b) & (BLOCKS_PER_MAP - 1))
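
/*
 * Illustrative sketch only: marking block b of file f as used in the random
 * io block map, via the index/bit helpers above.  The helper name is
 * hypothetical; fio's real map handling lives elsewhere in the source.
 */
static inline void fio_file_mark_block_example(struct thread_data *td,
					       struct fio_file *f,
					       unsigned long long b)
{
	f->file_map[RAND_MAP_IDX(td, f, b)] |= 1UL << RAND_MAP_BIT(td, f, b);
}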

#define MAX_JOBS	(1024)

static inline int should_fsync(struct thread_data *td)
{
	if (td->last_was_sync)
		return 0;
	if (td->odirect)
		return 0;
	if (td_write(td) || td_rw(td) || td->override_sync)
		return 1;

	return 0;
}

/*
 * Disk utilization stats, as read from /sys/block/<dev>/stat
 */
struct disk_util_stat {
	unsigned ios[2];
	unsigned merges[2];
	unsigned long long sectors[2];
	unsigned ticks[2];
	unsigned io_ticks;
	unsigned time_in_queue;
};

/*
 * Per-device disk util management
 */
struct disk_util {
	struct list_head list;

	char *name;
	char path[256];
	dev_t dev;

	struct disk_util_stat dus;
	struct disk_util_stat last_dus;

	unsigned long msec;
	struct timeval time;
};

/*
 * Callback for io completion
 */
typedef int (endio_handler)(struct io_u *);

#define DISK_UTIL_MSEC	(250)

#ifndef min
#define min(a, b)	((a) < (b) ? (a) : (b))
#endif
#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif

/*
 * Log exports
 */
extern int read_iolog_get(struct thread_data *, struct io_u *);
extern void write_iolog_put(struct thread_data *, struct io_u *);
extern int init_iolog(struct thread_data *td);
extern void log_io_piece(struct thread_data *, struct io_u *);
extern void prune_io_piece_log(struct thread_data *);
extern void write_iolog_close(struct thread_data *);

/*
 * Logging
 */
extern void add_clat_sample(struct thread_data *, enum fio_ddir, unsigned long);
extern void add_slat_sample(struct thread_data *, enum fio_ddir, unsigned long);
extern void add_bw_sample(struct thread_data *, enum fio_ddir, struct timeval *);
extern void show_run_stats(void);
extern void init_disk_util(struct thread_data *);
extern void update_rusage_stat(struct thread_data *);
extern void update_io_ticks(void);
extern void disk_util_timer_arm(void);
extern void setup_log(struct io_log **);
extern void finish_log(struct thread_data *, struct io_log *, const char *);
extern void __finish_log(struct io_log *, const char *);
extern int setup_rate(struct thread_data *);
extern struct io_log *agg_io_log[2];
extern int write_bw_log;
extern void add_agg_sample(unsigned long, enum fio_ddir);

/*
 * Time functions
 */
extern unsigned long utime_since(struct timeval *, struct timeval *);
extern unsigned long utime_since_now(struct timeval *);
extern unsigned long mtime_since(struct timeval *, struct timeval *);
extern unsigned long mtime_since_now(struct timeval *);
extern unsigned long time_since_now(struct timeval *);
extern unsigned long mtime_since_genesis(void);
extern void __usec_sleep(unsigned int);
extern void usec_sleep(struct thread_data *, unsigned long);
extern void rate_throttle(struct thread_data *, unsigned long, unsigned int, int);
extern void fill_start_time(struct timeval *);
extern void fio_gettime(struct timeval *, void *);

/*
 * Init functions
 */
extern int parse_options(int, char **);
extern int init_random_state(struct thread_data *);

/*
 * File setup/shutdown
 */
extern void close_files(struct thread_data *);
extern int setup_files(struct thread_data *);
extern int open_files(struct thread_data *);
extern int file_invalidate_cache(struct thread_data *, struct fio_file *);

/*
 * ETA/status stuff
 */
extern void print_thread_status(void);
extern void print_status_init(int);

/*
 * Thread life cycle. Once a thread has a runstate beyond TD_INITIALIZED, it
 * will never go back. It may cycle between running/verifying/fsyncing.
 * Once the thread reaches TD_EXITED, it is just waiting for the core to
 * reap it.
 */
enum {
	TD_NOT_CREATED = 0,
	TD_CREATED,
	TD_INITIALIZED,
	TD_RUNNING,
	TD_VERIFYING,
	TD_FSYNCING,
	TD_EXITED,
	TD_REAPED,
};

/*
 * Verify helpers
 */
extern void populate_verify_io_u(struct thread_data *, struct io_u *);
extern int get_next_verify(struct thread_data *td, struct io_u *);
extern int verify_io_u(struct io_u *);

/*
 * Memory helpers
 */
extern int fio_pin_memory(void);
extern void fio_unpin_memory(void);
extern int allocate_io_mem(struct thread_data *);
extern void free_io_mem(struct thread_data *);

/*
 * io unit handling
 */
#define queue_full(td)	list_empty(&(td)->io_u_freelist)
extern struct io_u *__get_io_u(struct thread_data *);
extern struct io_u *get_io_u(struct thread_data *, struct fio_file *);
extern void put_io_u(struct thread_data *, struct io_u *);
extern long io_u_sync_complete(struct thread_data *, struct io_u *, endio_handler *);
extern long io_u_queued_complete(struct thread_data *, int, endio_handler *);

/*
 * io engine entry points
 */
extern int td_io_init(struct thread_data *);
extern int td_io_prep(struct thread_data *, struct io_u *);
extern int td_io_queue(struct thread_data *, struct io_u *);
extern int td_io_sync(struct thread_data *, struct fio_file *);
extern int td_io_getevents(struct thread_data *, int, int, struct timespec *);
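
/*
 * Submission flow sketch (function names as declared above, the surrounding
 * control flow is an assumption): a job grabs an io_u, queues it through the
 * engine, and either completes it inline or reaps it later depending on the
 * queue() return value:
 *
 *	io_u = get_io_u(td, f);
 *	ret = td_io_queue(td, io_u);
 *	if (ret == FIO_Q_COMPLETED)
 *		io_u_sync_complete(td, io_u, NULL);
 *	else if (ret == FIO_Q_QUEUED)
 *		... later: io_u_queued_complete(td, min_events, NULL);
 */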

/*
 * This is a pretty crappy semaphore implementation, but with the use that fio
 * has (just signalling start/go conditions), it doesn't have to be better.
 * Naturally this would not work for any type of contended semaphore or
 * for real locking.
 */
static inline void fio_sem_init(volatile int *sem, int val)
{
	*sem = val;
}

static inline void fio_sem_down(volatile int *sem)
{
	while (*sem == 0)
		usleep(10000);

	(*sem)--;
}

static inline void fio_sem_up(volatile int *sem)
{
	(*sem)++;
}
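
/*
 * Usage sketch (assumed, not taken from fio's .c files): a starter
 * initializes a semaphore to 0, a worker blocks in fio_sem_down() until the
 * starter releases it with fio_sem_up():
 *
 *	volatile int go = 0;
 *
 *	fio_sem_init(&go, 0);
 *	... worker: fio_sem_down(&go);
 *	... starter: fio_sem_up(&go);
 */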

/*
 * If we're logging output to a file, errors should go to both stderr and f_err
 */
#define log_err(args...)	do {	\
	fprintf(f_err, ##args);	\
	if (f_err != stderr)	\
		fprintf(stderr, ##args);	\
	} while (0)

struct ioengine_ops {
	struct list_head list;
	char name[16];
	int version;
	int flags;
	int (*setup)(struct thread_data *);
	int (*init)(struct thread_data *);
	int (*prep)(struct thread_data *, struct io_u *);
	int (*queue)(struct thread_data *, struct io_u *);
	int (*getevents)(struct thread_data *, int, int, struct timespec *);
	struct io_u *(*event)(struct thread_data *, int);
	int (*cancel)(struct thread_data *, struct io_u *);
	void (*cleanup)(struct thread_data *);
	void *data;
	void *dlhandle;
	unsigned long priv;
};

#define FIO_IOOPS_VERSION	4

extern struct ioengine_ops *load_ioengine(struct thread_data *, const char *);
extern int register_ioengine(struct ioengine_ops *);
extern void unregister_ioengine(struct ioengine_ops *);
extern void close_ioengine(struct thread_data *);
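
/*
 * Registration sketch (assumed names): an io engine fills in an
 * ioengine_ops and registers it from a constructor, using the fio_init
 * attribute helper defined below:
 *
 *	static struct ioengine_ops my_engine_ops = {
 *		.name		= "myengine",
 *		.version	= FIO_IOOPS_VERSION,
 *		.flags		= FIO_SYNCIO,
 *		.queue		= my_queue,
 *		.cleanup	= my_cleanup,
 *	};
 *
 *	static void fio_init my_engine_register(void)
 *	{
 *		register_ioengine(&my_engine_ops);
 *	}
 */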

/*
 * Mark unused variables passed to ops functions as unused, to silence gcc
 */
#define fio_unused	__attribute((__unused__))
#define fio_init	__attribute__((constructor))
#define fio_exit	__attribute__((destructor))

#define for_each_td(td, i)	\
	for ((i) = 0, (td) = &threads[0]; (i) < (int) thread_number; (i)++, (td)++)
#define for_each_file(td, f, i)	\
	for ((i) = 0, (f) = &(td)->files[0]; (i) < (int) (td)->nr_files; (i)++, (f)++)

#define fio_assert(td, cond)	do {	\
	if (!(cond)) {	\
		int *__foo = NULL;	\
		fprintf(stderr, "file:%s:%d, assert %s failed\n", __FILE__, __LINE__, #cond);	\
		(td)->runstate = TD_EXITED;	\
		(td)->error = EFAULT;	\
		*__foo = 0;	\
	}	\
} while (0)

#endif