[PATCH] Only generate global bandwidth log if write_bw_log set
[fio.git] / fio.h
#ifndef FIO_H
#define FIO_H

#include <sched.h>
#include <limits.h>
#include <pthread.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>

#include "list.h"
#include "md5.h"
#include "crc32.h"
#include "arch.h"
#include "os.h"

enum fio_ddir {
	DDIR_READ = 0,
	DDIR_WRITE,
	DDIR_SYNC,
};

struct io_stat {
	unsigned long max_val;
	unsigned long min_val;
	unsigned long samples;

	double mean;
	double S;
};
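
/*
 * Note (assumption based on how these fields are used by the stat code):
 * ->mean and ->S are running statistics, updated as each sample arrives
 * (Welford-style), so the standard deviation can be recovered at the end
 * of the run as sqrt(S / (samples - 1)).
 */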

struct io_sample {
	unsigned long time;
	unsigned long val;
	enum fio_ddir ddir;
};

struct io_log {
	unsigned long nr_samples;
	unsigned long max_samples;
	struct io_sample *log;
};

struct io_piece {
	struct list_head list;
	struct fio_file *file;
	unsigned long long offset;
	unsigned int len;
	enum fio_ddir ddir;
};

/*
 * The io unit
 */
struct io_u {
	union {
#ifdef FIO_HAVE_LIBAIO
		struct iocb iocb;
#endif
#ifdef FIO_HAVE_POSIXAIO
		struct aiocb aiocb;
#endif
#ifdef FIO_HAVE_SGIO
		struct sg_io_hdr hdr;
#endif
	};
	struct timeval start_time;
	struct timeval issue_time;

	void *buf;
	unsigned int buflen;
	unsigned long long offset;

	unsigned int resid;
	unsigned int error;

	enum fio_ddir ddir;

	/*
	 * io engine private data
	 */
	union {
		unsigned int index;
		unsigned int seen;
	};

	struct fio_file *file;

	struct list_head list;
};

#define FIO_HDR_MAGIC	0xf00baaef

enum {
	VERIFY_NONE = 0,
	VERIFY_MD5,
	VERIFY_CRC32,
};

struct verify_header {
	unsigned int fio_magic;
	unsigned int len;
	unsigned int verify_type;
	union {
		char md5_digest[MD5_HASH_WORDS * 4];
		unsigned long crc32;
	};
};
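
/*
 * Layout sketch (illustrative): with verify= enabled, each written block is
 * prefixed with a verify_header; fio_magic and len identify the block, and
 * the union holds the md5 or crc32 computed over the rest of the block.
 * On read-back the checksum is recomputed and compared against the header.
 */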

struct group_run_stats {
	unsigned long long max_run[2], min_run[2];
	unsigned long long max_bw[2], min_bw[2];
	unsigned long long io_kb[2];
	unsigned long long agg[2];
};

/*
 * What type of allocation to use for io buffers
 */
enum fio_memtype {
	MEM_MALLOC = 0,	/* ordinary malloc */
	MEM_SHM,	/* use shared memory segments */
	MEM_SHMHUGE,	/* use shared memory segments with huge pages */
	MEM_MMAP,	/* use anonymous mmap */
	MEM_MMAPHUGE,	/* memory mapped huge file */
};
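
/*
 * Job file sketch (illustrative; the "mem=" option name is assumed from the
 * matching job option):
 *
 *	[bigjob]
 *	mem=shmhuge
 *
 * would select MEM_SHMHUGE, i.e. SysV shared memory segments backed by huge
 * pages, for the io buffers.
 */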

/*
 * The type of object we are working on
 */
enum fio_filetype {
	FIO_TYPE_FILE = 1,
	FIO_TYPE_BD,
	FIO_TYPE_CHAR,
};

enum fio_ioengine_flags {
	FIO_SYNCIO	= 1 << 0,
	FIO_CPUIO	= 1 << 1,
	FIO_MMAPIO	= 1 << 2,
	FIO_RAWIO	= 1 << 3,
};

struct fio_file {
	/*
	 * A file need not be backed by a file descriptor; the io engine
	 * decides how to reference it.
	 */
	union {
		unsigned long file_data;
		int fd;
	};
	char *file_name;
	void *mmap;
	unsigned long long file_size;
	unsigned long long real_file_size;
	unsigned long long file_offset;
	unsigned long long last_pos;
	unsigned long long last_completed_pos;

	unsigned long *file_map;
	unsigned int num_maps;

	unsigned int unlink;
};
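
/*
 * Example (hypothetical engine, for illustration): an io engine that does
 * not operate on file descriptors can stash its own handle in ->file_data
 * instead of ->fd, e.g.
 *
 *	f->file_data = (unsigned long) my_engine_open(f->file_name);
 *
 * and cast it back in its ->prep()/->queue() hooks, where my_engine_open()
 * is the engine's own (hypothetical) open helper. Engines that do use
 * descriptors simply fill ->fd.
 */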

/*
 * How many depth levels to log
 */
#define FIO_IO_U_MAP_NR	8

/*
 * This describes a single thread/process executing a fio job.
 */
struct thread_data {
	char *name;
	char *directory;
	char *filename;
	char verror[80];
	pthread_t thread;
	int thread_number;
	int groupid;
	enum fio_filetype filetype;
	struct fio_file *files;
	unsigned int nr_files;
	unsigned int nr_uniq_files;
	unsigned int next_file;
	int error;
	pid_t pid;
	char *orig_buffer;
	size_t orig_buffer_size;
	volatile int terminate;
	volatile int runstate;
	enum fio_ddir ddir;
	unsigned int iomix;
	unsigned int ioprio;
	unsigned int last_was_sync;

	unsigned int sequential;
	unsigned int odirect;
	unsigned int invalidate_cache;
	unsigned int create_serialize;
	unsigned int create_fsync;
	unsigned int end_fsync;
	unsigned int sync_io;
	unsigned int verify;
	unsigned int use_thread;
	unsigned int unlink;
	unsigned int do_disk_util;
	unsigned int override_sync;
	unsigned int rand_repeatable;
	unsigned int write_lat_log;
	unsigned int write_bw_log;
	unsigned int norandommap;
	unsigned int bs_unaligned;

	unsigned int bs[2];
	unsigned int min_bs[2];
	unsigned int max_bs[2];
	unsigned int hugepage_size;
	unsigned int rw_min_bs;
	unsigned int thinktime;
	unsigned int thinktime_blocks;
	unsigned int fsync_blocks;
	unsigned int start_delay;
	unsigned long timeout;
	unsigned int overwrite;
	unsigned int bw_avg_time;
	unsigned int loops;
	unsigned long long zone_size;
	unsigned long long zone_skip;
	enum fio_memtype mem_type;
	char *mmapfile;
	int mmapfd;
	unsigned int stonewall;
	unsigned int numjobs;
	unsigned int iodepth;
	os_cpu_mask_t cpumask;
	unsigned int iolog;
	unsigned int read_iolog;
	unsigned int rwmixcycle;
	unsigned int rwmixread;
	unsigned int rwmixwrite;
	unsigned int nice;

	char *read_iolog_file;
	char *write_iolog_file;
	void *iolog_buf;
	FILE *iolog_f;

	char *sysfs_root;
	char *ioscheduler;

	os_random_state_t bsrange_state;
	os_random_state_t verify_state;

	int shm_id;

	/*
	 * IO engine hooks, contains everything needed to submit an io_u
	 * to any of the available IO engines.
	 */
	struct ioengine_ops *io_ops;

	/*
	 * Current IO depth and list of free and busy io_u's.
	 */
	unsigned int cur_depth;
	unsigned int io_u_map[FIO_IO_U_MAP_NR];
	unsigned long total_io_u;
	struct list_head io_u_freelist;
	struct list_head io_u_busylist;

	/*
	 * Rate state
	 */
	unsigned int rate;
	unsigned int ratemin;
	unsigned int ratecycle;
	unsigned long rate_usec_cycle;
	long rate_pending_usleep;
	unsigned long rate_bytes;
	struct timeval lastrate;

	unsigned long runtime[2];	/* msec */
	unsigned long long io_size;
	unsigned long long total_file_size;
	unsigned long long start_offset;
	unsigned long long total_io_size;

	unsigned long long io_blocks[2];
	unsigned long long io_bytes[2];
	unsigned long long zone_bytes;
	unsigned long long this_io_bytes[2];
	volatile int mutex;

	/*
	 * State for random io, a bitmap of blocks done vs not done
	 */
	os_random_state_t random_state;

	/*
	 * CPU "io" cycle burner
	 */
	unsigned int cpuload;
	unsigned int cpucycle;

	/*
	 * bandwidth and latency stats
	 */
	struct io_stat clat_stat[2];	/* completion latency */
	struct io_stat slat_stat[2];	/* submission latency */
	struct io_stat bw_stat[2];	/* bandwidth stats */

	unsigned long long stat_io_bytes[2];
	struct timeval stat_sample_time[2];

	struct io_log *slat_log;
	struct io_log *clat_log;
	struct io_log *bw_log;

	struct timeval start;	/* start of this loop */
	struct timeval epoch;	/* time job was started */
	struct timeval end_time;	/* time job ended */

	/*
	 * fio system usage accounting
	 */
	struct rusage ru_start;
	struct rusage ru_end;
	unsigned long usr_time;
	unsigned long sys_time;
	unsigned long ctx;

	/*
	 * read/write mixed workload state
	 */
	os_random_state_t rwmix_state;
	struct timeval rwmix_switch;
	enum fio_ddir rwmix_ddir;

	/*
	 * Pre-run and post-run shell
	 */
	char *exec_prerun;
	char *exec_postrun;

	/*
	 * IO historic logs
	 */
	struct list_head io_hist_list;
	struct list_head io_log_list;
};

#define __td_verror(td, err, msg)					\
	do {								\
		int e = (err);						\
		(td)->error = e;					\
		snprintf((td)->verror, sizeof((td)->verror) - 1,	\
			 "file:%s:%d, error=%s", __FILE__, __LINE__, (msg)); \
	} while (0)


#define td_verror(td, err)	__td_verror((td), (err), strerror((err)))
#define td_vmsg(td, err, msg)	__td_verror((td), (err), (msg))
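
/*
 * Usage sketch (illustrative): after a failed system call, record the errno
 * along with the file/line where it was detected, e.g.
 *
 *	if (fsync(f->fd) < 0) {
 *		td_verror(td, errno);
 *		return 1;
 *	}
 *
 * td_vmsg() is the variant for passing a custom message instead of
 * strerror(errno).
 */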

extern int exitall_on_terminate;
extern int thread_number;
extern int shm_id;
extern int groupid;
extern int terse_output;
extern FILE *f_out;
extern FILE *f_err;
extern int temp_stall_ts;
extern unsigned long long mlock_size;

extern struct thread_data *threads;

#define td_read(td)		((td)->ddir == DDIR_READ)
#define td_write(td)		((td)->ddir == DDIR_WRITE)
#define td_rw(td)		((td)->iomix != 0)

#define BLOCKS_PER_MAP		(8 * sizeof(long))
#define TO_MAP_BLOCK(td, f, b)	((b) - ((f)->file_offset / (td)->rw_min_bs))
#define RAND_MAP_IDX(td, f, b)	(TO_MAP_BLOCK(td, f, b) / BLOCKS_PER_MAP)
#define RAND_MAP_BIT(td, f, b)	(TO_MAP_BLOCK(td, f, b) & (BLOCKS_PER_MAP - 1))
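
/*
 * Worked example (illustrative, assuming a 64-bit host where
 * BLOCKS_PER_MAP == 64): with rw_min_bs == 4096 and file_offset == 0,
 * block number b == 200 maps to
 *
 *	TO_MAP_BLOCK()	-> 200
 *	RAND_MAP_IDX()	-> 200 / 64 == 3	(fourth word of ->file_map)
 *	RAND_MAP_BIT()	-> 200 & 63 == 8	(bit 8 within that word)
 *
 * so marking the block done sets bit 8 of file_map[3].
 */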

#define MAX_JOBS	(1024)

static inline int should_fsync(struct thread_data *td)
{
	if (td->last_was_sync)
		return 0;
	if (td->odirect)
		return 0;
	if (td_write(td) || td_rw(td) || td->override_sync)
		return 1;

	return 0;
}

struct disk_util_stat {
	unsigned ios[2];
	unsigned merges[2];
	unsigned long long sectors[2];
	unsigned ticks[2];
	unsigned io_ticks;
	unsigned time_in_queue;
};

struct disk_util {
	struct list_head list;

	char *name;
	char path[256];
	dev_t dev;

	struct disk_util_stat dus;
	struct disk_util_stat last_dus;

	unsigned long msec;
	struct timeval time;
};

struct io_completion_data {
	int nr;				/* input */

	int error;			/* output */
	unsigned long bytes_done[2];	/* output */
	struct timeval time;		/* output */
};

#define DISK_UTIL_MSEC	(250)

#ifndef min
#define min(a, b)	((a) < (b) ? (a) : (b))
#endif
#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif

/*
 * Log exports
 */
extern int read_iolog_get(struct thread_data *, struct io_u *);
extern void write_iolog_put(struct thread_data *, struct io_u *);
extern int init_iolog(struct thread_data *td);
extern void log_io_piece(struct thread_data *, struct io_u *);
extern void prune_io_piece_log(struct thread_data *);
extern void write_iolog_close(struct thread_data *);

/*
 * Logging
 */
extern void add_clat_sample(struct thread_data *, enum fio_ddir, unsigned long);
extern void add_slat_sample(struct thread_data *, enum fio_ddir, unsigned long);
extern void add_bw_sample(struct thread_data *, enum fio_ddir, struct timeval *);
extern void show_run_stats(void);
extern void init_disk_util(struct thread_data *);
extern void update_rusage_stat(struct thread_data *);
extern void update_io_ticks(void);
extern void disk_util_timer_arm(void);
extern void setup_log(struct io_log **);
extern void finish_log(struct thread_data *, struct io_log *, const char *);
extern void __finish_log(struct io_log *, const char *);
extern int setup_rate(struct thread_data *);
extern struct io_log *agg_io_log[2];
extern int write_bw_log;
extern void add_agg_sample(unsigned long, enum fio_ddir);

/*
 * Time functions
 */
extern unsigned long utime_since(struct timeval *, struct timeval *);
extern unsigned long utime_since_now(struct timeval *);
extern unsigned long mtime_since(struct timeval *, struct timeval *);
extern unsigned long mtime_since_now(struct timeval *);
extern unsigned long time_since_now(struct timeval *);
extern unsigned long mtime_since_genesis(void);
extern void __usec_sleep(unsigned int);
extern void usec_sleep(struct thread_data *, unsigned long);
extern void rate_throttle(struct thread_data *, unsigned long, unsigned int, int);
extern void fill_start_time(struct timeval *);
extern void fio_gettime(struct timeval *, void *);

/*
 * Init functions
 */
extern int parse_options(int, char **);
extern int init_random_state(struct thread_data *);

/*
 * File setup/shutdown
 */
extern void close_files(struct thread_data *);
extern int setup_files(struct thread_data *);
extern int open_files(struct thread_data *);
extern int file_invalidate_cache(struct thread_data *, struct fio_file *);

/*
 * ETA/status stuff
 */
extern void print_thread_status(void);
extern void print_status_init(int);

/*
 * Thread life cycle. Once a thread has a runstate beyond TD_INITIALIZED, it
 * will never go back again. It may cycle between running/verifying/fsyncing.
 * Once the thread reaches TD_EXITED, it is just waiting for the core to
 * reap it.
 */
enum {
	TD_NOT_CREATED = 0,
	TD_CREATED,
	TD_INITIALIZED,
	TD_RUNNING,
	TD_VERIFYING,
	TD_FSYNCING,
	TD_EXITED,
	TD_REAPED,
};
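
/*
 * Typical progression (illustrative): the core moves a job from
 * TD_NOT_CREATED to TD_CREATED when it spawns it, the job marks itself
 * TD_INITIALIZED once setup is done, then cycles through
 *
 *	TD_RUNNING <-> TD_VERIFYING / TD_FSYNCING
 *
 * until it is finished, sets TD_EXITED, and is finally reaped by the core
 * (TD_REAPED).
 */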

/*
 * Verify helpers
 */
extern void populate_verify_io_u(struct thread_data *, struct io_u *);
extern int get_next_verify(struct thread_data *td, struct io_u *);
extern int do_io_u_verify(struct thread_data *, struct io_u **);

/*
 * Memory helpers
 */
extern int fio_pin_memory(void);
extern void fio_unpin_memory(void);
extern int allocate_io_mem(struct thread_data *);
extern void free_io_mem(struct thread_data *);

/*
 * io unit handling
 */
#define queue_full(td)	list_empty(&(td)->io_u_freelist)
extern struct io_u *__get_io_u(struct thread_data *);
extern struct io_u *get_io_u(struct thread_data *, struct fio_file *);
extern void put_io_u(struct thread_data *, struct io_u *);
extern void ios_completed(struct thread_data *, struct io_completion_data *);
extern void io_completed(struct thread_data *, struct io_u *, struct io_completion_data *);

/*
 * io engine entry points
 */
extern int td_io_init(struct thread_data *);
extern int td_io_prep(struct thread_data *, struct io_u *);
extern int td_io_queue(struct thread_data *, struct io_u *);
extern int td_io_sync(struct thread_data *, struct fio_file *);
extern int td_io_getevents(struct thread_data *, int, int, struct timespec *);

/*
 * This is a pretty crappy semaphore implementation, but with the use that fio
 * has (just signalling start/go conditions), it doesn't have to be better.
 * Naturally this would not work for any type of contended semaphore or
 * for real locking.
 */
static inline void fio_sem_init(volatile int *sem, int val)
{
	*sem = val;
}

static inline void fio_sem_down(volatile int *sem)
{
	while (*sem == 0)
		usleep(10000);

	(*sem)--;
}

static inline void fio_sem_up(volatile int *sem)
{
	(*sem)++;
}
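
/*
 * Usage sketch (illustrative): the semaphore is only a start/go signal
 * between the core and a job (the ->mutex member of struct thread_data is
 * intended for this), e.g.
 *
 *	fio_sem_init(&td->mutex, 0);
 *	... spawn the job, which blocks in fio_sem_down(&td->mutex) ...
 *	fio_sem_up(&td->mutex);		release the job to start running
 *
 * Do not use it for anything contended; it is not a real lock.
 */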

/*
 * If output is being logged to a file, errors should still go to both
 * f_err and the real stderr.
 */
#define log_err(args...)	do {		\
	fprintf(f_err, ##args);			\
	if (f_err != stderr)			\
		fprintf(stderr, ##args);	\
	} while (0)
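
/*
 * Example (illustrative):
 *
 *	log_err("fio: failed opening %s: %s\n", f->file_name, strerror(errno));
 */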

struct ioengine_ops {
	struct list_head list;
	char name[16];
	int version;
	int flags;
	int (*setup)(struct thread_data *);
	int (*init)(struct thread_data *);
	int (*prep)(struct thread_data *, struct io_u *);
	int (*queue)(struct thread_data *, struct io_u *);
	int (*getevents)(struct thread_data *, int, int, struct timespec *);
	struct io_u *(*event)(struct thread_data *, int);
	int (*cancel)(struct thread_data *, struct io_u *);
	void (*cleanup)(struct thread_data *);
	void *data;
	void *dlhandle;
};

#define FIO_IOOPS_VERSION	3

extern struct ioengine_ops *load_ioengine(struct thread_data *, const char *);
extern int register_ioengine(struct ioengine_ops *);
extern void unregister_ioengine(struct ioengine_ops *);
extern void close_ioengine(struct thread_data *);

/*
 * Mark unused variables passed to ops functions as unused, to silence gcc
 */
#define fio_unused	__attribute__((__unused__))
#define fio_init	__attribute__((constructor))
#define fio_exit	__attribute__((destructor))
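
/*
 * Registration sketch (hypothetical engine, for illustration only): an io
 * engine is a struct ioengine_ops registered from a constructor, roughly
 *
 *	static struct ioengine_ops my_ioengine = {
 *		.name		= "myengine",
 *		.version	= FIO_IOOPS_VERSION,
 *		.flags		= FIO_SYNCIO,
 *		.queue		= my_queue,
 *		.getevents	= my_getevents,
 *		.event		= my_event,
 *		.cleanup	= my_cleanup,
 *	};
 *
 *	static void fio_init my_register(void)
 *	{
 *		register_ioengine(&my_ioengine);
 *	}
 *
 * where my_queue() etc. are the engine's own (hypothetical) hook
 * implementations.
 */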

#define for_each_td(td, i)	\
	for ((i) = 0, (td) = &threads[0]; (i) < (int) thread_number; (i)++, (td)++)
#define for_each_file(td, f, i)	\
	for ((i) = 0, (f) = &(td)->files[0]; (i) < (int) (td)->nr_files; (i)++, (f)++)
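
/*
 * Usage sketch (illustrative):
 *
 *	struct thread_data *td;
 *	struct fio_file *f;
 *	int i, j;
 *
 *	for_each_td(td, i)
 *		for_each_file(td, f, j)
 *			printf("job %d file %s\n", td->thread_number, f->file_name);
 */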

#endif