16 struct io_completion_data {
19 int error; /* output */
20 uint64_t bytes_done[DDIR_RWDIR_CNT]; /* output */
21 struct timespec time; /* output */
25 * The ->io_axmap contains a map of blocks we have or have not done io
26 * to yet. Used to make sure we cover the entire range in a fair fashion.
28 static bool random_map_free(struct fio_file *f, const uint64_t block)
30 return !axmap_isset(f->io_axmap, block);
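/*
 * A block counts as "free" here simply when its bit in the file's axmap
 * is still clear, i.e. no io_u has been issued against it yet in this pass.
 */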
34 * Mark a given offset as used in the map.
36 static uint64_t mark_random_map(struct thread_data *td, struct io_u *io_u,
37 uint64_t offset, uint64_t buflen)
39 unsigned long long min_bs = td->o.min_bs[io_u->ddir];
40 struct fio_file *f = io_u->file;
41 unsigned long long nr_blocks;
44 block = (offset - f->file_offset) / (uint64_t) min_bs;
45 nr_blocks = (buflen + min_bs - 1) / min_bs;
46 assert(nr_blocks > 0);
48 if (!(io_u->flags & IO_U_F_BUSY_OK)) {
49 nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);
50 assert(nr_blocks > 0);
53 if ((nr_blocks * min_bs) < buflen)
54 buflen = nr_blocks * min_bs;
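/*
 * Worked example (hypothetical numbers, assuming min_bs = 4k): an io_u
 * 16k past the file offset with buflen = 16k maps to block 4 with
 * nr_blocks = 4. If axmap_set_nr() can only claim 2 of those bits before
 * running into a block that is already set, buflen is trimmed to
 * 2 * 4k = 8k so the io_u stays within the blocks it actually owns.
 */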
59 static uint64_t last_block(struct thread_data *td, struct fio_file *f,
65 assert(ddir_rw(ddir));
68 * Hmm, should we make sure that ->io_size <= ->real_file_size?
69 * -> not for now since there is code assuming it could go either way.
71 max_size = f->io_size;
72 if (max_size > f->real_file_size)
73 max_size = f->real_file_size;
75 if (td->o.zone_mode == ZONE_MODE_STRIDED && td->o.zone_range)
76 max_size = td->o.zone_range;
78 if (td->o.min_bs[ddir] > td->o.ba[ddir])
79 max_size -= td->o.min_bs[ddir] - td->o.ba[ddir];
81 max_blocks = max_size / (uint64_t) td->o.ba[ddir];
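/*
 * In short: the usable size is the smaller of io_size and real_file_size
 * (or zone_range in strided zone mode), shrunk by the min_bs/ba slack so
 * a block-aligned IO starting at the final block still fits, and then
 * divided by the block alignment to yield a block count.
 */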
88 static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
89 enum fio_ddir ddir, uint64_t *b,
94 if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE ||
95 td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64) {
97 r = __rand(&td->random_state);
99 dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);
101 *b = lastb * (r / (rand_max(&td->random_state) + 1.0));
105 assert(fio_file_lfsr(f));
107 if (lfsr_next(&f->lfsr, &off))
114 * if we are not maintaining a random map, we are done.
116 if (!file_randommap(td, f))
120 * calculate map offset and check if it's free
122 if (random_map_free(f, *b))
125 dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
126 (unsigned long long) *b);
128 *b = axmap_next_free(f->io_axmap, *b);
129 if (*b == (uint64_t) -1ULL)
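/*
 * Two-step scheme: first generate a candidate block with the chosen
 * generator (tausworthe scaled into [0, lastb), or the LFSR), then, if a
 * random map is being maintained, either accept the candidate when its
 * bit is clear or fall forward to the next free block. A return of -1ULL
 * from axmap_next_free() means the map is exhausted.
 */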
135 static int __get_next_rand_offset_zipf(struct thread_data *td,
136 struct fio_file *f, enum fio_ddir ddir,
139 *b = zipf_next(&f->zipf);
143 static int __get_next_rand_offset_pareto(struct thread_data *td,
144 struct fio_file *f, enum fio_ddir ddir,
147 *b = pareto_next(&f->zipf);
151 static int __get_next_rand_offset_gauss(struct thread_data *td,
152 struct fio_file *f, enum fio_ddir ddir,
155 *b = gauss_next(&f->gauss);
159 static int __get_next_rand_offset_zoned_abs(struct thread_data *td,
161 enum fio_ddir ddir, uint64_t *b)
163 struct zone_split_index *zsi;
164 uint64_t lastb, send, stotal;
167 lastb = last_block(td, f, ddir);
171 if (!td->o.zone_split_nr[ddir]) {
173 return __get_next_rand_offset(td, f, ddir, b, lastb);
177 * Generate a value, v, between 1 and 100, both inclusive
179 v = rand_between(&td->zone_state, 1, 100);
182 * Find our generated table. 'send' is the end block of this zone,
183 * 'stotal' is our start offset.
185 zsi = &td->zone_state_index[ddir][v - 1];
186 stotal = zsi->size_prev / td->o.ba[ddir];
187 send = zsi->size / td->o.ba[ddir];
190 * Should never happen
193 if (!fio_did_warn(FIO_WARN_ZONED_BUG))
194 log_err("fio: bug in zoned generation\n");
196 } else if (send > lastb) {
198 * This happens if the user specifies ranges that exceed
199 * the file/device size. We can't handle that gracefully, so error and exit.
202 log_err("fio: zoned_abs sizes exceed file size\n");
207 * Generate index from 0..send-stotal
209 if (__get_next_rand_offset(td, f, ddir, b, send - stotal) == 1)
216 static int __get_next_rand_offset_zoned(struct thread_data *td,
217 struct fio_file *f, enum fio_ddir ddir,
220 unsigned int v, send, stotal;
221 uint64_t offset, lastb;
222 struct zone_split_index *zsi;
224 lastb = last_block(td, f, ddir);
228 if (!td->o.zone_split_nr[ddir]) {
230 return __get_next_rand_offset(td, f, ddir, b, lastb);
234 * Generate a value, v, between 1 and 100, both inclusive
236 v = rand_between(&td->zone_state, 1, 100);
238 zsi = &td->zone_state_index[ddir][v - 1];
239 stotal = zsi->size_perc_prev;
240 send = zsi->size_perc;
243 * Should never happen
246 if (!fio_did_warn(FIO_WARN_ZONED_BUG))
247 log_err("fio: bug in zoned generation\n");
252 * 'send' is some percentage below or equal to 100 that
253 * marks the end of the current IO range. 'stotal' marks
254 * the start, in percent.
257 offset = stotal * lastb / 100ULL;
261 lastb = lastb * (send - stotal) / 100ULL;
264 * Generate an index within the rescaled range 0..lastb
266 if (__get_next_rand_offset(td, f, ddir, b, lastb) == 1)
270 * Add our start offset, if any
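/*
 * Both zoned variants draw v uniformly from 1..100 and use it to index
 * the per-direction zone_state_index table built from the zone split
 * options. The _abs variant stores absolute byte sizes (converted to
 * blocks via the block alignment), while the percentage variant stores
 * cumulative percentages of the addressable range; in both cases a
 * random block is generated inside the selected slice and the slice
 * start is added back on.
 */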
278 static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
279 enum fio_ddir ddir, uint64_t *b)
281 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM) {
284 lastb = last_block(td, f, ddir);
288 return __get_next_rand_offset(td, f, ddir, b, lastb);
289 } else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
290 return __get_next_rand_offset_zipf(td, f, ddir, b);
291 else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
292 return __get_next_rand_offset_pareto(td, f, ddir, b);
293 else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
294 return __get_next_rand_offset_gauss(td, f, ddir, b);
295 else if (td->o.random_distribution == FIO_RAND_DIST_ZONED)
296 return __get_next_rand_offset_zoned(td, f, ddir, b);
297 else if (td->o.random_distribution == FIO_RAND_DIST_ZONED_ABS)
298 return __get_next_rand_offset_zoned_abs(td, f, ddir, b);
300 log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
304 static bool should_do_random(struct thread_data *td, enum fio_ddir ddir)
308 if (td->o.perc_rand[ddir] == 100)
311 v = rand_between(&td->seq_rand_state[ddir], 1, 100);
313 return v <= td->o.perc_rand[ddir];
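/*
 * Example: with perc_rand[ddir] = 70, v <= 70 holds for roughly 70% of
 * draws, so about 70% of offsets are generated randomly and the rest
 * continue sequentially from the last position.
 */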
316 static void loop_cache_invalidate(struct thread_data *td, struct fio_file *f)
318 struct thread_options *o = &td->o;
320 if (o->invalidate_cache && !o->odirect) {
323 ret = file_invalidate_cache(td, f);
327 static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
328 enum fio_ddir ddir, uint64_t *b)
330 if (!get_next_rand_offset(td, f, ddir, b))
333 if (td->o.time_based ||
334 (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)) {
335 fio_file_reset(td, f);
336 loop_cache_invalidate(td, f);
337 if (!get_next_rand_offset(td, f, ddir, b))
341 dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
342 f->file_name, (unsigned long long) f->last_pos[ddir],
343 (unsigned long long) f->real_file_size);
347 static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
348 enum fio_ddir ddir, uint64_t *offset)
350 struct thread_options *o = &td->o;
352 assert(ddir_rw(ddir));
355 * If we reach the end for a time based run, reset us back to 0
356 * and invalidate the cache, if we need to.
358 if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
360 f->last_pos[ddir] = f->file_offset;
361 loop_cache_invalidate(td, f);
364 if (f->last_pos[ddir] < f->real_file_size) {
368 * Only rewind if we already hit the end
370 if (f->last_pos[ddir] == f->file_offset &&
371 f->file_offset && o->ddir_seq_add < 0) {
372 if (f->real_file_size > f->io_size)
373 f->last_pos[ddir] = f->io_size;
375 f->last_pos[ddir] = f->real_file_size;
378 pos = f->last_pos[ddir] - f->file_offset;
379 if (pos && o->ddir_seq_add) {
380 pos += o->ddir_seq_add;
383 * If we reach beyond the end of the file
384 * with holed IO, wrap around to the
385 * beginning again. If we're doing backwards IO, wrap to the end instead.
388 if (pos >= f->real_file_size) {
389 if (o->ddir_seq_add > 0)
390 pos = f->file_offset;
392 if (f->real_file_size > f->io_size)
395 pos = f->real_file_size;
397 pos += o->ddir_seq_add;
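/*
 * ddir_seq_add turns plain sequential IO into strided (holed) IO: each
 * new position advances by that delta. Running past the end with a
 * positive stride wraps back to the file offset; with a negative stride
 * (backwards IO) the position wraps to the end of the usable range.
 */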
409 static int get_next_block(struct thread_data *td, struct io_u *io_u,
410 enum fio_ddir ddir, int rw_seq,
413 struct fio_file *f = io_u->file;
417 assert(ddir_rw(ddir));
423 if (should_do_random(td, ddir)) {
424 ret = get_next_rand_block(td, f, ddir, &b);
428 io_u_set(td, io_u, IO_U_F_BUSY_OK);
429 ret = get_next_seq_offset(td, f, ddir, &offset);
431 ret = get_next_rand_block(td, f, ddir, &b);
435 ret = get_next_seq_offset(td, f, ddir, &offset);
438 io_u_set(td, io_u, IO_U_F_BUSY_OK);
441 if (td->o.rw_seq == RW_SEQ_SEQ) {
442 ret = get_next_seq_offset(td, f, ddir, &offset);
444 ret = get_next_rand_block(td, f, ddir, &b);
447 } else if (td->o.rw_seq == RW_SEQ_IDENT) {
448 if (f->last_start[ddir] != -1ULL)
449 offset = f->last_start[ddir] - f->file_offset;
454 log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
461 io_u->offset = offset;
463 io_u->offset = b * td->o.ba[ddir];
465 log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
474 * For random io, generate a random new block and see if it's used. Repeat
475 * until we find a free one. For sequential io, just return the end of
476 * the last io issued.
478 static int get_next_offset(struct thread_data *td, struct io_u *io_u,
481 struct fio_file *f = io_u->file;
482 enum fio_ddir ddir = io_u->ddir;
485 assert(ddir_rw(ddir));
487 if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
489 td->ddir_seq_nr = td->o.ddir_seq_nr;
492 if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
495 if (io_u->offset >= f->io_size) {
496 dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
497 (unsigned long long) io_u->offset,
498 (unsigned long long) f->io_size);
502 io_u->offset += f->file_offset;
503 if (io_u->offset >= f->real_file_size) {
504 dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
505 (unsigned long long) io_u->offset,
506 (unsigned long long) f->real_file_size);
513 static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u,
514 unsigned long long buflen)
516 struct fio_file *f = io_u->file;
518 return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
521 static unsigned long long get_next_buflen(struct thread_data *td, struct io_u *io_u,
524 int ddir = io_u->ddir;
525 unsigned long long buflen = 0;
526 unsigned long long minbs, maxbs;
527 uint64_t frand_max, r;
530 assert(ddir_rw(ddir));
532 if (td->o.bs_is_seq_rand)
533 ddir = is_random ? DDIR_WRITE : DDIR_READ;
535 minbs = td->o.min_bs[ddir];
536 maxbs = td->o.max_bs[ddir];
542 * If we can't satisfy the min block size from here, then fail
544 if (!io_u_fits(td, io_u, minbs))
547 frand_max = rand_max(&td->bsrange_state[ddir]);
549 r = __rand(&td->bsrange_state[ddir]);
551 if (!td->o.bssplit_nr[ddir]) {
552 buflen = minbs + (unsigned long long) ((double) maxbs *
553 (r / (frand_max + 1.0)));
558 for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
559 struct bssplit *bsp = &td->o.bssplit[ddir][i];
565 if ((r / perc <= frand_max / 100ULL) &&
566 io_u_fits(td, io_u, buflen))
571 power_2 = is_power_of_2(minbs);
572 if (!td->o.bs_unaligned && power_2)
573 buflen &= ~(minbs - 1);
574 else if (!td->o.bs_unaligned && !power_2)
575 buflen -= buflen % minbs;
576 } while (!io_u_fits(td, io_u, buflen));
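/*
 * Without bssplit the length is min_bs plus a random fraction of max_bs;
 * with bssplit the random value selects one of the configured
 * size/percentage pairs. Unless bs_unaligned is set, the result is then
 * rounded down to a multiple of min_bs (a simple mask when min_bs is a
 * power of two, e.g. buflen &= ~(4096 - 1) for 4k), and the whole thing
 * repeats until the length fits within the file.
 */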
581 static void set_rwmix_bytes(struct thread_data *td)
586 * we do time or byte based switch. this is needed because
587 * buffered writes may issue a lot quicker than they complete,
588 * whereas reads do not.
590 diff = td->o.rwmix[td->rwmix_ddir ^ 1];
591 td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
594 static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
598 v = rand_between(&td->rwmix_state, 1, 100);
600 if (v <= td->o.rwmix[DDIR_READ])
606 int io_u_quiesce(struct thread_data *td)
611 * We are going to sleep, ensure that we flush anything pending so as
612 * not to skew our latency numbers.
614 * Changed to only monitor 'in flight' requests here instead of the
615 * td->cur_depth, b/c td->cur_depth does not accurately represent
616 * io's that have been actually submitted to an async engine,
617 * and cur_depth is meaningless for sync engines.
619 if (td->io_u_queued || td->cur_depth)
622 while (td->io_u_in_flight) {
625 ret = io_u_queued_complete(td, 1);
630 if (td->flags & TD_F_REGROW_LOGS)
636 static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
638 enum fio_ddir odir = ddir ^ 1;
642 assert(ddir_rw(ddir));
643 now = utime_since_now(&td->start);
646 * if rate_next_io_time is in the past, need to catch up to rate
648 if (td->rate_next_io_time[ddir] <= now)
652 * We are ahead of rate in this direction. See if we should switch.
655 if (td_rw(td) && td->o.rwmix[odir]) {
657 * Other direction is behind rate, switch
659 if (td->rate_next_io_time[odir] <= now)
663 * Both directions are ahead of rate. Sleep the min,
664 * switch if necessary
666 if (td->rate_next_io_time[ddir] <=
667 td->rate_next_io_time[odir]) {
668 usec = td->rate_next_io_time[ddir] - now;
670 usec = td->rate_next_io_time[odir] - now;
674 usec = td->rate_next_io_time[ddir] - now;
676 if (td->o.io_submit_mode == IO_MODE_INLINE)
679 usec_sleep(td, usec);
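/*
 * rate_next_io_time[ddir] is the earliest time the next IO in that
 * direction may be issued without exceeding the configured rate limit.
 * If we are ahead of schedule, prefer switching to the other direction
 * when it is already due; if both are ahead, sleep until the nearer of
 * the two deadlines before carrying on.
 */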
684 * Return the data direction for the next io_u. If the job is a
685 * mixed read/write workload, check the rwmix cycle and switch if necessary.
688 static enum fio_ddir get_rw_ddir(struct thread_data *td)
693 * See if it's time to fsync/fdatasync/sync_file_range first,
694 * and if not then move on to check regular I/Os.
696 if (should_fsync(td)) {
697 if (td->o.fsync_blocks && td->io_issues[DDIR_WRITE] &&
698 !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks))
701 if (td->o.fdatasync_blocks && td->io_issues[DDIR_WRITE] &&
702 !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks))
703 return DDIR_DATASYNC;
705 if (td->sync_file_range_nr && td->io_issues[DDIR_WRITE] &&
706 !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr))
707 return DDIR_SYNC_FILE_RANGE;
712 * Check if it's time to seed a new data direction.
714 if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
716 * Put a top limit on how many bytes we do for
717 * one data direction, to avoid overflowing the ranges too much.
720 ddir = get_rand_ddir(td);
722 if (ddir != td->rwmix_ddir)
725 td->rwmix_ddir = ddir;
727 ddir = td->rwmix_ddir;
728 } else if (td_read(td))
730 else if (td_write(td))
732 else if (td_trim(td))
737 td->rwmix_ddir = rate_ddir(td, ddir);
738 return td->rwmix_ddir;
741 static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
743 enum fio_ddir ddir = get_rw_ddir(td);
745 if (td_trimwrite(td)) {
746 struct fio_file *f = io_u->file;
747 if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
753 io_u->ddir = io_u->acct_ddir = ddir;
755 if (io_u->ddir == DDIR_WRITE && td_ioengine_flagged(td, FIO_BARRIER) &&
756 td->o.barrier_blocks &&
757 !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
758 td->io_issues[DDIR_WRITE])
759 io_u_set(td, io_u, IO_U_F_BARRIER);
762 void put_file_log(struct thread_data *td, struct fio_file *f)
764 unsigned int ret = put_file(td, f);
767 td_verror(td, ret, "file close");
770 void put_io_u(struct thread_data *td, struct io_u *io_u)
772 const bool needs_lock = td_async_processing(td);
774 if (io_u->post_submit) {
775 io_u->post_submit(io_u, io_u->error == 0);
776 io_u->post_submit = NULL;
785 if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT))
786 put_file_log(td, io_u->file);
789 io_u_set(td, io_u, IO_U_F_FREE);
791 if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
793 assert(!(td->flags & TD_F_CHILD));
795 io_u_qpush(&td->io_u_freelist, io_u);
796 td_io_u_free_notify(td);
799 __td_io_u_unlock(td);
802 void clear_io_u(struct thread_data *td, struct io_u *io_u)
804 io_u_clear(td, io_u, IO_U_F_FLIGHT);
808 void requeue_io_u(struct thread_data *td, struct io_u **io_u)
810 const bool needs_lock = td_async_processing(td);
811 struct io_u *__io_u = *io_u;
812 enum fio_ddir ddir = acct_ddir(__io_u);
814 dprint(FD_IO, "requeue %p\n", __io_u);
822 io_u_set(td, __io_u, IO_U_F_FREE);
823 if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
824 td->io_issues[ddir]--;
826 io_u_clear(td, __io_u, IO_U_F_FLIGHT);
827 if (__io_u->flags & IO_U_F_IN_CUR_DEPTH) {
829 assert(!(td->flags & TD_F_CHILD));
832 io_u_rpush(&td->io_u_requeues, __io_u);
833 td_io_u_free_notify(td);
836 __td_io_u_unlock(td);
841 static void setup_strided_zone_mode(struct thread_data *td, struct io_u *io_u)
843 struct fio_file *f = io_u->file;
845 assert(td->o.zone_mode == ZONE_MODE_STRIDED);
846 assert(td->o.zone_size);
847 assert(td->o.zone_range);
850 * See if it's time to switch to a new zone
852 if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
854 f->file_offset += td->o.zone_range + td->o.zone_skip;
857 * Wrap from the beginning, if we exceed the file size
859 if (f->file_offset >= f->real_file_size)
860 f->file_offset = get_start_offset(td, f);
862 f->last_pos[io_u->ddir] = f->file_offset;
863 td->io_skip_bytes += td->o.zone_skip;
867 * If zone_size > zone_range, then maintain the same zone until
868 * zone_bytes >= zone_size.
870 if (f->last_pos[io_u->ddir] >= (f->file_offset + td->o.zone_range)) {
871 dprint(FD_IO, "io_u maintain zone offset=%" PRIu64 "/last_pos=%" PRIu64 "\n",
872 f->file_offset, f->last_pos[io_u->ddir]);
873 f->last_pos[io_u->ddir] = f->file_offset;
877 * For random: if 'norandommap' is not set and zone_size > zone_range,
878 * the map needs to be reset each time, as it only covers a single zone_range.
880 if ((td->zone_bytes % td->o.zone_range) == 0)
881 fio_file_reset(td, f);
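/*
 * Strided zone mode in one line: do zone_size worth of IO inside a
 * zone_range-sized window, then hop forward by zone_range + zone_skip
 * (wrapping at end of file), and reset the random map whenever a full
 * zone_range has been consumed so the new window can be covered again.
 */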
884 static int fill_io_u(struct thread_data *td, struct io_u *io_u)
888 enum io_u_action ret;
890 if (td_ioengine_flagged(td, FIO_NOIO))
893 set_rw_ddir(td, io_u);
896 * fsync(), fdatasync(), trim etc.: we are done
898 if (!ddir_rw(io_u->ddir))
901 if (td->o.zone_mode == ZONE_MODE_STRIDED)
902 setup_strided_zone_mode(td, io_u);
905 * No log, let the seq/rand engine retrieve the next buflen and position.
908 if (get_next_offset(td, io_u, &is_random)) {
909 dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
913 io_u->buflen = get_next_buflen(td, io_u, is_random);
915 dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
919 offset = io_u->offset;
920 if (td->o.zone_mode == ZONE_MODE_ZBD) {
921 ret = zbd_adjust_block(td, io_u);
926 if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
927 dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%llx exceeds file size=0x%llx\n",
929 (unsigned long long) io_u->offset, io_u->buflen,
930 (unsigned long long) io_u->file->real_file_size);
935 * mark entry before potentially trimming io_u
937 if (td_random(td) && file_randommap(td, io_u->file))
938 io_u->buflen = mark_random_map(td, io_u, offset, io_u->buflen);
941 dprint_io_u(io_u, "fill");
942 td->zone_bytes += io_u->buflen;
946 static void __io_u_mark_map(uint64_t *map, unsigned int nr)
975 void io_u_mark_submit(struct thread_data *td, unsigned int nr)
977 __io_u_mark_map(td->ts.io_u_submit, nr);
978 td->ts.total_submit++;
981 void io_u_mark_complete(struct thread_data *td, unsigned int nr)
983 __io_u_mark_map(td->ts.io_u_complete, nr);
984 td->ts.total_complete++;
987 void io_u_mark_depth(struct thread_data *td, unsigned int nr)
991 switch (td->cur_depth) {
1013 td->ts.io_u_map[idx] += nr;
1016 static void io_u_mark_lat_nsec(struct thread_data *td, unsigned long long nsec)
1020 assert(nsec < 1000);
1053 assert(idx < FIO_IO_U_LAT_N_NR);
1054 td->ts.io_u_lat_n[idx]++;
1057 static void io_u_mark_lat_usec(struct thread_data *td, unsigned long long usec)
1061 assert(usec < 1000 && usec >= 1);
1094 assert(idx < FIO_IO_U_LAT_U_NR);
1095 td->ts.io_u_lat_u[idx]++;
1098 static void io_u_mark_lat_msec(struct thread_data *td, unsigned long long msec)
1141 assert(idx < FIO_IO_U_LAT_M_NR);
1142 td->ts.io_u_lat_m[idx]++;
1145 static void io_u_mark_latency(struct thread_data *td, unsigned long long nsec)
1148 io_u_mark_lat_nsec(td, nsec);
1149 else if (nsec < 1000000)
1150 io_u_mark_lat_usec(td, nsec / 1000);
1152 io_u_mark_lat_msec(td, nsec / 1000000);
1155 static unsigned int __get_next_fileno_rand(struct thread_data *td)
1157 unsigned long fileno;
1159 if (td->o.file_service_type == FIO_FSERVICE_RANDOM) {
1160 uint64_t frand_max = rand_max(&td->next_file_state);
1163 r = __rand(&td->next_file_state);
1164 return (unsigned int) ((double) td->o.nr_files
1165 * (r / (frand_max + 1.0)));
1168 if (td->o.file_service_type == FIO_FSERVICE_ZIPF)
1169 fileno = zipf_next(&td->next_file_zipf);
1170 else if (td->o.file_service_type == FIO_FSERVICE_PARETO)
1171 fileno = pareto_next(&td->next_file_zipf);
1172 else if (td->o.file_service_type == FIO_FSERVICE_GAUSS)
1173 fileno = gauss_next(&td->next_file_gauss);
1175 log_err("fio: bad file service type: %d\n", td->o.file_service_type);
1180 return fileno >> FIO_FSERVICE_SHIFT;
1184 * Get next file to service by choosing one at random
1186 static struct fio_file *get_next_file_rand(struct thread_data *td,
1187 enum fio_file_flags goodf,
1188 enum fio_file_flags badf)
1196 fno = __get_next_fileno_rand(td);
1199 if (fio_file_done(f))
1202 if (!fio_file_open(f)) {
1205 if (td->nr_open_files >= td->o.open_files)
1206 return ERR_PTR(-EBUSY);
1208 err = td_io_open_file(td, f);
1214 if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
1215 dprint(FD_FILE, "get_next_file_rand: %p\n", f);
1219 td_io_close_file(td, f);
1224 * Get next file to service by doing round robin between all available ones
1226 static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
1229 unsigned int old_next_file = td->next_file;
1235 f = td->files[td->next_file];
1238 if (td->next_file >= td->o.nr_files)
1241 dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
1242 if (fio_file_done(f)) {
1247 if (!fio_file_open(f)) {
1250 if (td->nr_open_files >= td->o.open_files)
1251 return ERR_PTR(-EBUSY);
1253 err = td_io_open_file(td, f);
1255 dprint(FD_FILE, "error %d on open of %s\n",
1263 dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
1265 if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
1269 td_io_close_file(td, f);
1272 } while (td->next_file != old_next_file);
1274 dprint(FD_FILE, "get_next_file_rr: %p\n", f);
1278 static struct fio_file *__get_next_file(struct thread_data *td)
1282 assert(td->o.nr_files <= td->files_index);
1284 if (td->nr_done_files >= td->o.nr_files) {
1285 dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
1286 " nr_files=%d\n", td->nr_open_files,
1292 f = td->file_service_file;
1293 if (f && fio_file_open(f) && !fio_file_closing(f)) {
1294 if (td->o.file_service_type == FIO_FSERVICE_SEQ)
1296 if (td->file_service_left--)
1300 if (td->o.file_service_type == FIO_FSERVICE_RR ||
1301 td->o.file_service_type == FIO_FSERVICE_SEQ)
1302 f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
1304 f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);
1309 td->file_service_file = f;
1310 td->file_service_left = td->file_service_nr - 1;
1313 dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
1315 dprint(FD_FILE, "get_next_file: NULL\n");
1319 static struct fio_file *get_next_file(struct thread_data *td)
1321 return __get_next_file(td);
1324 static long set_io_u_file(struct thread_data *td, struct io_u *io_u)
1329 f = get_next_file(td);
1330 if (IS_ERR_OR_NULL(f))
1336 if (!fill_io_u(td, io_u))
1339 if (io_u->post_submit) {
1340 io_u->post_submit(io_u, false);
1341 io_u->post_submit = NULL;
1344 put_file_log(td, f);
1345 td_io_close_file(td, f);
1347 if (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)
1348 fio_file_reset(td, f);
1350 fio_file_set_done(f);
1351 td->nr_done_files++;
1352 dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
1353 td->nr_done_files, td->o.nr_files);
1361 * If latency target is enabled, we might be ramping up or down and not
1362 * using the full queue depth available.
1364 bool queue_full(const struct thread_data *td)
1366 const int qempty = io_u_qempty(&td->io_u_freelist);
1370 if (!td->o.latency_target || td->o.iodepth_mode != IOD_STEPPED)
1373 return td->cur_depth >= td->latency_qd;
1376 struct io_u *__get_io_u(struct thread_data *td)
1378 const bool needs_lock = td_async_processing(td);
1379 struct io_u *io_u = NULL;
1389 if (!io_u_rempty(&td->io_u_requeues))
1390 io_u = io_u_rpop(&td->io_u_requeues);
1391 else if (!queue_full(td)) {
1392 io_u = io_u_qpop(&td->io_u_freelist);
1397 io_u->end_io = NULL;
1401 assert(io_u->flags & IO_U_F_FREE);
1402 io_u_clear(td, io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
1403 IO_U_F_TRIMMED | IO_U_F_BARRIER |
1407 io_u->acct_ddir = -1;
1409 assert(!(td->flags & TD_F_CHILD));
1410 io_u_set(td, io_u, IO_U_F_IN_CUR_DEPTH);
1412 } else if (td_async_processing(td)) {
1414 * We ran out, wait for async verify threads to finish and return one.
1417 assert(!(td->flags & TD_F_CHILD));
1418 ret = pthread_cond_wait(&td->free_cond, &td->io_u_lock);
1424 __td_io_u_unlock(td);
1429 static bool check_get_trim(struct thread_data *td, struct io_u *io_u)
1431 if (!(td->flags & TD_F_TRIM_BACKLOG))
1433 if (!td->trim_entries)
1436 if (td->trim_batch) {
1438 if (get_next_trim(td, io_u))
1440 } else if (!(td->io_hist_len % td->o.trim_backlog) &&
1441 td->last_ddir != DDIR_READ) {
1442 td->trim_batch = td->o.trim_batch;
1443 if (!td->trim_batch)
1444 td->trim_batch = td->o.trim_backlog;
1445 if (get_next_trim(td, io_u))
1452 static bool check_get_verify(struct thread_data *td, struct io_u *io_u)
1454 if (!(td->flags & TD_F_VER_BACKLOG))
1457 if (td->io_hist_len) {
1460 if (td->verify_batch)
1462 else if (!(td->io_hist_len % td->o.verify_backlog) &&
1463 td->last_ddir != DDIR_READ) {
1464 td->verify_batch = td->o.verify_batch;
1465 if (!td->verify_batch)
1466 td->verify_batch = td->o.verify_backlog;
1470 if (get_verify && !get_next_verify(td, io_u)) {
1480 * Fill offset and start time into the buffer content, to prevent too
1481 * easily compressible data for simple de-dupe attempts. Do this for every
1482 * 512b block in the range, since that should be the smallest block size
1483 * we can expect from a device.
1485 static void small_content_scramble(struct io_u *io_u)
1487 unsigned long long i, nr_blocks = io_u->buflen >> 9;
1488 unsigned int offset;
1489 uint64_t boffset, *iptr;
1496 boffset = io_u->offset;
1498 if (io_u->buf_filled_len)
1499 io_u->buf_filled_len = 0;
1502 * Generate a random index between 0..7. We do chunks of 512b; if
1503 * we assume a cacheline is 64 bytes, then we have 8 of those per block.
1504 * Scramble content within the blocks in the same cacheline to speed things up.
1507 offset = (io_u->start_time.tv_nsec ^ boffset) & 7;
1509 for (i = 0; i < nr_blocks; i++) {
1511 * Fill offset into start of cacheline, time into end
1514 iptr = (void *) p + (offset << 6);
1517 iptr = (void *) p + 64 - 2 * sizeof(uint64_t);
1518 iptr[0] = io_u->start_time.tv_sec;
1519 iptr[1] = io_u->start_time.tv_nsec;
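/*
 * Net effect per 512b block: the absolute block offset lands in one of
 * the eight cachelines (picked from the start-time nanoseconds XORed
 * with the offset) and the submission timestamp lands at the tail of the
 * first cacheline, so otherwise identical buffers still differ block by
 * block.
 */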
1527 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
1528 * etc. The returned io_u is fully ready to be prepped, populated and submitted.
1530 struct io_u *get_io_u(struct thread_data *td)
1534 int do_scramble = 0;
1537 io_u = __get_io_u(td);
1539 dprint(FD_IO, "__get_io_u failed\n");
1543 if (check_get_verify(td, io_u))
1545 if (check_get_trim(td, io_u))
1549 * from a requeue, io_u already setup
1555 * If using an iolog, grab next piece if any available.
1557 if (td->flags & TD_F_READ_IOLOG) {
1558 if (read_iolog_get(td, io_u))
1560 } else if (set_io_u_file(td, io_u)) {
1562 dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
1568 dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
1572 assert(fio_file_open(f));
1574 if (ddir_rw(io_u->ddir)) {
1575 if (!io_u->buflen && !td_ioengine_flagged(td, FIO_NOIO)) {
1576 dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
1580 f->last_start[io_u->ddir] = io_u->offset;
1581 f->last_pos[io_u->ddir] = io_u->offset + io_u->buflen;
1583 if (io_u->ddir == DDIR_WRITE) {
1584 if (td->flags & TD_F_REFILL_BUFFERS) {
1585 io_u_fill_buffer(td, io_u,
1586 td->o.min_bs[DDIR_WRITE],
1588 } else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
1589 !(td->flags & TD_F_COMPRESS) &&
1590 !(td->flags & TD_F_DO_VERIFY))
1592 } else if (io_u->ddir == DDIR_READ) {
1594 * Reset the buf_filled parameters so that if the buffer is later
1595 * used for writes, it will be refilled.
1597 io_u->buf_filled_len = 0;
1602 * Set io data pointers.
1604 io_u->xfer_buf = io_u->buf;
1605 io_u->xfer_buflen = io_u->buflen;
1609 if (!td_io_prep(td, io_u)) {
1610 if (!td->o.disable_lat)
1611 fio_gettime(&io_u->start_time, NULL);
1614 small_content_scramble(io_u);
1619 dprint(FD_IO, "get_io_u failed\n");
1621 return ERR_PTR(ret);
1624 static void __io_u_log_error(struct thread_data *td, struct io_u *io_u)
1626 enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);
1628 if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
1631 log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%llu\n",
1632 io_u->file ? " on file " : "",
1633 io_u->file ? io_u->file->file_name : "",
1634 strerror(io_u->error),
1635 io_ddir_name(io_u->ddir),
1636 io_u->offset, io_u->xfer_buflen);
1638 if (td->io_ops->errdetails) {
1639 char *err = td->io_ops->errdetails(io_u);
1641 log_err("fio: %s\n", err);
1646 td_verror(td, io_u->error, "io_u error");
1649 void io_u_log_error(struct thread_data *td, struct io_u *io_u)
1651 __io_u_log_error(td, io_u);
1653 __io_u_log_error(td->parent, io_u);
1656 static inline bool gtod_reduce(struct thread_data *td)
1658 return (td->o.disable_clat && td->o.disable_slat && td->o.disable_bw)
1659 || td->o.gtod_reduce;
1662 static void trim_block_info(struct thread_data *td, struct io_u *io_u)
1664 uint32_t *info = io_u_block_info(td, io_u);
1666 if (BLOCK_INFO_STATE(*info) >= BLOCK_STATE_TRIM_FAILURE)
1669 *info = BLOCK_INFO(BLOCK_STATE_TRIMMED, BLOCK_INFO_TRIMS(*info) + 1);
1672 static void account_io_completion(struct thread_data *td, struct io_u *io_u,
1673 struct io_completion_data *icd,
1674 const enum fio_ddir idx, unsigned int bytes)
1676 const int no_reduce = !gtod_reduce(td);
1677 unsigned long long llnsec = 0;
1682 if (!td->o.stats || td_ioengine_flagged(td, FIO_NOSTATS))
1686 llnsec = ntime_since(&io_u->issue_time, &icd->time);
1688 if (!td->o.disable_lat) {
1689 unsigned long long tnsec;
1691 tnsec = ntime_since(&io_u->start_time, &icd->time);
1692 add_lat_sample(td, idx, tnsec, bytes, io_u->offset);
1694 if (td->flags & TD_F_PROFILE_OPS) {
1695 struct prof_io_ops *ops = &td->prof_io_ops;
1698 icd->error = ops->io_u_lat(td, tnsec);
1701 if (td->o.max_latency && tnsec > td->o.max_latency) {
1702 icd->error = ETIMEDOUT;
1703 lat_fatal(td, tnsec, td->o.max_latency);
1705 if (td->o.latency_target && tnsec > td->o.latency_target) {
1706 if (lat_target_failed(td)) {
1707 icd->error = ETIMEDOUT;
1708 lat_fatal(td, tnsec, td->o.latency_target);
1714 if (!td->o.disable_clat) {
1715 add_clat_sample(td, idx, llnsec, bytes, io_u->offset);
1716 io_u_mark_latency(td, llnsec);
1719 if (!td->o.disable_bw && per_unit_log(td->bw_log))
1720 add_bw_sample(td, io_u, bytes, llnsec);
1722 if (no_reduce && per_unit_log(td->iops_log))
1723 add_iops_sample(td, io_u, bytes);
1724 } else if (ddir_sync(idx) && !td->o.disable_clat)
1725 add_sync_clat_sample(&td->ts, llnsec);
1727 if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM)
1728 trim_block_info(td, io_u);
1731 static void file_log_write_comp(const struct thread_data *td, struct fio_file *f,
1732 uint64_t offset, unsigned int bytes)
1739 if (f->first_write == -1ULL || offset < f->first_write)
1740 f->first_write = offset;
1741 if (f->last_write == -1ULL || ((offset + bytes) > f->last_write))
1742 f->last_write = offset + bytes;
1744 if (!f->last_write_comp)
1747 idx = f->last_write_idx++;
1748 f->last_write_comp[idx] = offset;
1749 if (f->last_write_idx == td->o.iodepth)
1750 f->last_write_idx = 0;
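/*
 * Two things are tracked here: the [first_write, last_write) extent of
 * completed writes, later consumed by do_sync_file_range(), and, when
 * last_write_comp is allocated, a ring of the most recently completed
 * write offsets sized to the configured iodepth.
 */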
1753 static bool should_account(struct thread_data *td)
1755 return lat_step_account(td) && ramp_time_over(td) &&
1756 (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING);
1759 static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
1760 struct io_completion_data *icd)
1762 struct io_u *io_u = *io_u_ptr;
1763 enum fio_ddir ddir = io_u->ddir;
1764 struct fio_file *f = io_u->file;
1766 dprint_io_u(io_u, "complete");
1768 assert(io_u->flags & IO_U_F_FLIGHT);
1769 io_u_clear(td, io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
1772 * Mark IO ok to verify
1776 * Remove errored entry from the verification list
1779 unlog_io_piece(td, io_u);
1781 io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
1786 if (ddir_sync(ddir)) {
1787 td->last_was_sync = true;
1789 f->first_write = -1ULL;
1790 f->last_write = -1ULL;
1792 if (should_account(td))
1793 account_io_completion(td, io_u, icd, ddir, io_u->buflen);
1797 td->last_was_sync = false;
1798 td->last_ddir = ddir;
1800 if (!io_u->error && ddir_rw(ddir)) {
1801 unsigned long long bytes = io_u->buflen - io_u->resid;
1804 td->io_blocks[ddir]++;
1805 td->io_bytes[ddir] += bytes;
1807 if (!(io_u->flags & IO_U_F_VER_LIST)) {
1808 td->this_io_blocks[ddir]++;
1809 td->this_io_bytes[ddir] += bytes;
1812 if (ddir == DDIR_WRITE)
1813 file_log_write_comp(td, f, io_u->offset, bytes);
1815 if (should_account(td))
1816 account_io_completion(td, io_u, icd, ddir, bytes);
1818 icd->bytes_done[ddir] += bytes;
1821 ret = io_u->end_io(td, io_u_ptr);
1823 if (ret && !icd->error)
1826 } else if (io_u->error) {
1827 icd->error = io_u->error;
1828 io_u_log_error(td, io_u);
1831 enum error_type_bit eb = td_error_type(ddir, icd->error);
1833 if (!td_non_fatal_error(td, eb, icd->error))
1837 * If there is a non_fatal error, then add to the error count
1838 * and clear all the errors.
1840 update_error_count(td, icd->error);
1848 static void init_icd(struct thread_data *td, struct io_completion_data *icd,
1853 if (!gtod_reduce(td))
1854 fio_gettime(&icd->time, NULL);
1859 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
1860 icd->bytes_done[ddir] = 0;
1863 static void ios_completed(struct thread_data *td,
1864 struct io_completion_data *icd)
1869 for (i = 0; i < icd->nr; i++) {
1870 io_u = td->io_ops->event(td, i);
1872 io_completed(td, &io_u, icd);
1880 * Complete a single io_u for the sync engines.
1882 int io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
1884 struct io_completion_data icd;
1887 init_icd(td, &icd, 1);
1888 io_completed(td, &io_u, &icd);
1894 td_verror(td, icd.error, "io_u_sync_complete");
1898 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
1899 td->bytes_done[ddir] += icd.bytes_done[ddir];
1905 * Called to complete min_events number of io for the async engines.
1907 int io_u_queued_complete(struct thread_data *td, int min_evts)
1909 struct io_completion_data icd;
1910 struct timespec *tvp = NULL;
1912 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
1914 dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts);
1918 else if (min_evts > td->cur_depth)
1919 min_evts = td->cur_depth;
1921 /* No worries, td_io_getevents fixes min and max if they are
1922 * set incorrectly */
1923 ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete_max, tvp);
1925 td_verror(td, -ret, "td_io_getevents");
1930 init_icd(td, &icd, ret);
1931 ios_completed(td, &icd);
1933 td_verror(td, icd.error, "io_u_queued_complete");
1937 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
1938 td->bytes_done[ddir] += icd.bytes_done[ddir];
1944 * Call when io_u is really queued, to update the submission latency.
1946 void io_u_queued(struct thread_data *td, struct io_u *io_u)
1948 if (!td->o.disable_slat && ramp_time_over(td) && td->o.stats) {
1949 unsigned long slat_time;
1951 slat_time = ntime_since(&io_u->start_time, &io_u->issue_time);
1956 add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
1962 * See if we should reuse the last seed, if dedupe is enabled
1964 static struct frand_state *get_buf_state(struct thread_data *td)
1968 if (!td->o.dedupe_percentage)
1969 return &td->buf_state;
1970 else if (td->o.dedupe_percentage == 100) {
1971 frand_copy(&td->buf_state_prev, &td->buf_state);
1972 return &td->buf_state;
1975 v = rand_between(&td->dedupe_state, 1, 100);
1977 if (v <= td->o.dedupe_percentage)
1978 return &td->buf_state_prev;
1980 return &td->buf_state;
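/*
 * Dedupe works by reusing the previous buffer seed: a draw of 1..100 is
 * compared against dedupe_percentage, and on a hit the prior random
 * state is returned so the next buffer fill reproduces identical data.
 * With dedupe_percentage = 100 the state is snapshotted here and
 * restored again in save_buf_state(), so every fill repeats.
 */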
1983 static void save_buf_state(struct thread_data *td, struct frand_state *rs)
1985 if (td->o.dedupe_percentage == 100)
1986 frand_copy(rs, &td->buf_state_prev);
1987 else if (rs == &td->buf_state)
1988 frand_copy(&td->buf_state_prev, rs);
1991 void fill_io_buffer(struct thread_data *td, void *buf, unsigned long long min_write,
1992 unsigned long long max_bs)
1994 struct thread_options *o = &td->o;
1996 if (o->mem_type == MEM_CUDA_MALLOC)
1999 if (o->compress_percentage || o->dedupe_percentage) {
2000 unsigned int perc = td->o.compress_percentage;
2001 struct frand_state *rs;
2002 unsigned long long left = max_bs;
2003 unsigned long long this_write;
2006 rs = get_buf_state(td);
2008 min_write = min(min_write, left);
2011 this_write = min_not_zero(min_write,
2012 (unsigned long long) td->o.compress_chunk);
2014 fill_random_buf_percentage(rs, buf, perc,
2015 this_write, this_write,
2017 o->buffer_pattern_bytes);
2019 fill_random_buf(rs, buf, min_write);
2020 this_write = min_write;
2025 save_buf_state(td, rs);
2027 } else if (o->buffer_pattern_bytes)
2028 fill_buffer_pattern(td, buf, max_bs);
2029 else if (o->zero_buffers)
2030 memset(buf, 0, max_bs);
2032 fill_random_buf(get_buf_state(td), buf, max_bs);
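/*
 * Buffer fill policy, in order: CUDA device memory is left untouched
 * here; with the compression/dedupe options set, the buffer is filled in
 * compress_chunk-sized pieces whose compressibility follows
 * compress_percentage; otherwise a configured pattern, all zeroes, or
 * plain random data is used, depending on buffer_pattern_bytes and
 * zero_buffers.
 */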
2036 * "randomly" fill the buffer contents
2038 void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
2039 unsigned long long min_write, unsigned long long max_bs)
2041 io_u->buf_filled_len = 0;
2042 fill_io_buffer(td, io_u->buf, min_write, max_bs);
2045 static int do_sync_file_range(const struct thread_data *td,
2048 off64_t offset, nbytes;
2050 offset = f->first_write;
2051 nbytes = f->last_write - f->first_write;
2056 return sync_file_range(f->fd, offset, nbytes, td->o.sync_file_range);
2059 int do_io_u_sync(const struct thread_data *td, struct io_u *io_u)
2063 if (io_u->ddir == DDIR_SYNC) {
2064 ret = fsync(io_u->file->fd);
2065 } else if (io_u->ddir == DDIR_DATASYNC) {
2066 #ifdef CONFIG_FDATASYNC
2067 ret = fdatasync(io_u->file->fd);
2069 ret = io_u->xfer_buflen;
2070 io_u->error = EINVAL;
2072 } else if (io_u->ddir == DDIR_SYNC_FILE_RANGE)
2073 ret = do_sync_file_range(td, io_u->file);
2075 ret = io_u->xfer_buflen;
2076 io_u->error = EINVAL;
2080 io_u->error = errno;
2085 int do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
2087 #ifndef FIO_HAVE_TRIM
2088 io_u->error = EINVAL;
2091 struct fio_file *f = io_u->file;
2094 ret = os_trim(f, io_u->offset, io_u->xfer_buflen);
2096 return io_u->xfer_buflen;