workqueue: ensure we see deferred error for IOs
[fio.git] / io_u.c
#include <unistd.h>
#include <string.h>
#include <assert.h>

#include "fio.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
#include "lib/axmap.h"
#include "err.h"
#include "lib/pow2.h"
#include "minmax.h"
#include "zbd.h"

struct io_completion_data {
	int nr;				/* input */

	int error;			/* output */
	uint64_t bytes_done[DDIR_RWDIR_CNT];	/* output */
	struct timespec time;		/* output */
};

/*
 * The ->io_axmap contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static bool random_map_free(struct fio_file *f, const uint64_t block)
{
	return !axmap_isset(f->io_axmap, block);
}

/*
 * Mark a given offset as used in the map.
 */
static uint64_t mark_random_map(struct thread_data *td, struct io_u *io_u,
				uint64_t offset, uint64_t buflen)
{
	unsigned long long min_bs = td->o.min_bs[io_u->ddir];
	struct fio_file *f = io_u->file;
	unsigned long long nr_blocks;
	uint64_t block;

	block = (offset - f->file_offset) / (uint64_t) min_bs;
	nr_blocks = (buflen + min_bs - 1) / min_bs;
	assert(nr_blocks > 0);

	if (!(io_u->flags & IO_U_F_BUSY_OK)) {
		nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);
		assert(nr_blocks > 0);
	}

	if ((nr_blocks * min_bs) < buflen)
		buflen = nr_blocks * min_bs;

	return buflen;
}
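
/*
 * Return the number of blocks (in units of ->ba[ddir]) that IO for this
 * file and data direction may span, capped by the usable file size and,
 * for strided zone mode, the zone range.
 */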
static uint64_t last_block(struct thread_data *td, struct fio_file *f,
			   enum fio_ddir ddir)
{
	uint64_t max_blocks;
	uint64_t max_size;

	assert(ddir_rw(ddir));

	/*
	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
	 * -> not for now, since there is code assuming it could go either way.
	 */
	max_size = f->io_size;
	if (max_size > f->real_file_size)
		max_size = f->real_file_size;

	if (td->o.zone_mode == ZONE_MODE_STRIDED && td->o.zone_range)
		max_size = td->o.zone_range;

	if (td->o.min_bs[ddir] > td->o.ba[ddir])
		max_size -= td->o.min_bs[ddir] - td->o.ba[ddir];

	max_blocks = max_size / (uint64_t) td->o.ba[ddir];
	if (!max_blocks)
		return 0;

	return max_blocks;
}

static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				  enum fio_ddir ddir, uint64_t *b,
				  uint64_t lastb)
{
	uint64_t r;

	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE ||
	    td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64) {

		r = __rand(&td->random_state);

		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);

		*b = lastb * (r / (rand_max(&td->random_state) + 1.0));
	} else {
		uint64_t off = 0;

		assert(fio_file_lfsr(f));

		if (lfsr_next(&f->lfsr, &off))
			return 1;

		*b = off;
	}

	/*
	 * if we are not maintaining a random map, we are done.
	 */
	if (!file_randommap(td, f))
		goto ret;

	/*
	 * calculate map offset and check if it's free
	 */
	if (random_map_free(f, *b))
		goto ret;

	dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
			(unsigned long long) *b);

	*b = axmap_next_free(f->io_axmap, *b);
	if (*b == (uint64_t) -1ULL)
		return 1;
ret:
	return 0;
}

static int __get_next_rand_offset_zipf(struct thread_data *td,
				       struct fio_file *f, enum fio_ddir ddir,
				       uint64_t *b)
{
	*b = zipf_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_pareto(struct thread_data *td,
					 struct fio_file *f, enum fio_ddir ddir,
					 uint64_t *b)
{
	*b = pareto_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_gauss(struct thread_data *td,
					struct fio_file *f, enum fio_ddir ddir,
					uint64_t *b)
{
	*b = gauss_next(&f->gauss);
	return 0;
}

static int __get_next_rand_offset_zoned_abs(struct thread_data *td,
					    struct fio_file *f,
					    enum fio_ddir ddir, uint64_t *b)
{
	struct zone_split_index *zsi;
	uint64_t lastb, send, stotal;
	unsigned int v;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	if (!td->o.zone_split_nr[ddir]) {
bail:
		return __get_next_rand_offset(td, f, ddir, b, lastb);
	}

	/*
	 * Generate a value, v, between 1 and 100, both inclusive
	 */
	v = rand_between(&td->zone_state, 1, 100);

	/*
	 * Find our generated table. 'send' is the end block of this zone,
	 * 'stotal' is our start offset.
	 */
	zsi = &td->zone_state_index[ddir][v - 1];
	stotal = zsi->size_prev / td->o.ba[ddir];
	send = zsi->size / td->o.ba[ddir];

	/*
	 * Should never happen
	 */
	if (send == -1U) {
		if (!fio_did_warn(FIO_WARN_ZONED_BUG))
			log_err("fio: bug in zoned generation\n");
		goto bail;
	} else if (send > lastb) {
		/*
		 * This happens if the user specifies ranges that exceed
		 * the file/device size. We can't handle that gracefully,
		 * so error and exit.
		 */
		log_err("fio: zoned_abs sizes exceed file size\n");
		return 1;
	}

	/*
	 * Generate index from 0..send-stotal
	 */
	if (__get_next_rand_offset(td, f, ddir, b, send - stotal) == 1)
		return 1;

	*b += stotal;
	return 0;
}

static int __get_next_rand_offset_zoned(struct thread_data *td,
					struct fio_file *f, enum fio_ddir ddir,
					uint64_t *b)
{
	unsigned int v, send, stotal;
	uint64_t offset, lastb;
	struct zone_split_index *zsi;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	if (!td->o.zone_split_nr[ddir]) {
bail:
		return __get_next_rand_offset(td, f, ddir, b, lastb);
	}

	/*
	 * Generate a value, v, between 1 and 100, both inclusive
	 */
	v = rand_between(&td->zone_state, 1, 100);

	zsi = &td->zone_state_index[ddir][v - 1];
	stotal = zsi->size_perc_prev;
	send = zsi->size_perc;

	/*
	 * Should never happen
	 */
	if (send == -1U) {
		if (!fio_did_warn(FIO_WARN_ZONED_BUG))
			log_err("fio: bug in zoned generation\n");
		goto bail;
	}

	/*
	 * 'send' is some percentage below or equal to 100 that
	 * marks the end of the current IO range. 'stotal' marks
	 * the start, in percent.
	 */
	if (stotal)
		offset = stotal * lastb / 100ULL;
	else
		offset = 0;

	lastb = lastb * (send - stotal) / 100ULL;

	/*
	 * Generate index from 0..send-of-lastb
	 */
	if (__get_next_rand_offset(td, f, ddir, b, lastb) == 1)
		return 1;

	/*
	 * Add our start offset, if any
	 */
	if (offset)
		*b += offset;

	return 0;
}
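
/*
 * Dispatch to the offset generator matching the configured random
 * distribution: uniform/LFSR, zipf, pareto, gauss, zoned or zoned_abs.
 */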
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				enum fio_ddir ddir, uint64_t *b)
{
	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM) {
		uint64_t lastb;

		lastb = last_block(td, f, ddir);
		if (!lastb)
			return 1;

		return __get_next_rand_offset(td, f, ddir, b, lastb);
	} else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
		return __get_next_rand_offset_zipf(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
		return __get_next_rand_offset_pareto(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
		return __get_next_rand_offset_gauss(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_ZONED)
		return __get_next_rand_offset_zoned(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_ZONED_ABS)
		return __get_next_rand_offset_zoned_abs(td, f, ddir, b);

	log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
	return 1;
}

static bool should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned int v;

	if (td->o.perc_rand[ddir] == 100)
		return true;

	v = rand_between(&td->seq_rand_state[ddir], 1, 100);

	return v <= td->o.perc_rand[ddir];
}

static void loop_cache_invalidate(struct thread_data *td, struct fio_file *f)
{
	struct thread_options *o = &td->o;

	if (o->invalidate_cache && !o->odirect) {
		int fio_unused ret;

		ret = file_invalidate_cache(td, f);
	}
}
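
/*
 * Get the next free random block. For time based jobs (and non-uniform
 * file service), reset the file and retry once before giving up.
 */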
static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (!get_next_rand_offset(td, f, ddir, b))
		return 0;

	if (td->o.time_based ||
	    (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)) {
		fio_file_reset(td, f);
		loop_cache_invalidate(td, f);
		if (!get_next_rand_offset(td, f, ddir, b))
			return 0;
	}

	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
			f->file_name, (unsigned long long) f->last_pos[ddir],
			(unsigned long long) f->real_file_size);
	return 1;
}

static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *offset)
{
	struct thread_options *o = &td->o;

	assert(ddir_rw(ddir));

	/*
	 * If we reach the end for a time based run, reset us back to 0
	 * and invalidate the cache, if we need to.
	 */
	if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
	    o->time_based) {
		f->last_pos[ddir] = f->file_offset;
		loop_cache_invalidate(td, f);
	}

	if (f->last_pos[ddir] < f->real_file_size) {
		uint64_t pos;

		/*
		 * Only rewind if we already hit the end
		 */
		if (f->last_pos[ddir] == f->file_offset &&
		    f->file_offset && o->ddir_seq_add < 0) {
			if (f->real_file_size > f->io_size)
				f->last_pos[ddir] = f->io_size;
			else
				f->last_pos[ddir] = f->real_file_size;
		}

		pos = f->last_pos[ddir] - f->file_offset;
		if (pos && o->ddir_seq_add) {
			pos += o->ddir_seq_add;

			/*
			 * If we reach beyond the end of the file
			 * with holed IO, wrap around to the
			 * beginning again. If we're doing backwards IO,
			 * wrap to the end.
			 */
			if (pos >= f->real_file_size) {
				if (o->ddir_seq_add > 0)
					pos = f->file_offset;
				else {
					if (f->real_file_size > f->io_size)
						pos = f->io_size;
					else
						pos = f->real_file_size;

					pos += o->ddir_seq_add;
				}
			}
		}

		*offset = pos;
		return 0;
	}

	return 1;
}

static int get_next_block(struct thread_data *td, struct io_u *io_u,
			  enum fio_ddir ddir, int rw_seq,
			  bool *is_random)
{
	struct fio_file *f = io_u->file;
	uint64_t b, offset;
	int ret;

	assert(ddir_rw(ddir));

	b = offset = -1ULL;

	if (rw_seq) {
		if (td_random(td)) {
			if (should_do_random(td, ddir)) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = true;
			} else {
				*is_random = false;
				io_u_set(td, io_u, IO_U_F_BUSY_OK);
				ret = get_next_seq_offset(td, f, ddir, &offset);
				if (ret)
					ret = get_next_rand_block(td, f, ddir, &b);
			}
		} else {
			*is_random = false;
			ret = get_next_seq_offset(td, f, ddir, &offset);
		}
	} else {
		io_u_set(td, io_u, IO_U_F_BUSY_OK);
		*is_random = false;

		if (td->o.rw_seq == RW_SEQ_SEQ) {
			ret = get_next_seq_offset(td, f, ddir, &offset);
			if (ret) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = false;
			}
		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
			if (f->last_start[ddir] != -1ULL)
				offset = f->last_start[ddir] - f->file_offset;
			else
				offset = 0;
			ret = 0;
		} else {
			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
			ret = 1;
		}
	}

	if (!ret) {
		if (offset != -1ULL)
			io_u->offset = offset;
		else if (b != -1ULL)
			io_u->offset = b * td->o.ba[ddir];
		else {
			log_err("fio: bug in offset generation: offset=%llu, b=%llu\n",
				(unsigned long long) offset, (unsigned long long) b);
			ret = 1;
		}
	}

	return ret;
}

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int get_next_offset(struct thread_data *td, struct io_u *io_u,
			   bool *is_random)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	int rw_seq_hit = 0;

	assert(ddir_rw(ddir));

	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
		rw_seq_hit = 1;
		td->ddir_seq_nr = td->o.ddir_seq_nr;
	}

	if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
		return 1;

	if (io_u->offset >= f->io_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
			(unsigned long long) io_u->offset,
			(unsigned long long) f->io_size);
		return 1;
	}

	io_u->offset += f->file_offset;
	if (io_u->offset >= f->real_file_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
			(unsigned long long) io_u->offset,
			(unsigned long long) f->real_file_size);
		return 1;
	}

	return 0;
}

static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u,
			     unsigned long long buflen)
{
	struct fio_file *f = io_u->file;

	return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
}
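
/*
 * Pick the next buffer length: fixed if min and max block size are equal,
 * otherwise drawn randomly between them, honoring any bssplit weights and
 * trimming the result to the block size alignment.
 */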
static unsigned long long get_next_buflen(struct thread_data *td, struct io_u *io_u,
					  bool is_random)
{
	int ddir = io_u->ddir;
	unsigned long long buflen = 0;
	unsigned long long minbs, maxbs;
	uint64_t frand_max, r;
	bool power_2;

	assert(ddir_rw(ddir));

	if (td->o.bs_is_seq_rand)
		ddir = is_random ? DDIR_WRITE : DDIR_READ;

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		return minbs;

	/*
	 * If we can't satisfy the min block size from here, then fail
	 */
	if (!io_u_fits(td, io_u, minbs))
		return 0;

	frand_max = rand_max(&td->bsrange_state[ddir]);
	do {
		r = __rand(&td->bsrange_state[ddir]);

		if (!td->o.bssplit_nr[ddir]) {
			buflen = minbs + (unsigned long long) ((double) maxbs *
					(r / (frand_max + 1.0)));
		} else {
			long long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if (!perc)
					break;
				if ((r / perc <= frand_max / 100ULL) &&
				    io_u_fits(td, io_u, buflen))
					break;
			}
		}

		power_2 = is_power_of_2(minbs);
		if (!td->o.bs_unaligned && power_2)
			buflen &= ~(minbs - 1);
		else if (!td->o.bs_unaligned && !power_2)
			buflen -= buflen % minbs;
	} while (!io_u_fits(td, io_u, buflen));

	return buflen;
}

static void set_rwmix_bytes(struct thread_data *td)
{
	unsigned int diff;

	/*
	 * we do time or byte based switch. this is needed because
	 * buffered writes may issue a lot quicker than they complete,
	 * whereas reads do not.
	 */
	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
}

static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	unsigned int v;

	v = rand_between(&td->rwmix_state, 1, 100);

	if (v <= td->o.rwmix[DDIR_READ])
		return DDIR_READ;

	return DDIR_WRITE;
}
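
/*
 * Commit anything pending and reap all in-flight IO, so that we sleep or
 * change settings with nothing outstanding. Returns the number of IOs
 * reaped, or an error from reaping.
 */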
int io_u_quiesce(struct thread_data *td)
{
	int ret = 0, completed = 0;

	/*
	 * We are going to sleep, ensure that we flush anything pending as
	 * not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	if (td->io_u_queued || td->cur_depth)
		td_io_commit(td);

	while (td->io_u_in_flight) {
		ret = io_u_queued_complete(td, 1);
		if (ret > 0)
			completed += ret;
		else if (ret < 0)
			break;
	}

	if (td->flags & TD_F_REGROW_LOGS)
		regrow_logs(td);

	if (completed)
		return completed;

	return ret;
}

static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	uint64_t usec;
	uint64_t now;

	assert(ddir_rw(ddir));
	now = utime_since_now(&td->start);

	/*
	 * if rate_next_io_time is in the past, need to catch up to rate
	 */
	if (td->rate_next_io_time[ddir] <= now)
		return ddir;

	/*
	 * We are ahead of rate in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td) && td->o.rwmix[odir]) {
		/*
		 * Other direction is behind rate, switch
		 */
		if (td->rate_next_io_time[odir] <= now)
			return odir;

		/*
		 * Both directions are ahead of rate. sleep the min,
		 * switch if necessary
		 */
		if (td->rate_next_io_time[ddir] <=
		    td->rate_next_io_time[odir]) {
			usec = td->rate_next_io_time[ddir] - now;
		} else {
			usec = td->rate_next_io_time[odir] - now;
			ddir = odir;
		}
	} else
		usec = td->rate_next_io_time[ddir] - now;

	if (td->o.io_submit_mode == IO_MODE_INLINE)
		io_u_quiesce(td);

	usec_sleep(td, usec);
	return ddir;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
	enum fio_ddir ddir;

	/*
	 * See if it's time to fsync/fdatasync/sync_file_range first,
	 * and if not then move on to check regular I/Os.
	 */
	if (should_fsync(td)) {
		if (td->o.fsync_blocks && td->io_issues[DDIR_WRITE] &&
		    !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks))
			return DDIR_SYNC;

		if (td->o.fdatasync_blocks && td->io_issues[DDIR_WRITE] &&
		    !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks))
			return DDIR_DATASYNC;

		if (td->sync_file_range_nr && td->io_issues[DDIR_WRITE] &&
		    !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr))
			return DDIR_SYNC_FILE_RANGE;
	}

	if (td_rw(td)) {
		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
			/*
			 * Put a top limit on how many bytes we do for
			 * one data direction, to avoid overflowing the
			 * ranges too much
			 */
			ddir = get_rand_ddir(td);

			if (ddir != td->rwmix_ddir)
				set_rwmix_bytes(td);

			td->rwmix_ddir = ddir;
		}
		ddir = td->rwmix_ddir;
	} else if (td_read(td))
		ddir = DDIR_READ;
	else if (td_write(td))
		ddir = DDIR_WRITE;
	else if (td_trim(td))
		ddir = DDIR_TRIM;
	else
		ddir = DDIR_INVAL;

	td->rwmix_ddir = rate_ddir(td, ddir);
	return td->rwmix_ddir;
}

static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
	enum fio_ddir ddir = get_rw_ddir(td);

	if (td_trimwrite(td)) {
		struct fio_file *f = io_u->file;
		if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
			ddir = DDIR_TRIM;
		else
			ddir = DDIR_WRITE;
	}

	io_u->ddir = io_u->acct_ddir = ddir;

	if (io_u->ddir == DDIR_WRITE && td_ioengine_flagged(td, FIO_BARRIER) &&
	    td->o.barrier_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
	    td->io_issues[DDIR_WRITE])
		io_u_set(td, io_u, IO_U_F_BARRIER);
}

void put_file_log(struct thread_data *td, struct fio_file *f)
{
	unsigned int ret = put_file(td, f);

	if (ret)
		td_verror(td, ret, "file close");
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	const bool needs_lock = td_async_processing(td);

	if (io_u->post_submit) {
		io_u->post_submit(io_u, io_u->error == 0);
		io_u->post_submit = NULL;
	}

	if (td->parent)
		td = td->parent;

	if (needs_lock)
		__td_io_u_lock(td);

	if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT))
		put_file_log(td, io_u->file);

	io_u->file = NULL;
	io_u_set(td, io_u, IO_U_F_FREE);

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
		td->cur_depth--;
		assert(!(td->flags & TD_F_CHILD));
	}
	io_u_qpush(&td->io_u_freelist, io_u);
	td_io_u_free_notify(td);

	if (needs_lock)
		__td_io_u_unlock(td);
}

void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
	io_u_clear(td, io_u, IO_U_F_FLIGHT);
	put_io_u(td, io_u);
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	const bool needs_lock = td_async_processing(td);
	struct io_u *__io_u = *io_u;
	enum fio_ddir ddir = acct_ddir(__io_u);

	dprint(FD_IO, "requeue %p\n", __io_u);

	if (td->parent)
		td = td->parent;

	if (needs_lock)
		__td_io_u_lock(td);

	io_u_set(td, __io_u, IO_U_F_FREE);
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
		td->io_issues[ddir]--;

	io_u_clear(td, __io_u, IO_U_F_FLIGHT);
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH) {
		td->cur_depth--;
		assert(!(td->flags & TD_F_CHILD));
	}

	io_u_rpush(&td->io_u_requeues, __io_u);
	td_io_u_free_notify(td);

	if (needs_lock)
		__td_io_u_unlock(td);

	*io_u = NULL;
}

static void setup_strided_zone_mode(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	assert(td->o.zone_mode == ZONE_MODE_STRIDED);
	assert(td->o.zone_size);
	assert(td->o.zone_range);

	/*
	 * See if it's time to switch to a new zone
	 */
	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
		td->zone_bytes = 0;
		f->file_offset += td->o.zone_range + td->o.zone_skip;

		/*
		 * Wrap from the beginning, if we exceed the file size
		 */
		if (f->file_offset >= f->real_file_size)
			f->file_offset = get_start_offset(td, f);

		f->last_pos[io_u->ddir] = f->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}

	/*
	 * If zone_size > zone_range, then maintain the same zone until
	 * zone_bytes >= zone_size.
	 */
	if (f->last_pos[io_u->ddir] >= (f->file_offset + td->o.zone_range)) {
		dprint(FD_IO, "io_u maintain zone offset=%" PRIu64 "/last_pos=%" PRIu64 "\n",
			f->file_offset, f->last_pos[io_u->ddir]);
		f->last_pos[io_u->ddir] = f->file_offset;
	}

	/*
	 * For random: if 'norandommap' is not set and zone_size > zone_range,
	 * the map needs to be reset, as it's done with zone_range every time.
	 */
	if ((td->zone_bytes % td->o.zone_range) == 0)
		fio_file_reset(td, f);
}
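
/*
 * Fill in data direction, offset and buffer length for an io_u. Returns
 * 0 on success and 1 if the io_u could not be filled (e.g. offset or
 * length generation failed, or the IO would exceed the file size).
 */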
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
	bool is_random;
	uint64_t offset;
	enum io_u_action ret;

	if (td_ioengine_flagged(td, FIO_NOIO))
		goto out;

	set_rw_ddir(td, io_u);

	/*
	 * fsync() or fdatasync() or trim etc, we are done
	 */
	if (!ddir_rw(io_u->ddir))
		goto out;

	if (td->o.zone_mode == ZONE_MODE_STRIDED)
		setup_strided_zone_mode(td, io_u);

	/*
	 * No log, let the seq/rand engine retrieve the next buflen and
	 * position.
	 */
	if (get_next_offset(td, io_u, &is_random)) {
		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
		return 1;
	}

	io_u->buflen = get_next_buflen(td, io_u, is_random);
	if (!io_u->buflen) {
		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
		return 1;
	}

	offset = io_u->offset;
	if (td->o.zone_mode == ZONE_MODE_ZBD) {
		ret = zbd_adjust_block(td, io_u);
		if (ret == io_u_eof)
			return 1;
	}

	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%llx exceeds file size=0x%llx\n",
			io_u,
			(unsigned long long) io_u->offset, io_u->buflen,
			(unsigned long long) io_u->file->real_file_size);
		return 1;
	}

	/*
	 * mark entry before potentially trimming io_u
	 */
	if (td_random(td) && file_randommap(td, io_u->file))
		io_u->buflen = mark_random_map(td, io_u, offset, io_u->buflen);

out:
	dprint_io_u(io_u, "fill");
	td->zone_bytes += io_u->buflen;
	return 0;
}

static void __io_u_mark_map(uint64_t *map, unsigned int nr)
{
	int idx = 0;

	switch (nr) {
	default:
		idx = 6;
		break;
	case 33 ... 64:
		idx = 5;
		break;
	case 17 ... 32:
		idx = 4;
		break;
	case 9 ... 16:
		idx = 3;
		break;
	case 5 ... 8:
		idx = 2;
		break;
	case 1 ... 4:
		idx = 1;
	case 0:
		break;
	}

	map[idx]++;
}

void io_u_mark_submit(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_submit, nr);
	td->ts.total_submit++;
}

void io_u_mark_complete(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_complete, nr);
	td->ts.total_complete++;
}

void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
	int idx = 0;

	switch (td->cur_depth) {
	default:
		idx = 6;
		break;
	case 32 ... 63:
		idx = 5;
		break;
	case 16 ... 31:
		idx = 4;
		break;
	case 8 ... 15:
		idx = 3;
		break;
	case 4 ... 7:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 1:
		break;
	}

	td->ts.io_u_map[idx] += nr;
}

static void io_u_mark_lat_nsec(struct thread_data *td, unsigned long long nsec)
{
	int idx = 0;

	assert(nsec < 1000);

	switch (nsec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_N_NR);
	td->ts.io_u_lat_n[idx]++;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long long usec)
{
	int idx = 0;

	assert(usec < 1000 && usec >= 1);

	switch (usec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_U_NR);
	td->ts.io_u_lat_u[idx]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long long msec)
{
	int idx = 0;

	assert(msec >= 1);

	switch (msec) {
	default:
		idx = 11;
		break;
	case 1000 ... 1999:
		idx = 10;
		break;
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_M_NR);
	td->ts.io_u_lat_m[idx]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long long nsec)
{
	if (nsec < 1000)
		io_u_mark_lat_nsec(td, nsec);
	else if (nsec < 1000000)
		io_u_mark_lat_usec(td, nsec / 1000);
	else
		io_u_mark_lat_msec(td, nsec / 1000000);
}
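
/*
 * Pick a file index according to the configured file_service_type:
 * purely random, or zipf/pareto/gauss distributed.
 */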
static unsigned int __get_next_fileno_rand(struct thread_data *td)
{
	unsigned long fileno;

	if (td->o.file_service_type == FIO_FSERVICE_RANDOM) {
		uint64_t frand_max = rand_max(&td->next_file_state);
		unsigned long r;

		r = __rand(&td->next_file_state);
		return (unsigned int) ((double) td->o.nr_files
				* (r / (frand_max + 1.0)));
	}

	if (td->o.file_service_type == FIO_FSERVICE_ZIPF)
		fileno = zipf_next(&td->next_file_zipf);
	else if (td->o.file_service_type == FIO_FSERVICE_PARETO)
		fileno = pareto_next(&td->next_file_zipf);
	else if (td->o.file_service_type == FIO_FSERVICE_GAUSS)
		fileno = gauss_next(&td->next_file_gauss);
	else {
		log_err("fio: bad file service type: %d\n", td->o.file_service_type);
		assert(0);
		return 0;
	}

	return fileno >> FIO_FSERVICE_SHIFT;
}

/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td,
					   enum fio_file_flags goodf,
					   enum fio_file_flags badf)
{
	struct fio_file *f;
	int fno;

	do {
		int opened = 0;

		fno = __get_next_fileno_rand(td);

		f = td->files[fno];
		if (fio_file_done(f))
			continue;

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err)
				continue;
			opened = 1;
		}

		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
			dprint(FD_FILE, "get_next_file_rand: %p\n", f);
			return f;
		}
		if (opened)
			td_io_close_file(td, f);
	} while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
					 int badf)
{
	unsigned int old_next_file = td->next_file;
	struct fio_file *f;

	do {
		int opened = 0;

		f = td->files[td->next_file];

		td->next_file++;
		if (td->next_file >= td->o.nr_files)
			td->next_file = 0;

		dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
		if (fio_file_done(f)) {
			f = NULL;
			continue;
		}

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err) {
				dprint(FD_FILE, "error %d on open of %s\n",
					err, f->file_name);
				f = NULL;
				continue;
			}
			opened = 1;
		}

		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
				f->flags);
		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
			break;

		if (opened)
			td_io_close_file(td, f);

		f = NULL;
	} while (td->next_file != old_next_file);

	dprint(FD_FILE, "get_next_file_rr: %p\n", f);
	return f;
}

static struct fio_file *__get_next_file(struct thread_data *td)
{
	struct fio_file *f;

	assert(td->o.nr_files <= td->files_index);

	if (td->nr_done_files >= td->o.nr_files) {
		dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
				" nr_files=%d\n", td->nr_open_files,
						  td->nr_done_files,
						  td->o.nr_files);
		return NULL;
	}

	f = td->file_service_file;
	if (f && fio_file_open(f) && !fio_file_closing(f)) {
		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
			goto out;
		if (td->file_service_left--)
			goto out;
	}

	if (td->o.file_service_type == FIO_FSERVICE_RR ||
	    td->o.file_service_type == FIO_FSERVICE_SEQ)
		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
	else
		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);

	if (IS_ERR(f))
		return f;

	td->file_service_file = f;
	td->file_service_left = td->file_service_nr - 1;
out:
	if (f)
		dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
	else
		dprint(FD_FILE, "get_next_file: NULL\n");
	return f;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
	return __get_next_file(td);
}

static long set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f;

	do {
		f = get_next_file(td);
		if (IS_ERR_OR_NULL(f))
			return PTR_ERR(f);

		io_u->file = f;
		get_file(f);

		if (!fill_io_u(td, io_u))
			break;

		if (io_u->post_submit) {
			io_u->post_submit(io_u, false);
			io_u->post_submit = NULL;
		}

		put_file_log(td, f);
		td_io_close_file(td, f);
		io_u->file = NULL;
		if (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)
			fio_file_reset(td, f);
		else {
			fio_file_set_done(f);
			td->nr_done_files++;
			dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
					td->nr_done_files, td->o.nr_files);
		}
	} while (1);

	return 0;
}

static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
		      unsigned long long tnsec, unsigned long long max_nsec)
{
	if (!td->error)
		log_err("fio: latency of %llu nsec exceeds specified max (%llu nsec)\n", tnsec, max_nsec);
	td_verror(td, ETIMEDOUT, "max latency exceeded");
	icd->error = ETIMEDOUT;
}
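
/*
 * Start a new latency measurement window from the current time and
 * IO count.
 */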
static void lat_new_cycle(struct thread_data *td)
{
	fio_gettime(&td->latency_ts, NULL);
	td->latency_ios = ddir_rw_sum(td->io_blocks);
	td->latency_failed = 0;
}

/*
 * We had an IO outside the latency target. Reduce the queue depth. If we
 * are at QD=1, then it's time to give up.
 */
static bool __lat_target_failed(struct thread_data *td)
{
	if (td->latency_qd == 1)
		return true;

	td->latency_qd_high = td->latency_qd;

	if (td->latency_qd == td->latency_qd_low)
		td->latency_qd_low--;

	td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;

	dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * When we ramp QD down, quiesce existing IO to prevent
	 * a storm of ramp downs due to pending higher depth.
	 */
	io_u_quiesce(td);
	lat_new_cycle(td);
	return false;
}

static bool lat_target_failed(struct thread_data *td)
{
	if (td->o.latency_percentile.u.f == 100.0)
		return __lat_target_failed(td);

	td->latency_failed++;
	return false;
}

void lat_target_init(struct thread_data *td)
{
	td->latency_end_run = 0;

	if (td->o.latency_target) {
		dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target);
		fio_gettime(&td->latency_ts, NULL);
		td->latency_qd = 1;
		td->latency_qd_high = td->o.iodepth;
		td->latency_qd_low = 1;
		td->latency_ios = ddir_rw_sum(td->io_blocks);
	} else
		td->latency_qd = td->o.iodepth;
}

void lat_target_reset(struct thread_data *td)
{
	if (!td->latency_end_run)
		lat_target_init(td);
}

static void lat_target_success(struct thread_data *td)
{
	const unsigned int qd = td->latency_qd;
	struct thread_options *o = &td->o;

	td->latency_qd_low = td->latency_qd;

	/*
	 * If we haven't failed yet, we double up to a failing value instead
	 * of bisecting from highest possible queue depth. If we have set
	 * a limit other than td->o.iodepth, bisect between that.
	 */
	if (td->latency_qd_high != o->iodepth)
		td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2;
	else
		td->latency_qd *= 2;

	if (td->latency_qd > o->iodepth)
		td->latency_qd = o->iodepth;

	dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * Same as last one, we are done. Let it run a latency cycle, so
	 * we get only the results from the targeted depth.
	 */
	if (td->latency_qd == qd) {
		if (td->latency_end_run) {
			dprint(FD_RATE, "We are done\n");
			td->done = 1;
		} else {
			dprint(FD_RATE, "Quiesce and final run\n");
			io_u_quiesce(td);
			td->latency_end_run = 1;
			reset_all_stats(td);
			reset_io_stats(td);
		}
	}

	lat_new_cycle(td);
}

/*
 * Check if we can bump the queue depth
 */
void lat_target_check(struct thread_data *td)
{
	uint64_t usec_window;
	uint64_t ios;
	double success_ios;

	usec_window = utime_since_now(&td->latency_ts);
	if (usec_window < td->o.latency_window)
		return;

	ios = ddir_rw_sum(td->io_blocks) - td->latency_ios;
	success_ios = (double) (ios - td->latency_failed) / (double) ios;
	success_ios *= 100.0;

	dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f);

	if (success_ios >= td->o.latency_percentile.u.f)
		lat_target_success(td);
	else
		__lat_target_failed(td);
}

/*
 * If latency target is enabled, we might be ramping up or down and not
 * using the full queue depth available.
 */
bool queue_full(const struct thread_data *td)
{
	const int qempty = io_u_qempty(&td->io_u_freelist);

	if (qempty)
		return true;
	if (!td->o.latency_target)
		return false;

	return td->cur_depth >= td->latency_qd;
}
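
/*
 * Return a free io_u, preferring requeued entries. If the pool is empty
 * and async (verify) offload is used, wait for one to be freed.
 */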
struct io_u *__get_io_u(struct thread_data *td)
{
	const bool needs_lock = td_async_processing(td);
	struct io_u *io_u = NULL;
	int ret;

	if (td->stop_io)
		return NULL;

	if (needs_lock)
		__td_io_u_lock(td);

again:
	if (!io_u_rempty(&td->io_u_requeues))
		io_u = io_u_rpop(&td->io_u_requeues);
	else if (!queue_full(td)) {
		io_u = io_u_qpop(&td->io_u_freelist);

		io_u->file = NULL;
		io_u->buflen = 0;
		io_u->resid = 0;
		io_u->end_io = NULL;
	}

	if (io_u) {
		assert(io_u->flags & IO_U_F_FREE);
		io_u_clear(td, io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
				 IO_U_F_TRIMMED | IO_U_F_BARRIER |
				 IO_U_F_VER_LIST);

		io_u->error = 0;
		io_u->acct_ddir = -1;
		td->cur_depth++;
		assert(!(td->flags & TD_F_CHILD));
		io_u_set(td, io_u, IO_U_F_IN_CUR_DEPTH);
		io_u->ipo = NULL;
	} else if (td_async_processing(td)) {
		/*
		 * We ran out, wait for async verify threads to finish and
		 * return one
		 */
		assert(!(td->flags & TD_F_CHILD));
		ret = pthread_cond_wait(&td->free_cond, &td->io_u_lock);
		assert(ret == 0);
		if (td->error)
			return NULL;
		goto again;
	}

	if (needs_lock)
		__td_io_u_unlock(td);

	return io_u;
}
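
/*
 * If trim backlog is enabled, see if it's time to turn this io_u into a
 * trim of previously written blocks. Returns true if io_u was filled.
 */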
static bool check_get_trim(struct thread_data *td, struct io_u *io_u)
{
	if (!(td->flags & TD_F_TRIM_BACKLOG))
		return false;
	if (!td->trim_entries)
		return false;

	if (td->trim_batch) {
		td->trim_batch--;
		if (get_next_trim(td, io_u))
			return true;
	} else if (!(td->io_hist_len % td->o.trim_backlog) &&
		   td->last_ddir != DDIR_READ) {
		td->trim_batch = td->o.trim_batch;
		if (!td->trim_batch)
			td->trim_batch = td->o.trim_backlog;
		if (get_next_trim(td, io_u))
			return true;
	}

	return false;
}

static bool check_get_verify(struct thread_data *td, struct io_u *io_u)
{
	if (!(td->flags & TD_F_VER_BACKLOG))
		return false;

	if (td->io_hist_len) {
		int get_verify = 0;

		if (td->verify_batch)
			get_verify = 1;
		else if (!(td->io_hist_len % td->o.verify_backlog) &&
			 td->last_ddir != DDIR_READ) {
			td->verify_batch = td->o.verify_batch;
			if (!td->verify_batch)
				td->verify_batch = td->o.verify_backlog;
			get_verify = 1;
		}

		if (get_verify && !get_next_verify(td, io_u)) {
			td->verify_batch--;
			return true;
		}
	}

	return false;
}

/*
 * Fill offset and start time into the buffer content, to prevent too
 * easy compressible data for simple de-dupe attempts. Do this for every
 * 512b block in the range, since that should be the smallest block size
 * we can expect from a device.
 */
static void small_content_scramble(struct io_u *io_u)
{
	unsigned long long i, nr_blocks = io_u->buflen >> 9;
	unsigned int offset;
	uint64_t boffset, *iptr;
	char *p;

	if (!nr_blocks)
		return;

	p = io_u->xfer_buf;
	boffset = io_u->offset;

	if (io_u->buf_filled_len)
		io_u->buf_filled_len = 0;

	/*
	 * Generate random index between 0..7. We do chunks of 512b, if
	 * we assume a cacheline is 64 bytes, then we have 8 of those.
	 * Scramble content within the blocks in the same cacheline to
	 * speed things up.
	 */
	offset = (io_u->start_time.tv_nsec ^ boffset) & 7;

	for (i = 0; i < nr_blocks; i++) {
		/*
		 * Fill offset into start of cacheline, time into end
		 * of cacheline
		 */
		iptr = (void *) p + (offset << 6);
		*iptr = boffset;

		iptr = (void *) p + 64 - 2 * sizeof(uint64_t);
		iptr[0] = io_u->start_time.tv_sec;
		iptr[1] = io_u->start_time.tv_nsec;

		p += 512;
		boffset += 512;
	}
}

/*
 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
 * etc. The returned io_u is fully ready to be prepped, populated and submitted.
 */
struct io_u *get_io_u(struct thread_data *td)
{
	struct fio_file *f;
	struct io_u *io_u;
	int do_scramble = 0;
	long ret = 0;

	io_u = __get_io_u(td);
	if (!io_u) {
		dprint(FD_IO, "__get_io_u failed\n");
		return NULL;
	}

	if (check_get_verify(td, io_u))
		goto out;
	if (check_get_trim(td, io_u))
		goto out;

	/*
	 * from a requeue, io_u already setup
	 */
	if (io_u->file)
		goto out;

	/*
	 * If using an iolog, grab next piece if any available.
	 */
	if (td->flags & TD_F_READ_IOLOG) {
		if (read_iolog_get(td, io_u))
			goto err_put;
	} else if (set_io_u_file(td, io_u)) {
		ret = -EBUSY;
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	f = io_u->file;
	if (!f) {
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	assert(fio_file_open(f));

	if (ddir_rw(io_u->ddir)) {
		if (!io_u->buflen && !td_ioengine_flagged(td, FIO_NOIO)) {
			dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
			goto err_put;
		}

		f->last_start[io_u->ddir] = io_u->offset;
		f->last_pos[io_u->ddir] = io_u->offset + io_u->buflen;

		if (io_u->ddir == DDIR_WRITE) {
			if (td->flags & TD_F_REFILL_BUFFERS) {
				io_u_fill_buffer(td, io_u,
					td->o.min_bs[DDIR_WRITE],
					io_u->buflen);
			} else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
				   !(td->flags & TD_F_COMPRESS) &&
				   !(td->flags & TD_F_DO_VERIFY))
				do_scramble = 1;
		} else if (io_u->ddir == DDIR_READ) {
			/*
			 * Reset the buf_filled parameters so next time if the
			 * buffer is used for writes it is refilled.
			 */
			io_u->buf_filled_len = 0;
		}
	}

	/*
	 * Set io data pointers.
	 */
	io_u->xfer_buf = io_u->buf;
	io_u->xfer_buflen = io_u->buflen;

out:
	assert(io_u->file);
	if (!td_io_prep(td, io_u)) {
		if (!td->o.disable_lat)
			fio_gettime(&io_u->start_time, NULL);

		if (do_scramble)
			small_content_scramble(io_u);

		return io_u;
	}
err_put:
	dprint(FD_IO, "get_io_u failed\n");
	put_io_u(td, io_u);
	return ERR_PTR(ret);
}

static void __io_u_log_error(struct thread_data *td, struct io_u *io_u)
{
	enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);

	if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
		return;

	log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%llu\n",
		io_u->file ? " on file " : "",
		io_u->file ? io_u->file->file_name : "",
		strerror(io_u->error),
		io_ddir_name(io_u->ddir),
		io_u->offset, io_u->xfer_buflen);

	if (td->io_ops->errdetails) {
		char *err = td->io_ops->errdetails(io_u);

		log_err("fio: %s\n", err);
		free(err);
	}

	if (!td->error)
		td_verror(td, io_u->error, "io_u error");
}

void io_u_log_error(struct thread_data *td, struct io_u *io_u)
{
	__io_u_log_error(td, io_u);
	if (td->parent)
		__io_u_log_error(td->parent, io_u);
}

static inline bool gtod_reduce(struct thread_data *td)
{
	return (td->o.disable_clat && td->o.disable_slat && td->o.disable_bw)
		|| td->o.gtod_reduce;
}

static void trim_block_info(struct thread_data *td, struct io_u *io_u)
{
	uint32_t *info = io_u_block_info(td, io_u);

	if (BLOCK_INFO_STATE(*info) >= BLOCK_STATE_TRIM_FAILURE)
		return;

	*info = BLOCK_INFO(BLOCK_STATE_TRIMMED, BLOCK_INFO_TRIMS(*info) + 1);
}
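
/*
 * Record latency, bandwidth and IOPS samples for a completed io_u, and
 * enforce any configured latency target or maximum.
 */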
static void account_io_completion(struct thread_data *td, struct io_u *io_u,
				  struct io_completion_data *icd,
				  const enum fio_ddir idx, unsigned int bytes)
{
	const int no_reduce = !gtod_reduce(td);
	unsigned long long llnsec = 0;

	if (td->parent)
		td = td->parent;

	if (!td->o.stats || td_ioengine_flagged(td, FIO_NOSTATS))
		return;

	if (no_reduce)
		llnsec = ntime_since(&io_u->issue_time, &icd->time);

	if (!td->o.disable_lat) {
		unsigned long long tnsec;

		tnsec = ntime_since(&io_u->start_time, &icd->time);
		add_lat_sample(td, idx, tnsec, bytes, io_u->offset);

		if (td->flags & TD_F_PROFILE_OPS) {
			struct prof_io_ops *ops = &td->prof_io_ops;

			if (ops->io_u_lat)
				icd->error = ops->io_u_lat(td, tnsec);
		}

		if (td->o.max_latency && tnsec > td->o.max_latency)
			lat_fatal(td, icd, tnsec, td->o.max_latency);
		if (td->o.latency_target && tnsec > td->o.latency_target) {
			if (lat_target_failed(td))
				lat_fatal(td, icd, tnsec, td->o.latency_target);
		}
	}

	if (ddir_rw(idx)) {
		if (!td->o.disable_clat) {
			add_clat_sample(td, idx, llnsec, bytes, io_u->offset);
			io_u_mark_latency(td, llnsec);
		}

		if (!td->o.disable_bw && per_unit_log(td->bw_log))
			add_bw_sample(td, io_u, bytes, llnsec);

		if (no_reduce && per_unit_log(td->iops_log))
			add_iops_sample(td, io_u, bytes);
	} else if (ddir_sync(idx) && !td->o.disable_clat)
		add_sync_clat_sample(&td->ts, llnsec);

	if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM)
		trim_block_info(td, io_u);
}

static void file_log_write_comp(const struct thread_data *td, struct fio_file *f,
				uint64_t offset, unsigned int bytes)
{
	int idx;

	if (!f)
		return;

	if (f->first_write == -1ULL || offset < f->first_write)
		f->first_write = offset;
	if (f->last_write == -1ULL || ((offset + bytes) > f->last_write))
		f->last_write = offset + bytes;

	if (!f->last_write_comp)
		return;

	idx = f->last_write_idx++;
	f->last_write_comp[idx] = offset;
	if (f->last_write_idx == td->o.iodepth)
		f->last_write_idx = 0;
}
b2b3eefe
JA
1893static bool should_account(struct thread_data *td)
1894{
1895 return ramp_time_over(td) && (td->runstate == TD_RUNNING ||
1896 td->runstate == TD_VERIFYING);
1897}

static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
			 struct io_completion_data *icd)
{
	struct io_u *io_u = *io_u_ptr;
	enum fio_ddir ddir = io_u->ddir;
	struct fio_file *f = io_u->file;

	dprint_io_u(io_u, "complete");

	assert(io_u->flags & IO_U_F_FLIGHT);
	io_u_clear(td, io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK);

	/*
	 * Mark IO ok to verify
	 */
	if (io_u->ipo) {
		/*
		 * Remove errored entry from the verification list
		 */
		if (io_u->error)
			unlog_io_piece(td, io_u);
		else {
			io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
			write_barrier();
		}
	}

	if (ddir_sync(ddir)) {
		td->last_was_sync = true;
		if (f) {
			f->first_write = -1ULL;
			f->last_write = -1ULL;
		}
		if (should_account(td))
			account_io_completion(td, io_u, icd, ddir, io_u->buflen);
		return;
	}

	td->last_was_sync = false;
	td->last_ddir = ddir;

	if (!io_u->error && ddir_rw(ddir)) {
		unsigned long long bytes = io_u->buflen - io_u->resid;
		int ret;

		td->io_blocks[ddir]++;
		td->io_bytes[ddir] += bytes;

		if (!(io_u->flags & IO_U_F_VER_LIST)) {
			td->this_io_blocks[ddir]++;
			td->this_io_bytes[ddir] += bytes;
		}

		if (ddir == DDIR_WRITE)
			file_log_write_comp(td, f, io_u->offset, bytes);

		if (should_account(td))
			account_io_completion(td, io_u, icd, ddir, bytes);

		icd->bytes_done[ddir] += bytes;

		if (io_u->end_io) {
			ret = io_u->end_io(td, io_u_ptr);
			io_u = *io_u_ptr;
			if (ret && !icd->error)
				icd->error = ret;
		}
	} else if (io_u->error) {
		icd->error = io_u->error;
		io_u_log_error(td, io_u);
	}
	if (icd->error) {
		enum error_type_bit eb = td_error_type(ddir, icd->error);

		if (!td_non_fatal_error(td, eb, icd->error))
			return;

		/*
		 * If there is a non_fatal error, then add to the error count
		 * and clear all the errors.
		 */
		update_error_count(td, icd->error);
		td_clear_error(td);
		icd->error = 0;
		if (io_u)
			io_u->error = 0;
	}
}
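
/*
 * io_u->end_io, invoked above, may flag a deferred error that is folded
 * into icd->error after the fact. A hypothetical callback matching the
 * int (*)(struct thread_data *, struct io_u **) shape used here:
 */
static int ex_end_io(struct thread_data *td, struct io_u **io_u_ptr)
{
	struct io_u *io_u = *io_u_ptr;

	/* e.g. treat any residual (short transfer) as an I/O error */
	if (io_u->resid)
		return EIO;

	return 0;
}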

static void init_icd(struct thread_data *td, struct io_completion_data *icd,
		     int nr)
{
	int ddir;

	if (!gtod_reduce(td))
		fio_gettime(&icd->time, NULL);

	icd->nr = nr;

	icd->error = 0;
	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
		icd->bytes_done[ddir] = 0;
}

static void ios_completed(struct thread_data *td,
			  struct io_completion_data *icd)
{
	struct io_u *io_u;
	int i;

	for (i = 0; i < icd->nr; i++) {
		io_u = td->io_ops->event(td, i);

		io_completed(td, &io_u, icd);

		if (io_u)
			put_io_u(td, io_u);
	}
}

/*
 * Complete a single io_u for the sync engines.
 */
int io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
{
	struct io_completion_data icd;
	int ddir;

	init_icd(td, &icd, 1);
	io_completed(td, &io_u, &icd);

	if (io_u)
		put_io_u(td, io_u);

	if (icd.error) {
		td_verror(td, icd.error, "io_u_sync_complete");
		return -1;
	}

	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
		td->bytes_done[ddir] += icd.bytes_done[ddir];

	return 0;
}
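
/*
 * Hedged sketch of the caller side: sync engines complete inline, so
 * the backend pairs each queue with an immediate io_u_sync_complete()
 * (fragment modeled on fio's I/O loop, not copied from it):
 */
#if 0
	ret = td_io_queue(td, io_u);
	if (ret == FIO_Q_COMPLETED) {
		/* inline completion: account it right away */
		if (io_u_sync_complete(td, io_u) < 0)
			return -1;
	}
#endif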

/*
 * Called to complete min_events number of io for the async engines.
 */
int io_u_queued_complete(struct thread_data *td, int min_evts)
{
	struct io_completion_data icd;
	struct timespec *tvp = NULL;
	int ret, ddir;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };

	dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts);

	if (!min_evts)
		tvp = &ts;
	else if (min_evts > td->cur_depth)
		min_evts = td->cur_depth;

	/*
	 * No worries, td_io_getevents fixes min and max if they are
	 * set incorrectly.
	 */
	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete_max, tvp);
	if (ret < 0) {
		td_verror(td, -ret, "td_io_getevents");
		return ret;
	} else if (!ret)
		return ret;

	init_icd(td, &icd, ret);
	ios_completed(td, &icd);
	if (icd.error) {
		td_verror(td, icd.error, "io_u_queued_complete");
		return -1;
	}

	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
		td->bytes_done[ddir] += icd.bytes_done[ddir];

	return ret;
}
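
/*
 * Usage note for the helper above, as an illustrative fragment:
 * min_evts == 0 is a non-blocking poll (tvp points at a zero timeout),
 * while a positive value waits for at least that many completions,
 * capped to the current depth. Hypothetical caller:
 */
#if 0
	/* queue saturated: wait for one completion before submitting more */
	if (td->cur_depth >= td->o.iodepth)
		if (io_u_queued_complete(td, 1) < 0)
			return -1;
#endif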

/*
 * Call when io_u is really queued, to update the submission latency.
 */
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
	if (!td->o.disable_slat && ramp_time_over(td) && td->o.stats) {
		unsigned long slat_time;

		slat_time = ntime_since(&io_u->start_time, &io_u->issue_time);

		if (td->parent)
			td = td->parent;

		add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
				io_u->offset);
	}
}

/*
 * See if we should reuse the last seed, if dedupe is enabled
 */
static struct frand_state *get_buf_state(struct thread_data *td)
{
	unsigned int v;

	if (!td->o.dedupe_percentage)
		return &td->buf_state;
	else if (td->o.dedupe_percentage == 100) {
		frand_copy(&td->buf_state_prev, &td->buf_state);
		return &td->buf_state;
	}

	v = rand_between(&td->dedupe_state, 1, 100);

	if (v <= td->o.dedupe_percentage)
		return &td->buf_state_prev;

	return &td->buf_state;
}

static void save_buf_state(struct thread_data *td, struct frand_state *rs)
{
	if (td->o.dedupe_percentage == 100)
		frand_copy(rs, &td->buf_state_prev);
	else if (rs == &td->buf_state)
		frand_copy(&td->buf_state_prev, rs);
}
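
/*
 * The pair above implements "reuse the previous RNG state N% of the
 * time" so freshly filled buffers repeat earlier contents and dedupe
 * against them. A compact standalone model of that decision, where
 * roll is a uniform draw in [1, 100] as rand_between() produces:
 */
static bool ex_reuse_prev_seed(unsigned int dedupe_pct, unsigned int roll)
{
	if (!dedupe_pct)
		return false;		/* dedupe disabled: always fresh data */

	return roll <= dedupe_pct;	/* repeat old data this often */
}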

void fill_io_buffer(struct thread_data *td, void *buf, unsigned long long min_write,
		    unsigned long long max_bs)
{
	struct thread_options *o = &td->o;

	if (o->mem_type == MEM_CUDA_MALLOC)
		return;

	if (o->compress_percentage || o->dedupe_percentage) {
		unsigned int perc = td->o.compress_percentage;
		struct frand_state *rs;
		unsigned long long left = max_bs;
		unsigned long long this_write;

		do {
			rs = get_buf_state(td);

			min_write = min(min_write, left);

			if (perc) {
				this_write = min_not_zero(min_write,
					(unsigned long long) td->o.compress_chunk);

				fill_random_buf_percentage(rs, buf, perc,
					this_write, this_write,
					o->buffer_pattern,
					o->buffer_pattern_bytes);
			} else {
				fill_random_buf(rs, buf, min_write);
				this_write = min_write;
			}

			buf += this_write;
			left -= this_write;
			save_buf_state(td, rs);
		} while (left);
	} else if (o->buffer_pattern_bytes)
		fill_buffer_pattern(td, buf, max_bs);
	else if (o->zero_buffers)
		memset(buf, 0, max_bs);
	else
		fill_random_buf(get_buf_state(td), buf, max_bs);
}

/*
 * "randomly" fill the buffer contents
 */
void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
		      unsigned long long min_write, unsigned long long max_bs)
{
	io_u->buf_filled_len = 0;
	fill_io_buffer(td, io_u->buf, min_write, max_bs);
}
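
/*
 * Hedged example of when the fill runs: with buffer refilling enabled,
 * the write path regenerates the payload before each submission so the
 * configured compression/dedupe ratios hold per IO (fragment modeled on
 * fio's get_io_u() behavior under TD_F_REFILL_BUFFERS, not copied from it):
 */
#if 0
	if (io_u->ddir == DDIR_WRITE && (td->flags & TD_F_REFILL_BUFFERS))
		io_u_fill_buffer(td, io_u, td->o.min_bs[DDIR_WRITE],
				 io_u->buflen);
#endif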

static int do_sync_file_range(const struct thread_data *td,
			      struct fio_file *f)
{
	off64_t offset, nbytes;

	offset = f->first_write;
	nbytes = f->last_write - f->first_write;

	if (!nbytes)
		return 0;

	return sync_file_range(f->fd, offset, nbytes, td->o.sync_file_range);
}
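
/*
 * td->o.sync_file_range carries the Linux sync_file_range(2) flag bits.
 * For reference, a "write out and wait" combination looks like this
 * (standalone sketch; Linux-only, needs _GNU_SOURCE and <fcntl.h>):
 */
static int ex_flush_written_range(int fd, off64_t offset, off64_t nbytes)
{
	return sync_file_range(fd, offset, nbytes,
			       SYNC_FILE_RANGE_WAIT_BEFORE |
			       SYNC_FILE_RANGE_WRITE |
			       SYNC_FILE_RANGE_WAIT_AFTER);
}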

int do_io_u_sync(const struct thread_data *td, struct io_u *io_u)
{
	int ret;

	if (io_u->ddir == DDIR_SYNC) {
		ret = fsync(io_u->file->fd);
	} else if (io_u->ddir == DDIR_DATASYNC) {
#ifdef CONFIG_FDATASYNC
		ret = fdatasync(io_u->file->fd);
#else
		ret = io_u->xfer_buflen;
		io_u->error = EINVAL;
#endif
	} else if (io_u->ddir == DDIR_SYNC_FILE_RANGE)
		ret = do_sync_file_range(td, io_u->file);
	else {
		ret = io_u->xfer_buflen;
		io_u->error = EINVAL;
	}

	if (ret < 0)
		io_u->error = errno;

	return ret;
}
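
/*
 * Error convention recap for do_io_u_sync(): a negative syscall return
 * lands in io_u->error as errno; unsupported ddirs set EINVAL but still
 * return xfer_buflen. A hypothetical caller checking the result:
 */
#if 0
	ret = do_io_u_sync(td, io_u);
	if (io_u->error)
		log_err("fio: sync failed: %s\n", strerror(io_u->error));
#endif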

int do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
{
#ifndef FIO_HAVE_TRIM
	io_u->error = EINVAL;
	return 0;
#else
	struct fio_file *f = io_u->file;
	int ret;

	ret = os_trim(f, io_u->offset, io_u->xfer_buflen);
	if (!ret)
		return io_u->xfer_buflen;

	io_u->error = ret;
	return 0;
#endif
}