#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"
#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
#include "lib/axmap.h"
#include "err.h"
#include "lib/pow2.h"
#include "minmax.h"

struct io_completion_data {
	int nr;				/* input */

	int error;			/* output */
	uint64_t bytes_done[DDIR_RWDIR_CNT];	/* output */
	struct timespec time;		/* output */
};

/*
 * The ->io_axmap contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static bool random_map_free(struct fio_file *f, const uint64_t block)
{
	return !axmap_isset(f->io_axmap, block);
}

/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned int min_bs = td->o.min_bs[io_u->ddir];
	struct fio_file *f = io_u->file;
	unsigned int nr_blocks;
	uint64_t block;

	block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;

	if (!(io_u->flags & IO_U_F_BUSY_OK))
		nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);

	if ((nr_blocks * min_bs) < io_u->buflen)
		io_u->buflen = nr_blocks * min_bs;
}

static uint64_t last_block(struct thread_data *td, struct fio_file *f,
			   enum fio_ddir ddir)
{
	uint64_t max_blocks;
	uint64_t max_size;

	assert(ddir_rw(ddir));

	/*
	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
	 * -> not for now since there is code assuming it could go either.
	 */
	max_size = f->io_size;
	if (max_size > f->real_file_size)
		max_size = f->real_file_size;

	if (td->o.zone_range)
		max_size = td->o.zone_range;

	if (td->o.min_bs[ddir] > td->o.ba[ddir])
		max_size -= td->o.min_bs[ddir] - td->o.ba[ddir];

	max_blocks = max_size / (uint64_t) td->o.ba[ddir];
	if (!max_blocks)
		return 0;

	return max_blocks;
}

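/*
 * Worked example (illustrative numbers, not from the source): with
 * io_size=1M, ba=4k and no zone_range, last_block() returns
 * 1M / 4k = 256, so block indices 0..255 are addressable and the byte
 * offset is later derived as block * ba.
 */
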
struct rand_off {
	struct flist_head list;
	uint64_t off;
};

static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				  enum fio_ddir ddir, uint64_t *b,
				  uint64_t lastb)
{
	uint64_t r;

	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE ||
	    td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64) {

		r = __rand(&td->random_state);

		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);

		*b = lastb * (r / (rand_max(&td->random_state) + 1.0));
	} else {
		uint64_t off = 0;

		assert(fio_file_lfsr(f));

		if (lfsr_next(&f->lfsr, &off))
			return 1;

		*b = off;
	}

	/*
	 * if we are not maintaining a random map, we are done.
	 */
	if (!file_randommap(td, f))
		goto ret;

	/*
	 * calculate map offset and check if it's free
	 */
	if (random_map_free(f, *b))
		goto ret;

	dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
	       (unsigned long long) *b);

	*b = axmap_next_free(f->io_axmap, *b);
	if (*b == (uint64_t) -1ULL)
		return 1;
ret:
	return 0;
}

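/*
 * Note on the tausworthe path above: r / (rand_max + 1.0) is uniform in
 * [0, 1), so multiplying by lastb yields a block index uniform over
 * 0..lastb-1 without the modulo bias a simple r % lastb would have.
 */
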
static int __get_next_rand_offset_zipf(struct thread_data *td,
				       struct fio_file *f, enum fio_ddir ddir,
				       uint64_t *b)
{
	*b = zipf_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_pareto(struct thread_data *td,
					 struct fio_file *f, enum fio_ddir ddir,
					 uint64_t *b)
{
	*b = pareto_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_gauss(struct thread_data *td,
					struct fio_file *f, enum fio_ddir ddir,
					uint64_t *b)
{
	*b = gauss_next(&f->gauss);
	return 0;
}

static int __get_next_rand_offset_zoned_abs(struct thread_data *td,
					    struct fio_file *f,
					    enum fio_ddir ddir, uint64_t *b)
{
	struct zone_split_index *zsi;
	uint64_t lastb, send, stotal;
	unsigned int v;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	if (!td->o.zone_split_nr[ddir]) {
bail:
		return __get_next_rand_offset(td, f, ddir, b, lastb);
	}

	/*
	 * Generate a value, v, between 1 and 100, both inclusive
	 */
	v = rand32_between(&td->zone_state, 1, 100);

	/*
	 * Find our generated table. 'send' is the end block of this zone,
	 * 'stotal' is our start offset.
	 */
	zsi = &td->zone_state_index[ddir][v - 1];
	stotal = zsi->size_prev / td->o.ba[ddir];
	send = zsi->size / td->o.ba[ddir];

	/*
	 * Should never happen
	 */
	if (send == -1U) {
		if (!fio_did_warn(FIO_WARN_ZONED_BUG))
			log_err("fio: bug in zoned generation\n");
		goto bail;
	} else if (send > lastb) {
		/*
		 * This happens if the user specifies ranges that exceed
		 * the file/device size. We can't handle that gracefully,
		 * so error and exit.
		 */
		log_err("fio: zoned_abs sizes exceed file size\n");
		return 1;
	}

	/*
	 * Generate index from 0..send-stotal
	 */
	if (__get_next_rand_offset(td, f, ddir, b, send - stotal) == 1)
		return 1;

	*b += stotal;
	return 0;
}

static int __get_next_rand_offset_zoned(struct thread_data *td,
					struct fio_file *f, enum fio_ddir ddir,
					uint64_t *b)
{
	unsigned int v, send, stotal;
	uint64_t offset, lastb;
	struct zone_split_index *zsi;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	if (!td->o.zone_split_nr[ddir]) {
bail:
		return __get_next_rand_offset(td, f, ddir, b, lastb);
	}

	/*
	 * Generate a value, v, between 1 and 100, both inclusive
	 */
	v = rand32_between(&td->zone_state, 1, 100);

	zsi = &td->zone_state_index[ddir][v - 1];
	stotal = zsi->size_perc_prev;
	send = zsi->size_perc;

	/*
	 * Should never happen
	 */
	if (send == -1U) {
		if (!fio_did_warn(FIO_WARN_ZONED_BUG))
			log_err("fio: bug in zoned generation\n");
		goto bail;
	}

	/*
	 * 'send' is some percentage below or equal to 100 that
	 * marks the end of the current IO range. 'stotal' marks
	 * the start, in percent.
	 */
	if (stotal)
		offset = stotal * lastb / 100ULL;
	else
		offset = 0;

	lastb = lastb * (send - stotal) / 100ULL;

	/*
	 * Generate index from 0..send-of-lastb
	 */
	if (__get_next_rand_offset(td, f, ddir, b, lastb) == 1)
		return 1;

	/*
	 * Add our start offset, if any
	 */
	if (offset)
		*b += offset;

	return 0;
}

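/*
 * Illustration of the two zoned modes: a job using, e.g.,
 * random_distribution=zoned:60/10:40/90 directs 60% of accesses to the
 * first 10% of the range and 40% to the remaining 90%. The generated v
 * in 1..100 indexes the per-percentile table; the chosen slot's start
 * and end (percentages here, absolute sizes in the _abs variant) bound
 * the block index produced by __get_next_rand_offset().
 */
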
static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
{
	struct rand_off *r1 = flist_entry(a, struct rand_off, list);
	struct rand_off *r2 = flist_entry(b, struct rand_off, list);

	return r1->off - r2->off;
}

static int get_off_from_method(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM) {
		uint64_t lastb;

		lastb = last_block(td, f, ddir);
		if (!lastb)
			return 1;

		return __get_next_rand_offset(td, f, ddir, b, lastb);
	} else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
		return __get_next_rand_offset_zipf(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
		return __get_next_rand_offset_pareto(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
		return __get_next_rand_offset_gauss(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_ZONED)
		return __get_next_rand_offset_zoned(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_ZONED_ABS)
		return __get_next_rand_offset_zoned_abs(td, f, ddir, b);

	log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
	return 1;
}

/*
 * Sort the reads for a verify phase in batches of verifysort_nr, if
 * specified.
 */
static inline bool should_sort_io(struct thread_data *td)
{
	if (!td->o.verifysort_nr || !td->o.do_verify)
		return false;
	if (!td_random(td))
		return false;
	if (td->runstate != TD_VERIFYING)
		return false;
	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE ||
	    td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64)
		return false;

	return true;
}

static bool should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned int v;

	if (td->o.perc_rand[ddir] == 100)
		return true;

	v = rand32_between(&td->seq_rand_state[ddir], 1, 100);

	return v <= td->o.perc_rand[ddir];
}

static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				enum fio_ddir ddir, uint64_t *b)
{
	struct rand_off *r;
	int i, ret = 1;

	if (!should_sort_io(td))
		return get_off_from_method(td, f, ddir, b);

	if (!flist_empty(&td->next_rand_list)) {
fetch:
		r = flist_first_entry(&td->next_rand_list, struct rand_off, list);
		flist_del(&r->list);
		*b = r->off;
		free(r);
		return 0;
	}

	for (i = 0; i < td->o.verifysort_nr; i++) {
		r = malloc(sizeof(*r));

		ret = get_off_from_method(td, f, ddir, &r->off);
		if (ret) {
			free(r);
			break;
		}

		flist_add(&r->list, &td->next_rand_list);
	}

	if (ret && !i)
		return ret;

	assert(!flist_empty(&td->next_rand_list));
	flist_sort(NULL, &td->next_rand_list, flist_cmp);
	goto fetch;
}

static void loop_cache_invalidate(struct thread_data *td, struct fio_file *f)
{
	struct thread_options *o = &td->o;

	if (o->invalidate_cache && !o->odirect) {
		int fio_unused ret;

		ret = file_invalidate_cache(td, f);
	}
}

static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (!get_next_rand_offset(td, f, ddir, b))
		return 0;

	if (td->o.time_based ||
	    (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)) {
		fio_file_reset(td, f);
		if (!get_next_rand_offset(td, f, ddir, b))
			return 0;
		loop_cache_invalidate(td, f);
	}

	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
			f->file_name, (unsigned long long) f->last_pos[ddir],
			(unsigned long long) f->real_file_size);
	return 1;
}

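/*
 * Note on the retry above: for time_based (or non-uniform file service)
 * runs the random map can be exhausted before the runtime is up, so
 * fio_file_reset() re-arms the map and random state and generation
 * starts over; the cache invalidation keeps repeated reads honest.
 */
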
static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *offset)
{
	struct thread_options *o = &td->o;

	assert(ddir_rw(ddir));

	/*
	 * If we reach the end for a time based run, reset us back to 0
	 * and invalidate the cache, if we need to.
	 */
	if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
	    o->time_based) {
		f->last_pos[ddir] = f->file_offset;
		loop_cache_invalidate(td, f);
	}

	if (f->last_pos[ddir] < f->real_file_size) {
		uint64_t pos;

		/*
		 * Only rewind if we already hit the end
		 */
		if (f->last_pos[ddir] == f->file_offset &&
		    f->file_offset && o->ddir_seq_add < 0) {
			if (f->real_file_size > f->io_size)
				f->last_pos[ddir] = f->io_size;
			else
				f->last_pos[ddir] = f->real_file_size;
		}

		pos = f->last_pos[ddir] - f->file_offset;
		if (pos && o->ddir_seq_add) {
			pos += o->ddir_seq_add;

			/*
			 * If we reach beyond the end of the file
			 * with holed IO, wrap around to the
			 * beginning again. If we're doing backwards IO,
			 * wrap to the end.
			 */
			if (pos >= f->real_file_size) {
				if (o->ddir_seq_add > 0)
					pos = f->file_offset;
				else {
					if (f->real_file_size > f->io_size)
						pos = f->io_size;
					else
						pos = f->real_file_size;

					pos += o->ddir_seq_add;
				}
			}
		}

		*offset = pos;
		return 0;
	}

	return 1;
}

static int get_next_block(struct thread_data *td, struct io_u *io_u,
			  enum fio_ddir ddir, int rw_seq,
			  bool *is_random)
{
	struct fio_file *f = io_u->file;
	uint64_t b, offset;
	int ret;

	assert(ddir_rw(ddir));

	b = offset = -1ULL;

	if (rw_seq) {
		if (td_random(td)) {
			if (should_do_random(td, ddir)) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = true;
			} else {
				*is_random = false;
				io_u_set(td, io_u, IO_U_F_BUSY_OK);
				ret = get_next_seq_offset(td, f, ddir, &offset);
				if (ret)
					ret = get_next_rand_block(td, f, ddir, &b);
			}
		} else {
			*is_random = false;
			ret = get_next_seq_offset(td, f, ddir, &offset);
		}
	} else {
		io_u_set(td, io_u, IO_U_F_BUSY_OK);
		*is_random = false;

		if (td->o.rw_seq == RW_SEQ_SEQ) {
			ret = get_next_seq_offset(td, f, ddir, &offset);
			if (ret) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = false;
			}
		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
			if (f->last_start[ddir] != -1ULL)
				offset = f->last_start[ddir] - f->file_offset;
			else
				offset = 0;
			ret = 0;
		} else {
			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
			ret = 1;
		}
	}

	if (!ret) {
		if (offset != -1ULL)
			io_u->offset = offset;
		else if (b != -1ULL)
			io_u->offset = b * td->o.ba[ddir];
		else {
			log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
			ret = 1;
		}
	}

	return ret;
}

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int get_next_offset(struct thread_data *td, struct io_u *io_u,
			   bool *is_random)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	int rw_seq_hit = 0;

	assert(ddir_rw(ddir));

	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
		rw_seq_hit = 1;
		td->ddir_seq_nr = td->o.ddir_seq_nr;
	}

	if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
		return 1;

	if (io_u->offset >= f->io_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->io_size);
		return 1;
	}

	io_u->offset += f->file_offset;
	if (io_u->offset >= f->real_file_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->real_file_size);
		return 1;
	}

	return 0;
}

static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u,
			     unsigned int buflen)
{
	struct fio_file *f = io_u->file;

	return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
}

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
				    bool is_random)
{
	int ddir = io_u->ddir;
	unsigned int buflen = 0;
	unsigned int minbs, maxbs;
	uint64_t frand_max, r;
	bool power_2;

	assert(ddir_rw(ddir));

	if (td->o.bs_is_seq_rand)
		ddir = is_random ? DDIR_WRITE : DDIR_READ;

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		return minbs;

	/*
	 * If we can't satisfy the min block size from here, then fail
	 */
	if (!io_u_fits(td, io_u, minbs))
		return 0;

	frand_max = rand_max(&td->bsrange_state[ddir]);
	do {
		r = __rand(&td->bsrange_state[ddir]);

		if (!td->o.bssplit_nr[ddir]) {
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (frand_max + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			long long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if (!perc)
					break;
				if ((r / perc <= frand_max / 100ULL) &&
				    io_u_fits(td, io_u, buflen))
					break;
			}
		}

		power_2 = is_power_of_2(minbs);
		if (!td->o.bs_unaligned && power_2)
			buflen &= ~(minbs - 1);
		else if (!td->o.bs_unaligned && !power_2)
			buflen -= buflen % minbs;
	} while (!io_u_fits(td, io_u, buflen));

	return buflen;
}

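/*
 * Example of the bssplit walk above: with, say, bssplit=4k/50:64k/50,
 * the cumulative perc reaches 50 and then 100, so a random r in the
 * lower half of the range (r / 50 <= frand_max / 100) selects 4k and
 * anything above selects 64k, before min_bs alignment is applied.
 */
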
static void set_rwmix_bytes(struct thread_data *td)
{
	unsigned int diff;

	/*
	 * we do time or byte based switch. this is needed because
	 * buffered writes may issue a lot quicker than they complete,
	 * whereas reads do not.
	 */
	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
}

static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	unsigned int v;

	v = rand32_between(&td->rwmix_state, 1, 100);

	if (v <= td->o.rwmix[DDIR_READ])
		return DDIR_READ;

	return DDIR_WRITE;
}

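/*
 * Example: with rwmixread=70, v <= 70 selects DDIR_READ, so reads are
 * picked 70% of the time and writes the remaining 30% on average.
 */
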
int io_u_quiesce(struct thread_data *td)
{
	int completed = 0;

	/*
	 * We are going to sleep, ensure that we flush anything pending so
	 * as not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	if (td->io_u_queued || td->cur_depth) {
		int fio_unused ret;

		ret = td_io_commit(td);
	}

	while (td->io_u_in_flight) {
		int ret;

		ret = io_u_queued_complete(td, 1);
		if (ret > 0)
			completed += ret;
	}

	if (td->flags & TD_F_REGROW_LOGS)
		regrow_logs(td);

	return completed;
}

static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	uint64_t usec;
	uint64_t now;

	assert(ddir_rw(ddir));
	now = utime_since_now(&td->start);

	/*
	 * if rate_next_io_time is in the past, need to catch up to rate
	 */
	if (td->rate_next_io_time[ddir] <= now)
		return ddir;

	/*
	 * We are ahead of rate in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td) && td->o.rwmix[odir]) {
		/*
		 * Other direction is behind rate, switch
		 */
		if (td->rate_next_io_time[odir] <= now)
			return odir;

		/*
		 * Both directions are ahead of rate. sleep the min,
		 * switch if necessary
		 */
		if (td->rate_next_io_time[ddir] <=
		    td->rate_next_io_time[odir]) {
			usec = td->rate_next_io_time[ddir] - now;
		} else {
			usec = td->rate_next_io_time[odir] - now;
			ddir = odir;
		}
	} else
		usec = td->rate_next_io_time[ddir] - now;

	if (td->o.io_submit_mode == IO_MODE_INLINE)
		io_u_quiesce(td);

	usec_sleep(td, usec);
	return ddir;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
	enum fio_ddir ddir;

	/*
	 * See if it's time to fsync/fdatasync/sync_file_range first,
	 * and if not then move on to check regular I/Os.
	 */
	if (should_fsync(td)) {
		if (td->o.fsync_blocks && td->io_issues[DDIR_WRITE] &&
		    !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks))
			return DDIR_SYNC;

		if (td->o.fdatasync_blocks && td->io_issues[DDIR_WRITE] &&
		    !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks))
			return DDIR_DATASYNC;

		if (td->sync_file_range_nr && td->io_issues[DDIR_WRITE] &&
		    !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr))
			return DDIR_SYNC_FILE_RANGE;
	}

	if (td_rw(td)) {
		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
			/*
			 * Put a top limit on how many bytes we do for
			 * one data direction, to avoid overflowing the
			 * ranges too much
			 */
			ddir = get_rand_ddir(td);

			if (ddir != td->rwmix_ddir)
				set_rwmix_bytes(td);

			td->rwmix_ddir = ddir;
		}
		ddir = td->rwmix_ddir;
	} else if (td_read(td))
		ddir = DDIR_READ;
	else if (td_write(td))
		ddir = DDIR_WRITE;
	else if (td_trim(td))
		ddir = DDIR_TRIM;
	else
		ddir = DDIR_INVAL;

	td->rwmix_ddir = rate_ddir(td, ddir);
	return td->rwmix_ddir;
}

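/*
 * Example of the sync triggers above: with fsync=32 in the job file,
 * fsync_blocks is 32 and every 32nd issued write causes DDIR_SYNC to
 * be returned here before the regular direction selection runs.
 */
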
static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
	enum fio_ddir ddir = get_rw_ddir(td);

	if (td_trimwrite(td)) {
		struct fio_file *f = io_u->file;
		if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
			ddir = DDIR_TRIM;
		else
			ddir = DDIR_WRITE;
	}

	io_u->ddir = io_u->acct_ddir = ddir;

	if (io_u->ddir == DDIR_WRITE && td_ioengine_flagged(td, FIO_BARRIER) &&
	    td->o.barrier_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
	    td->io_issues[DDIR_WRITE])
		io_u_set(td, io_u, IO_U_F_BARRIER);
}

void put_file_log(struct thread_data *td, struct fio_file *f)
{
	unsigned int ret = put_file(td, f);

	if (ret)
		td_verror(td, ret, "file close");
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	if (td->parent)
		td = td->parent;

	td_io_u_lock(td);

	if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT))
		put_file_log(td, io_u->file);

	io_u->file = NULL;
	io_u_set(td, io_u, IO_U_F_FREE);

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
		td->cur_depth--;
		assert(!(td->flags & TD_F_CHILD));
	}
	io_u_qpush(&td->io_u_freelist, io_u);
	td_io_u_unlock(td);
	td_io_u_free_notify(td);
}

void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
	io_u_clear(td, io_u, IO_U_F_FLIGHT);
	put_io_u(td, io_u);
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;
	enum fio_ddir ddir = acct_ddir(__io_u);

	dprint(FD_IO, "requeue %p\n", __io_u);

	if (td->parent)
		td = td->parent;

	td_io_u_lock(td);

	io_u_set(td, __io_u, IO_U_F_FREE);
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
		td->io_issues[ddir]--;

	io_u_clear(td, __io_u, IO_U_F_FLIGHT);
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH) {
		td->cur_depth--;
		assert(!(td->flags & TD_F_CHILD));
	}

	io_u_rpush(&td->io_u_requeues, __io_u);
	td_io_u_unlock(td);
	td_io_u_free_notify(td);
	*io_u = NULL;
}

static void __fill_io_u_zone(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	/*
	 * See if it's time to switch to a new zone
	 */
	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
		td->zone_bytes = 0;
		f->file_offset += td->o.zone_range + td->o.zone_skip;

		/*
		 * Wrap from the beginning, if we exceed the file size
		 */
		if (f->file_offset >= f->real_file_size)
			f->file_offset = f->real_file_size - f->file_offset;
		f->last_pos[io_u->ddir] = f->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}

	/*
	 * If zone_size > zone_range, then maintain the same zone until
	 * zone_bytes >= zone_size.
	 */
	if (f->last_pos[io_u->ddir] >= (f->file_offset + td->o.zone_range)) {
		dprint(FD_IO, "io_u maintain zone offset=%" PRIu64 "/last_pos=%" PRIu64 "\n",
				f->file_offset, f->last_pos[io_u->ddir]);
		f->last_pos[io_u->ddir] = f->file_offset;
	}

	/*
	 * For random: if 'norandommap' is not set and zone_size > zone_range,
	 * the map needs to be reset, as it's done with zone_range every time.
	 */
	if ((td->zone_bytes % td->o.zone_range) == 0) {
		fio_file_reset(td, f);
	}
}

static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
	bool is_random;

	if (td_ioengine_flagged(td, FIO_NOIO))
		goto out;

	set_rw_ddir(td, io_u);

	/*
	 * fsync() or fdatasync() or trim etc, we are done
	 */
	if (!ddir_rw(io_u->ddir))
		goto out;

	/*
	 * When the file is zoned, zone_range is always positive
	 */
	if (td->o.zone_range) {
		__fill_io_u_zone(td, io_u);
	}

	/*
	 * No log, let the seq/rand engine retrieve the next buflen and
	 * position.
	 */
	if (get_next_offset(td, io_u, &is_random)) {
		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
		return 1;
	}

	io_u->buflen = get_next_buflen(td, io_u, is_random);
	if (!io_u->buflen) {
		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
		return 1;
	}

	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%lx exceeds file size=0x%llx\n",
			io_u,
			(unsigned long long) io_u->offset, io_u->buflen,
			(unsigned long long) io_u->file->real_file_size);
		return 1;
	}

	/*
	 * mark entry before potentially trimming io_u
	 */
	if (td_random(td) && file_randommap(td, io_u->file))
		mark_random_map(td, io_u);

out:
	dprint_io_u(io_u, "fill");
	td->zone_bytes += io_u->buflen;
	return 0;
}

static void __io_u_mark_map(uint64_t *map, unsigned int nr)
{
	int idx = 0;

	switch (nr) {
	default:
		idx = 6;
		break;
	case 33 ... 64:
		idx = 5;
		break;
	case 17 ... 32:
		idx = 4;
		break;
	case 9 ... 16:
		idx = 3;
		break;
	case 5 ... 8:
		idx = 2;
		break;
	case 1 ... 4:
		idx = 1;
	case 0:
		break;
	}

	map[idx]++;
}

void io_u_mark_submit(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_submit, nr);
	td->ts.total_submit++;
}

void io_u_mark_complete(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_complete, nr);
	td->ts.total_complete++;
}

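/*
 * The ranges above form roughly power-of-two buckets: e.g. a submit or
 * complete batch of 12 falls in the 9 ... 16 case and bumps map[3].
 */
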
void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
	int idx = 0;

	switch (td->cur_depth) {
	default:
		idx = 6;
		break;
	case 32 ... 63:
		idx = 5;
		break;
	case 16 ... 31:
		idx = 4;
		break;
	case 8 ... 15:
		idx = 3;
		break;
	case 4 ... 7:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 1:
		break;
	}

	td->ts.io_u_map[idx] += nr;
}

static void io_u_mark_lat_nsec(struct thread_data *td, unsigned long long nsec)
{
	int idx = 0;

	assert(nsec < 1000);

	switch (nsec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_N_NR);
	td->ts.io_u_lat_n[idx]++;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long long usec)
{
	int idx = 0;

	assert(usec < 1000 && usec >= 1);

	switch (usec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_U_NR);
	td->ts.io_u_lat_u[idx]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long long msec)
{
	int idx = 0;

	assert(msec >= 1);

	switch (msec) {
	default:
		idx = 11;
		break;
	case 1000 ... 1999:
		idx = 10;
		break;
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_M_NR);
	td->ts.io_u_lat_m[idx]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long long nsec)
{
	if (nsec < 1000)
		io_u_mark_lat_nsec(td, nsec);
	else if (nsec < 1000000)
		io_u_mark_lat_usec(td, nsec / 1000);
	else
		io_u_mark_lat_msec(td, nsec / 1000000);
}

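/*
 * Example: a completion latency of 2.5 msec arrives as nsec=2500000,
 * which is >= 1000000 and is therefore logged via
 * io_u_mark_lat_msec(td, 2), landing in the 2 ... 3 msec bucket.
 */
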
static unsigned int __get_next_fileno_rand(struct thread_data *td)
{
	unsigned long fileno;

	if (td->o.file_service_type == FIO_FSERVICE_RANDOM) {
		uint64_t frand_max = rand_max(&td->next_file_state);
		unsigned long r;

		r = __rand(&td->next_file_state);
		return (unsigned int) ((double) td->o.nr_files
				* (r / (frand_max + 1.0)));
	}

	if (td->o.file_service_type == FIO_FSERVICE_ZIPF)
		fileno = zipf_next(&td->next_file_zipf);
	else if (td->o.file_service_type == FIO_FSERVICE_PARETO)
		fileno = pareto_next(&td->next_file_zipf);
	else if (td->o.file_service_type == FIO_FSERVICE_GAUSS)
		fileno = gauss_next(&td->next_file_gauss);
	else {
		log_err("fio: bad file service type: %d\n", td->o.file_service_type);
		assert(0);
		return 0;
	}

	return fileno >> FIO_FSERVICE_SHIFT;
}

/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td,
					   enum fio_file_flags goodf,
					   enum fio_file_flags badf)
{
	struct fio_file *f;
	int fno;

	do {
		int opened = 0;

		fno = __get_next_fileno_rand(td);

		f = td->files[fno];
		if (fio_file_done(f))
			continue;

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err)
				continue;
			opened = 1;
		}

		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
			dprint(FD_FILE, "get_next_file_rand: %p\n", f);
			return f;
		}
		if (opened)
			td_io_close_file(td, f);
	} while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
					 int badf)
{
	unsigned int old_next_file = td->next_file;
	struct fio_file *f;

	do {
		int opened = 0;

		f = td->files[td->next_file];

		td->next_file++;
		if (td->next_file >= td->o.nr_files)
			td->next_file = 0;

		dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
		if (fio_file_done(f)) {
			f = NULL;
			continue;
		}

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err) {
				dprint(FD_FILE, "error %d on open of %s\n",
					err, f->file_name);
				f = NULL;
				continue;
			}
			opened = 1;
		}

		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
							f->flags);
		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
			break;

		if (opened)
			td_io_close_file(td, f);

		f = NULL;
	} while (td->next_file != old_next_file);

	dprint(FD_FILE, "get_next_file_rr: %p\n", f);
	return f;
}

static struct fio_file *__get_next_file(struct thread_data *td)
{
	struct fio_file *f;

	assert(td->o.nr_files <= td->files_index);

	if (td->nr_done_files >= td->o.nr_files) {
		dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
				" nr_files=%d\n", td->nr_open_files,
						  td->nr_done_files,
						  td->o.nr_files);
		return NULL;
	}

	f = td->file_service_file;
	if (f && fio_file_open(f) && !fio_file_closing(f)) {
		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
			goto out;
		if (td->file_service_left--)
			goto out;
	}

	if (td->o.file_service_type == FIO_FSERVICE_RR ||
	    td->o.file_service_type == FIO_FSERVICE_SEQ)
		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
	else
		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);

	if (IS_ERR(f))
		return f;

	td->file_service_file = f;
	td->file_service_left = td->file_service_nr - 1;
out:
	if (f)
		dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
	else
		dprint(FD_FILE, "get_next_file: NULL\n");
	return f;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
	return __get_next_file(td);
}

static long set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f;

	do {
		f = get_next_file(td);
		if (IS_ERR_OR_NULL(f))
			return PTR_ERR(f);

		io_u->file = f;
		get_file(f);

		if (!fill_io_u(td, io_u))
			break;

		put_file_log(td, f);
		td_io_close_file(td, f);
		io_u->file = NULL;
		if (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)
			fio_file_reset(td, f);
		else {
			fio_file_set_done(f);
			td->nr_done_files++;
			dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
					td->nr_done_files, td->o.nr_files);
		}
	} while (1);

	return 0;
}

static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
		      unsigned long long tnsec, unsigned long long max_nsec)
{
	if (!td->error)
		log_err("fio: latency of %llu nsec exceeds specified max (%llu nsec)\n", tnsec, max_nsec);
	td_verror(td, ETIMEDOUT, "max latency exceeded");
	icd->error = ETIMEDOUT;
}

static void lat_new_cycle(struct thread_data *td)
{
	fio_gettime(&td->latency_ts, NULL);
	td->latency_ios = ddir_rw_sum(td->io_blocks);
	td->latency_failed = 0;
}

/*
 * We had an IO outside the latency target. Reduce the queue depth. If we
 * are at QD=1, then it's time to give up.
 */
static bool __lat_target_failed(struct thread_data *td)
{
	if (td->latency_qd == 1)
		return true;

	td->latency_qd_high = td->latency_qd;

	if (td->latency_qd == td->latency_qd_low)
		td->latency_qd_low--;

	td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;

	dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * When we ramp QD down, quiesce existing IO to prevent
	 * a storm of ramp downs due to pending higher depth.
	 */
	io_u_quiesce(td);
	lat_new_cycle(td);
	return false;
}

static bool lat_target_failed(struct thread_data *td)
{
	if (td->o.latency_percentile.u.f == 100.0)
		return __lat_target_failed(td);

	td->latency_failed++;
	return false;
}

void lat_target_init(struct thread_data *td)
{
	td->latency_end_run = 0;

	if (td->o.latency_target) {
		dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target);
		fio_gettime(&td->latency_ts, NULL);
		td->latency_qd = 1;
		td->latency_qd_high = td->o.iodepth;
		td->latency_qd_low = 1;
		td->latency_ios = ddir_rw_sum(td->io_blocks);
	} else
		td->latency_qd = td->o.iodepth;
}

void lat_target_reset(struct thread_data *td)
{
	if (!td->latency_end_run)
		lat_target_init(td);
}

static void lat_target_success(struct thread_data *td)
{
	const unsigned int qd = td->latency_qd;
	struct thread_options *o = &td->o;

	td->latency_qd_low = td->latency_qd;

	/*
	 * If we haven't failed yet, we double up to a failing value instead
	 * of bisecting from highest possible queue depth. If we have set
	 * a limit other than td->o.iodepth, bisect between that.
	 */
	if (td->latency_qd_high != o->iodepth)
		td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2;
	else
		td->latency_qd *= 2;

	if (td->latency_qd > o->iodepth)
		td->latency_qd = o->iodepth;

	dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * Same as last one, we are done. Let it run a latency cycle, so
	 * we get only the results from the targeted depth.
	 */
	if (td->latency_qd == qd) {
		if (td->latency_end_run) {
			dprint(FD_RATE, "We are done\n");
			td->done = 1;
		} else {
			dprint(FD_RATE, "Quiesce and final run\n");
			io_u_quiesce(td);
			td->latency_end_run = 1;
			reset_all_stats(td);
			reset_io_stats(td);
		}
	}

	lat_new_cycle(td);
}

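/*
 * Ramp example with iodepth=32: successful windows double the depth
 * 1 -> 2 -> 4 -> ... until a window misses the target; subsequent
 * failures and successes then bisect between latency_qd_low and
 * latency_qd_high, converging on the highest depth that still meets
 * the latency target.
 */
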
/*
 * Check if we can bump the queue depth
 */
void lat_target_check(struct thread_data *td)
{
	uint64_t usec_window;
	uint64_t ios;
	double success_ios;

	usec_window = utime_since_now(&td->latency_ts);
	if (usec_window < td->o.latency_window)
		return;

	ios = ddir_rw_sum(td->io_blocks) - td->latency_ios;
	success_ios = (double) (ios - td->latency_failed) / (double) ios;
	success_ios *= 100.0;

	dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f);

	if (success_ios >= td->o.latency_percentile.u.f)
		lat_target_success(td);
	else
		__lat_target_failed(td);
}

/*
 * If latency target is enabled, we might be ramping up or down and not
 * using the full queue depth available.
 */
bool queue_full(const struct thread_data *td)
{
	const int qempty = io_u_qempty(&td->io_u_freelist);

	if (qempty)
		return true;
	if (!td->o.latency_target)
		return false;

	return td->cur_depth >= td->latency_qd;
}

struct io_u *__get_io_u(struct thread_data *td)
{
	struct io_u *io_u = NULL;

	if (td->stop_io)
		return NULL;

	td_io_u_lock(td);

again:
	if (!io_u_rempty(&td->io_u_requeues))
		io_u = io_u_rpop(&td->io_u_requeues);
	else if (!queue_full(td)) {
		io_u = io_u_qpop(&td->io_u_freelist);

		io_u->file = NULL;
		io_u->buflen = 0;
		io_u->resid = 0;
		io_u->end_io = NULL;
	}

	if (io_u) {
		assert(io_u->flags & IO_U_F_FREE);
		io_u_clear(td, io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
				 IO_U_F_TRIMMED | IO_U_F_BARRIER |
				 IO_U_F_VER_LIST);

		io_u->error = 0;
		io_u->acct_ddir = -1;
		td->cur_depth++;
		assert(!(td->flags & TD_F_CHILD));
		io_u_set(td, io_u, IO_U_F_IN_CUR_DEPTH);
		io_u->ipo = NULL;
	} else if (td_async_processing(td)) {
		/*
		 * We ran out, wait for async verify threads to finish and
		 * return one
		 */
		assert(!(td->flags & TD_F_CHILD));
		assert(!pthread_cond_wait(&td->free_cond, &td->io_u_lock));
		goto again;
	}

	td_io_u_unlock(td);
	return io_u;
}

static bool check_get_trim(struct thread_data *td, struct io_u *io_u)
{
	if (!(td->flags & TD_F_TRIM_BACKLOG))
		return false;
	if (!td->trim_entries)
		return false;

	if (td->trim_batch) {
		td->trim_batch--;
		if (get_next_trim(td, io_u))
			return true;
	} else if (!(td->io_hist_len % td->o.trim_backlog) &&
		   td->last_ddir != DDIR_READ) {
		td->trim_batch = td->o.trim_batch;
		if (!td->trim_batch)
			td->trim_batch = td->o.trim_backlog;
		if (get_next_trim(td, io_u))
			return true;
	}

	return false;
}

static bool check_get_verify(struct thread_data *td, struct io_u *io_u)
{
	if (!(td->flags & TD_F_VER_BACKLOG))
		return false;

	if (td->io_hist_len) {
		int get_verify = 0;

		if (td->verify_batch)
			get_verify = 1;
		else if (!(td->io_hist_len % td->o.verify_backlog) &&
			 td->last_ddir != DDIR_READ) {
			td->verify_batch = td->o.verify_batch;
			if (!td->verify_batch)
				td->verify_batch = td->o.verify_backlog;
			get_verify = 1;
		}

		if (get_verify && !get_next_verify(td, io_u)) {
			td->verify_batch--;
			return true;
		}
	}

	return false;
}

/*
 * Fill offset and start time into the buffer content, to prevent too
 * easily compressible data for simple de-dupe attempts. Do this for every
 * 512b block in the range, since that should be the smallest block size
 * we can expect from a device.
 */
static void small_content_scramble(struct io_u *io_u)
{
	unsigned int i, nr_blocks = io_u->buflen >> 9;
	unsigned int offset;
	uint64_t boffset, *iptr;
	char *p;

	if (!nr_blocks)
		return;

	p = io_u->xfer_buf;
	boffset = io_u->offset;

	if (io_u->buf_filled_len)
		io_u->buf_filled_len = 0;

	/*
	 * Generate random index between 0..7. We do chunks of 512b, if
	 * we assume a cacheline is 64 bytes, then we have 8 of those.
	 * Scramble content within the blocks in the same cacheline to
	 * speed things up.
	 */
	offset = (io_u->start_time.tv_nsec ^ boffset) & 7;

	for (i = 0; i < nr_blocks; i++) {
		/*
		 * Fill offset into start of cacheline, time into end
		 * of cacheline
		 */
		iptr = (void *) p + (offset << 6);
		*iptr = boffset;

		iptr = (void *) p + 64 - 2 * sizeof(uint64_t);
		iptr[0] = io_u->start_time.tv_sec;
		iptr[1] = io_u->start_time.tv_nsec;

		p += 512;
		boffset += 512;
	}
}

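/*
 * Resulting layout per 512b block: the block's offset is written at
 * the start of one of its eight 64-byte cachelines (picked by 'offset'
 * above), and the start time's tv_sec/tv_nsec pair occupies the last
 * 16 bytes of the block's first cacheline.
 */
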
/*
 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
 * etc. The returned io_u is fully ready to be prepped and submitted.
 */
struct io_u *get_io_u(struct thread_data *td)
{
	struct fio_file *f;
	struct io_u *io_u;
	int do_scramble = 0;
	long ret = 0;

	io_u = __get_io_u(td);
	if (!io_u) {
		dprint(FD_IO, "__get_io_u failed\n");
		return NULL;
	}

	if (check_get_verify(td, io_u))
		goto out;
	if (check_get_trim(td, io_u))
		goto out;

	/*
	 * from a requeue, io_u already setup
	 */
	if (io_u->file)
		goto out;

	/*
	 * If using an iolog, grab next piece if any available.
	 */
	if (td->flags & TD_F_READ_IOLOG) {
		if (read_iolog_get(td, io_u))
			goto err_put;
	} else if (set_io_u_file(td, io_u)) {
		ret = -EBUSY;
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	f = io_u->file;
	if (!f) {
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	assert(fio_file_open(f));

	if (ddir_rw(io_u->ddir)) {
		if (!io_u->buflen && !td_ioengine_flagged(td, FIO_NOIO)) {
			dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
			goto err_put;
		}

		f->last_start[io_u->ddir] = io_u->offset;
		f->last_pos[io_u->ddir] = io_u->offset + io_u->buflen;

		if (io_u->ddir == DDIR_WRITE) {
			if (td->flags & TD_F_REFILL_BUFFERS) {
				io_u_fill_buffer(td, io_u,
					td->o.min_bs[DDIR_WRITE],
					io_u->buflen);
			} else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
				   !(td->flags & TD_F_COMPRESS))
				do_scramble = 1;
			if (td->flags & TD_F_VER_NONE) {
				populate_verify_io_u(td, io_u);
				do_scramble = 0;
			}
		} else if (io_u->ddir == DDIR_READ) {
			/*
			 * Reset the buf_filled parameters so next time if the
			 * buffer is used for writes it is refilled.
			 */
			io_u->buf_filled_len = 0;
		}
	}

	/*
	 * Set io data pointers.
	 */
	io_u->xfer_buf = io_u->buf;
	io_u->xfer_buflen = io_u->buflen;

out:
	assert(io_u->file);
	if (!td_io_prep(td, io_u)) {
		if (!td->o.disable_lat)
			fio_gettime(&io_u->start_time, NULL);

		if (do_scramble)
			small_content_scramble(io_u);

		return io_u;
	}
err_put:
	dprint(FD_IO, "get_io_u failed\n");
	put_io_u(td, io_u);
	return ERR_PTR(ret);
}

static void __io_u_log_error(struct thread_data *td, struct io_u *io_u)
{
	enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);

	if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
		return;

	log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%lu\n",
		io_u->file ? " on file " : "",
		io_u->file ? io_u->file->file_name : "",
		strerror(io_u->error),
		io_ddir_name(io_u->ddir),
		io_u->offset, io_u->xfer_buflen);

	if (td->io_ops->errdetails) {
		char *err = td->io_ops->errdetails(io_u);

		log_err("fio: %s\n", err);
		free(err);
	}

	if (!td->error)
		td_verror(td, io_u->error, "io_u error");
}

void io_u_log_error(struct thread_data *td, struct io_u *io_u)
{
	__io_u_log_error(td, io_u);
	if (td->parent)
		__io_u_log_error(td->parent, io_u);
}

static inline bool gtod_reduce(struct thread_data *td)
{
	return (td->o.disable_clat && td->o.disable_slat && td->o.disable_bw)
			|| td->o.gtod_reduce;
}

c8eeb9df
JA
1841static void account_io_completion(struct thread_data *td, struct io_u *io_u,
1842 struct io_completion_data *icd,
1843 const enum fio_ddir idx, unsigned int bytes)
1844{
a9da8ab2 1845 const int no_reduce = !gtod_reduce(td);
d6bb626e 1846 unsigned long long llnsec = 0;
c8eeb9df 1847
75dc383e
JA
1848 if (td->parent)
1849 td = td->parent;
1850
132b1ee4 1851 if (!td->o.stats || td_ioengine_flagged(td, FIO_NOSTATS))
8243be59
JA
1852 return;
1853
a9da8ab2 1854 if (no_reduce)
d6bb626e 1855 llnsec = ntime_since(&io_u->issue_time, &icd->time);
c8eeb9df
JA
1856
1857 if (!td->o.disable_lat) {
c3a32714 1858 unsigned long long tnsec;
c8eeb9df 1859
d6bb626e
VF
1860 tnsec = ntime_since(&io_u->start_time, &icd->time);
1861 add_lat_sample(td, idx, tnsec, bytes, io_u->offset);
15501535 1862
d4afedfd
JA
1863 if (td->flags & TD_F_PROFILE_OPS) {
1864 struct prof_io_ops *ops = &td->prof_io_ops;
1865
1866 if (ops->io_u_lat)
c3a32714 1867 icd->error = ops->io_u_lat(td, tnsec);
d4afedfd
JA
1868 }
1869
c3a32714
JA
1870 if (td->o.max_latency && tnsec > td->o.max_latency)
1871 lat_fatal(td, icd, tnsec, td->o.max_latency);
1872 if (td->o.latency_target && tnsec > td->o.latency_target) {
3e260a46 1873 if (lat_target_failed(td))
c3a32714 1874 lat_fatal(td, icd, tnsec, td->o.latency_target);
15501535 1875 }
c8eeb9df
JA
1876 }
1877
a47591e4
JA
1878 if (ddir_rw(idx)) {
1879 if (!td->o.disable_clat) {
d6bb626e
VF
1880 add_clat_sample(td, idx, llnsec, bytes, io_u->offset);
1881 io_u_mark_latency(td, llnsec);
a47591e4 1882 }
c8eeb9df 1883
a47591e4 1884 if (!td->o.disable_bw && per_unit_log(td->bw_log))
d6bb626e 1885 add_bw_sample(td, io_u, bytes, llnsec);
c8eeb9df 1886
a47591e4
JA
1887 if (no_reduce && per_unit_log(td->iops_log))
1888 add_iops_sample(td, io_u, bytes);
b2b3eefe
JA
1889 } else if (ddir_sync(idx) && !td->o.disable_clat)
1890 add_sync_clat_sample(&td->ts, llnsec);
66347cfa
DE
1891
1892 if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM) {
1893 uint32_t *info = io_u_block_info(td, io_u);
1894 if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
1895 if (io_u->ddir == DDIR_TRIM) {
1896 *info = BLOCK_INFO(BLOCK_STATE_TRIMMED,
1897 BLOCK_INFO_TRIMS(*info) + 1);
1898 } else if (io_u->ddir == DDIR_WRITE) {
1899 *info = BLOCK_INFO_SET_STATE(BLOCK_STATE_WRITTEN,
1900 *info);
1901 }
1902 }
1903 }
c8eeb9df
JA
1904}
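
/*
 * Timeline sketch of the samples recorded above (derived from the code,
 * for illustration only):
 *
 *	start_time        issue_time                icd->time
 *	    |---- slat --------|--------- clat ---------|
 *	    |--------------- lat (tnsec) ---------------|
 *
 * Total latency runs from io_u creation to completion; completion
 * latency (llnsec) runs from issue to completion.
 */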

static void file_log_write_comp(const struct thread_data *td, struct fio_file *f,
				uint64_t offset, unsigned int bytes)
{
	int idx;

	if (!f)
		return;

	if (f->first_write == -1ULL || offset < f->first_write)
		f->first_write = offset;
	if (f->last_write == -1ULL || ((offset + bytes) > f->last_write))
		f->last_write = offset + bytes;

	if (!f->last_write_comp)
		return;

	idx = f->last_write_idx++;
	f->last_write_comp[idx] = offset;
	if (f->last_write_idx == td->o.iodepth)
		f->last_write_idx = 0;
}
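
/*
 * Illustrative example, not upstream code: with iodepth=4, completed
 * write offsets are kept in last_write_comp as a ring buffer:
 *
 *	completions:     0k, 4k, 8k, 12k, 16k
 *	last_write_comp: { 16k, 4k, 8k, 12k }, last_write_idx == 1
 *
 * so only the most recent td->o.iodepth write offsets are retained,
 * e.g. for verifying the last writes after a crash or trigger.
 */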

static bool should_account(struct thread_data *td)
{
	return ramp_time_over(td) && (td->runstate == TD_RUNNING ||
				      td->runstate == TD_VERIFYING);
}

static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
			 struct io_completion_data *icd)
{
	struct io_u *io_u = *io_u_ptr;
	enum fio_ddir ddir = io_u->ddir;
	struct fio_file *f = io_u->file;

	dprint_io_u(io_u, "complete");

	assert(io_u->flags & IO_U_F_FLIGHT);
	io_u_clear(td, io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK);

	/*
	 * Mark IO ok to verify
	 */
	if (io_u->ipo) {
		/*
		 * Remove errored entry from the verification list
		 */
		if (io_u->error)
			unlog_io_piece(td, io_u);
		else {
			io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
			write_barrier();
		}
	}

	if (ddir_sync(ddir)) {
		td->last_was_sync = true;
		if (f) {
			f->first_write = -1ULL;
			f->last_write = -1ULL;
		}
		if (should_account(td))
			account_io_completion(td, io_u, icd, ddir, io_u->buflen);
		return;
	}

	td->last_was_sync = false;
	td->last_ddir = ddir;

	if (!io_u->error && ddir_rw(ddir)) {
		unsigned int bytes = io_u->buflen - io_u->resid;
		int ret;

		td->io_blocks[ddir]++;
		td->io_bytes[ddir] += bytes;

		if (!(io_u->flags & IO_U_F_VER_LIST)) {
			td->this_io_blocks[ddir]++;
			td->this_io_bytes[ddir] += bytes;
		}

		if (ddir == DDIR_WRITE)
			file_log_write_comp(td, f, io_u->offset, bytes);

		if (should_account(td))
			account_io_completion(td, io_u, icd, ddir, bytes);

		icd->bytes_done[ddir] += bytes;

		if (io_u->end_io) {
			ret = io_u->end_io(td, io_u_ptr);
			io_u = *io_u_ptr;
			if (ret && !icd->error)
				icd->error = ret;
		}
	} else if (io_u->error) {
		icd->error = io_u->error;
		io_u_log_error(td, io_u);
	}
	if (icd->error) {
		enum error_type_bit eb = td_error_type(ddir, icd->error);

		if (!td_non_fatal_error(td, eb, icd->error))
			return;

		/*
		 * If there is a non_fatal error, then add to the error count
		 * and clear all the errors.
		 */
		update_error_count(td, icd->error);
		td_clear_error(td);
		icd->error = 0;
		if (io_u)
			io_u->error = 0;
	}
}
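
/*
 * Note on the non-fatal path above (hedged example): with a job option
 * such as
 *
 *	continue_on_error=read
 *
 * a failed read maps to an error type bit for which td_non_fatal_error()
 * returns true, so the error is counted and cleared here instead of
 * aborting the job.
 */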

static void init_icd(struct thread_data *td, struct io_completion_data *icd,
		     int nr)
{
	int ddir;

	if (!gtod_reduce(td))
		fio_gettime(&icd->time, NULL);

	icd->nr = nr;

	icd->error = 0;
	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
		icd->bytes_done[ddir] = 0;
}

static void ios_completed(struct thread_data *td,
			  struct io_completion_data *icd)
{
	struct io_u *io_u;
	int i;

	for (i = 0; i < icd->nr; i++) {
		io_u = td->io_ops->event(td, i);

		io_completed(td, &io_u, icd);

		if (io_u)
			put_io_u(td, io_u);
	}
}

/*
 * Complete a single io_u for the sync engines.
 */
int io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
{
	struct io_completion_data icd;
	int ddir;

	init_icd(td, &icd, 1);
	io_completed(td, &io_u, &icd);

	if (io_u)
		put_io_u(td, io_u);

	if (icd.error) {
		td_verror(td, icd.error, "io_u_sync_complete");
		return -1;
	}

	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
		td->bytes_done[ddir] += icd.bytes_done[ddir];

	return 0;
}
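
/*
 * Usage sketch, simplified from the backend and not verbatim: sync
 * engines complete the io_u inline, so the caller reaps it immediately:
 *
 *	ret = td_io_queue(td, io_u);
 *	if (ret == FIO_Q_COMPLETED) {
 *		if (io_u_sync_complete(td, io_u) < 0)
 *			break;
 *	}
 */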

/*
 * Called to complete min_evts number of IOs for the async engines.
 */
int io_u_queued_complete(struct thread_data *td, int min_evts)
{
	struct io_completion_data icd;
	struct timespec *tvp = NULL;
	int ret, ddir;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };

	dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts);

	if (!min_evts)
		tvp = &ts;
	else if (min_evts > td->cur_depth)
		min_evts = td->cur_depth;

	/*
	 * No worries, td_io_getevents fixes min and max if they are
	 * set incorrectly.
	 */
	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete_max, tvp);
	if (ret < 0) {
		td_verror(td, -ret, "td_io_getevents");
		return ret;
	} else if (!ret)
		return ret;

	init_icd(td, &icd, ret);
	ios_completed(td, &icd);
	if (icd.error) {
		td_verror(td, icd.error, "io_u_queued_complete");
		return -1;
	}

	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
		td->bytes_done[ddir] += icd.bytes_done[ddir];

	return ret;
}
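
/*
 * Usage sketch, not verbatim: async engines reap in batches. With
 * min_evts == 0 the zero timespec above makes this a non-blocking poll:
 *
 *	ret = io_u_queued_complete(td, 0);		// poll
 *	ret = io_u_queued_complete(td, td->cur_depth);	// drain all
 */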

/*
 * Call when io_u is really queued, to update the submission latency.
 */
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
	if (!td->o.disable_slat && ramp_time_over(td) && td->o.stats) {
		unsigned long slat_time;

		slat_time = ntime_since(&io_u->start_time, &io_u->issue_time);

		if (td->parent)
			td = td->parent;

		add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
				io_u->offset);
	}
}

/*
 * See if we should reuse the last seed, if dedupe is enabled
 */
static struct frand_state *get_buf_state(struct thread_data *td)
{
	unsigned int v;

	if (!td->o.dedupe_percentage)
		return &td->buf_state;
	else if (td->o.dedupe_percentage == 100) {
		frand_copy(&td->buf_state_prev, &td->buf_state);
		return &td->buf_state;
	}

	v = rand32_between(&td->dedupe_state, 1, 100);

	if (v <= td->o.dedupe_percentage)
		return &td->buf_state_prev;

	return &td->buf_state;
}

static void save_buf_state(struct thread_data *td, struct frand_state *rs)
{
	if (td->o.dedupe_percentage == 100)
		frand_copy(rs, &td->buf_state_prev);
	else if (rs == &td->buf_state)
		frand_copy(&td->buf_state_prev, rs);
}
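
/*
 * Illustrative example, not upstream code: with
 *
 *	dedupe_percentage=50
 *
 * about half of the get_buf_state() calls return buf_state_prev,
 * replaying an earlier seed so the generated buffer matches a previous
 * one and can be deduplicated by the target.
 */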

void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
		    unsigned int max_bs)
{
	struct thread_options *o = &td->o;

	if (o->mem_type == MEM_CUDA_MALLOC)
		return;

	if (o->compress_percentage || o->dedupe_percentage) {
		unsigned int perc = td->o.compress_percentage;
		struct frand_state *rs;
		unsigned int left = max_bs;
		unsigned int this_write;

		do {
			rs = get_buf_state(td);

			min_write = min(min_write, left);

			if (perc) {
				this_write = min_not_zero(min_write,
							  td->o.compress_chunk);

				fill_random_buf_percentage(rs, buf, perc,
						this_write, this_write,
						o->buffer_pattern,
						o->buffer_pattern_bytes);
			} else {
				fill_random_buf(rs, buf, min_write);
				this_write = min_write;
			}

			buf += this_write;
			left -= this_write;
			save_buf_state(td, rs);
		} while (left);
	} else if (o->buffer_pattern_bytes)
		fill_buffer_pattern(td, buf, max_bs);
	else if (o->zero_buffers)
		memset(buf, 0, max_bs);
	else
		fill_random_buf(get_buf_state(td), buf, max_bs);
}

/*
 * "randomly" fill the buffer contents
 */
void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
		      unsigned int min_write, unsigned int max_bs)
{
	io_u->buf_filled_len = 0;
	fill_io_buffer(td, io_u->buf, min_write, max_bs);
}
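
/*
 * Hedged example: this fill path is what backs job options such as
 *
 *	buffer_compress_percentage=50
 *	buffer_compress_chunk=4k
 *
 * where each chunk is filled so the resulting buffers compress to
 * roughly the requested percentage.
 */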

static int do_sync_file_range(const struct thread_data *td,
			      struct fio_file *f)
{
	off64_t offset, nbytes;

	offset = f->first_write;
	nbytes = f->last_write - f->first_write;

	if (!nbytes)
		return 0;

	return sync_file_range(f->fd, offset, nbytes, td->o.sync_file_range);
}

int do_io_u_sync(const struct thread_data *td, struct io_u *io_u)
{
	int ret;

	if (io_u->ddir == DDIR_SYNC) {
		ret = fsync(io_u->file->fd);
	} else if (io_u->ddir == DDIR_DATASYNC) {
#ifdef CONFIG_FDATASYNC
		ret = fdatasync(io_u->file->fd);
#else
		ret = io_u->xfer_buflen;
		io_u->error = EINVAL;
#endif
	} else if (io_u->ddir == DDIR_SYNC_FILE_RANGE)
		ret = do_sync_file_range(td, io_u->file);
	else {
		ret = io_u->xfer_buflen;
		io_u->error = EINVAL;
	}

	if (ret < 0)
		io_u->error = errno;

	return ret;
}
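
/*
 * Illustrative mapping, not upstream code, from job options to the
 * ddir values dispatched above:
 *
 *	fsync=N              -> DDIR_SYNC
 *	fdatasync=N          -> DDIR_DATASYNC
 *	sync_file_range=str:N -> DDIR_SYNC_FILE_RANGE
 */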

int do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
{
#ifndef FIO_HAVE_TRIM
	io_u->error = EINVAL;
	return 0;
#else
	struct fio_file *f = io_u->file;
	int ret;

	ret = os_trim(f, io_u->offset, io_u->xfer_buflen);
	if (!ret)
		return io_u->xfer_buflen;

	io_u->error = ret;
	return 0;
#endif
}