[fio.git] / io_u.c
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"
#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
#include "lib/axmap.h"
#include "err.h"
#include "lib/pow2.h"
#include "minmax.h"

struct io_completion_data {
	int nr;				/* input */

	int error;			/* output */
	uint64_t bytes_done[DDIR_RWDIR_CNT];	/* output */
	struct timespec time;		/* output */
};

/*
 * The ->io_axmap contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static bool random_map_free(struct fio_file *f, const uint64_t block)
{
	return !axmap_isset(f->io_axmap, block);
}

/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned int min_bs = td->o.min_bs[io_u->ddir];
	struct fio_file *f = io_u->file;
	unsigned int nr_blocks;
	uint64_t block;

	block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;

	if (!(io_u->flags & IO_U_F_BUSY_OK))
		nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);

	if ((nr_blocks * min_bs) < io_u->buflen)
		io_u->buflen = nr_blocks * min_bs;
}

static uint64_t last_block(struct thread_data *td, struct fio_file *f,
			   enum fio_ddir ddir)
{
	uint64_t max_blocks;
	uint64_t max_size;

	assert(ddir_rw(ddir));

	/*
	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
	 * -> not for now, since there is code assuming it could go either way.
	 */
	max_size = f->io_size;
	if (max_size > f->real_file_size)
		max_size = f->real_file_size;

	if (td->o.zone_range)
		max_size = td->o.zone_range;

	if (td->o.min_bs[ddir] > td->o.ba[ddir])
		max_size -= td->o.min_bs[ddir] - td->o.ba[ddir];

	max_blocks = max_size / (uint64_t) td->o.ba[ddir];
	if (!max_blocks)
		return 0;

	return max_blocks;
}

struct rand_off {
	struct flist_head list;
	uint64_t off;
};

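/*
 * Note on the tausworthe path below: a raw value r in [0, rand_max] is
 * scaled to a block index with b = lastb * (r / (rand_max + 1.0)), which
 * lands uniformly in [0, lastb - 1]. For example, lastb = 1000 with
 * r = rand_max yields b = 999; r = 0 yields b = 0.
 */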
static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				  enum fio_ddir ddir, uint64_t *b,
				  uint64_t lastb)
{
	uint64_t r;

	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE ||
	    td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64) {

		r = __rand(&td->random_state);

		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);

		*b = lastb * (r / (rand_max(&td->random_state) + 1.0));
	} else {
		uint64_t off = 0;

		assert(fio_file_lfsr(f));

		if (lfsr_next(&f->lfsr, &off))
			return 1;

		*b = off;
	}

	/*
	 * if we are not maintaining a random map, we are done.
	 */
	if (!file_randommap(td, f))
		goto ret;

	/*
	 * calculate map offset and check if it's free
	 */
	if (random_map_free(f, *b))
		goto ret;

	dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
		(unsigned long long) *b);

	*b = axmap_next_free(f->io_axmap, *b);
	if (*b == (uint64_t) -1ULL)
		return 1;
ret:
	return 0;
}

static int __get_next_rand_offset_zipf(struct thread_data *td,
				       struct fio_file *f, enum fio_ddir ddir,
				       uint64_t *b)
{
	*b = zipf_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_pareto(struct thread_data *td,
					 struct fio_file *f, enum fio_ddir ddir,
					 uint64_t *b)
{
	*b = pareto_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_gauss(struct thread_data *td,
					struct fio_file *f, enum fio_ddir ddir,
					uint64_t *b)
{
	*b = gauss_next(&f->gauss);
	return 0;
}

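/*
 * The two zoned variants below pick an offset range from a 100-entry
 * table (td->zone_state_index) indexed by a 1..100 roll. With e.g.
 * random_distribution=zoned:60/10:30/20:8/30:2/40, 60 of the 100 slots
 * map to the first 10% of the file, 30 slots to the next 20%, and so
 * on. The _abs variant works on absolute sizes instead of percentages.
 */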
static int __get_next_rand_offset_zoned_abs(struct thread_data *td,
					    struct fio_file *f,
					    enum fio_ddir ddir, uint64_t *b)
{
	struct zone_split_index *zsi;
	uint64_t lastb, send, stotal;
	static int warned;
	unsigned int v;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	if (!td->o.zone_split_nr[ddir]) {
bail:
		return __get_next_rand_offset(td, f, ddir, b, lastb);
	}

	/*
	 * Generate a value, v, between 1 and 100, both inclusive
	 */
	v = rand32_between(&td->zone_state, 1, 100);

	/*
	 * Find our generated table. 'send' is the end block of this zone,
	 * 'stotal' is our start offset.
	 */
	zsi = &td->zone_state_index[ddir][v - 1];
	stotal = zsi->size_prev / td->o.ba[ddir];
	send = zsi->size / td->o.ba[ddir];

	/*
	 * Should never happen
	 */
	if (send == -1U) {
		if (!warned) {
			log_err("fio: bug in zoned generation\n");
			warned = 1;
		}
		goto bail;
	} else if (send > lastb) {
		/*
		 * This happens if the user specifies ranges that exceed
		 * the file/device size. We can't handle that gracefully,
		 * so error and exit.
		 */
		log_err("fio: zoned_abs sizes exceed file size\n");
		return 1;
	}

	/*
	 * Generate index from 0..send-stotal
	 */
	if (__get_next_rand_offset(td, f, ddir, b, send - stotal) == 1)
		return 1;

	*b += stotal;
	return 0;
}

static int __get_next_rand_offset_zoned(struct thread_data *td,
					struct fio_file *f, enum fio_ddir ddir,
					uint64_t *b)
{
	unsigned int v, send, stotal;
	uint64_t offset, lastb;
	static int warned;
	struct zone_split_index *zsi;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	if (!td->o.zone_split_nr[ddir]) {
bail:
		return __get_next_rand_offset(td, f, ddir, b, lastb);
	}

	/*
	 * Generate a value, v, between 1 and 100, both inclusive
	 */
	v = rand32_between(&td->zone_state, 1, 100);

	zsi = &td->zone_state_index[ddir][v - 1];
	stotal = zsi->size_perc_prev;
	send = zsi->size_perc;

	/*
	 * Should never happen
	 */
	if (send == -1U) {
		if (!warned) {
			log_err("fio: bug in zoned generation\n");
			warned = 1;
		}
		goto bail;
	}

	/*
	 * 'send' is some percentage below or equal to 100 that
	 * marks the end of the current IO range. 'stotal' marks
	 * the start, in percent.
	 */
	if (stotal)
		offset = stotal * lastb / 100ULL;
	else
		offset = 0;

	lastb = lastb * (send - stotal) / 100ULL;

	/*
	 * Generate index from 0..send-of-lastb
	 */
	if (__get_next_rand_offset(td, f, ddir, b, lastb) == 1)
		return 1;

	/*
	 * Add our start offset, if any
	 */
	if (offset)
		*b += offset;

	return 0;
}

static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
{
	struct rand_off *r1 = flist_entry(a, struct rand_off, list);
	struct rand_off *r2 = flist_entry(b, struct rand_off, list);

	return r1->off - r2->off;
}

static int get_off_from_method(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM) {
		uint64_t lastb;

		lastb = last_block(td, f, ddir);
		if (!lastb)
			return 1;

		return __get_next_rand_offset(td, f, ddir, b, lastb);
	} else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
		return __get_next_rand_offset_zipf(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
		return __get_next_rand_offset_pareto(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
		return __get_next_rand_offset_gauss(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_ZONED)
		return __get_next_rand_offset_zoned(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_ZONED_ABS)
		return __get_next_rand_offset_zoned_abs(td, f, ddir, b);

	log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
	return 1;
}

/*
 * Sort the reads for a verify phase in batches of verifysort_nr, if
 * specified.
 */
static inline bool should_sort_io(struct thread_data *td)
{
	if (!td->o.verifysort_nr || !td->o.do_verify)
		return false;
	if (!td_random(td))
		return false;
	if (td->runstate != TD_VERIFYING)
		return false;
	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE ||
	    td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64)
		return false;

	return true;
}

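/*
 * percentage_random example: with percentage_random=80, the 1..100 roll
 * below satisfies v <= 80 about 80% of the time, so roughly 4 in 5 I/Os
 * get a random offset and the rest continue sequentially.
 */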
static bool should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned int v;

	if (td->o.perc_rand[ddir] == 100)
		return true;

	v = rand32_between(&td->seq_rand_state[ddir], 1, 100);

	return v <= td->o.perc_rand[ddir];
}

static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				enum fio_ddir ddir, uint64_t *b)
{
	struct rand_off *r;
	int i, ret = 1;

	if (!should_sort_io(td))
		return get_off_from_method(td, f, ddir, b);

	if (!flist_empty(&td->next_rand_list)) {
fetch:
		r = flist_first_entry(&td->next_rand_list, struct rand_off, list);
		flist_del(&r->list);
		*b = r->off;
		free(r);
		return 0;
	}

	for (i = 0; i < td->o.verifysort_nr; i++) {
		r = malloc(sizeof(*r));

		ret = get_off_from_method(td, f, ddir, &r->off);
		if (ret) {
			free(r);
			break;
		}

		flist_add(&r->list, &td->next_rand_list);
	}

	if (ret && !i)
		return ret;

	assert(!flist_empty(&td->next_rand_list));
	flist_sort(NULL, &td->next_rand_list, flist_cmp);
	goto fetch;
}

static void loop_cache_invalidate(struct thread_data *td, struct fio_file *f)
{
	struct thread_options *o = &td->o;

	if (o->invalidate_cache && !o->odirect) {
		int fio_unused ret;

		ret = file_invalidate_cache(td, f);
	}
}

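/*
 * For time_based (and nonuniform file service) jobs, running out of
 * fresh random blocks is not fatal: the helper below resets the file's
 * random map and starts offset generation over, invalidating the cache
 * so the second pass is not served from page cache.
 */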
static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (!get_next_rand_offset(td, f, ddir, b))
		return 0;

	if (td->o.time_based ||
	    (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)) {
		fio_file_reset(td, f);
		if (!get_next_rand_offset(td, f, ddir, b))
			return 0;
		loop_cache_invalidate(td, f);
	}

	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
			f->file_name, (unsigned long long) f->last_pos[ddir],
			(unsigned long long) f->real_file_size);
	return 1;
}

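/*
 * ddir_seq_add note: a job like rw=write:4k advances each sequential
 * offset by an extra 4k, producing holed I/O. A negative increment
 * walks the file backwards, which is why the wrap logic below can land
 * at either end of the file.
 */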
static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *offset)
{
	struct thread_options *o = &td->o;

	assert(ddir_rw(ddir));

	/*
	 * If we reach the end for a time based run, reset us back to 0
	 * and invalidate the cache, if we need to.
	 */
	if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
	    o->time_based) {
		f->last_pos[ddir] = f->file_offset;
		loop_cache_invalidate(td, f);
	}

	if (f->last_pos[ddir] < f->real_file_size) {
		uint64_t pos;

		if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0) {
			if (f->real_file_size > f->io_size)
				f->last_pos[ddir] = f->io_size;
			else
				f->last_pos[ddir] = f->real_file_size;
		}

		pos = f->last_pos[ddir] - f->file_offset;
		if (pos && o->ddir_seq_add) {
			pos += o->ddir_seq_add;

			/*
			 * If we reach beyond the end of the file
			 * with holed IO, wrap around to the
			 * beginning again. If we're doing backwards IO,
			 * wrap to the end.
			 */
			if (pos >= f->real_file_size) {
				if (o->ddir_seq_add > 0)
					pos = f->file_offset;
				else {
					if (f->real_file_size > f->io_size)
						pos = f->io_size;
					else
						pos = f->real_file_size;

					pos += o->ddir_seq_add;
				}
			}
		}

		*offset = pos;
		return 0;
	}

	return 1;
}

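/*
 * get_next_block() below juggles two result forms: the random paths
 * return a block index 'b' (scaled by the block alignment ba[ddir] at
 * the end), while the sequential path returns a byte 'offset' directly.
 * -1ULL marks whichever of the two is unused.
 */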
static int get_next_block(struct thread_data *td, struct io_u *io_u,
			  enum fio_ddir ddir, int rw_seq,
			  unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	uint64_t b, offset;
	int ret;

	assert(ddir_rw(ddir));

	b = offset = -1ULL;

	if (rw_seq) {
		if (td_random(td)) {
			if (should_do_random(td, ddir)) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 1;
			} else {
				*is_random = 0;
				io_u_set(td, io_u, IO_U_F_BUSY_OK);
				ret = get_next_seq_offset(td, f, ddir, &offset);
				if (ret)
					ret = get_next_rand_block(td, f, ddir, &b);
			}
		} else {
			*is_random = 0;
			ret = get_next_seq_offset(td, f, ddir, &offset);
		}
	} else {
		io_u_set(td, io_u, IO_U_F_BUSY_OK);
		*is_random = 0;

		if (td->o.rw_seq == RW_SEQ_SEQ) {
			ret = get_next_seq_offset(td, f, ddir, &offset);
			if (ret) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 0;
			}
		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
			if (f->last_start[ddir] != -1ULL)
				offset = f->last_start[ddir] - f->file_offset;
			else
				offset = 0;
			ret = 0;
		} else {
			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
			ret = 1;
		}
	}

	if (!ret) {
		if (offset != -1ULL)
			io_u->offset = offset;
		else if (b != -1ULL)
			io_u->offset = b * td->o.ba[ddir];
		else {
			log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
			ret = 1;
		}
	}

	return ret;
}

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
			     unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	int rw_seq_hit = 0;

	assert(ddir_rw(ddir));

	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
		rw_seq_hit = 1;
		td->ddir_seq_nr = td->o.ddir_seq_nr;
	}

	if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
		return 1;

	if (io_u->offset >= f->io_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->io_size);
		return 1;
	}

	io_u->offset += f->file_offset;
	if (io_u->offset >= f->real_file_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->real_file_size);
		return 1;
	}

	return 0;
}

static int get_next_offset(struct thread_data *td, struct io_u *io_u,
			   unsigned int *is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_off)
			return ops->fill_io_u_off(td, io_u, is_random);
	}

	return __get_next_offset(td, io_u, is_random);
}

static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u,
			     unsigned int buflen)
{
	struct fio_file *f = io_u->file;

	return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
}

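/*
 * bssplit selection note: the loop below walks the split table while
 * accumulating 'perc'. The test (r / perc <= frand_max / 100ULL) is an
 * overflow-safe way of asking, roughly (modulo integer truncation),
 * whether r <= frand_max * perc / 100, i.e. whether the roll falls
 * inside the cumulative percentage. With e.g. bssplit=4k/50:16k/50, a
 * roll in the lower half of the range picks the 4k entry.
 */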
static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
				      unsigned int is_random)
{
	int ddir = io_u->ddir;
	unsigned int buflen = 0;
	unsigned int minbs, maxbs;
	uint64_t frand_max, r;
	bool power_2;

	assert(ddir_rw(ddir));

	if (td->o.bs_is_seq_rand)
		ddir = is_random ? DDIR_WRITE : DDIR_READ;

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		return minbs;

	/*
	 * If we can't satisfy the min block size from here, then fail
	 */
	if (!io_u_fits(td, io_u, minbs))
		return 0;

	frand_max = rand_max(&td->bsrange_state[ddir]);
	do {
		r = __rand(&td->bsrange_state[ddir]);

		if (!td->o.bssplit_nr[ddir]) {
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (frand_max + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			long long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if (!perc)
					break;
				if ((r / perc <= frand_max / 100ULL) &&
				    io_u_fits(td, io_u, buflen))
					break;
			}
		}

		power_2 = is_power_of_2(minbs);
		if (!td->o.bs_unaligned && power_2)
			buflen &= ~(minbs - 1);
		else if (!td->o.bs_unaligned && !power_2)
			buflen -= buflen % minbs;
	} while (!io_u_fits(td, io_u, buflen));

	return buflen;
}

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
				    unsigned int is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_size)
			return ops->fill_io_u_size(td, io_u, is_random);
	}

	return __get_next_buflen(td, io_u, is_random);
}

static void set_rwmix_bytes(struct thread_data *td)
{
	unsigned int diff;

	/*
	 * we do time or byte based switch. this is needed because
	 * buffered writes may issue a lot quicker than they complete,
	 * whereas reads do not.
	 */
	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
}

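/*
 * rwmix roll: with e.g. rwmixread=70, the 1..100 roll below returns
 * DDIR_READ whenever v <= 70, giving roughly a 70/30 read/write mix.
 */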
static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	unsigned int v;

	v = rand32_between(&td->rwmix_state, 1, 100);

	if (v <= td->o.rwmix[DDIR_READ])
		return DDIR_READ;

	return DDIR_WRITE;
}

int io_u_quiesce(struct thread_data *td)
{
	int completed = 0;

	/*
	 * We are going to sleep, ensure that we flush anything pending as
	 * not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	if (td->io_u_queued || td->cur_depth) {
		int fio_unused ret;

		ret = td_io_commit(td);
	}

	while (td->io_u_in_flight) {
		int ret;

		ret = io_u_queued_complete(td, 1);
		if (ret > 0)
			completed += ret;
	}

	if (td->flags & TD_F_REGROW_LOGS)
		regrow_logs(td);

	return completed;
}

static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	uint64_t usec;
	uint64_t now;

	assert(ddir_rw(ddir));
	now = utime_since_now(&td->start);

	/*
	 * if rate_next_io_time is in the past, need to catch up to rate
	 */
	if (td->rate_next_io_time[ddir] <= now)
		return ddir;

	/*
	 * We are ahead of rate in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td) && td->o.rwmix[odir]) {
		/*
		 * Other direction is behind rate, switch
		 */
		if (td->rate_next_io_time[odir] <= now)
			return odir;

		/*
		 * Both directions are ahead of rate. sleep the min,
		 * switch if necessary
		 */
		if (td->rate_next_io_time[ddir] <=
		    td->rate_next_io_time[odir]) {
			usec = td->rate_next_io_time[ddir] - now;
		} else {
			usec = td->rate_next_io_time[odir] - now;
			ddir = odir;
		}
	} else
		usec = td->rate_next_io_time[ddir] - now;

	if (td->o.io_submit_mode == IO_MODE_INLINE)
		io_u_quiesce(td);

	usec_sleep(td, usec);
	return ddir;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
	enum fio_ddir ddir;

	/*
	 * See if it's time to fsync/fdatasync/sync_file_range first,
	 * and if not then move on to check regular I/Os.
	 */
	if (should_fsync(td)) {
		if (td->o.fsync_blocks && td->io_issues[DDIR_WRITE] &&
		    !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks))
			return DDIR_SYNC;

		if (td->o.fdatasync_blocks && td->io_issues[DDIR_WRITE] &&
		    !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks))
			return DDIR_DATASYNC;

		if (td->sync_file_range_nr && td->io_issues[DDIR_WRITE] &&
		    !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr))
			return DDIR_SYNC_FILE_RANGE;
	}

	if (td_rw(td)) {
		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
			/*
			 * Put a top limit on how many bytes we do for
			 * one data direction, to avoid overflowing the
			 * ranges too much
			 */
			ddir = get_rand_ddir(td);

			if (ddir != td->rwmix_ddir)
				set_rwmix_bytes(td);

			td->rwmix_ddir = ddir;
		}
		ddir = td->rwmix_ddir;
	} else if (td_read(td))
		ddir = DDIR_READ;
	else if (td_write(td))
		ddir = DDIR_WRITE;
	else if (td_trim(td))
		ddir = DDIR_TRIM;
	else
		ddir = DDIR_INVAL;

	td->rwmix_ddir = rate_ddir(td, ddir);
	return td->rwmix_ddir;
}

static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
	enum fio_ddir ddir = get_rw_ddir(td);

	if (td_trimwrite(td)) {
		struct fio_file *f = io_u->file;

		if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
			ddir = DDIR_TRIM;
		else
			ddir = DDIR_WRITE;
	}

	io_u->ddir = io_u->acct_ddir = ddir;

	if (io_u->ddir == DDIR_WRITE && td_ioengine_flagged(td, FIO_BARRIER) &&
	    td->o.barrier_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
	    td->io_issues[DDIR_WRITE])
		io_u_set(td, io_u, IO_U_F_BARRIER);
}

void put_file_log(struct thread_data *td, struct fio_file *f)
{
	unsigned int ret = put_file(td, f);

	if (ret)
		td_verror(td, ret, "file close");
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	if (td->parent)
		td = td->parent;

	td_io_u_lock(td);

	if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT))
		put_file_log(td, io_u->file);

	io_u->file = NULL;
	io_u_set(td, io_u, IO_U_F_FREE);

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
		td->cur_depth--;
		assert(!(td->flags & TD_F_CHILD));
	}
	io_u_qpush(&td->io_u_freelist, io_u);
	td_io_u_unlock(td);
	td_io_u_free_notify(td);
}

void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
	io_u_clear(td, io_u, IO_U_F_FLIGHT);
	put_io_u(td, io_u);
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;
	enum fio_ddir ddir = acct_ddir(__io_u);

	dprint(FD_IO, "requeue %p\n", __io_u);

	if (td->parent)
		td = td->parent;

	td_io_u_lock(td);

	io_u_set(td, __io_u, IO_U_F_FREE);
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
		td->io_issues[ddir]--;

	io_u_clear(td, __io_u, IO_U_F_FLIGHT);
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH) {
		td->cur_depth--;
		assert(!(td->flags & TD_F_CHILD));
	}

	io_u_rpush(&td->io_u_requeues, __io_u);
	td_io_u_unlock(td);
	td_io_u_free_notify(td);
	*io_u = NULL;
}

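/*
 * Zone stepping example: with zonerange=4m, zonesize=4m and zoneskip=4m,
 * the helper below does 4m of I/O within a zone, then advances
 * f->file_offset by zone_range + zone_skip before starting the next one.
 * If zonesize exceeds zonerange, the same zone keeps being re-covered
 * until zone_bytes reaches zonesize.
 */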
static void __fill_io_u_zone(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	/*
	 * See if it's time to switch to a new zone
	 */
	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
		td->zone_bytes = 0;
		f->file_offset += td->o.zone_range + td->o.zone_skip;

		/*
		 * Wrap from the beginning, if we exceed the file size
		 */
		if (f->file_offset >= f->real_file_size)
			f->file_offset = f->real_file_size - f->file_offset;
		f->last_pos[io_u->ddir] = f->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}

	/*
	 * If zone_size > zone_range, then maintain the same zone until
	 * zone_bytes >= zone_size.
	 */
	if (f->last_pos[io_u->ddir] >= (f->file_offset + td->o.zone_range)) {
		dprint(FD_IO, "io_u maintain zone offset=%" PRIu64 "/last_pos=%" PRIu64 "\n",
				f->file_offset, f->last_pos[io_u->ddir]);
		f->last_pos[io_u->ddir] = f->file_offset;
	}

	/*
	 * For random: if 'norandommap' is not set and zone_size > zone_range,
	 * the map needs to be reset, as it's done with zone_range every time.
	 */
	if ((td->zone_bytes % td->o.zone_range) == 0)
		fio_file_reset(td, f);
}

static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
	unsigned int is_random;

	if (td_ioengine_flagged(td, FIO_NOIO))
		goto out;

	set_rw_ddir(td, io_u);

	/*
	 * fsync() or fdatasync() or trim etc, we are done
	 */
	if (!ddir_rw(io_u->ddir))
		goto out;

	/*
	 * When the file is zoned, zone_range is always positive
	 */
	if (td->o.zone_range)
		__fill_io_u_zone(td, io_u);

	/*
	 * No log, let the seq/rand engine retrieve the next buflen and
	 * position.
	 */
	if (get_next_offset(td, io_u, &is_random)) {
		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
		return 1;
	}

	io_u->buflen = get_next_buflen(td, io_u, is_random);
	if (!io_u->buflen) {
		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
		return 1;
	}

	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%lx exceeds file size=0x%llx\n",
			io_u,
			(unsigned long long) io_u->offset, io_u->buflen,
			(unsigned long long) io_u->file->real_file_size);
		return 1;
	}

	/*
	 * mark entry before potentially trimming io_u
	 */
	if (td_random(td) && file_randommap(td, io_u->file))
		mark_random_map(td, io_u);

out:
	dprint_io_u(io_u, "fill");
	td->zone_bytes += io_u->buflen;
	return 0;
}

static void __io_u_mark_map(unsigned int *map, unsigned int nr)
{
	int idx = 0;

	switch (nr) {
	default:
		idx = 6;
		break;
	case 33 ... 64:
		idx = 5;
		break;
	case 17 ... 32:
		idx = 4;
		break;
	case 9 ... 16:
		idx = 3;
		break;
	case 5 ... 8:
		idx = 2;
		break;
	case 1 ... 4:
		idx = 1;
	case 0:
		break;
	}

	map[idx]++;
}

void io_u_mark_submit(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_submit, nr);
	td->ts.total_submit++;
}

void io_u_mark_complete(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_complete, nr);
	td->ts.total_complete++;
}

void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
	int idx = 0;

	switch (td->cur_depth) {
	default:
		idx = 6;
		break;
	case 32 ... 63:
		idx = 5;
		break;
	case 16 ... 31:
		idx = 4;
		break;
	case 8 ... 15:
		idx = 3;
		break;
	case 4 ... 7:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 1:
		break;
	}

	td->ts.io_u_map[idx] += nr;
}

static void io_u_mark_lat_nsec(struct thread_data *td, unsigned long long nsec)
{
	int idx = 0;

	assert(nsec < 1000);

	switch (nsec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_N_NR);
	td->ts.io_u_lat_n[idx]++;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long long usec)
{
	int idx = 0;

	assert(usec < 1000 && usec >= 1);

	switch (usec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_U_NR);
	td->ts.io_u_lat_u[idx]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long long msec)
{
	int idx = 0;

	assert(msec >= 1);

	switch (msec) {
	default:
		idx = 11;
		break;
	case 1000 ... 1999:
		idx = 10;
		break;
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_M_NR);
	td->ts.io_u_lat_m[idx]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long long nsec)
{
	if (nsec < 1000)
		io_u_mark_lat_nsec(td, nsec);
	else if (nsec < 1000000)
		io_u_mark_lat_usec(td, nsec / 1000);
	else
		io_u_mark_lat_msec(td, nsec / 1000000);
}

static unsigned int __get_next_fileno_rand(struct thread_data *td)
{
	unsigned long fileno;

	if (td->o.file_service_type == FIO_FSERVICE_RANDOM) {
		uint64_t frand_max = rand_max(&td->next_file_state);
		unsigned long r;

		r = __rand(&td->next_file_state);
		return (unsigned int) ((double) td->o.nr_files
				* (r / (frand_max + 1.0)));
	}

	if (td->o.file_service_type == FIO_FSERVICE_ZIPF)
		fileno = zipf_next(&td->next_file_zipf);
	else if (td->o.file_service_type == FIO_FSERVICE_PARETO)
		fileno = pareto_next(&td->next_file_zipf);
	else if (td->o.file_service_type == FIO_FSERVICE_GAUSS)
		fileno = gauss_next(&td->next_file_gauss);
	else {
		log_err("fio: bad file service type: %d\n", td->o.file_service_type);
		assert(0);
		return 0;
	}

	return fileno >> FIO_FSERVICE_SHIFT;
}

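/*
 * Note: the zipf/pareto/gauss file pickers above appear to generate
 * indexes over nr_files << FIO_FSERVICE_SHIFT values for finer-grained
 * distributions, hence the shift back down before the result is used
 * as a file number.
 */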
/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td,
					   enum fio_file_flags goodf,
					   enum fio_file_flags badf)
{
	struct fio_file *f;
	int fno;

	do {
		int opened = 0;

		fno = __get_next_fileno_rand(td);

		f = td->files[fno];
		if (fio_file_done(f))
			continue;

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err)
				continue;
			opened = 1;
		}

		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
			dprint(FD_FILE, "get_next_file_rand: %p\n", f);
			return f;
		}
		if (opened)
			td_io_close_file(td, f);
	} while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
					 int badf)
{
	unsigned int old_next_file = td->next_file;
	struct fio_file *f;

	do {
		int opened = 0;

		f = td->files[td->next_file];

		td->next_file++;
		if (td->next_file >= td->o.nr_files)
			td->next_file = 0;

		dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
		if (fio_file_done(f)) {
			f = NULL;
			continue;
		}

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err) {
				dprint(FD_FILE, "error %d on open of %s\n",
					err, f->file_name);
				f = NULL;
				continue;
			}
			opened = 1;
		}

		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
							f->flags);
		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
			break;

		if (opened)
			td_io_close_file(td, f);

		f = NULL;
	} while (td->next_file != old_next_file);

	dprint(FD_FILE, "get_next_file_rr: %p\n", f);
	return f;
}

static struct fio_file *__get_next_file(struct thread_data *td)
{
	struct fio_file *f;

	assert(td->o.nr_files <= td->files_index);

	if (td->nr_done_files >= td->o.nr_files) {
		dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
				" nr_files=%d\n", td->nr_open_files,
						  td->nr_done_files,
						  td->o.nr_files);
		return NULL;
	}

	f = td->file_service_file;
	if (f && fio_file_open(f) && !fio_file_closing(f)) {
		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
			goto out;
		if (td->file_service_left--)
			goto out;
	}

	if (td->o.file_service_type == FIO_FSERVICE_RR ||
	    td->o.file_service_type == FIO_FSERVICE_SEQ)
		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
	else
		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);

	if (IS_ERR(f))
		return f;

	td->file_service_file = f;
	td->file_service_left = td->file_service_nr - 1;
out:
	if (f)
		dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
	else
		dprint(FD_FILE, "get_next_file: NULL\n");
	return f;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->get_next_file)
			return ops->get_next_file(td);
	}

	return __get_next_file(td);
}

static long set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f;

	do {
		f = get_next_file(td);
		if (IS_ERR_OR_NULL(f))
			return PTR_ERR(f);

		io_u->file = f;
		get_file(f);

		if (!fill_io_u(td, io_u))
			break;

		put_file_log(td, f);
		td_io_close_file(td, f);
		io_u->file = NULL;
		if (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)
			fio_file_reset(td, f);
		else {
			fio_file_set_done(f);
			td->nr_done_files++;
			dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
						td->nr_done_files, td->o.nr_files);
		}
	} while (1);

	return 0;
}

static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
		      unsigned long long tnsec, unsigned long long max_nsec)
{
	if (!td->error)
		log_err("fio: latency of %llu nsec exceeds specified max (%llu nsec)\n", tnsec, max_nsec);
	td_verror(td, ETIMEDOUT, "max latency exceeded");
	icd->error = ETIMEDOUT;
}

static void lat_new_cycle(struct thread_data *td)
{
	fio_gettime(&td->latency_ts, NULL);
	td->latency_ios = ddir_rw_sum(td->io_blocks);
	td->latency_failed = 0;
}

/*
 * We had an IO outside the latency target. Reduce the queue depth. If we
 * are at QD=1, then it's time to give up.
 */
static bool __lat_target_failed(struct thread_data *td)
{
	if (td->latency_qd == 1)
		return true;

	td->latency_qd_high = td->latency_qd;

	if (td->latency_qd == td->latency_qd_low)
		td->latency_qd_low--;

	td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;

	dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * When we ramp QD down, quiesce existing IO to prevent
	 * a storm of ramp downs due to pending higher depth.
	 */
	io_u_quiesce(td);
	lat_new_cycle(td);
	return false;
}

static bool lat_target_failed(struct thread_data *td)
{
	if (td->o.latency_percentile.u.f == 100.0)
		return __lat_target_failed(td);

	td->latency_failed++;
	return false;
}

void lat_target_init(struct thread_data *td)
{
	td->latency_end_run = 0;

	if (td->o.latency_target) {
		dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target);
		fio_gettime(&td->latency_ts, NULL);
		td->latency_qd = 1;
		td->latency_qd_high = td->o.iodepth;
		td->latency_qd_low = 1;
		td->latency_ios = ddir_rw_sum(td->io_blocks);
	} else
		td->latency_qd = td->o.iodepth;
}

void lat_target_reset(struct thread_data *td)
{
	if (!td->latency_end_run)
		lat_target_init(td);
}

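/*
 * QD bisection example: with iodepth=32 and the latency target failing
 * at the full depth, __lat_target_failed() above sets qd_high = 32 and
 * the next trial depth becomes (32 + 1) / 2 = 16. On success,
 * lat_target_success() below raises qd_low and bisects upward again,
 * converging on the highest depth that still meets the target.
 */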
static void lat_target_success(struct thread_data *td)
{
	const unsigned int qd = td->latency_qd;
	struct thread_options *o = &td->o;

	td->latency_qd_low = td->latency_qd;

	/*
	 * If we haven't failed yet, we double up to a failing value instead
	 * of bisecting from highest possible queue depth. If we have set
	 * a limit other than td->o.iodepth, bisect between that.
	 */
	if (td->latency_qd_high != o->iodepth)
		td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2;
	else
		td->latency_qd *= 2;

	if (td->latency_qd > o->iodepth)
		td->latency_qd = o->iodepth;

	dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * Same as last one, we are done. Let it run a latency cycle, so
	 * we get only the results from the targeted depth.
	 */
	if (td->latency_qd == qd) {
		if (td->latency_end_run) {
			dprint(FD_RATE, "We are done\n");
			td->done = 1;
		} else {
			dprint(FD_RATE, "Quiesce and final run\n");
			io_u_quiesce(td);
			td->latency_end_run = 1;
			reset_all_stats(td);
			reset_io_stats(td);
		}
	}

	lat_new_cycle(td);
}

/*
 * Check if we can bump the queue depth
 */
void lat_target_check(struct thread_data *td)
{
	uint64_t usec_window;
	uint64_t ios;
	double success_ios;

	usec_window = utime_since_now(&td->latency_ts);
	if (usec_window < td->o.latency_window)
		return;

	ios = ddir_rw_sum(td->io_blocks) - td->latency_ios;
	success_ios = (double) (ios - td->latency_failed) / (double) ios;
	success_ios *= 100.0;

	dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f);

	if (success_ios >= td->o.latency_percentile.u.f)
		lat_target_success(td);
	else
		__lat_target_failed(td);
}

/*
 * If latency target is enabled, we might be ramping up or down and not
 * using the full queue depth available.
 */
bool queue_full(const struct thread_data *td)
{
	const int qempty = io_u_qempty(&td->io_u_freelist);

	if (qempty)
		return true;
	if (!td->o.latency_target)
		return false;

	return td->cur_depth >= td->latency_qd;
}

struct io_u *__get_io_u(struct thread_data *td)
{
	struct io_u *io_u = NULL;

	if (td->stop_io)
		return NULL;

	td_io_u_lock(td);

again:
	if (!io_u_rempty(&td->io_u_requeues))
		io_u = io_u_rpop(&td->io_u_requeues);
	else if (!queue_full(td)) {
		io_u = io_u_qpop(&td->io_u_freelist);

		io_u->file = NULL;
		io_u->buflen = 0;
		io_u->resid = 0;
		io_u->end_io = NULL;
	}

	if (io_u) {
		assert(io_u->flags & IO_U_F_FREE);
		io_u_clear(td, io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
				 IO_U_F_TRIMMED | IO_U_F_BARRIER |
				 IO_U_F_VER_LIST);

		io_u->error = 0;
		io_u->acct_ddir = -1;
		td->cur_depth++;
		assert(!(td->flags & TD_F_CHILD));
		io_u_set(td, io_u, IO_U_F_IN_CUR_DEPTH);
		io_u->ipo = NULL;
	} else if (td_async_processing(td)) {
		/*
		 * We ran out, wait for async verify threads to finish and
		 * return one
		 */
		assert(!(td->flags & TD_F_CHILD));
		assert(!pthread_cond_wait(&td->free_cond, &td->io_u_lock));
		goto again;
	}

	td_io_u_unlock(td);
	return io_u;
}

static bool check_get_trim(struct thread_data *td, struct io_u *io_u)
{
	if (!(td->flags & TD_F_TRIM_BACKLOG))
		return false;
	if (!td->trim_entries)
		return false;

	if (td->trim_batch) {
		td->trim_batch--;
		if (get_next_trim(td, io_u))
			return true;
	} else if (!(td->io_hist_len % td->o.trim_backlog) &&
		   td->last_ddir != DDIR_READ) {
		td->trim_batch = td->o.trim_batch;
		if (!td->trim_batch)
			td->trim_batch = td->o.trim_backlog;
		if (get_next_trim(td, io_u))
			return true;
	}

	return false;
}

static bool check_get_verify(struct thread_data *td, struct io_u *io_u)
{
	if (!(td->flags & TD_F_VER_BACKLOG))
		return false;

	if (td->io_hist_len) {
		int get_verify = 0;

		if (td->verify_batch)
			get_verify = 1;
		else if (!(td->io_hist_len % td->o.verify_backlog) &&
			 td->last_ddir != DDIR_READ) {
			td->verify_batch = td->o.verify_batch;
			if (!td->verify_batch)
				td->verify_batch = td->o.verify_backlog;
			get_verify = 1;
		}

		if (get_verify && !get_next_verify(td, io_u)) {
			td->verify_batch--;
			return true;
		}
	}

	return false;
}

/*
 * Fill offset and start time into the buffer content, to prevent too
 * easily compressible data for simple de-dupe attempts. Do this for every
 * 512b block in the range, since that should be the smallest block size
 * we can expect from a device.
 */
static void small_content_scramble(struct io_u *io_u)
{
	unsigned int i, nr_blocks = io_u->buflen >> 9;
	unsigned int offset;
	uint64_t boffset, *iptr;
	char *p;

	if (!nr_blocks)
		return;

	p = io_u->xfer_buf;
	boffset = io_u->offset;

	if (io_u->buf_filled_len)
		io_u->buf_filled_len = 0;

	/*
	 * Generate random index between 0..7. We do chunks of 512b, if
	 * we assume a cacheline is 64 bytes, then we have 8 of those.
	 * Scramble content within the blocks in the same cacheline to
	 * speed things up.
	 */
	offset = (io_u->start_time.tv_nsec ^ boffset) & 7;

	for (i = 0; i < nr_blocks; i++) {
		/*
		 * Fill offset into start of cacheline, time into end
		 * of cacheline
		 */
		iptr = (void *) p + (offset << 6);
		*iptr = boffset;

		iptr = (void *) p + 64 - 2 * sizeof(uint64_t);
		iptr[0] = io_u->start_time.tv_sec;
		iptr[1] = io_u->start_time.tv_nsec;

		p += 512;
		boffset += 512;
	}
}

/*
 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
 * etc. The returned io_u is fully ready to be prepped and submitted.
 */
struct io_u *get_io_u(struct thread_data *td)
{
	struct fio_file *f;
	struct io_u *io_u;
	int do_scramble = 0;
	long ret = 0;

	io_u = __get_io_u(td);
	if (!io_u) {
		dprint(FD_IO, "__get_io_u failed\n");
		return NULL;
	}

	if (check_get_verify(td, io_u))
		goto out;
	if (check_get_trim(td, io_u))
		goto out;

	/*
	 * from a requeue, io_u already setup
	 */
	if (io_u->file)
		goto out;

	/*
	 * If using an iolog, grab next piece if any available.
	 */
	if (td->flags & TD_F_READ_IOLOG) {
		if (read_iolog_get(td, io_u))
			goto err_put;
	} else if (set_io_u_file(td, io_u)) {
		ret = -EBUSY;
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	f = io_u->file;
	if (!f) {
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	assert(fio_file_open(f));

	if (ddir_rw(io_u->ddir)) {
		if (!io_u->buflen && !td_ioengine_flagged(td, FIO_NOIO)) {
			dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
			goto err_put;
		}

		f->last_start[io_u->ddir] = io_u->offset;
		f->last_pos[io_u->ddir] = io_u->offset + io_u->buflen;

		if (io_u->ddir == DDIR_WRITE) {
			if (td->flags & TD_F_REFILL_BUFFERS) {
				io_u_fill_buffer(td, io_u,
					td->o.min_bs[DDIR_WRITE],
					io_u->buflen);
			} else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
				   !(td->flags & TD_F_COMPRESS))
				do_scramble = 1;
			if (td->flags & TD_F_VER_NONE) {
				populate_verify_io_u(td, io_u);
				do_scramble = 0;
			}
		} else if (io_u->ddir == DDIR_READ) {
			/*
			 * Reset the buf_filled parameters so next time if the
			 * buffer is used for writes it is refilled.
			 */
			io_u->buf_filled_len = 0;
		}
	}

	/*
	 * Set io data pointers.
	 */
	io_u->xfer_buf = io_u->buf;
	io_u->xfer_buflen = io_u->buflen;

out:
	assert(io_u->file);
	if (!td_io_prep(td, io_u)) {
		if (!td->o.disable_lat)
			fio_gettime(&io_u->start_time, NULL);

		if (do_scramble)
			small_content_scramble(io_u);

		return io_u;
	}
err_put:
	dprint(FD_IO, "get_io_u failed\n");
	put_io_u(td, io_u);
	return ERR_PTR(ret);
}

static void __io_u_log_error(struct thread_data *td, struct io_u *io_u)
{
	enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);

	if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
		return;

	log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%lu\n",
		io_u->file ? " on file " : "",
		io_u->file ? io_u->file->file_name : "",
		strerror(io_u->error),
		io_ddir_name(io_u->ddir),
		io_u->offset, io_u->xfer_buflen);

	if (td->io_ops->errdetails) {
		char *err = td->io_ops->errdetails(io_u);

		log_err("fio: %s\n", err);
		free(err);
	}

	if (!td->error)
		td_verror(td, io_u->error, "io_u error");
}

void io_u_log_error(struct thread_data *td, struct io_u *io_u)
{
	__io_u_log_error(td, io_u);
	if (td->parent)
		__io_u_log_error(td->parent, io_u);
}

static inline bool gtod_reduce(struct thread_data *td)
{
	return (td->o.disable_clat && td->o.disable_slat && td->o.disable_bw)
		|| td->o.gtod_reduce;
}

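/*
 * Accounting note: total latency ('lat') below is measured from
 * io_u->start_time (set in get_io_u()), while completion latency
 * ('clat') is measured from io_u->issue_time, i.e. from submission to
 * completion. Both are in nanoseconds.
 */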
c8eeb9df
JA
1876static void account_io_completion(struct thread_data *td, struct io_u *io_u,
1877 struct io_completion_data *icd,
1878 const enum fio_ddir idx, unsigned int bytes)
1879{
a9da8ab2 1880 const int no_reduce = !gtod_reduce(td);
d6bb626e 1881 unsigned long long llnsec = 0;
c8eeb9df 1882
75dc383e
JA
1883 if (td->parent)
1884 td = td->parent;
1885
132b1ee4 1886 if (!td->o.stats || td_ioengine_flagged(td, FIO_NOSTATS))
8243be59
JA
1887 return;
1888
a9da8ab2 1889 if (no_reduce)
d6bb626e 1890 llnsec = ntime_since(&io_u->issue_time, &icd->time);
c8eeb9df
JA
1891
1892 if (!td->o.disable_lat) {
c3a32714 1893 unsigned long long tnsec;
c8eeb9df 1894
d6bb626e
VF
1895 tnsec = ntime_since(&io_u->start_time, &icd->time);
1896 add_lat_sample(td, idx, tnsec, bytes, io_u->offset);
15501535 1897
d4afedfd
JA
1898 if (td->flags & TD_F_PROFILE_OPS) {
1899 struct prof_io_ops *ops = &td->prof_io_ops;
1900
1901 if (ops->io_u_lat)
c3a32714 1902 icd->error = ops->io_u_lat(td, tnsec);
d4afedfd
JA
1903 }
1904
c3a32714
JA
1905 if (td->o.max_latency && tnsec > td->o.max_latency)
1906 lat_fatal(td, icd, tnsec, td->o.max_latency);
1907 if (td->o.latency_target && tnsec > td->o.latency_target) {
3e260a46 1908 if (lat_target_failed(td))
c3a32714 1909 lat_fatal(td, icd, tnsec, td->o.latency_target);
15501535 1910 }
c8eeb9df
JA
1911 }
1912
a47591e4
JA
1913 if (ddir_rw(idx)) {
1914 if (!td->o.disable_clat) {
d6bb626e
VF
1915 add_clat_sample(td, idx, llnsec, bytes, io_u->offset);
1916 io_u_mark_latency(td, llnsec);
a47591e4 1917 }
c8eeb9df 1918
a47591e4 1919 if (!td->o.disable_bw && per_unit_log(td->bw_log))
d6bb626e 1920 add_bw_sample(td, io_u, bytes, llnsec);
c8eeb9df 1921
a47591e4
JA
1922 if (no_reduce && per_unit_log(td->iops_log))
1923 add_iops_sample(td, io_u, bytes);
1924 }
66347cfa
DE
1925
1926 if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM) {
1927 uint32_t *info = io_u_block_info(td, io_u);
1928 if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
1929 if (io_u->ddir == DDIR_TRIM) {
1930 *info = BLOCK_INFO(BLOCK_STATE_TRIMMED,
1931 BLOCK_INFO_TRIMS(*info) + 1);
1932 } else if (io_u->ddir == DDIR_WRITE) {
1933 *info = BLOCK_INFO_SET_STATE(BLOCK_STATE_WRITTEN,
1934 *info);
1935 }
1936 }
1937 }
c8eeb9df
JA
1938}
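
/*
 * For reference, the timestamps sampled above relate as follows
 * (all in nanoseconds):
 *
 *	slat = issue_time - start_time	(recorded in io_u_queued())
 *	clat = now - issue_time		(llnsec above)
 *	lat  = now - start_time		(tnsec above)
 *
 * so for any given io_u, lat == slat + clat.
 */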
1939
94a6e1bb
JA
1940static void file_log_write_comp(const struct thread_data *td, struct fio_file *f,
1941 uint64_t offset, unsigned int bytes)
1942{
1943 int idx;
1944
639ad1ea
JA
1945 if (!f)
1946 return;
1947
94a6e1bb
JA
1948 if (f->first_write == -1ULL || offset < f->first_write)
1949 f->first_write = offset;
1950 if (f->last_write == -1ULL || ((offset + bytes) > f->last_write))
1951 f->last_write = offset + bytes;
1952
1953 if (!f->last_write_comp)
1954 return;
1955
1956 idx = f->last_write_idx++;
1957 f->last_write_comp[idx] = offset;
1958 if (f->last_write_idx == td->o.iodepth)
1959 f->last_write_idx = 0;
1960}
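
/*
 * Sketch (illustrative, not part of fio): last_write_comp is a ring
 * of td->o.iodepth entries, so a consumer wanting the most recent
 * write completions in oldest-to-newest order would walk it starting
 * at last_write_idx:
 *
 *	for (i = 0; i < td->o.iodepth; i++) {
 *		unsigned int slot = (f->last_write_idx + i) % td->o.iodepth;
 *		uint64_t off = f->last_write_comp[slot];
 *
 *		...
 *	}
 */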
1961
f8b0bd10 1962static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
97601024 1963 struct io_completion_data *icd)
10ba535a 1964{
f8b0bd10
JA
1965 struct io_u *io_u = *io_u_ptr;
1966 enum fio_ddir ddir = io_u->ddir;
1967 struct fio_file *f = io_u->file;
10ba535a 1968
e5f9a813 1969 dprint_io_u(io_u, "complete");
2ba1c290 1970
0c6e7517 1971 assert(io_u->flags & IO_U_F_FLIGHT);
1651e431 1972 io_u_clear(td, io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
f9401285
JA
1973
1974 /*
1975 * Mark IO ok to verify
1976 */
1977 if (io_u->ipo) {
890b6656
JA
1978 /*
1979 * Remove errored entry from the verification list
1980 */
1981 if (io_u->error)
1982 unlog_io_piece(td, io_u);
1983 else {
1984 io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
1985 write_barrier();
1986 }
f9401285
JA
1987 }
1988
f8b0bd10 1989 if (ddir_sync(ddir)) {
87dc1ab1 1990 td->last_was_sync = 1;
44f29692
JA
1991 if (f) {
1992 f->first_write = -1ULL;
1993 f->last_write = -1ULL;
1994 }
87dc1ab1
JA
1995 return;
1996 }
1997
1998 td->last_was_sync = 0;
f8b0bd10 1999 td->last_ddir = ddir;
87dc1ab1 2000
f8b0bd10 2001 if (!io_u->error && ddir_rw(ddir)) {
10ba535a 2002 unsigned int bytes = io_u->buflen - io_u->resid;
b29ee5b3 2003 int ret;
10ba535a 2004
f8b0bd10 2005 td->io_blocks[ddir]++;
f8b0bd10 2006 td->io_bytes[ddir] += bytes;
ae2fafc8 2007
e1c325d2
JA
2008 if (!(io_u->flags & IO_U_F_VER_LIST)) {
2009 td->this_io_blocks[ddir]++;
f8b0bd10 2010 td->this_io_bytes[ddir] += bytes;
e1c325d2 2011 }
f8b0bd10 2012
639ad1ea 2013 if (ddir == DDIR_WRITE)
94a6e1bb 2014 file_log_write_comp(td, f, io_u->offset, bytes);
44f29692 2015
6b1190fd 2016 if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
50a8ce86 2017 td->runstate == TD_VERIFYING))
f8b0bd10 2018 account_io_completion(td, io_u, icd, ddir, bytes);
40e1a6f0 2019
f8b0bd10 2020 icd->bytes_done[ddir] += bytes;
3af6ef39 2021
d7762cf8 2022 if (io_u->end_io) {
f8b0bd10
JA
2023 ret = io_u->end_io(td, io_u_ptr);
2024 io_u = *io_u_ptr;
3af6ef39
JA
2025 if (ret && !icd->error)
2026 icd->error = ret;
2027 }
ff58fced 2028 } else if (io_u->error) {
10ba535a 2029 icd->error = io_u->error;
5451792e
JA
2030 io_u_log_error(td, io_u);
2031 }
8b28bd41 2032 if (icd->error) {
f8b0bd10
JA
2033 enum error_type_bit eb = td_error_type(ddir, icd->error);
2034
8b28bd41
DM
2035 if (!td_non_fatal_error(td, eb, icd->error))
2036 return;
f8b0bd10 2037
f2bba182
RR
2038 /*
2039 * If the error is non-fatal, add it to the error count
2040 * and clear all errors.
2041 */
2042 update_error_count(td, icd->error);
2043 td_clear_error(td);
2044 icd->error = 0;
f8b0bd10
JA
2045 if (io_u)
2046 io_u->error = 0;
f2bba182 2047 }
10ba535a
JA
2048}
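
/*
 * Note on the io_u_ptr indirection above: an end_io() callback may
 * consume or replace the io_u, so io_completed() stores the possibly
 * updated pointer back through io_u_ptr. Callers must re-check it
 * before calling put_io_u(), as ios_completed() below does.
 */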
2049
9520ebb9
JA
2050static void init_icd(struct thread_data *td, struct io_completion_data *icd,
2051 int nr)
10ba535a 2052{
6eaf09d6 2053 int ddir;
aba6c951
JA
2054
2055 if (!gtod_reduce(td))
9520ebb9 2056 fio_gettime(&icd->time, NULL);
02bcaa8c 2057
3af6ef39
JA
2058 icd->nr = nr;
2059
10ba535a 2060 icd->error = 0;
c1f50f76 2061 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
6eaf09d6 2062 icd->bytes_done[ddir] = 0;
36167d82
JA
2063}
2064
97601024
JA
2065static void ios_completed(struct thread_data *td,
2066 struct io_completion_data *icd)
36167d82
JA
2067{
2068 struct io_u *io_u;
2069 int i;
2070
10ba535a
JA
2071 for (i = 0; i < icd->nr; i++) {
2072 io_u = td->io_ops->event(td, i);
2073
f8b0bd10 2074 io_completed(td, &io_u, icd);
e8462bd8 2075
f8b0bd10 2076 if (io_u)
e8462bd8 2077 put_io_u(td, io_u);
10ba535a
JA
2078 }
2079}
97601024 2080
e7e6cfb4
JA
2081/*
2082 * Complete a single io_u for the sync engines.
2083 */
55312f9f 2084int io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
97601024
JA
2085{
2086 struct io_completion_data icd;
55312f9f 2087 int ddir;
97601024 2088
9520ebb9 2089 init_icd(td, &icd, 1);
f8b0bd10 2090 io_completed(td, &io_u, &icd);
e8462bd8 2091
f8b0bd10 2092 if (io_u)
e8462bd8 2093 put_io_u(td, io_u);
97601024 2094
581e7141
JA
2095 if (icd.error) {
2096 td_verror(td, icd.error, "io_u_sync_complete");
2097 return -1;
2098 }
97601024 2099
c1f50f76 2100 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
55312f9f 2101 td->bytes_done[ddir] += icd.bytes_done[ddir];
581e7141
JA
2102
2103 return 0;
97601024
JA
2104}
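
/*
 * Usage sketch (illustrative, not fio's actual dispatch code): a sync
 * engine completes inline, so the submit path accounts for the io_u
 * right after queueing it:
 *
 *	ret = td_io_queue(td, io_u);
 *	if (ret == FIO_Q_COMPLETED) {
 *		if (io_u_sync_complete(td, io_u) < 0)
 *			...bail out, td_verror() has been called...
 *	}
 */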
2105
e7e6cfb4
JA
2106/*
2107 * Called to complete at least min_evts IOs for the async engines.
2108 */
55312f9f 2109int io_u_queued_complete(struct thread_data *td, int min_evts)
97601024 2110{
97601024 2111 struct io_completion_data icd;
00de55ef 2112 struct timespec *tvp = NULL;
55312f9f 2113 int ret, ddir;
4d06a338 2114 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
97601024 2115
12bb8569 2116 dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts);
b271fe62 2117
4950421a 2118 if (!min_evts)
00de55ef 2119 tvp = &ts;
5fb4b366
RE
2120 else if (min_evts > td->cur_depth)
2121 min_evts = td->cur_depth;
97601024 2122
82407585
RP
2123 /* No need to worry: td_io_getevents() fixes min and max if
2124 * they are set incorrectly. */
2125 ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete_max, tvp);
97601024 2126 if (ret < 0) {
e1161c32 2127 td_verror(td, -ret, "td_io_getevents");
97601024
JA
2128 return ret;
2129 } else if (!ret)
2130 return ret;
2131
9520ebb9 2132 init_icd(td, &icd, ret);
97601024 2133 ios_completed(td, &icd);
581e7141
JA
2134 if (icd.error) {
2135 td_verror(td, icd.error, "io_u_queued_complete");
2136 return -1;
2137 }
97601024 2138
c1f50f76 2139 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
55312f9f 2140 td->bytes_done[ddir] += icd.bytes_done[ddir];
581e7141 2141
0d593542 2142 return ret;
97601024 2143}
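
/*
 * Semantics of min_evts above, for reference:
 *
 *	io_u_queued_complete(td, 0);	(poll: zero timeout, reap whatever
 *					 has completed)
 *	io_u_queued_complete(td, n);	(wait for at least n events, capped
 *					 at the current queue depth)
 *
 * The return value is the number of events reaped, or negative on error.
 */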
7e77dd02
JA
2144
2145/*
2146 * Called when an io_u is actually queued, to record the submission latency.
2147 */
2148void io_u_queued(struct thread_data *td, struct io_u *io_u)
2149{
8243be59 2150 if (!td->o.disable_slat && ramp_time_over(td) && td->o.stats) {
9520ebb9 2151 unsigned long slat_time;
7e77dd02 2152
d6bb626e 2153 slat_time = ntime_since(&io_u->start_time, &io_u->issue_time);
75dc383e
JA
2154
2155 if (td->parent)
2156 td = td->parent;
2157
ae588852
JA
2158 add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
2159 io_u->offset);
9520ebb9 2160 }
7e77dd02 2161}
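
/*
 * For reference: this runs in the queueing path (typically from
 * td_io_queue()) once the engine has accepted the io_u, so
 *
 *	slat = issue_time - start_time
 *
 * i.e. the time from io_u setup to hand-off to the engine.
 */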
433afcb4 2162
5c94b008
JA
2163/*
2164 * Decide whether to reuse the previous buffer seed, if dedupe is enabled
2165 */
9451b93e 2166static struct frand_state *get_buf_state(struct thread_data *td)
5c94b008
JA
2167{
2168 unsigned int v;
5c94b008
JA
2169
2170 if (!td->o.dedupe_percentage)
2171 return &td->buf_state;
732eedd0 2172 else if (td->o.dedupe_percentage == 100) {
9451b93e
JA
2173 frand_copy(&td->buf_state_prev, &td->buf_state);
2174 return &td->buf_state;
732eedd0 2175 }
5c94b008 2176
36dd3379 2177 v = rand32_between(&td->dedupe_state, 1, 100);
5c94b008
JA
2178
2179 if (v <= td->o.dedupe_percentage)
2180 return &td->buf_state_prev;
2181
2182 return &td->buf_state;
2183}
2184
9451b93e 2185static void save_buf_state(struct thread_data *td, struct frand_state *rs)
5c94b008 2186{
9451b93e
JA
2187 if (td->o.dedupe_percentage == 100)
2188 frand_copy(rs, &td->buf_state_prev);
2189 else if (rs == &td->buf_state)
5c94b008
JA
2190 frand_copy(&td->buf_state_prev, rs);
2191}
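
/*
 * Example of the effect of the two helpers above (value illustrative):
 * with dedupe_percentage=30, roughly 30 of every 100 buffer fills draw
 * from buf_state_prev, replaying the previous buffer's random stream
 * and producing an identical, dedupable buffer.
 */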
2192
cc86c395
JA
2193void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
2194 unsigned int max_bs)
5973cafb 2195{
d1af2894
JA
2196 struct thread_options *o = &td->o;
2197
15600335
JA
2198 if (o->mem_type == MEM_CUDA_MALLOC)
2199 return;
03553853 2200
4eff3e57 2201 if (o->compress_percentage || o->dedupe_percentage) {
9c42684e 2202 unsigned int perc = td->o.compress_percentage;
5c94b008 2203 struct frand_state *rs;
1066358a 2204 unsigned int left = max_bs;
1e7f82e2 2205 unsigned int this_write;
5c94b008 2206
1066358a 2207 do {
9451b93e 2208 rs = get_buf_state(td);
9c42684e 2209
1066358a 2210 min_write = min(min_write, left);
f97a43a1 2211
1066358a 2212 if (perc) {
4b157ac6
JA
2213 this_write = min_not_zero(min_write,
2214 td->o.compress_chunk);
1e7f82e2
JA
2215
2216 fill_random_buf_percentage(rs, buf, perc,
2217 this_write, this_write,
2218 o->buffer_pattern,
2219 o->buffer_pattern_bytes);
2220 } else {
1066358a 2221 fill_random_buf(rs, buf, min_write);
1e7f82e2
JA
2222 this_write = min_write;
2223 }
1066358a 2224
1e7f82e2
JA
2225 buf += this_write;
2226 left -= this_write;
9451b93e 2227 save_buf_state(td, rs);
1066358a 2228 } while (left);
d1af2894
JA
2229 } else if (o->buffer_pattern_bytes)
2230 fill_buffer_pattern(td, buf, max_bs);
999d245e 2231 else if (o->zero_buffers)
cc86c395 2232 memset(buf, 0, max_bs);
999d245e 2233 else
9451b93e 2234 fill_random_buf(get_buf_state(td), buf, max_bs);
cc86c395
JA
2235}
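
/*
 * Example of the compress path above (settings illustrative): with
 * buffer_compress_percentage=50 and buffer_compress_chunk=4096, each
 * loop iteration fills one 4k chunk via fill_random_buf_percentage()
 * so that about half of it is compressible data.
 */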
2236
2237/*
2238 * "randomly" fill the buffer contents
2239 */
2240void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
2241 unsigned int min_write, unsigned int max_bs)
2242{
2243 io_u->buf_filled_len = 0;
2244 fill_io_buffer(td, io_u->buf, min_write, max_bs);
5973cafb 2245}
e2c75fc4
TK
2246
2247static int do_sync_file_range(const struct thread_data *td,
2248 struct fio_file *f)
2249{
2250 off64_t offset, nbytes;
2251
2252 offset = f->first_write;
2253 nbytes = f->last_write - f->first_write;
2254
2255 if (!nbytes)
2256 return 0;
2257
2258 return sync_file_range(f->fd, offset, nbytes, td->o.sync_file_range);
2259}
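
/*
 * For reference, td->o.sync_file_range is an OR of the standard Linux
 * flags, so with sync_file_range=wait_before,write,wait_after in the
 * job file the call above amounts to (sketch):
 *
 *	sync_file_range(f->fd, offset, nbytes,
 *			SYNC_FILE_RANGE_WAIT_BEFORE |
 *			SYNC_FILE_RANGE_WRITE |
 *			SYNC_FILE_RANGE_WAIT_AFTER);
 */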
2260
2261int do_io_u_sync(const struct thread_data *td, struct io_u *io_u)
2262{
2263 int ret;
2264
2265 if (io_u->ddir == DDIR_SYNC) {
2266 ret = fsync(io_u->file->fd);
2267 } else if (io_u->ddir == DDIR_DATASYNC) {
2268#ifdef CONFIG_FDATASYNC
2269 ret = fdatasync(io_u->file->fd);
2270#else
2271 ret = io_u->xfer_buflen;
2272 io_u->error = EINVAL;
2273#endif
2274 } else if (io_u->ddir == DDIR_SYNC_FILE_RANGE)
2275 ret = do_sync_file_range(td, io_u->file);
2276 else {
2277 ret = io_u->xfer_buflen;
2278 io_u->error = EINVAL;
2279 }
2280
2281 if (ret < 0)
2282 io_u->error = errno;
2283
2284 return ret;
2285}
2286
2287int do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
2288{
2289#ifndef FIO_HAVE_TRIM
2290 io_u->error = EINVAL;
2291 return 0;
2292#else
2293 struct fio_file *f = io_u->file;
2294 int ret;
2295
496b1f9e 2296 ret = os_trim(f, io_u->offset, io_u->xfer_buflen);
e2c75fc4
TK
2297 if (!ret)
2298 return io_u->xfer_buflen;
2299
2300 io_u->error = ret;
2301 return 0;
2302#endif
2303}
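
/*
 * Sketch of what os_trim() above typically expands to for a Linux
 * block device (illustrative; the real definition lives in the OS
 * headers):
 *
 *	uint64_t range[2] = { offset, len };
 *
 *	if (ioctl(f->fd, BLKDISCARD, range) < 0)
 *		return errno;
 *	return 0;
 */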