/*
 * fio.git / io_u.c
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"
#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
#include "lib/axmap.h"
#include "err.h"

struct io_completion_data {
	int nr;				/* input */

	int error;			/* output */
	uint64_t bytes_done[DDIR_RWDIR_CNT];	/* output */
	struct timeval time;		/* output */
};

/*
 * The ->io_axmap contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static int random_map_free(struct fio_file *f, const uint64_t block)
{
	return !axmap_isset(f->io_axmap, block);
}

/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned int min_bs = td->o.rw_min_bs;
	struct fio_file *f = io_u->file;
	unsigned int nr_blocks;
	uint64_t block;

	block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;

	if (!(io_u->flags & IO_U_F_BUSY_OK))
		nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);

	if ((nr_blocks * min_bs) < io_u->buflen)
		io_u->buflen = nr_blocks * min_bs;
}
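
/*
 * Illustrative example (annotation, not in the original source): with
 * min_bs = 4096, an io_u starting 8192 bytes past the file offset with
 * buflen = 10000 gives block = 2 and nr_blocks = ceil(10000/4096) = 3.
 * If axmap_set_nr() can only mark 2 of those blocks before running into
 * blocks that are already set, buflen is trimmed back to 2 * 4096.
 */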

static uint64_t last_block(struct thread_data *td, struct fio_file *f,
			   enum fio_ddir ddir)
{
	uint64_t max_blocks;
	uint64_t max_size;

	assert(ddir_rw(ddir));

	/*
	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
	 */
	max_size = f->io_size;
	if (max_size > f->real_file_size)
		max_size = f->real_file_size;

	if (td->o.zone_range)
		max_size = td->o.zone_range;

	if (td->o.min_bs[ddir] > td->o.ba[ddir])
		max_size -= td->o.min_bs[ddir] - td->o.ba[ddir];

	max_blocks = max_size / (uint64_t) td->o.ba[ddir];
	if (!max_blocks)
		return 0;

	return max_blocks;
}

struct rand_off {
	struct flist_head list;
	uint64_t off;
};

static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				  enum fio_ddir ddir, uint64_t *b)
{
	uint64_t r;

	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
		uint64_t lastb;

		lastb = last_block(td, f, ddir);
		if (!lastb)
			return 1;

		r = __rand(&td->random_state);

		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);

		*b = lastb * (r / ((uint64_t) FRAND_MAX + 1.0));
	} else {
		uint64_t off = 0;

		assert(fio_file_lfsr(f));

		if (lfsr_next(&f->lfsr, &off))
			return 1;

		*b = off;
	}

	/*
	 * if we are not maintaining a random map, we are done.
	 */
	if (!file_randommap(td, f))
		goto ret;

	/*
	 * calculate map offset and check if it's free
	 */
	if (random_map_free(f, *b))
		goto ret;

	dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
			(unsigned long long) *b);

	*b = axmap_next_free(f->io_axmap, *b);
	if (*b == (uint64_t) -1ULL)
		return 1;
ret:
	return 0;
}
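
/*
 * Illustrative note (annotation, not in the original source): the
 * Tausworthe path scales the raw random value r uniformly onto
 * [0, lastb), since r / (FRAND_MAX + 1.0) is a double in [0, 1); e.g.
 * lastb = 1000 with r = FRAND_MAX / 2 yields *b of about 500. The LFSR
 * path instead visits every block exactly once per cycle, so it cannot
 * repeat a block until the sequence wraps.
 */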

static int __get_next_rand_offset_zipf(struct thread_data *td,
				       struct fio_file *f, enum fio_ddir ddir,
				       uint64_t *b)
{
	*b = zipf_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_pareto(struct thread_data *td,
					 struct fio_file *f, enum fio_ddir ddir,
					 uint64_t *b)
{
	*b = pareto_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_gauss(struct thread_data *td,
					struct fio_file *f, enum fio_ddir ddir,
					uint64_t *b)
{
	*b = gauss_next(&f->gauss);
	return 0;
}

static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
{
	struct rand_off *r1 = flist_entry(a, struct rand_off, list);
	struct rand_off *r2 = flist_entry(b, struct rand_off, list);

	return r1->off - r2->off;
}

static int get_off_from_method(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
		return __get_next_rand_offset(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
		return __get_next_rand_offset_zipf(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
		return __get_next_rand_offset_pareto(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
		return __get_next_rand_offset_gauss(td, f, ddir, b);

	log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
	return 1;
}
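
/*
 * Usage sketch (annotation, not in the original source; job-file syntax
 * assumed from fio's documented options): these distributions are
 * selected per job with e.g.
 *
 *	random_distribution=zipf:1.2
 *	random_distribution=pareto:0.9
 *
 * Note that the pareto helper deliberately reuses f->zipf; both
 * distributions share the same underlying state structure.
 */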

/*
 * Sort the reads for a verify phase in batches of verifysort_nr, if
 * specified.
 */
static inline int should_sort_io(struct thread_data *td)
{
	if (!td->o.verifysort_nr || !td->o.do_verify)
		return 0;
	if (!td_random(td))
		return 0;
	if (td->runstate != TD_VERIFYING)
		return 0;
	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE)
		return 0;

	return 1;
}

static int should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned int v;
	unsigned long r;

	if (td->o.perc_rand[ddir] == 100)
		return 1;

	r = __rand(&td->seq_rand_state[ddir]);
	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));

	return v <= td->o.perc_rand[ddir];
}

static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				enum fio_ddir ddir, uint64_t *b)
{
	struct rand_off *r;
	int i, ret = 1;

	if (!should_sort_io(td))
		return get_off_from_method(td, f, ddir, b);

	if (!flist_empty(&td->next_rand_list)) {
fetch:
		r = flist_first_entry(&td->next_rand_list, struct rand_off, list);
		flist_del(&r->list);
		*b = r->off;
		free(r);
		return 0;
	}

	for (i = 0; i < td->o.verifysort_nr; i++) {
		r = malloc(sizeof(*r));

		ret = get_off_from_method(td, f, ddir, &r->off);
		if (ret) {
			free(r);
			break;
		}

		flist_add(&r->list, &td->next_rand_list);
	}

	if (ret && !i)
		return ret;

	assert(!flist_empty(&td->next_rand_list));
	flist_sort(NULL, &td->next_rand_list, flist_cmp);
	goto fetch;
}
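
/*
 * Illustrative example (annotation, not in the original source): with
 * verifysort_nr = 4, the loop above pre-generates four random offsets,
 * sorts them ascending via flist_sort(), and hands them out one at a
 * time from the "fetch" path. Verify-phase reads therefore hit the
 * media in roughly ascending order, which is cheaper on rotating disks.
 */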

static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (!get_next_rand_offset(td, f, ddir, b))
		return 0;

	if (td->o.time_based) {
		fio_file_reset(td, f);
		if (!get_next_rand_offset(td, f, ddir, b))
			return 0;
	}

	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
			f->file_name, (unsigned long long) f->last_pos[ddir],
			(unsigned long long) f->real_file_size);
	return 1;
}

static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *offset)
{
	struct thread_options *o = &td->o;

	assert(ddir_rw(ddir));

	if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
	    o->time_based)
		f->last_pos[ddir] = f->last_pos[ddir] - f->io_size;

	if (f->last_pos[ddir] < f->real_file_size) {
		uint64_t pos;

		if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0)
			f->last_pos[ddir] = f->real_file_size;

		pos = f->last_pos[ddir] - f->file_offset;
		if (pos && o->ddir_seq_add) {
			pos += o->ddir_seq_add;

			/*
			 * If we reach beyond the end of the file
			 * with holed IO, wrap around to the
			 * beginning again.
			 */
			if (pos >= f->real_file_size)
				pos = f->file_offset;
		}

		*offset = pos;
		return 0;
	}

	return 1;
}
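
/*
 * Illustrative example (annotation, not in the original source): with a
 * 4k block size and ddir_seq_add = 4k, each IO starts 4k beyond where
 * the previous one ended, producing offsets 0, 8k, 16k, ... ("holed"
 * sequential IO). Once pos runs past real_file_size, it wraps back to
 * f->file_offset so the pattern repeats instead of running off the end
 * of the file.
 */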

static int get_next_block(struct thread_data *td, struct io_u *io_u,
			  enum fio_ddir ddir, int rw_seq,
			  unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	uint64_t b, offset;
	int ret;

	assert(ddir_rw(ddir));

	b = offset = -1ULL;

	if (rw_seq) {
		if (td_random(td)) {
			if (should_do_random(td, ddir)) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 1;
			} else {
				*is_random = 0;
				io_u->flags |= IO_U_F_BUSY_OK;
				ret = get_next_seq_offset(td, f, ddir, &offset);
				if (ret)
					ret = get_next_rand_block(td, f, ddir, &b);
			}
		} else {
			*is_random = 0;
			ret = get_next_seq_offset(td, f, ddir, &offset);
		}
	} else {
		io_u->flags |= IO_U_F_BUSY_OK;
		*is_random = 0;

		if (td->o.rw_seq == RW_SEQ_SEQ) {
			ret = get_next_seq_offset(td, f, ddir, &offset);
			if (ret) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 0;
			}
		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
			if (f->last_start[ddir] != -1ULL)
				offset = f->last_start[ddir] - f->file_offset;
			else
				offset = 0;
			ret = 0;
		} else {
			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
			ret = 1;
		}
	}

	if (!ret) {
		if (offset != -1ULL)
			io_u->offset = offset;
		else if (b != -1ULL)
			io_u->offset = b * td->o.ba[ddir];
		else {
			log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
			ret = 1;
		}
	}

	return ret;
}

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
			     unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	int rw_seq_hit = 0;

	assert(ddir_rw(ddir));

	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
		rw_seq_hit = 1;
		td->ddir_seq_nr = td->o.ddir_seq_nr;
	}

	if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
		return 1;

	if (io_u->offset >= f->io_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->io_size);
		return 1;
	}

	io_u->offset += f->file_offset;
	if (io_u->offset >= f->real_file_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->real_file_size);
		return 1;
	}

	return 0;
}

static int get_next_offset(struct thread_data *td, struct io_u *io_u,
			   unsigned int *is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_off)
			return ops->fill_io_u_off(td, io_u, is_random);
	}

	return __get_next_offset(td, io_u, is_random);
}

static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
			    unsigned int buflen)
{
	struct fio_file *f = io_u->file;

	return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
}

static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
				      unsigned int is_random)
{
	int ddir = io_u->ddir;
	unsigned int buflen = 0;
	unsigned int minbs, maxbs;
	unsigned long r;

	assert(ddir_rw(ddir));

	if (td->o.bs_is_seq_rand)
		ddir = is_random ? DDIR_WRITE: DDIR_READ;

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		return minbs;

	/*
	 * If we can't satisfy the min block size from here, then fail
	 */
	if (!io_u_fits(td, io_u, minbs))
		return 0;

	do {
		r = __rand(&td->bsrange_state);

		if (!td->o.bssplit_nr[ddir]) {
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (FRAND_MAX + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if ((r <= ((FRAND_MAX / 100L) * perc)) &&
				    io_u_fits(td, io_u, buflen))
					break;
			}
		}

		if (td->o.do_verify && td->o.verify != VERIFY_NONE)
			buflen = (buflen + td->o.verify_interval - 1) &
				~(td->o.verify_interval - 1);

		if (!td->o.bs_unaligned && is_power_of_2(minbs))
			buflen = (buflen + minbs - 1) & ~(minbs - 1);

	} while (!io_u_fits(td, io_u, buflen));

	return buflen;
}
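
/*
 * Worked example (annotation, not in the original source): the rounding
 * mask (buflen + minbs - 1) & ~(minbs - 1) rounds up to the next
 * multiple of minbs when minbs is a power of two, e.g. minbs = 4096 and
 * a raw buflen of 5000 gives (5000 + 4095) & ~4095 = 8192. With a job
 * option such as bssplit=4k/50:16k/50, the loop instead walks the split
 * table and picks the first size whose cumulative percentage covers r.
 */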

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
				    unsigned int is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_size)
			return ops->fill_io_u_size(td, io_u, is_random);
	}

	return __get_next_buflen(td, io_u, is_random);
}

static void set_rwmix_bytes(struct thread_data *td)
{
	unsigned int diff;

	/*
	 * we do time or byte based switch. this is needed because
	 * buffered writes may issue a lot quicker than they complete,
	 * whereas reads do not.
	 */
	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
}

static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	unsigned int v;
	unsigned long r;

	r = __rand(&td->rwmix_state);
	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));

	if (v <= td->o.rwmix[DDIR_READ])
		return DDIR_READ;

	return DDIR_WRITE;
}
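
/*
 * Illustrative example (annotation, not in the original source): with a
 * 70/30 read/write mix, v is drawn uniformly from 1..100 and any value
 * <= 70 selects DDIR_READ, so reads are chosen with the configured
 * probability. set_rwmix_bytes() then caps how many issues one
 * direction can accumulate before the mix is re-evaluated.
 */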

void io_u_quiesce(struct thread_data *td)
{
	/*
	 * We are going to sleep, ensure that we flush anything pending as
	 * not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	if (td->io_u_queued || td->cur_depth) {
		int fio_unused ret;

		ret = td_io_commit(td);
	}

	while (td->io_u_in_flight) {
		int fio_unused ret;

		ret = io_u_queued_complete(td, 1);
	}
}

static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td) && td->o.rwmix[odir]) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
		    td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	io_u_quiesce(td);

	usec = usec_sleep(td, usec);

	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	return ddir;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
	enum fio_ddir ddir;

	/*
	 * see if it's time to fsync
	 */
	if (td->o.fsync_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
	    td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC;

	/*
	 * see if it's time to fdatasync
	 */
	if (td->o.fdatasync_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
	    td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_DATASYNC;

	/*
	 * see if it's time to sync_file_range
	 */
	if (td->sync_file_range_nr &&
	    !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
	    td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC_FILE_RANGE;

	if (td_rw(td)) {
		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
			/*
			 * Put a top limit on how many bytes we do for
			 * one data direction, to avoid overflowing the
			 * ranges too much
			 */
			ddir = get_rand_ddir(td);

			if (ddir != td->rwmix_ddir)
				set_rwmix_bytes(td);

			td->rwmix_ddir = ddir;
		}
		ddir = td->rwmix_ddir;
	} else if (td_read(td))
		ddir = DDIR_READ;
	else if (td_write(td))
		ddir = DDIR_WRITE;
	else
		ddir = DDIR_TRIM;

	td->rwmix_ddir = rate_ddir(td, ddir);
	return td->rwmix_ddir;
}
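
/*
 * Illustrative note (annotation, not in the original source): with
 * td->o.fsync_blocks = 32, the modulo test above returns DDIR_SYNC once
 * for every 32 writes issued; the same pattern drives fdatasync and
 * sync_file_range. Only after the sync checks does the rwmix/rate logic
 * pick a data direction.
 */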

static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
	enum fio_ddir ddir = get_rw_ddir(td);

	if (td_trimwrite(td)) {
		struct fio_file *f = io_u->file;
		if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
			ddir = DDIR_TRIM;
		else
			ddir = DDIR_WRITE;
	}

	io_u->ddir = io_u->acct_ddir = ddir;

	if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
	    td->o.barrier_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
	    td->io_issues[DDIR_WRITE])
		io_u->flags |= IO_U_F_BARRIER;
}

void put_file_log(struct thread_data *td, struct fio_file *f)
{
	unsigned int ret = put_file(td, f);

	if (ret)
		td_verror(td, ret, "file close");
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	td_io_u_lock(td);

	if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT))
		put_file_log(td, io_u->file);

	io_u->file = NULL;
	io_u->flags |= IO_U_F_FREE;

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;
	io_u_qpush(&td->io_u_freelist, io_u);
	td_io_u_unlock(td);
	td_io_u_free_notify(td);
}

void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
	io_u->flags &= ~IO_U_F_FLIGHT;
	put_io_u(td, io_u);
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;
	enum fio_ddir ddir = acct_ddir(__io_u);

	dprint(FD_IO, "requeue %p\n", __io_u);

	td_io_u_lock(td);

	__io_u->flags |= IO_U_F_FREE;
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
		td->io_issues[ddir]--;

	__io_u->flags &= ~IO_U_F_FLIGHT;
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;

	io_u_rpush(&td->io_u_requeues, __io_u);
	td_io_u_unlock(td);
	*io_u = NULL;
}

static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
	unsigned int is_random;

	if (td->io_ops->flags & FIO_NOIO)
		goto out;

	set_rw_ddir(td, io_u);

	/*
	 * fsync() or fdatasync() or trim etc, we are done
	 */
	if (!ddir_rw(io_u->ddir))
		goto out;

	/*
	 * See if it's time to switch to a new zone
	 */
	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
		struct fio_file *f = io_u->file;

		td->zone_bytes = 0;
		f->file_offset += td->o.zone_range + td->o.zone_skip;

		/*
		 * Wrap from the beginning, if we exceed the file size
		 */
		if (f->file_offset >= f->real_file_size)
			f->file_offset = f->real_file_size - f->file_offset;
		f->last_pos[io_u->ddir] = f->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}

	/*
	 * No log, let the seq/rand engine retrieve the next buflen and
	 * position.
	 */
	if (get_next_offset(td, io_u, &is_random)) {
		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
		return 1;
	}

	io_u->buflen = get_next_buflen(td, io_u, is_random);
	if (!io_u->buflen) {
		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
		return 1;
	}

	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "io_u %p, offset too large\n", io_u);
		dprint(FD_IO, "  off=%llu/%lu > %llu\n",
			(unsigned long long) io_u->offset, io_u->buflen,
			(unsigned long long) io_u->file->real_file_size);
		return 1;
	}

	/*
	 * mark entry before potentially trimming io_u
	 */
	if (td_random(td) && file_randommap(td, io_u->file))
		mark_random_map(td, io_u);

out:
	dprint_io_u(io_u, "fill_io_u");
	td->zone_bytes += io_u->buflen;
	return 0;
}

static void __io_u_mark_map(unsigned int *map, unsigned int nr)
{
	int idx = 0;

	switch (nr) {
	default:
		idx = 6;
		break;
	case 33 ... 64:
		idx = 5;
		break;
	case 17 ... 32:
		idx = 4;
		break;
	case 9 ... 16:
		idx = 3;
		break;
	case 5 ... 8:
		idx = 2;
		break;
	case 1 ... 4:
		idx = 1;
	case 0:
		break;
	}

	map[idx]++;
}
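
/*
 * Illustrative note (annotation, not in the original source): the
 * buckets are roughly power-of-two ranges, so e.g. nr = 12 falls in the
 * 9..16 range and increments map[3], while nr = 1..4 all share map[1].
 * The fall-through from "case 1 ... 4" into "case 0" is deliberate:
 * both end in the same break, but only the former sets idx to 1.
 */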

void io_u_mark_submit(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_submit, nr);
	td->ts.total_submit++;
}

void io_u_mark_complete(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_complete, nr);
	td->ts.total_complete++;
}

void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
	int idx = 0;

	switch (td->cur_depth) {
	default:
		idx = 6;
		break;
	case 32 ... 63:
		idx = 5;
		break;
	case 16 ... 31:
		idx = 4;
		break;
	case 8 ... 15:
		idx = 3;
		break;
	case 4 ... 7:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 1:
		break;
	}

	td->ts.io_u_map[idx] += nr;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
{
	int idx = 0;

	assert(usec < 1000);

	switch (usec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_U_NR);
	td->ts.io_u_lat_u[idx]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
{
	int idx = 0;

	switch (msec) {
	default:
		idx = 11;
		break;
	case 1000 ... 1999:
		idx = 10;
		break;
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_M_NR);
	td->ts.io_u_lat_m[idx]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
{
	if (usec < 1000)
		io_u_mark_lat_usec(td, usec);
	else
		io_u_mark_lat_msec(td, usec / 1000);
}

/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td,
					   enum fio_file_flags goodf,
					   enum fio_file_flags badf)
{
	struct fio_file *f;
	int fno;

	do {
		int opened = 0;
		unsigned long r;

		r = __rand(&td->next_file_state);
		fno = (unsigned int) ((double) td->o.nr_files
				* (r / (FRAND_MAX + 1.0)));

		f = td->files[fno];
		if (fio_file_done(f))
			continue;

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err)
				continue;
			opened = 1;
		}

		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
			dprint(FD_FILE, "get_next_file_rand: %p\n", f);
			return f;
		}
		if (opened)
			td_io_close_file(td, f);
	} while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
					 int badf)
{
	unsigned int old_next_file = td->next_file;
	struct fio_file *f;

	do {
		int opened = 0;

		f = td->files[td->next_file];

		td->next_file++;
		if (td->next_file >= td->o.nr_files)
			td->next_file = 0;

		dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
		if (fio_file_done(f)) {
			f = NULL;
			continue;
		}

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err) {
				dprint(FD_FILE, "error %d on open of %s\n",
					err, f->file_name);
				f = NULL;
				continue;
			}
			opened = 1;
		}

		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
								f->flags);
		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
			break;

		if (opened)
			td_io_close_file(td, f);

		f = NULL;
	} while (td->next_file != old_next_file);

	dprint(FD_FILE, "get_next_file_rr: %p\n", f);
	return f;
}

static struct fio_file *__get_next_file(struct thread_data *td)
{
	struct fio_file *f;

	assert(td->o.nr_files <= td->files_index);

	if (td->nr_done_files >= td->o.nr_files) {
		dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
				" nr_files=%d\n", td->nr_open_files,
						  td->nr_done_files,
						  td->o.nr_files);
		return NULL;
	}

	f = td->file_service_file;
	if (f && fio_file_open(f) && !fio_file_closing(f)) {
		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
			goto out;
		if (td->file_service_left--)
			goto out;
	}

	if (td->o.file_service_type == FIO_FSERVICE_RR ||
	    td->o.file_service_type == FIO_FSERVICE_SEQ)
		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
	else
		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);

	if (IS_ERR(f))
		return f;

	td->file_service_file = f;
	td->file_service_left = td->file_service_nr - 1;
out:
	if (f)
		dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
	else
		dprint(FD_FILE, "get_next_file: NULL\n");
	return f;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->get_next_file)
			return ops->get_next_file(td);
	}

	return __get_next_file(td);
}

static long set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f;

	do {
		f = get_next_file(td);
		if (IS_ERR_OR_NULL(f))
			return PTR_ERR(f);

		io_u->file = f;
		get_file(f);

		if (!fill_io_u(td, io_u))
			break;

		put_file_log(td, f);
		td_io_close_file(td, f);
		io_u->file = NULL;
		fio_file_set_done(f);
		td->nr_done_files++;
		dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
					td->nr_done_files, td->o.nr_files);
	} while (1);

	return 0;
}

static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
		      unsigned long tusec, unsigned long max_usec)
{
	if (!td->error)
		log_err("fio: latency of %lu usec exceeds specified max (%lu usec)\n", tusec, max_usec);
	td_verror(td, ETIMEDOUT, "max latency exceeded");
	icd->error = ETIMEDOUT;
}

static void lat_new_cycle(struct thread_data *td)
{
	fio_gettime(&td->latency_ts, NULL);
	td->latency_ios = ddir_rw_sum(td->io_blocks);
	td->latency_failed = 0;
}

/*
 * We had an IO outside the latency target. Reduce the queue depth. If we
 * are at QD=1, then it's time to give up.
 */
static int __lat_target_failed(struct thread_data *td)
{
	if (td->latency_qd == 1)
		return 1;

	td->latency_qd_high = td->latency_qd;

	if (td->latency_qd == td->latency_qd_low)
		td->latency_qd_low--;

	td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;

	dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * When we ramp QD down, quiesce existing IO to prevent
	 * a storm of ramp downs due to pending higher depth.
	 */
	io_u_quiesce(td);
	lat_new_cycle(td);
	return 0;
}

static int lat_target_failed(struct thread_data *td)
{
	if (td->o.latency_percentile.u.f == 100.0)
		return __lat_target_failed(td);

	td->latency_failed++;
	return 0;
}

void lat_target_init(struct thread_data *td)
{
	td->latency_end_run = 0;

	if (td->o.latency_target) {
		dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target);
		fio_gettime(&td->latency_ts, NULL);
		td->latency_qd = 1;
		td->latency_qd_high = td->o.iodepth;
		td->latency_qd_low = 1;
		td->latency_ios = ddir_rw_sum(td->io_blocks);
	} else
		td->latency_qd = td->o.iodepth;
}

void lat_target_reset(struct thread_data *td)
{
	if (!td->latency_end_run)
		lat_target_init(td);
}

static void lat_target_success(struct thread_data *td)
{
	const unsigned int qd = td->latency_qd;
	struct thread_options *o = &td->o;

	td->latency_qd_low = td->latency_qd;

	/*
	 * If we haven't failed yet, we double up to a failing value instead
	 * of bisecting from highest possible queue depth. If we have set
	 * a limit other than td->o.iodepth, bisect between that.
	 */
	if (td->latency_qd_high != o->iodepth)
		td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2;
	else
		td->latency_qd *= 2;

	if (td->latency_qd > o->iodepth)
		td->latency_qd = o->iodepth;

	dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);

	/*
	 * Same as last one, we are done. Let it run a latency cycle, so
	 * we get only the results from the targeted depth.
	 */
	if (td->latency_qd == qd) {
		if (td->latency_end_run) {
			dprint(FD_RATE, "We are done\n");
			td->done = 1;
		} else {
			dprint(FD_RATE, "Quiesce and final run\n");
			io_u_quiesce(td);
			td->latency_end_run = 1;
			reset_all_stats(td);
			reset_io_stats(td);
		}
	}

	lat_new_cycle(td);
}

/*
 * Check if we can bump the queue depth
 */
void lat_target_check(struct thread_data *td)
{
	uint64_t usec_window;
	uint64_t ios;
	double success_ios;

	usec_window = utime_since_now(&td->latency_ts);
	if (usec_window < td->o.latency_window)
		return;

	ios = ddir_rw_sum(td->io_blocks) - td->latency_ios;
	success_ios = (double) (ios - td->latency_failed) / (double) ios;
	success_ios *= 100.0;

	dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f);

	if (success_ios >= td->o.latency_percentile.u.f)
		lat_target_success(td);
	else
		__lat_target_failed(td);
}
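
/*
 * Illustrative example (annotation, not in the original source): the
 * latency target logic is a binary search over queue depth. Starting at
 * QD=1 with iodepth=32, successful windows double the depth (1, 2, 4,
 * 8, ...) until a window misses the target; from then on it bisects
 * between latency_qd_low and latency_qd_high until the depth converges,
 * then runs one final cycle at that depth with the stats reset.
 */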

/*
 * If latency target is enabled, we might be ramping up or down and not
 * using the full queue depth available.
 */
int queue_full(const struct thread_data *td)
{
	const int qempty = io_u_qempty(&td->io_u_freelist);

	if (qempty)
		return 1;
	if (!td->o.latency_target)
		return 0;

	return td->cur_depth >= td->latency_qd;
}

struct io_u *__get_io_u(struct thread_data *td)
{
	struct io_u *io_u = NULL;

	if (td->stop_io)
		return NULL;

	td_io_u_lock(td);

again:
	if (!io_u_rempty(&td->io_u_requeues))
		io_u = io_u_rpop(&td->io_u_requeues);
	else if (!queue_full(td)) {
		io_u = io_u_qpop(&td->io_u_freelist);

		io_u->file = NULL;
		io_u->buflen = 0;
		io_u->resid = 0;
		io_u->end_io = NULL;
	}

	if (io_u) {
		assert(io_u->flags & IO_U_F_FREE);
		io_u->flags &= ~(IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
				 IO_U_F_TRIMMED | IO_U_F_BARRIER |
				 IO_U_F_VER_LIST);

		io_u->error = 0;
		io_u->acct_ddir = -1;
		td->cur_depth++;
		io_u->flags |= IO_U_F_IN_CUR_DEPTH;
		io_u->ipo = NULL;
	} else if (td->o.verify_async) {
		/*
		 * We ran out, wait for async verify threads to finish and
		 * return one
		 */
		pthread_cond_wait(&td->free_cond, &td->io_u_lock);
		goto again;
	}

	td_io_u_unlock(td);
	return io_u;
}

static int check_get_trim(struct thread_data *td, struct io_u *io_u)
{
	if (!(td->flags & TD_F_TRIM_BACKLOG))
		return 0;

	if (td->trim_entries) {
		int get_trim = 0;

		if (td->trim_batch) {
			td->trim_batch--;
			get_trim = 1;
		} else if (!(td->io_hist_len % td->o.trim_backlog) &&
			   td->last_ddir != DDIR_READ) {
			td->trim_batch = td->o.trim_batch;
			if (!td->trim_batch)
				td->trim_batch = td->o.trim_backlog;
			get_trim = 1;
		}

		if (get_trim && !get_next_trim(td, io_u))
			return 1;
	}

	return 0;
}

static int check_get_verify(struct thread_data *td, struct io_u *io_u)
{
	if (!(td->flags & TD_F_VER_BACKLOG))
		return 0;

	if (td->io_hist_len) {
		int get_verify = 0;

		if (td->verify_batch)
			get_verify = 1;
		else if (!(td->io_hist_len % td->o.verify_backlog) &&
			 td->last_ddir != DDIR_READ) {
			td->verify_batch = td->o.verify_batch;
			if (!td->verify_batch)
				td->verify_batch = td->o.verify_backlog;
			get_verify = 1;
		}

		if (get_verify && !get_next_verify(td, io_u)) {
			td->verify_batch--;
			return 1;
		}
	}

	return 0;
}

/*
 * Fill offset and start time into the buffer content, to prevent too
 * easy compressible data for simple de-dupe attempts. Do this for every
 * 512b block in the range, since that should be the smallest block size
 * we can expect from a device.
 */
static void small_content_scramble(struct io_u *io_u)
{
	unsigned int i, nr_blocks = io_u->buflen / 512;
	uint64_t boffset;
	unsigned int offset;
	void *p, *end;

	if (!nr_blocks)
		return;

	p = io_u->xfer_buf;
	boffset = io_u->offset;
	io_u->buf_filled_len = 0;

	for (i = 0; i < nr_blocks; i++) {
		/*
		 * Fill the byte offset into a "random" start offset of
		 * the buffer, given by the product of the usec time
		 * and the actual offset.
		 */
		offset = (io_u->start_time.tv_usec ^ boffset) & 511;
		offset &= ~(sizeof(uint64_t) - 1);
		if (offset >= 512 - sizeof(uint64_t))
			offset -= sizeof(uint64_t);
		memcpy(p + offset, &boffset, sizeof(boffset));

		end = p + 512 - sizeof(io_u->start_time);
		memcpy(end, &io_u->start_time, sizeof(io_u->start_time));
		p += 512;
		boffset += 512;
	}
}
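
/*
 * Illustrative note (annotation, not in the original source): for each
 * 512-byte block the scramble writes two stamps: the block's byte
 * offset at a pseudo-random, 8-byte-aligned position inside the block
 * (derived from start_time.tv_usec ^ boffset), and the start_time
 * struct at the tail of the block. A 4096-byte io_u therefore gets 8
 * such stamps, enough to defeat naive dedupe without regenerating the
 * whole buffer.
 */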

/*
 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
 * etc. The returned io_u is fully ready to be prepped and submitted.
 */
struct io_u *get_io_u(struct thread_data *td)
{
	struct fio_file *f;
	struct io_u *io_u;
	int do_scramble = 0;
	long ret = 0;

	io_u = __get_io_u(td);
	if (!io_u) {
		dprint(FD_IO, "__get_io_u failed\n");
		return NULL;
	}

	if (check_get_verify(td, io_u))
		goto out;
	if (check_get_trim(td, io_u))
		goto out;

	/*
	 * from a requeue, io_u already setup
	 */
	if (io_u->file)
		goto out;

	/*
	 * If using an iolog, grab next piece if any available.
	 */
	if (td->flags & TD_F_READ_IOLOG) {
		if (read_iolog_get(td, io_u))
			goto err_put;
	} else if (set_io_u_file(td, io_u)) {
		ret = -EBUSY;
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	f = io_u->file;
	if (!f) {
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	assert(fio_file_open(f));

	if (ddir_rw(io_u->ddir)) {
		if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
			dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
			goto err_put;
		}

		f->last_start[io_u->ddir] = io_u->offset;
		f->last_pos[io_u->ddir] = io_u->offset + io_u->buflen;

		if (io_u->ddir == DDIR_WRITE) {
			if (td->flags & TD_F_REFILL_BUFFERS) {
				io_u_fill_buffer(td, io_u,
					td->o.min_bs[DDIR_WRITE],
					io_u->xfer_buflen);
			} else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
				   !(td->flags & TD_F_COMPRESS))
				do_scramble = 1;
			if (td->flags & TD_F_VER_NONE) {
				populate_verify_io_u(td, io_u);
				do_scramble = 0;
			}
		} else if (io_u->ddir == DDIR_READ) {
			/*
			 * Reset the buf_filled parameters so next time if the
			 * buffer is used for writes it is refilled.
			 */
			io_u->buf_filled_len = 0;
		}
	}

	/*
	 * Set io data pointers.
	 */
	io_u->xfer_buf = io_u->buf;
	io_u->xfer_buflen = io_u->buflen;

out:
	assert(io_u->file);
	if (!td_io_prep(td, io_u)) {
		if (!td->o.disable_slat)
			fio_gettime(&io_u->start_time, NULL);
		if (do_scramble)
			small_content_scramble(io_u);
		return io_u;
	}
err_put:
	dprint(FD_IO, "get_io_u failed\n");
	put_io_u(td, io_u);
	return ERR_PTR(ret);
}

void io_u_log_error(struct thread_data *td, struct io_u *io_u)
{
	enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);

	if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
		return;

	log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%lu\n",
		io_u->file ? " on file " : "",
		io_u->file ? io_u->file->file_name : "",
		strerror(io_u->error),
		io_ddir_name(io_u->ddir),
		io_u->offset, io_u->xfer_buflen);

	if (!td->error)
		td_verror(td, io_u->error, "io_u error");
}

static inline int gtod_reduce(struct thread_data *td)
{
	return td->o.disable_clat && td->o.disable_lat && td->o.disable_slat
		&& td->o.disable_bw;
}

static void account_io_completion(struct thread_data *td, struct io_u *io_u,
				  struct io_completion_data *icd,
				  const enum fio_ddir idx, unsigned int bytes)
{
	unsigned long lusec = 0;

	if (!gtod_reduce(td))
		lusec = utime_since(&io_u->issue_time, &icd->time);

	if (!td->o.disable_lat) {
		unsigned long tusec;

		tusec = utime_since(&io_u->start_time, &icd->time);
		add_lat_sample(td, idx, tusec, bytes, io_u->offset);

		if (td->flags & TD_F_PROFILE_OPS) {
			struct prof_io_ops *ops = &td->prof_io_ops;

			if (ops->io_u_lat)
				icd->error = ops->io_u_lat(td, tusec);
		}

		if (td->o.max_latency && tusec > td->o.max_latency)
			lat_fatal(td, icd, tusec, td->o.max_latency);
		if (td->o.latency_target && tusec > td->o.latency_target) {
			if (lat_target_failed(td))
				lat_fatal(td, icd, tusec, td->o.latency_target);
		}
	}

	if (!td->o.disable_clat) {
		add_clat_sample(td, idx, lusec, bytes, io_u->offset);
		io_u_mark_latency(td, lusec);
	}

	if (!td->o.disable_bw)
		add_bw_sample(td, idx, bytes, &icd->time);

	if (!gtod_reduce(td))
		add_iops_sample(td, idx, bytes, &icd->time);

	if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM) {
		uint32_t *info = io_u_block_info(td, io_u);
		if (BLOCK_INFO_STATE(*info) < BLOCK_STATE_TRIM_FAILURE) {
			if (io_u->ddir == DDIR_TRIM) {
				*info = BLOCK_INFO(BLOCK_STATE_TRIMMED,
						BLOCK_INFO_TRIMS(*info) + 1);
			} else if (io_u->ddir == DDIR_WRITE) {
				*info = BLOCK_INFO_SET_STATE(BLOCK_STATE_WRITTEN,
								*info);
			}
		}
	}
}

static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
{
	uint64_t secs, remainder, bps, bytes;

	bytes = td->this_io_bytes[ddir];
	bps = td->rate_bps[ddir];
	secs = bytes / bps;
	remainder = bytes % bps;
	return remainder * 1000000 / bps + secs * 1000000;
}
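
/*
 * Worked example (annotation, not in the original source): usec_for_io()
 * computes how long the bytes moved so far should have taken at the
 * configured rate, split into whole seconds plus a remainder to avoid
 * 64-bit overflow. With bytes = 2500000 and bps = 1000000: secs = 2,
 * remainder = 500000, giving 500000 * 1000000 / 1000000 + 2000000 =
 * 2500000 usec. The rate logic compares this against elapsed time to
 * decide how long to sleep.
 */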

static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
			 struct io_completion_data *icd)
{
	struct io_u *io_u = *io_u_ptr;
	enum fio_ddir ddir = io_u->ddir;
	struct fio_file *f = io_u->file;

	dprint_io_u(io_u, "io complete");

	td_io_u_lock(td);
	assert(io_u->flags & IO_U_F_FLIGHT);
	io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);

	/*
	 * Mark IO ok to verify
	 */
	if (io_u->ipo) {
		/*
		 * Remove errored entry from the verification list
		 */
		if (io_u->error)
			unlog_io_piece(td, io_u);
		else {
			io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
			write_barrier();
		}
	}

	td_io_u_unlock(td);

	if (ddir_sync(ddir)) {
		td->last_was_sync = 1;
		if (f) {
			f->first_write = -1ULL;
			f->last_write = -1ULL;
		}
		return;
	}

	td->last_was_sync = 0;
	td->last_ddir = ddir;

	if (!io_u->error && ddir_rw(ddir)) {
		unsigned int bytes = io_u->buflen - io_u->resid;
		const enum fio_ddir oddir = ddir ^ 1;
		int ret;

		td->io_blocks[ddir]++;
		td->this_io_blocks[ddir]++;
		td->io_bytes[ddir] += bytes;

		if (!(io_u->flags & IO_U_F_VER_LIST))
			td->this_io_bytes[ddir] += bytes;

		if (ddir == DDIR_WRITE) {
			if (f) {
				if (f->first_write == -1ULL ||
				    io_u->offset < f->first_write)
					f->first_write = io_u->offset;
				if (f->last_write == -1ULL ||
				    ((io_u->offset + bytes) > f->last_write))
					f->last_write = io_u->offset + bytes;
			}
			if (td->last_write_comp) {
				int idx = td->last_write_idx++;

				td->last_write_comp[idx] = io_u->offset;
				if (td->last_write_idx == td->o.iodepth)
					td->last_write_idx = 0;
			}
		}

		if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
					   td->runstate == TD_VERIFYING)) {
			account_io_completion(td, io_u, icd, ddir, bytes);

			if (__should_check_rate(td, ddir)) {
				td->rate_pending_usleep[ddir] =
					(usec_for_io(td, ddir) -
					 utime_since_now(&td->start));
			}
			if (ddir != DDIR_TRIM &&
			    __should_check_rate(td, oddir)) {
				td->rate_pending_usleep[oddir] =
					(usec_for_io(td, oddir) -
					 utime_since_now(&td->start));
			}
		}

		icd->bytes_done[ddir] += bytes;

		if (io_u->end_io) {
			ret = io_u->end_io(td, io_u_ptr);
			io_u = *io_u_ptr;
			if (ret && !icd->error)
				icd->error = ret;
		}
	} else if (io_u->error) {
		icd->error = io_u->error;
		io_u_log_error(td, io_u);
	}
	if (icd->error) {
		enum error_type_bit eb = td_error_type(ddir, icd->error);

		if (!td_non_fatal_error(td, eb, icd->error))
			return;

		/*
		 * If there is a non_fatal error, then add to the error count
		 * and clear all the errors.
		 */
		update_error_count(td, icd->error);
		td_clear_error(td);
		icd->error = 0;
		if (io_u)
			io_u->error = 0;
	}
}

static void init_icd(struct thread_data *td, struct io_completion_data *icd,
		     int nr)
{
	int ddir;

	if (!gtod_reduce(td))
		fio_gettime(&icd->time, NULL);

	icd->nr = nr;

	icd->error = 0;
	for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
		icd->bytes_done[ddir] = 0;
}

static void ios_completed(struct thread_data *td,
			  struct io_completion_data *icd)
{
	struct io_u *io_u;
	int i;

	for (i = 0; i < icd->nr; i++) {
		io_u = td->io_ops->event(td, i);

		io_completed(td, &io_u, icd);

		if (io_u)
			put_io_u(td, io_u);
	}
}

/*
 * Complete a single io_u for the sync engines.
 */
int io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
{
	struct io_completion_data icd;
	int ddir;

	init_icd(td, &icd, 1);
	io_completed(td, &io_u, &icd);

	if (io_u)
		put_io_u(td, io_u);

	if (icd.error) {
		td_verror(td, icd.error, "io_u_sync_complete");
		return -1;
	}

	for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
		td->bytes_done[ddir] += icd.bytes_done[ddir];

	return 0;
}

/*
 * Called to complete min_events number of io for the async engines.
 */
int io_u_queued_complete(struct thread_data *td, int min_evts)
{
	struct io_completion_data icd;
	struct timespec *tvp = NULL;
	int ret, ddir;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };

	dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);

	if (!min_evts)
		tvp = &ts;
	else if (min_evts > td->cur_depth)
		min_evts = td->cur_depth;

	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
	if (ret < 0) {
		td_verror(td, -ret, "td_io_getevents");
		return ret;
	} else if (!ret)
		return ret;

	init_icd(td, &icd, ret);
	ios_completed(td, &icd);
	if (icd.error) {
		td_verror(td, icd.error, "io_u_queued_complete");
		return -1;
	}

	for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
		td->bytes_done[ddir] += icd.bytes_done[ddir];

	return 0;
}

/*
 * Call when io_u is really queued, to update the submission latency.
 */
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
	if (!td->o.disable_slat) {
		unsigned long slat_time;

		slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
		add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
				io_u->offset);
	}
}

/*
 * See if we should reuse the last seed, if dedupe is enabled
 */
static struct frand_state *get_buf_state(struct thread_data *td)
{
	unsigned int v;
	unsigned long r;

	if (!td->o.dedupe_percentage)
		return &td->buf_state;
	else if (td->o.dedupe_percentage == 100)
		return &td->buf_state_prev;

	r = __rand(&td->dedupe_state);
	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));

	if (v <= td->o.dedupe_percentage)
		return &td->buf_state_prev;

	return &td->buf_state;
}

static void save_buf_state(struct thread_data *td, struct frand_state *rs)
{
	if (rs == &td->buf_state)
		frand_copy(&td->buf_state_prev, rs);
}
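
/*
 * Illustrative note (annotation, not in the original source): with
 * dedupe_percentage=40, roughly 40% of buffer fills reuse the previous
 * random seed (buf_state_prev) and so regenerate byte-identical data,
 * while the rest advance buf_state to produce fresh content.
 * save_buf_state() snapshots the fresh seed so the next dedupe hit
 * duplicates the most recently generated buffer.
 */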

void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
		    unsigned int max_bs)
{
	struct thread_options *o = &td->o;

	if (o->compress_percentage || o->dedupe_percentage) {
		unsigned int perc = td->o.compress_percentage;
		struct frand_state *rs;
		unsigned int left = max_bs;

		do {
			rs = get_buf_state(td);

			min_write = min(min_write, left);

			if (perc) {
				unsigned int seg = min_write;

				seg = min(min_write, td->o.compress_chunk);
				if (!seg)
					seg = min_write;

				fill_random_buf_percentage(rs, buf, perc, seg,
					min_write, o->buffer_pattern,
						   o->buffer_pattern_bytes);
			} else
				fill_random_buf(rs, buf, min_write);

			buf += min_write;
			left -= min_write;
			save_buf_state(td, rs);
		} while (left);
	} else if (o->buffer_pattern_bytes)
		fill_buffer_pattern(td, buf, max_bs);
	else if (o->zero_buffers)
		memset(buf, 0, max_bs);
	else
		fill_random_buf(get_buf_state(td), buf, max_bs);
}

/*
 * "randomly" fill the buffer contents
 */
void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
		      unsigned int min_write, unsigned int max_bs)
{
	io_u->buf_filled_len = 0;
	fill_io_buffer(td, io_u->buf, min_write, max_bs);
}