lfsr: don't pass in last value to lfsr_next()
[fio.git] / io_u.c
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"
#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
#include "lib/axmap.h"
#include "err.h"

struct io_completion_data {
	int nr;				/* input */

	int error;			/* output */
	uint64_t bytes_done[DDIR_RWDIR_CNT];	/* output */
	struct timeval time;		/* output */
};

/*
 * The ->io_axmap contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static int random_map_free(struct fio_file *f, const uint64_t block)
{
	return !axmap_isset(f->io_axmap, block);
}

/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned int min_bs = td->o.rw_min_bs;
	struct fio_file *f = io_u->file;
	unsigned int nr_blocks;
	uint64_t block;

	block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;

	if (!(io_u->flags & IO_U_F_BUSY_OK))
		nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);

	if ((nr_blocks * min_bs) < io_u->buflen)
		io_u->buflen = nr_blocks * min_bs;
}

static uint64_t last_block(struct thread_data *td, struct fio_file *f,
			   enum fio_ddir ddir)
{
	uint64_t max_blocks;
	uint64_t max_size;

	assert(ddir_rw(ddir));

	/*
	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
	 */
	max_size = f->io_size;
	if (max_size > f->real_file_size)
		max_size = f->real_file_size;

	if (td->o.zone_range)
		max_size = td->o.zone_range;

	max_blocks = max_size / (uint64_t) td->o.ba[ddir];
	if (!max_blocks)
		return 0;

	return max_blocks;
}

struct rand_off {
	struct flist_head list;
	uint64_t off;
};

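/*
 * Offset generation for the fully random case. With the Tausworthe
 * generator, the raw value r is scaled into [0, lastb): r divided by
 * (FRAND_MAX + 1.0) is a float in [0, 1). The LFSR generator is designed
 * to walk a maximal-length sequence instead, so each block should come
 * up exactly once per pass without help from the random map.
 */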
static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				  enum fio_ddir ddir, uint64_t *b)
{
	uint64_t r;

	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
		uint64_t lastb;

		lastb = last_block(td, f, ddir);
		if (!lastb)
			return 1;

		r = __rand(&td->random_state);

		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);

		*b = lastb * (r / ((uint64_t) FRAND_MAX + 1.0));
	} else {
		uint64_t off = 0;

		if (lfsr_next(&f->lfsr, &off))
			return 1;

		*b = off;
	}

	/*
	 * if we are not maintaining a random map, we are done.
	 */
	if (!file_randommap(td, f))
		goto ret;

	/*
	 * calculate map offset and check if it's free
	 */
	if (random_map_free(f, *b))
		goto ret;

	dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
		(unsigned long long) *b);

	*b = axmap_next_free(f->io_axmap, *b);
	if (*b == (uint64_t) -1ULL)
		return 1;
ret:
	return 0;
}

static int __get_next_rand_offset_zipf(struct thread_data *td,
				       struct fio_file *f, enum fio_ddir ddir,
				       uint64_t *b)
{
	*b = zipf_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_pareto(struct thread_data *td,
					 struct fio_file *f, enum fio_ddir ddir,
					 uint64_t *b)
{
	*b = pareto_next(&f->zipf);
	return 0;
}

static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
{
	struct rand_off *r1 = flist_entry(a, struct rand_off, list);
	struct rand_off *r2 = flist_entry(b, struct rand_off, list);

	return r1->off - r2->off;
}

static int get_off_from_method(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
		return __get_next_rand_offset(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
		return __get_next_rand_offset_zipf(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
		return __get_next_rand_offset_pareto(td, f, ddir, b);

	log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
	return 1;
}

/*
 * Sort the reads for a verify phase in batches of verifysort_nr, if
 * specified.
 */
static inline int should_sort_io(struct thread_data *td)
{
	if (!td->o.verifysort_nr || !td->o.do_verify)
		return 0;
	if (!td_random(td))
		return 0;
	if (td->runstate != TD_VERIFYING)
		return 0;
	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE)
		return 0;

	return 1;
}

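/*
 * percentage_random support: decide per-io whether this direction is
 * random or sequential. v is uniform in [1, 100], so a perc_rand of
 * e.g. 30 makes roughly 30% of the ios random.
 */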
static int should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned int v;
	unsigned long r;

	if (td->o.perc_rand[ddir] == 100)
		return 1;

	r = __rand(&td->seq_rand_state[ddir]);
	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));

	return v <= td->o.perc_rand[ddir];
}

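/*
 * When sorting is enabled, offsets are generated in batches of
 * verifysort_nr, kept on td->next_rand_list, and handed out in ascending
 * order. That turns the random reads of a verify phase into a mostly
 * sequential pattern, which tends to be cheaper on rotational storage.
 */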
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				enum fio_ddir ddir, uint64_t *b)
{
	struct rand_off *r;
	int i, ret = 1;

	if (!should_sort_io(td))
		return get_off_from_method(td, f, ddir, b);

	if (!flist_empty(&td->next_rand_list)) {
fetch:
		r = flist_first_entry(&td->next_rand_list, struct rand_off, list);
		flist_del(&r->list);
		*b = r->off;
		free(r);
		return 0;
	}

	for (i = 0; i < td->o.verifysort_nr; i++) {
		r = malloc(sizeof(*r));

		ret = get_off_from_method(td, f, ddir, &r->off);
		if (ret) {
			free(r);
			break;
		}

		flist_add(&r->list, &td->next_rand_list);
	}

	if (ret && !i)
		return ret;

	assert(!flist_empty(&td->next_rand_list));
	flist_sort(NULL, &td->next_rand_list, flist_cmp);
	goto fetch;
}

static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *b)
{
	if (!get_next_rand_offset(td, f, ddir, b))
		return 0;

	if (td->o.time_based) {
		fio_file_reset(td, f);
		if (!get_next_rand_offset(td, f, ddir, b))
			return 0;
	}

	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
			f->file_name, (unsigned long long) f->last_pos,
			(unsigned long long) f->real_file_size);
	return 1;
}

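/*
 * Sequential offsets normally continue from the previous io, but
 * ddir_seq_add lets the job skip forward between ios (holed IO) or,
 * when negative, walk the file backwards. Both cases wrap around when
 * they run off the end of the file.
 */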
static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *offset)
{
	struct thread_options *o = &td->o;

	assert(ddir_rw(ddir));

	if (f->last_pos >= f->io_size + get_start_offset(td, f) &&
	    o->time_based)
		f->last_pos = f->last_pos - f->io_size;

	if (f->last_pos < f->real_file_size) {
		uint64_t pos;

		if (f->last_pos == f->file_offset && o->ddir_seq_add < 0)
			f->last_pos = f->real_file_size;

		pos = f->last_pos - f->file_offset;
		if (pos && o->ddir_seq_add) {
			pos += o->ddir_seq_add;

			/*
			 * If we reach beyond the end of the file
			 * with holed IO, wrap around to the
			 * beginning again.
			 */
			if (pos >= f->real_file_size)
				pos = f->file_offset;
		}

		*offset = pos;
		return 0;
	}

	return 1;
}

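/*
 * Called with rw_seq set when the ddir_seq_nr counter has just expired,
 * meaning it is time to generate a fresh offset rather than continue the
 * current run. If the sequential generator runs past the end of the
 * file, we fall back to picking a random block.
 */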
static int get_next_block(struct thread_data *td, struct io_u *io_u,
			  enum fio_ddir ddir, int rw_seq,
			  unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	uint64_t b, offset;
	int ret;

	assert(ddir_rw(ddir));

	b = offset = -1ULL;

	if (rw_seq) {
		if (td_random(td)) {
			if (should_do_random(td, ddir)) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 1;
			} else {
				*is_random = 0;
				io_u->flags |= IO_U_F_BUSY_OK;
				ret = get_next_seq_offset(td, f, ddir, &offset);
				if (ret)
					ret = get_next_rand_block(td, f, ddir, &b);
			}
		} else {
			*is_random = 0;
			ret = get_next_seq_offset(td, f, ddir, &offset);
		}
	} else {
		io_u->flags |= IO_U_F_BUSY_OK;
		*is_random = 0;

		if (td->o.rw_seq == RW_SEQ_SEQ) {
			ret = get_next_seq_offset(td, f, ddir, &offset);
			if (ret) {
				ret = get_next_rand_block(td, f, ddir, &b);
				*is_random = 0;
			}
		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
			if (f->last_start != -1ULL)
				offset = f->last_start - f->file_offset;
			else
				offset = 0;
			ret = 0;
		} else {
			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
			ret = 1;
		}
	}

	if (!ret) {
		if (offset != -1ULL)
			io_u->offset = offset;
		else if (b != -1ULL)
			io_u->offset = b * td->o.ba[ddir];
		else {
			log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
			ret = 1;
		}
	}

	return ret;
}

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
			     unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	int rw_seq_hit = 0;

	assert(ddir_rw(ddir));

	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
		rw_seq_hit = 1;
		td->ddir_seq_nr = td->o.ddir_seq_nr;
	}

	if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
		return 1;

	if (io_u->offset >= f->io_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->io_size);
		return 1;
	}

	io_u->offset += f->file_offset;
	if (io_u->offset >= f->real_file_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->real_file_size);
		return 1;
	}

	return 0;
}

static int get_next_offset(struct thread_data *td, struct io_u *io_u,
			   unsigned int *is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_off)
			return ops->fill_io_u_off(td, io_u, is_random);
	}

	return __get_next_offset(td, io_u, is_random);
}

static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
			    unsigned int buflen)
{
	struct fio_file *f = io_u->file;

	return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
}

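/*
 * Block size selection. Without bssplit, the size is drawn uniformly
 * from [minbs, maxbs]. With bssplit, r is matched against the cumulative
 * percentages, so an entry with perc=60 is picked for roughly 60% of the
 * draws. Either way the result must still fit inside the io_size.
 */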
static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
				      unsigned int is_random)
{
	int ddir = io_u->ddir;
	unsigned int buflen = 0;
	unsigned int minbs, maxbs;
	unsigned long r;

	assert(ddir_rw(ddir));

	if (td->o.bs_is_seq_rand)
		ddir = is_random ? DDIR_WRITE: DDIR_READ;

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		return minbs;

	/*
	 * If we can't satisfy the min block size from here, then fail
	 */
	if (!io_u_fits(td, io_u, minbs))
		return 0;

	do {
		r = __rand(&td->bsrange_state);

		if (!td->o.bssplit_nr[ddir]) {
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (FRAND_MAX + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if ((r <= ((FRAND_MAX / 100L) * perc)) &&
				    io_u_fits(td, io_u, buflen))
					break;
			}
		}

		if (td->o.do_verify && td->o.verify != VERIFY_NONE)
			buflen = (buflen + td->o.verify_interval - 1) &
				~(td->o.verify_interval - 1);

		if (!td->o.bs_unaligned && is_power_of_2(minbs))
			buflen = (buflen + minbs - 1) & ~(minbs - 1);

	} while (!io_u_fits(td, io_u, buflen));

	return buflen;
}

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
				    unsigned int is_random)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->fill_io_u_size)
			return ops->fill_io_u_size(td, io_u, is_random);
	}

	return __get_next_buflen(td, io_u, is_random);
}

static void set_rwmix_bytes(struct thread_data *td)
{
	unsigned int diff;

	/*
	 * we do time or byte based switch. this is needed because
	 * buffered writes may issue a lot quicker than they complete,
	 * whereas reads do not.
	 */
	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
}

static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	unsigned int v;
	unsigned long r;

	r = __rand(&td->rwmix_state);
	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));

	if (v <= td->o.rwmix[DDIR_READ])
		return DDIR_READ;

	return DDIR_WRITE;
}

void io_u_quiesce(struct thread_data *td)
{
	/*
	 * We are going to sleep, ensure that we flush anything pending as
	 * not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	while (td->io_u_in_flight) {
		int fio_unused ret;

		ret = io_u_queued_complete(td, 1, NULL);
	}
}

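/*
 * Rate control bookkeeping: rate_pending_usleep[ddir] is how far ahead
 * of the target rate this direction has run, in microseconds. If the
 * current direction owes sleep time but the opposite one does not, a
 * mixed workload switches direction instead of sleeping; if both owe
 * time, we sleep the smaller debt and deduct the elapsed time from both.
 */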
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	struct timeval t;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td) && td->o.rwmix[odir]) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
			td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	io_u_quiesce(td);

	fio_gettime(&t, NULL);
	usec_sleep(td, usec);
	usec = utime_since_now(&t);

	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	if (ddir == DDIR_TRIM)
		return DDIR_TRIM;

	return ddir;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
	enum fio_ddir ddir;

	/*
	 * see if it's time to fsync
	 */
	if (td->o.fsync_blocks &&
	   !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
	     td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC;

	/*
	 * see if it's time to fdatasync
	 */
	if (td->o.fdatasync_blocks &&
	   !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
	     td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_DATASYNC;

	/*
	 * see if it's time to sync_file_range
	 */
	if (td->sync_file_range_nr &&
	   !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
	     td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC_FILE_RANGE;

	if (td_rw(td)) {
		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
			/*
			 * Put a top limit on how many bytes we do for
			 * one data direction, to avoid overflowing the
			 * ranges too much
			 */
			ddir = get_rand_ddir(td);

			if (ddir != td->rwmix_ddir)
				set_rwmix_bytes(td);

			td->rwmix_ddir = ddir;
		}
		ddir = td->rwmix_ddir;
	} else if (td_read(td))
		ddir = DDIR_READ;
	else if (td_write(td))
		ddir = DDIR_WRITE;
	else
		ddir = DDIR_TRIM;

	td->rwmix_ddir = rate_ddir(td, ddir);
	return td->rwmix_ddir;
}

static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
	io_u->ddir = io_u->acct_ddir = get_rw_ddir(td);

	if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
	    td->o.barrier_blocks &&
	   !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
	     td->io_issues[DDIR_WRITE])
		io_u->flags |= IO_U_F_BARRIER;
}

void put_file_log(struct thread_data *td, struct fio_file *f)
{
	unsigned int ret = put_file(td, f);

	if (ret)
		td_verror(td, ret, "file close");
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	td_io_u_lock(td);

	if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT))
		put_file_log(td, io_u->file);

	io_u->file = NULL;
	io_u->flags |= IO_U_F_FREE;

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;
	io_u_qpush(&td->io_u_freelist, io_u);
	td_io_u_unlock(td);
	td_io_u_free_notify(td);
}

void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
	io_u->flags &= ~IO_U_F_FLIGHT;
	put_io_u(td, io_u);
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;
	enum fio_ddir ddir = acct_ddir(__io_u);

	dprint(FD_IO, "requeue %p\n", __io_u);

	td_io_u_lock(td);

	__io_u->flags |= IO_U_F_FREE;
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
		td->io_issues[ddir]--;

	__io_u->flags &= ~IO_U_F_FLIGHT;
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;

	io_u_rpush(&td->io_u_requeues, __io_u);
	td_io_u_unlock(td);
	*io_u = NULL;
}

static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
	unsigned int is_random;

	if (td->io_ops->flags & FIO_NOIO)
		goto out;

	set_rw_ddir(td, io_u);

	/*
	 * fsync() or fdatasync() or trim etc, we are done
	 */
	if (!ddir_rw(io_u->ddir))
		goto out;

	/*
	 * See if it's time to switch to a new zone
	 */
	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
		struct fio_file *f = io_u->file;

		td->zone_bytes = 0;
		f->file_offset += td->o.zone_range + td->o.zone_skip;

		/*
		 * Wrap from the beginning, if we exceed the file size
		 */
		if (f->file_offset >= f->real_file_size)
			f->file_offset = f->real_file_size - f->file_offset;
		f->last_pos = f->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}

	/*
	 * No log, let the seq/rand engine retrieve the next buflen and
	 * position.
	 */
	if (get_next_offset(td, io_u, &is_random)) {
		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
		return 1;
	}

	io_u->buflen = get_next_buflen(td, io_u, is_random);
	if (!io_u->buflen) {
		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
		return 1;
	}

	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "io_u %p, offset too large\n", io_u);
		dprint(FD_IO, "  off=%llu/%lu > %llu\n",
			(unsigned long long) io_u->offset, io_u->buflen,
			(unsigned long long) io_u->file->real_file_size);
		return 1;
	}

	/*
	 * mark entry before potentially trimming io_u
	 */
	if (td_random(td) && file_randommap(td, io_u->file))
		mark_random_map(td, io_u);

out:
	dprint_io_u(io_u, "fill_io_u");
	td->zone_bytes += io_u->buflen;
	return 0;
}

static void __io_u_mark_map(unsigned int *map, unsigned int nr)
{
	int idx = 0;

	switch (nr) {
	default:
		idx = 6;
		break;
	case 33 ... 64:
		idx = 5;
		break;
	case 17 ... 32:
		idx = 4;
		break;
	case 9 ... 16:
		idx = 3;
		break;
	case 5 ... 8:
		idx = 2;
		break;
	case 1 ... 4:
		idx = 1;
	case 0:
		break;
	}

	map[idx]++;
}

void io_u_mark_submit(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_submit, nr);
	td->ts.total_submit++;
}

void io_u_mark_complete(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_complete, nr);
	td->ts.total_complete++;
}

void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
	int idx = 0;

	switch (td->cur_depth) {
	default:
		idx = 6;
		break;
	case 32 ... 63:
		idx = 5;
		break;
	case 16 ... 31:
		idx = 4;
		break;
	case 8 ... 15:
		idx = 3;
		break;
	case 4 ... 7:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 1:
		break;
	}

	td->ts.io_u_map[idx] += nr;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
{
	int idx = 0;

	assert(usec < 1000);

	switch (usec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_U_NR);
	td->ts.io_u_lat_u[idx]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
{
	int idx = 0;

	switch (msec) {
	default:
		idx = 11;
		break;
	case 1000 ... 1999:
		idx = 10;
		break;
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_M_NR);
	td->ts.io_u_lat_m[idx]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
{
	if (usec < 1000)
		io_u_mark_lat_usec(td, usec);
	else
		io_u_mark_lat_msec(td, usec / 1000);
}

/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td,
					   enum fio_file_flags goodf,
					   enum fio_file_flags badf)
{
	struct fio_file *f;
	int fno;

	do {
		int opened = 0;
		unsigned long r;

		r = __rand(&td->next_file_state);
		fno = (unsigned int) ((double) td->o.nr_files
				* (r / (FRAND_MAX + 1.0)));

		f = td->files[fno];
		if (fio_file_done(f))
			continue;

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err)
				continue;
			opened = 1;
		}

		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
			dprint(FD_FILE, "get_next_file_rand: %p\n", f);
			return f;
		}
		if (opened)
			td_io_close_file(td, f);
	} while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
					 int badf)
{
	unsigned int old_next_file = td->next_file;
	struct fio_file *f;

	do {
		int opened = 0;

		f = td->files[td->next_file];

		td->next_file++;
		if (td->next_file >= td->o.nr_files)
			td->next_file = 0;

		dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
		if (fio_file_done(f)) {
			f = NULL;
			continue;
		}

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err) {
				dprint(FD_FILE, "error %d on open of %s\n",
					err, f->file_name);
				f = NULL;
				continue;
			}
			opened = 1;
		}

		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
								f->flags);
		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
			break;

		if (opened)
			td_io_close_file(td, f);

		f = NULL;
	} while (td->next_file != old_next_file);

	dprint(FD_FILE, "get_next_file_rr: %p\n", f);
	return f;
}

static struct fio_file *__get_next_file(struct thread_data *td)
{
	struct fio_file *f;

	assert(td->o.nr_files <= td->files_index);

	if (td->nr_done_files >= td->o.nr_files) {
		dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
				" nr_files=%d\n", td->nr_open_files,
						  td->nr_done_files,
						  td->o.nr_files);
		return NULL;
	}

	f = td->file_service_file;
	if (f && fio_file_open(f) && !fio_file_closing(f)) {
		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
			goto out;
		if (td->file_service_left--)
			goto out;
	}

	if (td->o.file_service_type == FIO_FSERVICE_RR ||
	    td->o.file_service_type == FIO_FSERVICE_SEQ)
		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
	else
		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);

	if (IS_ERR(f))
		return f;

	td->file_service_file = f;
	td->file_service_left = td->file_service_nr - 1;
out:
	if (f)
		dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
	else
		dprint(FD_FILE, "get_next_file: NULL\n");
	return f;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
	if (td->flags & TD_F_PROFILE_OPS) {
		struct prof_io_ops *ops = &td->prof_io_ops;

		if (ops->get_next_file)
			return ops->get_next_file(td);
	}

	return __get_next_file(td);
}

static long set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f;

	do {
		f = get_next_file(td);
		if (IS_ERR_OR_NULL(f))
			return PTR_ERR(f);

		io_u->file = f;
		get_file(f);

		if (!fill_io_u(td, io_u))
			break;

		put_file_log(td, f);
		td_io_close_file(td, f);
		io_u->file = NULL;
		fio_file_set_done(f);
		td->nr_done_files++;
		dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
					td->nr_done_files, td->o.nr_files);
	} while (1);

	return 0;
}

static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
		      unsigned long tusec, unsigned long max_usec)
{
	if (!td->error)
		log_err("fio: latency of %lu usec exceeds specified max (%lu usec)\n", tusec, max_usec);
	td_verror(td, ETIMEDOUT, "max latency exceeded");
	icd->error = ETIMEDOUT;
}

static void lat_new_cycle(struct thread_data *td)
{
	fio_gettime(&td->latency_ts, NULL);
	td->latency_ios = ddir_rw_sum(td->io_blocks);
	td->latency_failed = 0;
}

1144 * We had an IO outside the latency target. Reduce the queue depth. If we
1145 * are at QD=1, then it's time to give up.
1146 */
1147static int __lat_target_failed(struct thread_data *td)
1148{
1149 if (td->latency_qd == 1)
1150 return 1;
1151
1152 td->latency_qd_high = td->latency_qd;
6bb58215
JA
1153
1154 if (td->latency_qd == td->latency_qd_low)
1155 td->latency_qd_low--;
1156
3e260a46
JA
1157 td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;
1158
1159 dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);
1160
1161 /*
1162 * When we ramp QD down, quiesce existing IO to prevent
1163 * a storm of ramp downs due to pending higher depth.
1164 */
1165 io_u_quiesce(td);
1166 lat_new_cycle(td);
1167 return 0;
1168}
1169
1170static int lat_target_failed(struct thread_data *td)
1171{
1172 if (td->o.latency_percentile.u.f == 100.0)
1173 return __lat_target_failed(td);
1174
1175 td->latency_failed++;
1176 return 0;
1177}
1178
1179void lat_target_init(struct thread_data *td)
1180{
6bb58215
JA
1181 td->latency_end_run = 0;
1182
3e260a46
JA
1183 if (td->o.latency_target) {
1184 dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target);
1185 fio_gettime(&td->latency_ts, NULL);
1186 td->latency_qd = 1;
1187 td->latency_qd_high = td->o.iodepth;
1188 td->latency_qd_low = 1;
1189 td->latency_ios = ddir_rw_sum(td->io_blocks);
1190 } else
1191 td->latency_qd = td->o.iodepth;
1192}
1193
6bb58215
JA
1194void lat_target_reset(struct thread_data *td)
1195{
1196 if (!td->latency_end_run)
1197 lat_target_init(td);
1198}
1199
3e260a46
JA
1200static void lat_target_success(struct thread_data *td)
1201{
1202 const unsigned int qd = td->latency_qd;
6bb58215 1203 struct thread_options *o = &td->o;
3e260a46
JA
1204
1205 td->latency_qd_low = td->latency_qd;
1206
1207 /*
1208 * If we haven't failed yet, we double up to a failing value instead
1209 * of bisecting from highest possible queue depth. If we have set
1210 * a limit other than td->o.iodepth, bisect between that.
1211 */
6bb58215 1212 if (td->latency_qd_high != o->iodepth)
3e260a46
JA
1213 td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2;
1214 else
1215 td->latency_qd *= 2;
1216
6bb58215
JA
1217 if (td->latency_qd > o->iodepth)
1218 td->latency_qd = o->iodepth;
3e260a46
JA
1219
1220 dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);
6bb58215 1221
3e260a46 1222 /*
6bb58215
JA
1223 * Same as last one, we are done. Let it run a latency cycle, so
1224 * we get only the results from the targeted depth.
3e260a46 1225 */
6bb58215
JA
1226 if (td->latency_qd == qd) {
1227 if (td->latency_end_run) {
1228 dprint(FD_RATE, "We are done\n");
1229 td->done = 1;
1230 } else {
1231 dprint(FD_RATE, "Quiesce and final run\n");
1232 io_u_quiesce(td);
1233 td->latency_end_run = 1;
1234 reset_all_stats(td);
1235 reset_io_stats(td);
1236 }
1237 }
3e260a46
JA
1238
1239 lat_new_cycle(td);
1240}
1241
1242/*
1243 * Check if we can bump the queue depth
1244 */
1245void lat_target_check(struct thread_data *td)
1246{
1247 uint64_t usec_window;
1248 uint64_t ios;
1249 double success_ios;
1250
1251 usec_window = utime_since_now(&td->latency_ts);
1252 if (usec_window < td->o.latency_window)
1253 return;
1254
1255 ios = ddir_rw_sum(td->io_blocks) - td->latency_ios;
1256 success_ios = (double) (ios - td->latency_failed) / (double) ios;
1257 success_ios *= 100.0;
1258
1259 dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f);
1260
1261 if (success_ios >= td->o.latency_percentile.u.f)
1262 lat_target_success(td);
1263 else
1264 __lat_target_failed(td);
1265}
1266
1267/*
1268 * If latency target is enabled, we might be ramping up or down and not
1269 * using the full queue depth available.
1270 */
effd6ff0 1271int queue_full(const struct thread_data *td)
3e260a46
JA
1272{
1273 const int qempty = io_u_qempty(&td->io_u_freelist);
1274
1275 if (qempty)
1276 return 1;
1277 if (!td->o.latency_target)
1278 return 0;
1279
1280 return td->cur_depth >= td->latency_qd;
1281}
429f6675 1282
struct io_u *__get_io_u(struct thread_data *td)
{
	struct io_u *io_u = NULL;

	if (td->stop_io)
		return NULL;

	td_io_u_lock(td);

again:
	if (!io_u_rempty(&td->io_u_requeues))
		io_u = io_u_rpop(&td->io_u_requeues);
	else if (!queue_full(td)) {
		io_u = io_u_qpop(&td->io_u_freelist);

		io_u->file = NULL;
		io_u->buflen = 0;
		io_u->resid = 0;
		io_u->end_io = NULL;
	}

	if (io_u) {
		assert(io_u->flags & IO_U_F_FREE);
		io_u->flags &= ~(IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
				 IO_U_F_TRIMMED | IO_U_F_BARRIER |
				 IO_U_F_VER_LIST);

		io_u->error = 0;
		io_u->acct_ddir = -1;
		td->cur_depth++;
		io_u->flags |= IO_U_F_IN_CUR_DEPTH;
		io_u->ipo = NULL;
	} else if (td->o.verify_async) {
		/*
		 * We ran out, wait for async verify threads to finish and
		 * return one
		 */
		pthread_cond_wait(&td->free_cond, &td->io_u_lock);
		goto again;
	}

	td_io_u_unlock(td);
	return io_u;
}

static int check_get_trim(struct thread_data *td, struct io_u *io_u)
{
	if (!(td->flags & TD_F_TRIM_BACKLOG))
		return 0;

	if (td->trim_entries) {
		int get_trim = 0;

		if (td->trim_batch) {
			td->trim_batch--;
			get_trim = 1;
		} else if (!(td->io_hist_len % td->o.trim_backlog) &&
			 td->last_ddir != DDIR_READ) {
			td->trim_batch = td->o.trim_batch;
			if (!td->trim_batch)
				td->trim_batch = td->o.trim_backlog;
			get_trim = 1;
		}

		if (get_trim && !get_next_trim(td, io_u))
			return 1;
	}

	return 0;
}

static int check_get_verify(struct thread_data *td, struct io_u *io_u)
{
	if (!(td->flags & TD_F_VER_BACKLOG))
		return 0;

	if (td->io_hist_len) {
		int get_verify = 0;

		if (td->verify_batch)
			get_verify = 1;
		else if (!(td->io_hist_len % td->o.verify_backlog) &&
			 td->last_ddir != DDIR_READ) {
			td->verify_batch = td->o.verify_batch;
			if (!td->verify_batch)
				td->verify_batch = td->o.verify_backlog;
			get_verify = 1;
		}

		if (get_verify && !get_next_verify(td, io_u)) {
			td->verify_batch--;
			return 1;
		}
	}

	return 0;
}

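/*
 * Layout per 512b block after scrambling: the io offset lands at a
 * pseudo-random, 8-byte aligned position inside the block, and the io
 * start time occupies the final bytes. The rest of the block contents
 * are left untouched.
 */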
/*
 * Fill offset and start time into the buffer content, to prevent too
 * easy compressible data for simple de-dupe attempts. Do this for every
 * 512b block in the range, since that should be the smallest block size
 * we can expect from a device.
 */
static void small_content_scramble(struct io_u *io_u)
{
	unsigned int i, nr_blocks = io_u->buflen / 512;
	uint64_t boffset;
	unsigned int offset;
	void *p, *end;

	if (!nr_blocks)
		return;

	p = io_u->xfer_buf;
	boffset = io_u->offset;
	io_u->buf_filled_len = 0;

	for (i = 0; i < nr_blocks; i++) {
		/*
		 * Fill the byte offset into a "random" start offset of
		 * the buffer, given by the product of the usec time
		 * and the actual offset.
		 */
		offset = (io_u->start_time.tv_usec ^ boffset) & 511;
		offset &= ~(sizeof(uint64_t) - 1);
		if (offset >= 512 - sizeof(uint64_t))
			offset -= sizeof(uint64_t);
		memcpy(p + offset, &boffset, sizeof(boffset));

		end = p + 512 - sizeof(io_u->start_time);
		memcpy(end, &io_u->start_time, sizeof(io_u->start_time));
		p += 512;
		boffset += 512;
	}
}

/*
 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
 * etc. The returned io_u is fully ready to be prepped and submitted.
 */
struct io_u *get_io_u(struct thread_data *td)
{
	struct fio_file *f;
	struct io_u *io_u;
	int do_scramble = 0;
	long ret = 0;

	io_u = __get_io_u(td);
	if (!io_u) {
		dprint(FD_IO, "__get_io_u failed\n");
		return NULL;
	}

	if (check_get_verify(td, io_u))
		goto out;
	if (check_get_trim(td, io_u))
		goto out;

	/*
	 * from a requeue, io_u already setup
	 */
	if (io_u->file)
		goto out;

	/*
	 * If using an iolog, grab next piece if any available.
	 */
	if (td->flags & TD_F_READ_IOLOG) {
		if (read_iolog_get(td, io_u))
			goto err_put;
	} else if (set_io_u_file(td, io_u)) {
		ret = -EBUSY;
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	f = io_u->file;
	if (!f) {
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	assert(fio_file_open(f));

	if (ddir_rw(io_u->ddir)) {
		if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
			dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
			goto err_put;
		}

		f->last_start = io_u->offset;
		f->last_pos = io_u->offset + io_u->buflen;

		if (io_u->ddir == DDIR_WRITE) {
			if (td->flags & TD_F_REFILL_BUFFERS) {
				io_u_fill_buffer(td, io_u,
					td->o.min_bs[DDIR_WRITE],
					io_u->xfer_buflen);
			} else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
				   !(td->flags & TD_F_COMPRESS))
				do_scramble = 1;
			if (td->flags & TD_F_VER_NONE) {
				populate_verify_io_u(td, io_u);
				do_scramble = 0;
			}
		} else if (io_u->ddir == DDIR_READ) {
			/*
			 * Reset the buf_filled parameters so next time if the
			 * buffer is used for writes it is refilled.
			 */
			io_u->buf_filled_len = 0;
		}
	}

	/*
	 * Set io data pointers.
	 */
	io_u->xfer_buf = io_u->buf;
	io_u->xfer_buflen = io_u->buflen;

out:
	assert(io_u->file);
	if (!td_io_prep(td, io_u)) {
		if (!td->o.disable_slat)
			fio_gettime(&io_u->start_time, NULL);
		if (do_scramble)
			small_content_scramble(io_u);
		return io_u;
	}
err_put:
	dprint(FD_IO, "get_io_u failed\n");
	put_io_u(td, io_u);
	return ERR_PTR(ret);
}

void io_u_log_error(struct thread_data *td, struct io_u *io_u)
{
	enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);

	if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
		return;

	log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%lu\n",
		io_u->file ? " on file " : "",
		io_u->file ? io_u->file->file_name : "",
		strerror(io_u->error),
		io_ddir_name(io_u->ddir),
		io_u->offset, io_u->xfer_buflen);

	if (!td->error)
		td_verror(td, io_u->error, "io_u error");
}

static inline int gtod_reduce(struct thread_data *td)
{
	return td->o.disable_clat && td->o.disable_lat && td->o.disable_slat
		&& td->o.disable_bw;
}

static void account_io_completion(struct thread_data *td, struct io_u *io_u,
				  struct io_completion_data *icd,
				  const enum fio_ddir idx, unsigned int bytes)
{
	unsigned long lusec = 0;

	if (!gtod_reduce(td))
		lusec = utime_since(&io_u->issue_time, &icd->time);

	if (!td->o.disable_lat) {
		unsigned long tusec;

		tusec = utime_since(&io_u->start_time, &icd->time);
		add_lat_sample(td, idx, tusec, bytes, io_u->offset);

		if (td->flags & TD_F_PROFILE_OPS) {
			struct prof_io_ops *ops = &td->prof_io_ops;

			if (ops->io_u_lat)
				icd->error = ops->io_u_lat(td, tusec);
		}

		if (td->o.max_latency && tusec > td->o.max_latency)
			lat_fatal(td, icd, tusec, td->o.max_latency);
		if (td->o.latency_target && tusec > td->o.latency_target) {
			if (lat_target_failed(td))
				lat_fatal(td, icd, tusec, td->o.latency_target);
		}
	}

	if (!td->o.disable_clat) {
		add_clat_sample(td, idx, lusec, bytes, io_u->offset);
		io_u_mark_latency(td, lusec);
	}

	if (!td->o.disable_bw)
		add_bw_sample(td, idx, bytes, &icd->time);

	if (!gtod_reduce(td))
		add_iops_sample(td, idx, bytes, &icd->time);
}

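/*
 * Express how long the bytes already transferred in this direction
 * should have taken at the target rate, in usecs. E.g. with
 * bytes=3,145,728 and bps=1,048,576 this returns 3,000,000; the
 * difference from the actual elapsed time becomes the pending sleep.
 */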
static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
{
	uint64_t secs, remainder, bps, bytes;

	bytes = td->this_io_bytes[ddir];
	bps = td->rate_bps[ddir];
	secs = bytes / bps;
	remainder = bytes % bps;
	return remainder * 1000000 / bps + secs * 1000000;
}

static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
			 struct io_completion_data *icd)
{
	struct io_u *io_u = *io_u_ptr;
	enum fio_ddir ddir = io_u->ddir;
	struct fio_file *f = io_u->file;

	dprint_io_u(io_u, "io complete");

	td_io_u_lock(td);
	assert(io_u->flags & IO_U_F_FLIGHT);
	io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);

	/*
	 * Mark IO ok to verify
	 */
	if (io_u->ipo) {
		/*
		 * Remove errored entry from the verification list
		 */
		if (io_u->error)
			unlog_io_piece(td, io_u);
		else {
			io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
			write_barrier();
		}
	}

	td_io_u_unlock(td);

	if (ddir_sync(ddir)) {
		td->last_was_sync = 1;
		if (f) {
			f->first_write = -1ULL;
			f->last_write = -1ULL;
		}
		return;
	}

	td->last_was_sync = 0;
	td->last_ddir = ddir;

	if (!io_u->error && ddir_rw(ddir)) {
		unsigned int bytes = io_u->buflen - io_u->resid;
		const enum fio_ddir oddir = ddir ^ 1;
		int ret;

		td->io_blocks[ddir]++;
		td->this_io_blocks[ddir]++;
		td->io_bytes[ddir] += bytes;

		if (!(io_u->flags & IO_U_F_VER_LIST))
			td->this_io_bytes[ddir] += bytes;

		if (ddir == DDIR_WRITE) {
			if (f) {
				if (f->first_write == -1ULL ||
				    io_u->offset < f->first_write)
					f->first_write = io_u->offset;
				if (f->last_write == -1ULL ||
				    ((io_u->offset + bytes) > f->last_write))
					f->last_write = io_u->offset + bytes;
			}
			if (td->last_write_comp) {
				int idx = td->last_write_idx++;

				td->last_write_comp[idx] = io_u->offset;
				if (td->last_write_idx == td->o.iodepth)
					td->last_write_idx = 0;
			}
		}

		if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
					   td->runstate == TD_VERIFYING)) {
			account_io_completion(td, io_u, icd, ddir, bytes);

			if (__should_check_rate(td, ddir)) {
				td->rate_pending_usleep[ddir] =
					(usec_for_io(td, ddir) -
					 utime_since_now(&td->start));
			}
			if (ddir != DDIR_TRIM &&
			    __should_check_rate(td, oddir)) {
				td->rate_pending_usleep[oddir] =
					(usec_for_io(td, oddir) -
					 utime_since_now(&td->start));
			}
		}

		icd->bytes_done[ddir] += bytes;

		if (io_u->end_io) {
			ret = io_u->end_io(td, io_u_ptr);
			io_u = *io_u_ptr;
			if (ret && !icd->error)
				icd->error = ret;
		}
	} else if (io_u->error) {
		icd->error = io_u->error;
		io_u_log_error(td, io_u);
	}
	if (icd->error) {
		enum error_type_bit eb = td_error_type(ddir, icd->error);

		if (!td_non_fatal_error(td, eb, icd->error))
			return;

		/*
		 * If there is a non_fatal error, then add to the error count
		 * and clear all the errors.
		 */
		update_error_count(td, icd->error);
		td_clear_error(td);
		icd->error = 0;
		if (io_u)
			io_u->error = 0;
	}
}

static void init_icd(struct thread_data *td, struct io_completion_data *icd,
		     int nr)
{
	int ddir;

	if (!gtod_reduce(td))
		fio_gettime(&icd->time, NULL);

	icd->nr = nr;

	icd->error = 0;
	for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
		icd->bytes_done[ddir] = 0;
}

static void ios_completed(struct thread_data *td,
			  struct io_completion_data *icd)
{
	struct io_u *io_u;
	int i;

	for (i = 0; i < icd->nr; i++) {
		io_u = td->io_ops->event(td, i);

		io_completed(td, &io_u, icd);

		if (io_u)
			put_io_u(td, io_u);
	}
}

/*
 * Complete a single io_u for the sync engines.
 */
int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
		       uint64_t *bytes)
{
	struct io_completion_data icd;

	init_icd(td, &icd, 1);
	io_completed(td, &io_u, &icd);

	if (io_u)
		put_io_u(td, io_u);

	if (icd.error) {
		td_verror(td, icd.error, "io_u_sync_complete");
		return -1;
	}

	if (bytes) {
		int ddir;

		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
			bytes[ddir] += icd.bytes_done[ddir];
	}

	return 0;
}

/*
 * Called to complete min_events number of io for the async engines.
 */
int io_u_queued_complete(struct thread_data *td, int min_evts,
			 uint64_t *bytes)
{
	struct io_completion_data icd;
	struct timespec *tvp = NULL;
	int ret;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };

	dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);

	if (!min_evts)
		tvp = &ts;
	else if (min_evts > td->cur_depth)
		min_evts = td->cur_depth;

	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
	if (ret < 0) {
		td_verror(td, -ret, "td_io_getevents");
		return ret;
	} else if (!ret)
		return ret;

	init_icd(td, &icd, ret);
	ios_completed(td, &icd);
	if (icd.error) {
		td_verror(td, icd.error, "io_u_queued_complete");
		return -1;
	}

	if (bytes) {
		int ddir;

		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
			bytes[ddir] += icd.bytes_done[ddir];
	}

	return 0;
}

/*
 * Call when io_u is really queued, to update the submission latency.
 */
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
	if (!td->o.disable_slat) {
		unsigned long slat_time;

		slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
		add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
				io_u->offset);
	}
}

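/*
 * Buffer dedupe: with dedupe_percentage set, roughly that percentage of
 * write buffers is generated from the previous seed (buf_state_prev)
 * instead of a fresh one, so the written data contains repeats that a
 * dedupe-capable target can find.
 */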
/*
 * See if we should reuse the last seed, if dedupe is enabled
 */
static struct frand_state *get_buf_state(struct thread_data *td)
{
	unsigned int v;
	unsigned long r;

	if (!td->o.dedupe_percentage)
		return &td->buf_state;
	else if (td->o.dedupe_percentage == 100)
		return &td->buf_state_prev;

	r = __rand(&td->dedupe_state);
	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));

	if (v <= td->o.dedupe_percentage)
		return &td->buf_state_prev;

	return &td->buf_state;
}

static void save_buf_state(struct thread_data *td, struct frand_state *rs)
{
	if (rs == &td->buf_state)
		frand_copy(&td->buf_state_prev, rs);
}

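/*
 * Buffer fill policy, in priority order: an explicit buffer_pattern,
 * then random data (optionally mixed with compressible runs of
 * compress_chunk bytes when compress_percentage is set), and finally
 * zeroes for zero_buffers jobs.
 */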
void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
		    unsigned int max_bs)
{
	if (td->o.buffer_pattern_bytes)
		fill_buffer_pattern(td, buf, max_bs);
	else if (!td->o.zero_buffers) {
		unsigned int perc = td->o.compress_percentage;
		struct frand_state *rs;
		unsigned int left = max_bs;

		do {
			rs = get_buf_state(td);

			min_write = min(min_write, left);

			if (perc) {
				unsigned int seg = min_write;

				seg = min(min_write, td->o.compress_chunk);
				if (!seg)
					seg = min_write;

				fill_random_buf_percentage(rs, buf, perc, seg,
								min_write);
			} else
				fill_random_buf(rs, buf, min_write);

			buf += min_write;
			left -= min_write;
			save_buf_state(td, rs);
		} while (left);
	} else
		memset(buf, 0, max_bs);
}

/*
 * "randomly" fill the buffer contents
 */
void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
		      unsigned int min_write, unsigned int max_bs)
{
	io_u->buf_filled_len = 0;
	fill_io_buffer(td, io_u->buf, min_write, max_bs);
}