[fio.git] / io_u.c
commit: Add missing file.h f->file_data change
1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <signal.h>
5#include <time.h>
6#include <assert.h>
7
8#include "fio.h"
9#include "hash.h"
10#include "verify.h"
11#include "trim.h"
12#include "lib/rand.h"
13
14struct io_completion_data {
15 int nr; /* input */
16
17 int error; /* output */
18 unsigned long bytes_done[2]; /* output */
19 struct timeval time; /* output */
20};
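/*
 * Note: bytes_done[] above is indexed by data direction, DDIR_READ and
 * DDIR_WRITE, matching how io_completed() fills it in further down.
 */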
21
22/*
23 * The ->file_map[] contains a map of blocks we have or have not done io
24 * to yet. Used to make sure we cover the entire range in a fair fashion.
25 */
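/*
 * Worked example (illustrative, assuming the usual macro definitions
 * where RAND_MAP_IDX() is block / BLOCKS_PER_MAP and RAND_MAP_BIT() is
 * block % BLOCKS_PER_MAP): with 32 blocks per map word, block 70 lands
 * in word index 2, bit 6, so the test below checks bit 6 of
 * f->file_map[2].
 */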
26static int random_map_free(struct fio_file *f, const unsigned long long block)
27{
28 unsigned int idx = RAND_MAP_IDX(f, block);
29 unsigned int bit = RAND_MAP_BIT(f, block);
30
31 dprint(FD_RANDOM, "free: b=%llu, idx=%u, bit=%u\n", block, idx, bit);
32
33 return (f->file_map[idx] & (1 << bit)) == 0;
34}
35
36/*
37 * Mark a given offset as used in the map.
38 */
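/*
 * If the io_u is not flagged IO_U_F_BUSY_OK and part of the range is
 * already set in the map, the loop below stops at the first overlap and
 * io_u->buflen is trimmed at the end so the io only spans the blocks
 * that were actually marked.
 */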
39static void mark_random_map(struct thread_data *td, struct io_u *io_u)
40{
41 unsigned int min_bs = td->o.rw_min_bs;
42 struct fio_file *f = io_u->file;
43 unsigned long long block;
44 unsigned int blocks, nr_blocks;
45 int busy_check;
46
47 block = (io_u->offset - f->file_offset) / (unsigned long long) min_bs;
48 nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
49 blocks = 0;
50 busy_check = !(io_u->flags & IO_U_F_BUSY_OK);
51
52 while (nr_blocks) {
53 unsigned int this_blocks, mask;
54 unsigned int idx, bit;
55
56 /*
57 * If we have a mixed random workload, we may
58 * encounter blocks we already did IO to.
59 */
60 if (!busy_check) {
61 blocks = nr_blocks;
62 break;
63 }
64 if ((td->o.ddir_seq_nr == 1) && !random_map_free(f, block))
65 break;
66
67 idx = RAND_MAP_IDX(f, block);
68 bit = RAND_MAP_BIT(f, block);
69
70 fio_assert(td, idx < f->num_maps);
71
72 this_blocks = nr_blocks;
73 if (this_blocks + bit > BLOCKS_PER_MAP)
74 this_blocks = BLOCKS_PER_MAP - bit;
75
76 do {
77 if (this_blocks == BLOCKS_PER_MAP)
78 mask = -1U;
79 else
80 mask = ((1U << this_blocks) - 1) << bit;
81
82 if (!(f->file_map[idx] & mask))
83 break;
84
85 this_blocks--;
86 } while (this_blocks);
87
88 if (!this_blocks)
89 break;
90
91 f->file_map[idx] |= mask;
92 nr_blocks -= this_blocks;
93 blocks += this_blocks;
94 block += this_blocks;
95 }
96
97 if ((blocks * min_bs) < io_u->buflen)
98 io_u->buflen = blocks * min_bs;
99}
100
101static unsigned long long last_block(struct thread_data *td, struct fio_file *f,
102 enum fio_ddir ddir)
103{
104 unsigned long long max_blocks;
105 unsigned long long max_size;
106
107 assert(ddir_rw(ddir));
108
109 /*
110 * Hmm, should we make sure that ->io_size <= ->real_file_size?
111 */
112 max_size = f->io_size;
113 if (max_size > f->real_file_size)
114 max_size = f->real_file_size;
115
116 max_blocks = max_size / (unsigned long long) td->o.ba[ddir];
117 if (!max_blocks)
118 return 0;
119
120 return max_blocks;
121}
122
123/*
124 * Return the next free block in the map.
125 */
126static int get_next_free_block(struct thread_data *td, struct fio_file *f,
127 enum fio_ddir ddir, unsigned long long *b)
128{
129 unsigned long long min_bs = td->o.rw_min_bs;
130 int i;
131
132 i = f->last_free_lookup;
133 *b = (i * BLOCKS_PER_MAP);
134 while ((*b) * min_bs < f->real_file_size &&
135 (*b) * min_bs < f->io_size) {
136 if (f->file_map[i] != (unsigned int) -1) {
137 *b += ffz(f->file_map[i]);
138 if (*b > last_block(td, f, ddir))
139 break;
140 f->last_free_lookup = i;
141 return 0;
142 }
143
144 *b += BLOCKS_PER_MAP;
145 i++;
146 }
147
148 dprint(FD_IO, "failed finding a free block\n");
149 return 1;
150}
151
152static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
153 enum fio_ddir ddir, unsigned long long *b)
154{
155 unsigned long long r;
156 int loops = 5;
157
158 do {
159 r = os_random_long(&td->random_state);
160 dprint(FD_RANDOM, "off rand %llu\n", r);
161 *b = (last_block(td, f, ddir) - 1)
162 * (r / ((unsigned long long) OS_RAND_MAX + 1.0));
163
164 /*
165 * if we are not maintaining a random map, we are done.
166 */
167 if (!file_randommap(td, f))
168 return 0;
169
170 /*
171 * calculate map offset and check if it's free
172 */
173 if (random_map_free(f, *b))
174 return 0;
175
176 dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
177 *b);
178 } while (--loops);
179
180 /*
 181 * we get here if we didn't succeed in looking up a block. generate
182 * a random start offset into the filemap, and find the first free
183 * block from there.
184 */
185 loops = 10;
186 do {
187 f->last_free_lookup = (f->num_maps - 1) *
188 (r / (OS_RAND_MAX + 1.0));
189 if (!get_next_free_block(td, f, ddir, b))
190 return 0;
191
192 r = os_random_long(&td->random_state);
193 } while (--loops);
194
195 /*
196 * that didn't work either, try exhaustive search from the start
197 */
198 f->last_free_lookup = 0;
199 return get_next_free_block(td, f, ddir, b);
200}
201
202static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
203 enum fio_ddir ddir, unsigned long long *b)
204{
205 if (get_next_rand_offset(td, f, ddir, b)) {
206 dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
207 f->file_name, f->last_pos, f->real_file_size);
208 return 1;
209 }
210
211 return 0;
212}
213
214static int get_next_seq_block(struct thread_data *td, struct fio_file *f,
215 enum fio_ddir ddir, unsigned long long *b)
216{
217 assert(ddir_rw(ddir));
218
219 if (f->last_pos < f->real_file_size) {
220 *b = (f->last_pos - f->file_offset) / td->o.min_bs[ddir];
221 return 0;
222 }
223
224 return 1;
225}
226
227static int get_next_block(struct thread_data *td, struct io_u *io_u,
228 enum fio_ddir ddir, int rw_seq, unsigned long long *b)
229{
230 struct fio_file *f = io_u->file;
231 int ret;
232
233 assert(ddir_rw(ddir));
234
235 if (rw_seq) {
236 if (td_random(td))
237 ret = get_next_rand_block(td, f, ddir, b);
238 else
239 ret = get_next_seq_block(td, f, ddir, b);
240 } else {
241 io_u->flags |= IO_U_F_BUSY_OK;
242
243 if (td->o.rw_seq == RW_SEQ_SEQ) {
244 ret = get_next_seq_block(td, f, ddir, b);
245 if (ret)
246 ret = get_next_rand_block(td, f, ddir, b);
247 } else if (td->o.rw_seq == RW_SEQ_IDENT) {
248 if (f->last_start != -1ULL)
249 *b = (f->last_start - f->file_offset)
250 / td->o.min_bs[ddir];
251 else
252 *b = 0;
253 ret = 0;
254 } else {
255 log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
256 ret = 1;
257 }
258 }
259
260 return ret;
261}
262
263/*
264 * For random io, generate a random new block and see if it's used. Repeat
265 * until we find a free one. For sequential io, just return the end of
266 * the last io issued.
267 */
268static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
269{
270 struct fio_file *f = io_u->file;
271 unsigned long long b;
272 enum fio_ddir ddir = io_u->ddir;
273 int rw_seq_hit = 0;
274
275 assert(ddir_rw(ddir));
276
277 if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
278 rw_seq_hit = 1;
279 td->ddir_seq_nr = td->o.ddir_seq_nr;
280 }
281
282 if (get_next_block(td, io_u, ddir, rw_seq_hit, &b))
283 return 1;
284
285 io_u->offset = b * td->o.ba[ddir];
286 if (io_u->offset >= f->io_size) {
287 dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
288 io_u->offset, f->io_size);
289 return 1;
290 }
291
292 io_u->offset += f->file_offset;
293 if (io_u->offset >= f->real_file_size) {
294 dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
295 io_u->offset, f->real_file_size);
296 return 1;
297 }
298
299 return 0;
300}
301
302static int get_next_offset(struct thread_data *td, struct io_u *io_u)
303{
304 struct prof_io_ops *ops = &td->prof_io_ops;
305
306 if (ops->fill_io_u_off)
307 return ops->fill_io_u_off(td, io_u);
308
309 return __get_next_offset(td, io_u);
310}
311
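/*
 * Sketch of the bssplit selection in __get_next_buflen() below
 * (illustrative numbers only): with two entries such as 4k at 60% and
 * 16k at 40%, perc accumulates to 60 and then 100, so a random r in the
 * lower 60% of the OS_RAND_MAX range picks the 4k size and anything
 * above picks 16k.
 */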
312static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
313{
314 const int ddir = io_u->ddir;
315 unsigned int uninitialized_var(buflen);
316 unsigned int minbs, maxbs;
317 long r;
318
319 assert(ddir_rw(ddir));
320
321 minbs = td->o.min_bs[ddir];
322 maxbs = td->o.max_bs[ddir];
323
324 if (minbs == maxbs)
325 buflen = minbs;
326 else {
327 r = os_random_long(&td->bsrange_state);
328 if (!td->o.bssplit_nr[ddir]) {
329 buflen = 1 + (unsigned int) ((double) maxbs *
330 (r / (OS_RAND_MAX + 1.0)));
331 if (buflen < minbs)
332 buflen = minbs;
333 } else {
334 long perc = 0;
335 unsigned int i;
336
337 for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
338 struct bssplit *bsp = &td->o.bssplit[ddir][i];
339
340 buflen = bsp->bs;
341 perc += bsp->perc;
342 if (r <= ((OS_RAND_MAX / 100L) * perc))
343 break;
344 }
345 }
346 if (!td->o.bs_unaligned && is_power_of_2(minbs))
347 buflen = (buflen + minbs - 1) & ~(minbs - 1);
348 }
349
350 if (io_u->offset + buflen > io_u->file->real_file_size) {
351 dprint(FD_IO, "lower buflen %u -> %u (ddir=%d)\n", buflen,
352 minbs, ddir);
353 buflen = minbs;
354 }
355
356 return buflen;
357}
358
359static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
360{
361 struct prof_io_ops *ops = &td->prof_io_ops;
362
363 if (ops->fill_io_u_size)
364 return ops->fill_io_u_size(td, io_u);
365
366 return __get_next_buflen(td, io_u);
367}
368
369static void set_rwmix_bytes(struct thread_data *td)
370{
371 unsigned int diff;
372
373 /*
374 * we do time or byte based switch. this is needed because
375 * buffered writes may issue a lot quicker than they complete,
376 * whereas reads do not.
377 */
378 diff = td->o.rwmix[td->rwmix_ddir ^ 1];
379 td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
380}
381
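/*
 * Example for the helper below (illustrative): with rwmixread=75, v is
 * uniform in 1..100, so about 75% of calls return DDIR_READ and the
 * rest DDIR_WRITE.
 */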
382static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
383{
384 unsigned int v;
385 long r;
386
387 r = os_random_long(&td->rwmix_state);
388 v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
389 if (v <= td->o.rwmix[DDIR_READ])
390 return DDIR_READ;
391
392 return DDIR_WRITE;
393}
394
395static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
396{
397 enum fio_ddir odir = ddir ^ 1;
398 struct timeval t;
399 long usec;
400
401 assert(ddir_rw(ddir));
402
403 if (td->rate_pending_usleep[ddir] <= 0)
404 return ddir;
405
406 /*
407 * We have too much pending sleep in this direction. See if we
408 * should switch.
409 */
410 if (td_rw(td)) {
411 /*
412 * Other direction does not have too much pending, switch
413 */
414 if (td->rate_pending_usleep[odir] < 100000)
415 return odir;
416
417 /*
418 * Both directions have pending sleep. Sleep the minimum time
419 * and deduct from both.
420 */
421 if (td->rate_pending_usleep[ddir] <=
422 td->rate_pending_usleep[odir]) {
423 usec = td->rate_pending_usleep[ddir];
424 } else {
425 usec = td->rate_pending_usleep[odir];
426 ddir = odir;
427 }
428 } else
429 usec = td->rate_pending_usleep[ddir];
430
431 fio_gettime(&t, NULL);
432 usec_sleep(td, usec);
433 usec = utime_since_now(&t);
434
435 td->rate_pending_usleep[ddir] -= usec;
436
437 odir = ddir ^ 1;
438 if (td_rw(td) && __should_check_rate(td, odir))
439 td->rate_pending_usleep[odir] -= usec;
440
441 return ddir;
442}
443
444/*
445 * Return the data direction for the next io_u. If the job is a
446 * mixed read/write workload, check the rwmix cycle and switch if
447 * necessary.
448 */
449static enum fio_ddir get_rw_ddir(struct thread_data *td)
450{
451 enum fio_ddir ddir;
452
453 /*
454 * see if it's time to fsync
455 */
456 if (td->o.fsync_blocks &&
457 !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
458 td->io_issues[DDIR_WRITE] && should_fsync(td))
459 return DDIR_SYNC;
460
461 /*
462 * see if it's time to fdatasync
463 */
464 if (td->o.fdatasync_blocks &&
465 !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
466 td->io_issues[DDIR_WRITE] && should_fsync(td))
467 return DDIR_DATASYNC;
468
469 /*
470 * see if it's time to sync_file_range
471 */
472 if (td->sync_file_range_nr &&
473 !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
474 td->io_issues[DDIR_WRITE] && should_fsync(td))
475 return DDIR_SYNC_FILE_RANGE;
476
477 if (td_rw(td)) {
478 /*
479 * Check if it's time to seed a new data direction.
480 */
481 if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
482 /*
483 * Put a top limit on how many bytes we do for
484 * one data direction, to avoid overflowing the
485 * ranges too much
486 */
487 ddir = get_rand_ddir(td);
488
489 if (ddir != td->rwmix_ddir)
490 set_rwmix_bytes(td);
491
492 td->rwmix_ddir = ddir;
493 }
494 ddir = td->rwmix_ddir;
495 } else if (td_read(td))
496 ddir = DDIR_READ;
497 else
498 ddir = DDIR_WRITE;
499
500 td->rwmix_ddir = rate_ddir(td, ddir);
501 return td->rwmix_ddir;
502}
503
504void put_file_log(struct thread_data *td, struct fio_file *f)
505{
506 int ret = put_file(td, f);
507
508 if (ret)
509 td_verror(td, ret, "file close");
510}
511
512void put_io_u(struct thread_data *td, struct io_u *io_u)
513{
514 td_io_u_lock(td);
515
516 io_u->flags |= IO_U_F_FREE;
517 io_u->flags &= ~IO_U_F_FREE_DEF;
518
519 if (io_u->file)
520 put_file_log(td, io_u->file);
521
522 io_u->file = NULL;
523 if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
524 td->cur_depth--;
525 flist_del_init(&io_u->list);
526 flist_add(&io_u->list, &td->io_u_freelist);
527 td_io_u_unlock(td);
528 td_io_u_free_notify(td);
529}
530
531void clear_io_u(struct thread_data *td, struct io_u *io_u)
532{
533 io_u->flags &= ~IO_U_F_FLIGHT;
534 put_io_u(td, io_u);
535}
536
537void requeue_io_u(struct thread_data *td, struct io_u **io_u)
538{
539 struct io_u *__io_u = *io_u;
540
541 dprint(FD_IO, "requeue %p\n", __io_u);
542
543 td_io_u_lock(td);
544
545 __io_u->flags |= IO_U_F_FREE;
546 if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(__io_u->ddir))
547 td->io_issues[__io_u->ddir]--;
548
549 __io_u->flags &= ~IO_U_F_FLIGHT;
550 if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
551 td->cur_depth--;
552 flist_del(&__io_u->list);
553 flist_add_tail(&__io_u->list, &td->io_u_requeues);
554 td_io_u_unlock(td);
555 *io_u = NULL;
556}
557
558static int fill_io_u(struct thread_data *td, struct io_u *io_u)
559{
560 if (td->io_ops->flags & FIO_NOIO)
561 goto out;
562
563 io_u->ddir = get_rw_ddir(td);
564
565 /*
566 * fsync() or fdatasync() or trim etc, we are done
567 */
568 if (!ddir_rw(io_u->ddir))
569 goto out;
570
571 /*
572 * See if it's time to switch to a new zone
573 */
574 if (td->zone_bytes >= td->o.zone_size) {
575 td->zone_bytes = 0;
576 io_u->file->last_pos += td->o.zone_skip;
577 td->io_skip_bytes += td->o.zone_skip;
578 }
579
580 /*
581 * No log, let the seq/rand engine retrieve the next buflen and
582 * position.
583 */
584 if (get_next_offset(td, io_u)) {
585 dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
586 return 1;
587 }
588
589 io_u->buflen = get_next_buflen(td, io_u);
590 if (!io_u->buflen) {
591 dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
592 return 1;
593 }
594
595 if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
596 dprint(FD_IO, "io_u %p, offset too large\n", io_u);
597 dprint(FD_IO, " off=%llu/%lu > %llu\n", io_u->offset,
598 io_u->buflen, io_u->file->real_file_size);
599 return 1;
600 }
601
602 /*
603 * mark entry before potentially trimming io_u
604 */
605 if (td_random(td) && file_randommap(td, io_u->file))
606 mark_random_map(td, io_u);
607
608 /*
609 * If using a write iolog, store this entry.
610 */
611out:
612 dprint_io_u(io_u, "fill_io_u");
613 td->zone_bytes += io_u->buflen;
614 log_io_u(td, io_u);
615 return 0;
616}
617
618static void __io_u_mark_map(unsigned int *map, unsigned int nr)
619{
620 int index = 0;
621
622 switch (nr) {
623 default:
624 index = 6;
625 break;
626 case 33 ... 64:
627 index = 5;
628 break;
629 case 17 ... 32:
630 index = 4;
631 break;
632 case 9 ... 16:
633 index = 3;
634 break;
635 case 5 ... 8:
636 index = 2;
637 break;
638 case 1 ... 4:
639 index = 1;
640 case 0:
641 break;
642 }
643
644 map[index]++;
645}
646
647void io_u_mark_submit(struct thread_data *td, unsigned int nr)
648{
649 __io_u_mark_map(td->ts.io_u_submit, nr);
650 td->ts.total_submit++;
651}
652
653void io_u_mark_complete(struct thread_data *td, unsigned int nr)
654{
655 __io_u_mark_map(td->ts.io_u_complete, nr);
656 td->ts.total_complete++;
657}
658
659void io_u_mark_depth(struct thread_data *td, unsigned int nr)
660{
661 int index = 0;
662
663 switch (td->cur_depth) {
664 default:
665 index = 6;
666 break;
667 case 32 ... 63:
668 index = 5;
669 break;
670 case 16 ... 31:
671 index = 4;
672 break;
673 case 8 ... 15:
674 index = 3;
675 break;
676 case 4 ... 7:
677 index = 2;
678 break;
679 case 2 ... 3:
680 index = 1;
681 case 1:
682 break;
683 }
684
685 td->ts.io_u_map[index] += nr;
686}
687
688static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
689{
690 int index = 0;
691
692 assert(usec < 1000);
693
694 switch (usec) {
695 case 750 ... 999:
696 index = 9;
697 break;
698 case 500 ... 749:
699 index = 8;
700 break;
701 case 250 ... 499:
702 index = 7;
703 break;
704 case 100 ... 249:
705 index = 6;
706 break;
707 case 50 ... 99:
708 index = 5;
709 break;
710 case 20 ... 49:
711 index = 4;
712 break;
713 case 10 ... 19:
714 index = 3;
715 break;
716 case 4 ... 9:
717 index = 2;
718 break;
719 case 2 ... 3:
720 index = 1;
721 case 0 ... 1:
722 break;
723 }
724
725 assert(index < FIO_IO_U_LAT_U_NR);
726 td->ts.io_u_lat_u[index]++;
727}
728
729static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
730{
731 int index = 0;
732
733 switch (msec) {
734 default:
735 index = 11;
736 break;
737 case 1000 ... 1999:
738 index = 10;
739 break;
740 case 750 ... 999:
741 index = 9;
742 break;
743 case 500 ... 749:
744 index = 8;
745 break;
746 case 250 ... 499:
747 index = 7;
748 break;
749 case 100 ... 249:
750 index = 6;
751 break;
752 case 50 ... 99:
753 index = 5;
754 break;
755 case 20 ... 49:
756 index = 4;
757 break;
758 case 10 ... 19:
759 index = 3;
760 break;
761 case 4 ... 9:
762 index = 2;
763 break;
764 case 2 ... 3:
765 index = 1;
766 case 0 ... 1:
767 break;
768 }
769
770 assert(index < FIO_IO_U_LAT_M_NR);
771 td->ts.io_u_lat_m[index]++;
772}
773
774static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
775{
776 if (usec < 1000)
777 io_u_mark_lat_usec(td, usec);
778 else
779 io_u_mark_lat_msec(td, usec / 1000);
780}
781
782/*
783 * Get next file to service by choosing one at random
784 */
785static struct fio_file *get_next_file_rand(struct thread_data *td,
786 enum fio_file_flags goodf,
787 enum fio_file_flags badf)
788{
789 struct fio_file *f;
790 int fno;
791
792 do {
793 long r = os_random_long(&td->next_file_state);
794 int opened = 0;
795
796 fno = (unsigned int) ((double) td->o.nr_files
797 * (r / (OS_RAND_MAX + 1.0)));
798 f = td->files[fno];
799 if (fio_file_done(f))
800 continue;
801
802 if (!fio_file_open(f)) {
803 int err;
804
805 err = td_io_open_file(td, f);
806 if (err)
807 continue;
808 opened = 1;
809 }
810
811 if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
812 dprint(FD_FILE, "get_next_file_rand: %p\n", f);
813 return f;
814 }
815 if (opened)
816 td_io_close_file(td, f);
817 } while (1);
818}
819
820/*
821 * Get next file to service by doing round robin between all available ones
822 */
823static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
824 int badf)
825{
826 unsigned int old_next_file = td->next_file;
827 struct fio_file *f;
828
829 do {
830 int opened = 0;
831
832 f = td->files[td->next_file];
833
834 td->next_file++;
835 if (td->next_file >= td->o.nr_files)
836 td->next_file = 0;
837
838 dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
839 if (fio_file_done(f)) {
840 f = NULL;
841 continue;
842 }
843
844 if (!fio_file_open(f)) {
845 int err;
846
847 err = td_io_open_file(td, f);
848 if (err) {
849 dprint(FD_FILE, "error %d on open of %s\n",
850 err, f->file_name);
851 f = NULL;
852 continue;
853 }
854 opened = 1;
855 }
856
857 dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
858 f->flags);
859 if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
860 break;
861
862 if (opened)
863 td_io_close_file(td, f);
864
865 f = NULL;
866 } while (td->next_file != old_next_file);
867
868 dprint(FD_FILE, "get_next_file_rr: %p\n", f);
869 return f;
870}
871
872static struct fio_file *__get_next_file(struct thread_data *td)
873{
874 struct fio_file *f;
875
876 assert(td->o.nr_files <= td->files_index);
877
878 if (td->nr_done_files >= td->o.nr_files) {
879 dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
880 " nr_files=%d\n", td->nr_open_files,
881 td->nr_done_files,
882 td->o.nr_files);
883 return NULL;
884 }
885
886 f = td->file_service_file;
887 if (f && fio_file_open(f) && !fio_file_closing(f)) {
888 if (td->o.file_service_type == FIO_FSERVICE_SEQ)
889 goto out;
890 if (td->file_service_left--)
891 goto out;
892 }
893
894 if (td->o.file_service_type == FIO_FSERVICE_RR ||
895 td->o.file_service_type == FIO_FSERVICE_SEQ)
896 f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
897 else
898 f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);
899
900 td->file_service_file = f;
901 td->file_service_left = td->file_service_nr - 1;
902out:
903 dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
904 return f;
905}
906
907static struct fio_file *get_next_file(struct thread_data *td)
908{
909 struct prof_io_ops *ops = &td->prof_io_ops;
910
911 if (ops->get_next_file)
912 return ops->get_next_file(td);
913
914 return __get_next_file(td);
915}
916
917static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
918{
919 struct fio_file *f;
920
921 do {
922 f = get_next_file(td);
923 if (!f)
924 return 1;
925
926 io_u->file = f;
927 get_file(f);
928
929 if (!fill_io_u(td, io_u))
930 break;
931
932 put_file_log(td, f);
933 td_io_close_file(td, f);
934 io_u->file = NULL;
935 fio_file_set_done(f);
936 td->nr_done_files++;
937 dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
938 td->nr_done_files, td->o.nr_files);
939 } while (1);
940
941 return 0;
942}
943
944
945struct io_u *__get_io_u(struct thread_data *td)
946{
947 struct io_u *io_u = NULL;
948
949 td_io_u_lock(td);
950
951again:
952 if (!flist_empty(&td->io_u_requeues))
953 io_u = flist_entry(td->io_u_requeues.next, struct io_u, list);
954 else if (!queue_full(td)) {
955 io_u = flist_entry(td->io_u_freelist.next, struct io_u, list);
956
957 io_u->buflen = 0;
958 io_u->resid = 0;
959 io_u->file = NULL;
960 io_u->end_io = NULL;
961 }
962
963 if (io_u) {
964 assert(io_u->flags & IO_U_F_FREE);
965 io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
966 io_u->flags &= ~IO_U_F_TRIMMED;
967
968 io_u->error = 0;
969 flist_del(&io_u->list);
970 flist_add(&io_u->list, &td->io_u_busylist);
971 td->cur_depth++;
972 io_u->flags |= IO_U_F_IN_CUR_DEPTH;
973 } else if (td->o.verify_async) {
974 /*
975 * We ran out, wait for async verify threads to finish and
976 * return one
977 */
978 pthread_cond_wait(&td->free_cond, &td->io_u_lock);
979 goto again;
980 }
981
982 td_io_u_unlock(td);
983 return io_u;
984}
985
986static int check_get_trim(struct thread_data *td, struct io_u *io_u)
987{
988 if (td->o.trim_backlog && td->trim_entries) {
989 int get_trim = 0;
990
991 if (td->trim_batch) {
992 td->trim_batch--;
993 get_trim = 1;
994 } else if (!(td->io_hist_len % td->o.trim_backlog) &&
995 td->last_ddir != DDIR_READ) {
996 td->trim_batch = td->o.trim_batch;
997 if (!td->trim_batch)
998 td->trim_batch = td->o.trim_backlog;
999 get_trim = 1;
1000 }
1001
1002 if (get_trim && !get_next_trim(td, io_u))
1003 return 1;
1004 }
1005
1006 return 0;
1007}
1008
1009static int check_get_verify(struct thread_data *td, struct io_u *io_u)
1010{
1011 if (td->o.verify_backlog && td->io_hist_len) {
1012 int get_verify = 0;
1013
1014 if (td->verify_batch) {
1015 td->verify_batch--;
1016 get_verify = 1;
1017 } else if (!(td->io_hist_len % td->o.verify_backlog) &&
1018 td->last_ddir != DDIR_READ) {
1019 td->verify_batch = td->o.verify_batch;
1020 if (!td->verify_batch)
1021 td->verify_batch = td->o.verify_backlog;
1022 get_verify = 1;
1023 }
1024
1025 if (get_verify && !get_next_verify(td, io_u))
1026 return 1;
1027 }
1028
1029 return 0;
1030}
1031
1032/*
1033 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
1034 * etc. The returned io_u is fully ready to be prepped and submitted.
1035 */
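/*
 * A rough sketch of how a backend drives this (not the exact fio main
 * loop): get_io_u() returns a prepped io_u, td_io_queue() submits it,
 * and io_u_sync_complete() or io_u_queued_complete() below reap it;
 * put_io_u() returns an io_u to the free list on the error paths.
 */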
1036struct io_u *get_io_u(struct thread_data *td)
1037{
1038 struct fio_file *f;
1039 struct io_u *io_u;
1040
1041 io_u = __get_io_u(td);
1042 if (!io_u) {
1043 dprint(FD_IO, "__get_io_u failed\n");
1044 return NULL;
1045 }
1046
1047 if (check_get_verify(td, io_u))
1048 goto out;
1049 if (check_get_trim(td, io_u))
1050 goto out;
1051
1052 /*
1053 * from a requeue, io_u already setup
1054 */
1055 if (io_u->file)
1056 goto out;
1057
1058 /*
1059 * If using an iolog, grab next piece if any available.
1060 */
1061 if (td->o.read_iolog_file) {
1062 if (read_iolog_get(td, io_u))
1063 goto err_put;
1064 } else if (set_io_u_file(td, io_u)) {
1065 dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
1066 goto err_put;
1067 }
1068
1069 f = io_u->file;
1070 assert(fio_file_open(f));
1071
1072 if (ddir_rw(io_u->ddir)) {
1073 if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
1074 dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
1075 goto err_put;
1076 }
1077
1078 f->last_start = io_u->offset;
1079 f->last_pos = io_u->offset + io_u->buflen;
1080
1081 if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_WRITE)
1082 populate_verify_io_u(td, io_u);
1083 else if (td->o.refill_buffers && io_u->ddir == DDIR_WRITE)
1084 io_u_fill_buffer(td, io_u, io_u->xfer_buflen);
1085 else if (io_u->ddir == DDIR_READ) {
1086 /*
 1087 * Reset the buf_filled parameters so that the buffer gets
 1088 * refilled the next time it is used for writes.
1089 */
1090 io_u->buf_filled_len = 0;
1091 }
1092 }
1093
1094 /*
1095 * Set io data pointers.
1096 */
1097 io_u->xfer_buf = io_u->buf;
1098 io_u->xfer_buflen = io_u->buflen;
1099
1100out:
1101 assert(io_u->file);
1102 if (!td_io_prep(td, io_u)) {
1103 if (!td->o.disable_slat)
1104 fio_gettime(&io_u->start_time, NULL);
1105 return io_u;
1106 }
1107err_put:
1108 dprint(FD_IO, "get_io_u failed\n");
1109 put_io_u(td, io_u);
1110 return NULL;
1111}
1112
1113void io_u_log_error(struct thread_data *td, struct io_u *io_u)
1114{
1115 const char *msg[] = { "read", "write", "sync", "datasync",
1116 "sync_file_range", "wait", "trim" };
1117
1118
1119
1120 log_err("fio: io_u error");
1121
1122 if (io_u->file)
1123 log_err(" on file %s", io_u->file->file_name);
1124
1125 log_err(": %s\n", strerror(io_u->error));
1126
1127 log_err(" %s offset=%llu, buflen=%lu\n", msg[io_u->ddir],
1128 io_u->offset, io_u->xfer_buflen);
1129
1130 if (!td->error)
1131 td_verror(td, io_u->error, "io_u error");
1132}
1133
1134static void io_completed(struct thread_data *td, struct io_u *io_u,
1135 struct io_completion_data *icd)
1136{
1137 /*
 1138 * Older gcc's are too dumb to realize that usec is always
 1139 * initialized before use, silence that warning.
1140 */
1141 unsigned long uninitialized_var(usec);
1142 struct fio_file *f;
1143
1144 dprint_io_u(io_u, "io complete");
1145
1146 td_io_u_lock(td);
1147 assert(io_u->flags & IO_U_F_FLIGHT);
1148 io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
1149 td_io_u_unlock(td);
1150
1151 if (ddir_sync(io_u->ddir)) {
1152 td->last_was_sync = 1;
1153 f = io_u->file;
1154 if (f) {
1155 f->first_write = -1ULL;
1156 f->last_write = -1ULL;
1157 }
1158 return;
1159 }
1160
1161 td->last_was_sync = 0;
1162 td->last_ddir = io_u->ddir;
1163
1164 if (!io_u->error && ddir_rw(io_u->ddir)) {
1165 unsigned int bytes = io_u->buflen - io_u->resid;
1166 const enum fio_ddir idx = io_u->ddir;
1167 const enum fio_ddir odx = io_u->ddir ^ 1;
1168 int ret;
1169
1170 td->io_blocks[idx]++;
1171 td->io_bytes[idx] += bytes;
1172 td->this_io_bytes[idx] += bytes;
1173
1174 if (idx == DDIR_WRITE) {
1175 f = io_u->file;
1176 if (f) {
1177 if (f->first_write == -1ULL ||
1178 io_u->offset < f->first_write)
1179 f->first_write = io_u->offset;
1180 if (f->last_write == -1ULL ||
1181 ((io_u->offset + bytes) > f->last_write))
1182 f->last_write = io_u->offset + bytes;
1183 }
1184 }
1185
1186 if (ramp_time_over(td)) {
1187 unsigned long uninitialized_var(lusec);
1188
1189 if (!td->o.disable_clat || !td->o.disable_bw)
1190 lusec = utime_since(&io_u->issue_time,
1191 &icd->time);
1192 if (!td->o.disable_lat) {
1193 unsigned long tusec;
1194
1195 tusec = utime_since(&io_u->start_time,
1196 &icd->time);
1197 add_lat_sample(td, idx, tusec, bytes);
1198 }
1199 if (!td->o.disable_clat) {
1200 add_clat_sample(td, idx, lusec, bytes);
1201 io_u_mark_latency(td, lusec);
1202 }
1203 if (!td->o.disable_bw)
1204 add_bw_sample(td, idx, bytes, &icd->time);
1205 if (__should_check_rate(td, idx)) {
1206 td->rate_pending_usleep[idx] =
1207 ((td->this_io_bytes[idx] *
1208 td->rate_nsec_cycle[idx]) / 1000 -
1209 utime_since_now(&td->start));
1210 }
1211 if (__should_check_rate(td, idx ^ 1))
1212 td->rate_pending_usleep[odx] =
1213 ((td->this_io_bytes[odx] *
1214 td->rate_nsec_cycle[odx]) / 1000 -
1215 utime_since_now(&td->start));
1216 }
1217
1218 if (td_write(td) && idx == DDIR_WRITE &&
1219 td->o.do_verify &&
1220 td->o.verify != VERIFY_NONE)
1221 log_io_piece(td, io_u);
1222
1223 icd->bytes_done[idx] += bytes;
1224
1225 if (io_u->end_io) {
1226 ret = io_u->end_io(td, io_u);
1227 if (ret && !icd->error)
1228 icd->error = ret;
1229 }
1230 } else if (io_u->error) {
1231 icd->error = io_u->error;
1232 io_u_log_error(td, io_u);
1233 }
1234 if (td->o.continue_on_error && icd->error &&
1235 td_non_fatal_error(icd->error)) {
1236 /*
1237 * If there is a non_fatal error, then add to the error count
1238 * and clear all the errors.
1239 */
1240 update_error_count(td, icd->error);
1241 td_clear_error(td);
1242 icd->error = 0;
1243 io_u->error = 0;
1244 }
1245}
1246
1247static void init_icd(struct thread_data *td, struct io_completion_data *icd,
1248 int nr)
1249{
1250 if (!td->o.disable_clat || !td->o.disable_bw)
1251 fio_gettime(&icd->time, NULL);
1252
1253 icd->nr = nr;
1254
1255 icd->error = 0;
1256 icd->bytes_done[0] = icd->bytes_done[1] = 0;
1257}
1258
1259static void ios_completed(struct thread_data *td,
1260 struct io_completion_data *icd)
1261{
1262 struct io_u *io_u;
1263 int i;
1264
1265 for (i = 0; i < icd->nr; i++) {
1266 io_u = td->io_ops->event(td, i);
1267
1268 io_completed(td, io_u, icd);
1269
1270 if (!(io_u->flags & IO_U_F_FREE_DEF))
1271 put_io_u(td, io_u);
1272 }
1273}
1274
1275/*
1276 * Complete a single io_u for the sync engines.
1277 */
1278int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
1279 unsigned long *bytes)
1280{
1281 struct io_completion_data icd;
1282
1283 init_icd(td, &icd, 1);
1284 io_completed(td, io_u, &icd);
1285
1286 if (!(io_u->flags & IO_U_F_FREE_DEF))
1287 put_io_u(td, io_u);
1288
1289 if (icd.error) {
1290 td_verror(td, icd.error, "io_u_sync_complete");
1291 return -1;
1292 }
1293
1294 if (bytes) {
1295 bytes[0] += icd.bytes_done[0];
1296 bytes[1] += icd.bytes_done[1];
1297 }
1298
1299 return 0;
1300}
1301
1302/*
1303 * Called to complete min_events number of io for the async engines.
1304 */
1305int io_u_queued_complete(struct thread_data *td, int min_evts,
1306 unsigned long *bytes)
1307{
1308 struct io_completion_data icd;
1309 struct timespec *tvp = NULL;
1310 int ret;
1311 struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
1312
1313 dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);
1314
1315 if (!min_evts)
1316 tvp = &ts;
1317
1318 ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
1319 if (ret < 0) {
1320 td_verror(td, -ret, "td_io_getevents");
1321 return ret;
1322 } else if (!ret)
1323 return ret;
1324
1325 init_icd(td, &icd, ret);
1326 ios_completed(td, &icd);
1327 if (icd.error) {
1328 td_verror(td, icd.error, "io_u_queued_complete");
1329 return -1;
1330 }
1331
1332 if (bytes) {
1333 bytes[0] += icd.bytes_done[0];
1334 bytes[1] += icd.bytes_done[1];
1335 }
1336
1337 return 0;
1338}
1339
1340/*
1341 * Call when io_u is really queued, to update the submission latency.
1342 */
1343void io_u_queued(struct thread_data *td, struct io_u *io_u)
1344{
1345 if (!td->o.disable_slat) {
1346 unsigned long slat_time;
1347
1348 slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
1349 add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen);
1350 }
1351}
1352
1353/*
1354 * "randomly" fill the buffer contents
1355 */
1356void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
1357 unsigned int max_bs)
1358{
1359 if (!td->o.zero_buffers)
1360 fill_random_buf(io_u->buf, max_bs);
1361 else
1362 memset(io_u->buf, 0, max_bs);
1363}