splice: fix problem with current mainline kernels
[fio.git] / io_u.c
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"

/*
 * Change this define to play with the timeout handling
 */
#undef FIO_USE_TIMEOUT

struct io_completion_data {
	int nr;				/* input */

	int error;			/* output */
	unsigned long bytes_done[2];	/* output */
	struct timeval time;		/* output */
};

/*
 * The ->file_map[] contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static int random_map_free(struct thread_data *td, struct fio_file *f,
			   unsigned long long block)
{
	unsigned int idx = RAND_MAP_IDX(td, f, block);
	unsigned int bit = RAND_MAP_BIT(td, f, block);

	return (f->file_map[idx] & (1UL << bit)) == 0;
}

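/*
 * Worked example (a sketch; it assumes RAND_MAP_IDX() and RAND_MAP_BIT()
 * decompose a block number into word index and bit offset, i.e.
 * block / BLOCKS_PER_MAP and block % BLOCKS_PER_MAP, as the names suggest):
 *
 *	With BLOCKS_PER_MAP = 32, block 70 maps to idx = 2, bit = 6.
 *	random_map_free() returns non-zero until mark_random_map() sets
 *	bit 6 in f->file_map[2].
 */
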
/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned int min_bs = td->o.rw_min_bs;
	struct fio_file *f = io_u->file;
	unsigned long long block;
	unsigned int blocks;
	unsigned int nr_blocks;

	block = io_u->offset / (unsigned long long) min_bs;
	blocks = 0;
	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;

	while (blocks < nr_blocks) {
		unsigned int idx, bit;

		/*
		 * If we have a mixed random workload, we may
		 * encounter blocks we already did IO to.
		 */
		if ((td->o.ddir_nr == 1) && !random_map_free(td, f, block))
			break;

		idx = RAND_MAP_IDX(td, f, block);
		bit = RAND_MAP_BIT(td, f, block);

		fio_assert(td, idx < f->num_maps);

		f->file_map[idx] |= (1UL << bit);
		block++;
		blocks++;
	}

	/*
	 * If we stopped early, trim the io_u so it only covers the blocks
	 * we actually marked.
	 */
	if ((blocks * min_bs) < io_u->buflen)
		io_u->buflen = blocks * min_bs;
}

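/*
 * Example of the trim above: min_bs = 4k, buflen = 16k, starting block
 * free but the third covered block already marked. The loop marks two
 * blocks, breaks, and buflen is shrunk to 8k so the io_u only touches
 * blocks it owns in the map.
 */
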
/*
 * Return the next free block in the map.
 */
static int get_next_free_block(struct thread_data *td, struct fio_file *f,
			       unsigned long long *b)
{
	int i;

	i = f->last_free_lookup;
	*b = (i * BLOCKS_PER_MAP);
	while ((*b) * td->o.rw_min_bs < f->real_file_size) {
		if (f->file_map[i] != -1UL) {
			*b += fio_ffz(f->file_map[i]);
			f->last_free_lookup = i;
			return 0;
		}

		*b += BLOCKS_PER_MAP;
		i++;
	}

	return 1;
}

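/*
 * A minimal sketch of the scan above, assuming fio_ffz() returns the
 * index of the first zero bit in a word: a map word of 0x0000ffff
 * (low 16 blocks used) yields fio_ffz() == 16, so *b points at the
 * 17th block of that word. Words equal to -1UL are fully used and
 * skipped a whole BLOCKS_PER_MAP at a time.
 */
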
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				int ddir, unsigned long long *b)
{
	unsigned long long max_blocks = f->io_size / td->o.min_bs[ddir];
	unsigned long long r, rb;
	int loops = 5;

	do {
		r = os_random_long(&td->random_state);
		if (!max_blocks)
			*b = 0;
		else
			*b = ((max_blocks - 1) * r / (unsigned long long) (RAND_MAX+1.0));

		/*
		 * if we are not maintaining a random map, we are done.
		 */
		if (td->o.norandommap)
			return 0;

		/*
		 * calculate map offset and check if it's free
		 */
		rb = *b + (f->file_offset / td->o.min_bs[ddir]);
		if (random_map_free(td, f, rb))
			return 0;

	} while (--loops);

	/*
	 * we get here if we didn't succeed in looking up a block. generate
	 * a random start offset into the filemap, and find the first free
	 * block from there.
	 */
	loops = 10;
	do {
		f->last_free_lookup = (f->num_maps - 1) * (r / (RAND_MAX+1.0));
		if (!get_next_free_block(td, f, b))
			return 0;

		r = os_random_long(&td->random_state);
	} while (--loops);

	/*
	 * that didn't work either, try exhaustive search from the start
	 */
	f->last_free_lookup = 0;
	return get_next_free_block(td, f, b);
}

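/*
 * The lookup above is a three-stage strategy: up to 5 purely random
 * probes, then up to 10 random restarts that fall back to a linear
 * scan via get_next_free_block(), and finally one exhaustive scan
 * from map offset 0. Near the end of a run, when most blocks are
 * already used, the random probes mostly miss and the linear
 * fallbacks do the work.
 */
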
/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int get_next_offset(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	const int ddir = io_u->ddir;
	unsigned long long b;

	if (td_random(td) && (td->o.ddir_nr && !--td->ddir_nr)) {
		td->ddir_nr = td->o.ddir_nr;

		if (get_next_rand_offset(td, f, ddir, &b))
			return 1;
	} else {
		if (f->last_pos >= f->real_file_size) {
			if (!td_random(td) || get_next_rand_offset(td, f, ddir, &b))
				return 1;
		} else
			b = (f->last_pos - f->file_offset) / td->o.min_bs[ddir];
	}

	io_u->offset = (b * td->o.min_bs[ddir]) + f->file_offset;
	if (io_u->offset >= f->real_file_size)
		return 1;

	return 0;
}

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
	const int ddir = io_u->ddir;
	unsigned int buflen;
	long r;

	if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
		buflen = td->o.min_bs[ddir];
	else {
		r = os_random_long(&td->bsrange_state);
		buflen = (unsigned int) (1 + (double) (td->o.max_bs[ddir] - 1) * r / (RAND_MAX + 1.0));
		if (!td->o.bs_unaligned)
			buflen = (buflen + td->o.min_bs[ddir] - 1) & ~(td->o.min_bs[ddir] - 1);
	}

	if (io_u->offset + buflen > io_u->file->real_file_size)
		buflen = td->o.min_bs[ddir];

	return buflen;
}

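/*
 * Example of the rounding above, assuming min_bs is a power of two
 * (the mask trick only rounds correctly then): with min_bs = 4k, a raw
 * buflen of 9000 gives (9000 + 4095) & ~4095 = 12288, i.e. buflen is
 * rounded up to the next min_bs boundary.
 */
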
static void set_rwmix_bytes(struct thread_data *td)
{
	unsigned long long rbytes;
	unsigned int diff;

	/*
	 * we do either a time or a byte based switch. this is needed
	 * because buffered writes may issue a lot quicker than they
	 * complete, whereas reads do not.
	 */
	rbytes = td->io_bytes[td->rwmix_ddir] - td->rwmix_bytes;
	diff = td->o.rwmix[td->rwmix_ddir ^ 1];

	td->rwmix_bytes = td->io_bytes[td->rwmix_ddir] + (rbytes * ((100 - diff)) / diff);
}

static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	unsigned int v;
	long r;

	r = os_random_long(&td->rwmix_state);
	v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
	if (v < td->o.rwmix[DDIR_READ])
		return DDIR_READ;

	return DDIR_WRITE;
}

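/*
 * Example (using the code above as-is): with rwmix[DDIR_READ] = 70,
 * v is uniform in 1..100 and v < 70 selects a read, so roughly 70%
 * of the io_us pick DDIR_READ and the rest DDIR_WRITE.
 */
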
/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
	if (td_rw(td)) {
		struct timeval now;
		unsigned long elapsed;
		unsigned int cycle;

		fio_gettime(&now, NULL);
		elapsed = mtime_since_now(&td->rwmix_switch);

		/*
		 * if this is the first cycle, make it shorter
		 */
		cycle = td->o.rwmixcycle;
		if (!td->rwmix_bytes)
			cycle /= 10;

		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (elapsed >= cycle ||
		    td->io_bytes[td->rwmix_ddir] >= td->rwmix_bytes) {
			unsigned long long max_bytes;
			enum fio_ddir ddir;

			/*
			 * Put a top limit on how many bytes we do for
			 * one data direction, to avoid overflowing the
			 * ranges too much
			 */
			ddir = get_rand_ddir(td);
			max_bytes = td->this_io_bytes[ddir];
			if (max_bytes >= (td->o.size * td->o.rwmix[ddir] / 100)) {
				if (!td->rw_end_set[ddir]) {
					td->rw_end_set[ddir] = 1;
					memcpy(&td->rw_end[ddir], &now, sizeof(now));
				}
				ddir ^= 1;
			}

			if (ddir != td->rwmix_ddir)
				set_rwmix_bytes(td);

			td->rwmix_ddir = ddir;
			memcpy(&td->rwmix_switch, &now, sizeof(now));
		}
		return td->rwmix_ddir;
	} else if (td_read(td))
		return DDIR_READ;
	else
		return DDIR_WRITE;
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	assert((io_u->flags & IO_U_F_FREE) == 0);
	io_u->flags |= IO_U_F_FREE;

	if (io_u->file)
		put_file(td, io_u->file);

	io_u->file = NULL;
	list_del(&io_u->list);
	list_add(&io_u->list, &td->io_u_freelist);
	td->cur_depth--;
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;

	__io_u->flags |= IO_U_F_FREE;
	__io_u->flags &= ~IO_U_F_FLIGHT;

	list_del(&__io_u->list);
	list_add_tail(&__io_u->list, &td->io_u_requeues);
	td->cur_depth--;
	*io_u = NULL;
}

static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
	/*
	 * see if it's time to sync
	 */
	if (td->o.fsync_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
	    td->io_issues[DDIR_WRITE] && should_fsync(td)) {
		io_u->ddir = DDIR_SYNC;
		goto out;
	}

	io_u->ddir = get_rw_ddir(td);

	/*
	 * See if it's time to switch to a new zone
	 */
	if (td->zone_bytes >= td->o.zone_size) {
		td->zone_bytes = 0;
		io_u->file->last_pos += td->o.zone_skip;
		td->io_skip_bytes += td->o.zone_skip;
	}

	/*
	 * No log, let the seq/rand engine retrieve the next buflen and
	 * position.
	 */
	if (get_next_offset(td, io_u))
		return 1;

	io_u->buflen = get_next_buflen(td, io_u);
	if (!io_u->buflen)
		return 1;

	if (io_u->offset + io_u->buflen > io_u->file->real_file_size)
		return 1;

	/*
	 * mark entry before potentially trimming io_u
	 */
	if (td_random(td) && !td->o.norandommap)
		mark_random_map(td, io_u);

	/*
	 * If using a write iolog, store this entry.
	 */
out:
	log_io_u(td, io_u);
	return 0;
}

void io_u_mark_depth(struct thread_data *td, struct io_u *io_u)
{
	int index = 0;

	if (io_u->ddir == DDIR_SYNC)
		return;

	switch (td->cur_depth) {
	default:
		index = 6;
		break;
	case 32 ... 63:
		index = 5;
		break;
	case 16 ... 31:
		index = 4;
		break;
	case 8 ... 15:
		index = 3;
		break;
	case 4 ... 7:
		index = 2;
		break;
	case 2 ... 3:
		index = 1;
		/* fall through */
	case 1:
		break;
	}

	td->ts.io_u_map[index]++;
	td->ts.total_io_u[io_u->ddir]++;
}

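/*
 * Bucket layout for the switch above: index 0 covers depth 1, 1 covers
 * 2-3, 2 covers 4-7, 3 covers 8-15, 4 covers 16-31, 5 covers 32-63 and
 * 6 everything deeper. E.g. a job running at iodepth=16 increments
 * ts.io_u_map[4] on every submitted io_u.
 */
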
static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
{
	int index = 0;

	assert(usec < 1000);

	switch (usec) {
	case 750 ... 999:
		index = 9;
		break;
	case 500 ... 749:
		index = 8;
		break;
	case 250 ... 499:
		index = 7;
		break;
	case 100 ... 249:
		index = 6;
		break;
	case 50 ... 99:
		index = 5;
		break;
	case 20 ... 49:
		index = 4;
		break;
	case 10 ... 19:
		index = 3;
		break;
	case 4 ... 9:
		index = 2;
		break;
	case 2 ... 3:
		index = 1;
		/* fall through */
	case 0 ... 1:
		break;
	}

	assert(index < FIO_IO_U_LAT_U_NR);
	td->ts.io_u_lat_u[index]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
{
	int index = 0;

	switch (msec) {
	default:
		index = 11;
		break;
	case 1000 ... 1999:
		index = 10;
		break;
	case 750 ... 999:
		index = 9;
		break;
	case 500 ... 749:
		index = 8;
		break;
	case 250 ... 499:
		index = 7;
		break;
	case 100 ... 249:
		index = 6;
		break;
	case 50 ... 99:
		index = 5;
		break;
	case 20 ... 49:
		index = 4;
		break;
	case 10 ... 19:
		index = 3;
		break;
	case 4 ... 9:
		index = 2;
		break;
	case 2 ... 3:
		index = 1;
		/* fall through */
	case 0 ... 1:
		break;
	}

	assert(index < FIO_IO_U_LAT_M_NR);
	td->ts.io_u_lat_m[index]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
{
	if (usec < 1000)
		io_u_mark_lat_usec(td, usec);
	else
		io_u_mark_lat_msec(td, usec / 1000);
}

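/*
 * Routing example for the helper above: a completion latency of 640
 * usec goes to the sub-millisecond histogram (io_u_lat_u, bucket
 * 500-749), while 2500 usec becomes msec = 2 and lands in the
 * io_u_lat_m 2-3 bucket.
 */
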
/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td, int goodf,
					   int badf)
{
	struct fio_file *f;
	int fno;

	do {
		long r = os_random_long(&td->next_file_state);

		fno = (unsigned int) ((double) td->o.nr_files * (r / (RAND_MAX + 1.0)));
		f = &td->files[fno];
		if (f->flags & FIO_FILE_DONE)
			continue;

		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
			return f;
	} while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
					 int badf)
{
	unsigned int old_next_file = td->next_file;
	struct fio_file *f;

	do {
		f = &td->files[td->next_file];

		td->next_file++;
		if (td->next_file >= td->o.nr_files)
			td->next_file = 0;

		if (f->flags & FIO_FILE_DONE) {
			f = NULL;
			continue;
		}

		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
			break;

		f = NULL;
	} while (td->next_file != old_next_file);

	return f;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
	struct fio_file *f;

	assert(td->o.nr_files <= td->files_index);

	if (!td->nr_open_files || td->nr_done_files >= td->o.nr_files)
		return NULL;

	f = td->file_service_file;
	if (f && (f->flags & FIO_FILE_OPEN) && td->file_service_left--)
		return f;

	if (td->o.file_service_type == FIO_FSERVICE_RR)
		f = get_next_file_rr(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);
	else
		f = get_next_file_rand(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);

	td->file_service_file = f;
	td->file_service_left = td->file_service_nr - 1;
	return f;
}

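/*
 * Example of the caching above: with file_service_nr = 4, the first
 * call picks a file via RR or random and sets file_service_left = 3;
 * the next three calls return the cached file_service_file while
 * decrementing the counter, so each chosen file services four io_us
 * in a row before a new selection is made.
 */
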
static struct fio_file *find_next_new_file(struct thread_data *td)
{
	struct fio_file *f;

	if (!td->nr_open_files || td->nr_done_files >= td->o.nr_files)
		return NULL;

	if (td->o.file_service_type == FIO_FSERVICE_RR)
		f = get_next_file_rr(td, 0, FIO_FILE_OPEN);
	else
		f = get_next_file_rand(td, 0, FIO_FILE_OPEN);

	return f;
}

static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f;

	do {
		f = get_next_file(td);
		if (!f)
			return 1;

set_file:
		io_u->file = f;
		get_file(f);

		if (!fill_io_u(td, io_u))
			break;

		/*
		 * td_io_close() does a put_file() as well, so no need to
		 * do that here.
		 */
		io_u->file = NULL;
		td_io_close_file(td, f);
		f->flags |= FIO_FILE_DONE;
		td->nr_done_files++;

		/*
		 * probably not the right place to do this, but see
		 * if we need to open a new file
		 */
		if (td->nr_open_files < td->o.open_files &&
		    td->o.open_files != td->o.nr_files) {
			f = find_next_new_file(td);

			if (!f || td_io_open_file(td, f))
				return 1;

			goto set_file;
		}
	} while (1);

	return 0;
}

struct io_u *__get_io_u(struct thread_data *td)
{
	struct io_u *io_u = NULL;

	if (!list_empty(&td->io_u_requeues))
		io_u = list_entry(td->io_u_requeues.next, struct io_u, list);
	else if (!queue_full(td)) {
		io_u = list_entry(td->io_u_freelist.next, struct io_u, list);

		io_u->buflen = 0;
		io_u->resid = 0;
		io_u->file = NULL;
		io_u->end_io = NULL;
	}

	if (io_u) {
		assert(io_u->flags & IO_U_F_FREE);
		io_u->flags &= ~IO_U_F_FREE;

		io_u->error = 0;
		list_del(&io_u->list);
		list_add(&io_u->list, &td->io_u_busylist);
		td->cur_depth++;
	}

	return io_u;
}

/*
 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
 * etc. The returned io_u is fully ready to be prepped and submitted.
 */
struct io_u *get_io_u(struct thread_data *td)
{
	struct fio_file *f;
	struct io_u *io_u;

	io_u = __get_io_u(td);
	if (!io_u)
		return NULL;

	/*
	 * from a requeue, io_u already setup
	 */
	if (io_u->file)
		goto out;

	/*
	 * If using an iolog, grab next piece if any available.
	 */
	if (td->o.read_iolog_file) {
		if (read_iolog_get(td, io_u))
			goto err_put;
	} else if (set_io_u_file(td, io_u))
		goto err_put;

	f = io_u->file;
	assert(f->flags & FIO_FILE_OPEN);

	if (io_u->ddir != DDIR_SYNC) {
		if (!io_u->buflen)
			goto err_put;

		f->last_pos = io_u->offset + io_u->buflen;

		if (td->o.verify != VERIFY_NONE)
			populate_verify_io_u(td, io_u);
	}

	/*
	 * Set io data pointers.
	 */
	io_u->endpos = io_u->offset + io_u->buflen;
out:
	io_u->xfer_buf = io_u->buf;
	io_u->xfer_buflen = io_u->buflen;

	if (!td_io_prep(td, io_u)) {
		fio_gettime(&io_u->start_time, NULL);
		return io_u;
	}
err_put:
	put_io_u(td, io_u);
	return NULL;
}

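/*
 * A minimal sketch of the submit loop a backend runs on top of
 * get_io_u() (illustrative only; fio's real loop also handles
 * requeues, rate limiting and more):
 *
 *	struct io_u *io_u = get_io_u(td);
 *	if (io_u) {
 *		int ret = td_io_queue(td, io_u);
 *		if (ret == FIO_Q_COMPLETED)
 *			io_u_sync_complete(td, io_u);
 *	}
 */
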
void io_u_log_error(struct thread_data *td, struct io_u *io_u)
{
	const char *msg[] = { "read", "write", "sync" };

	log_err("fio: io_u error");

	if (io_u->file)
		log_err(" on file %s", io_u->file->file_name);

	log_err(": %s\n", strerror(io_u->error));

	log_err(" %s offset=%llu, buflen=%lu\n", msg[io_u->ddir], io_u->offset, io_u->xfer_buflen);

	if (!td->error)
		td_verror(td, io_u->error, "io_u error");
}

static void io_completed(struct thread_data *td, struct io_u *io_u,
			 struct io_completion_data *icd)
{
	unsigned long usec;

	assert(io_u->flags & IO_U_F_FLIGHT);
	io_u->flags &= ~IO_U_F_FLIGHT;

	if (io_u->ddir == DDIR_SYNC) {
		td->last_was_sync = 1;
		return;
	}

	td->last_was_sync = 0;

	if (!io_u->error) {
		unsigned int bytes = io_u->buflen - io_u->resid;
		const enum fio_ddir idx = io_u->ddir;
		int ret;

		td->io_blocks[idx]++;
		td->io_bytes[idx] += bytes;
		td->zone_bytes += bytes;
		td->this_io_bytes[idx] += bytes;

		io_u->file->last_completed_pos = io_u->endpos;

		usec = utime_since(&io_u->issue_time, &icd->time);

		add_clat_sample(td, idx, usec);
		add_bw_sample(td, idx, &icd->time);
		io_u_mark_latency(td, usec);

		if (td_write(td) && idx == DDIR_WRITE &&
		    td->o.verify != VERIFY_NONE)
			log_io_piece(td, io_u);

		icd->bytes_done[idx] += bytes;

		if (io_u->end_io) {
			ret = io_u->end_io(td, io_u);
			if (ret && !icd->error)
				icd->error = ret;
		}
	} else {
		icd->error = io_u->error;
		io_u_log_error(td, io_u);
	}
}

static void init_icd(struct io_completion_data *icd, int nr)
{
	fio_gettime(&icd->time, NULL);

	icd->nr = nr;

	icd->error = 0;
	icd->bytes_done[0] = icd->bytes_done[1] = 0;
}

static void ios_completed(struct thread_data *td,
			  struct io_completion_data *icd)
{
	struct io_u *io_u;
	int i;

	for (i = 0; i < icd->nr; i++) {
		io_u = td->io_ops->event(td, i);

		io_completed(td, io_u, icd);
		put_io_u(td, io_u);
	}
}

/*
 * Complete a single io_u for the sync engines.
 */
long io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
{
	struct io_completion_data icd;

	init_icd(&icd, 1);
	io_completed(td, io_u, &icd);
	put_io_u(td, io_u);

	if (!icd.error)
		return icd.bytes_done[0] + icd.bytes_done[1];

	td_verror(td, icd.error, "io_u_sync_complete");
	return -1;
}

/*
 * Called to complete min_events number of io for the async engines.
 */
long io_u_queued_complete(struct thread_data *td, int min_events)
{
	struct io_completion_data icd;
	struct timespec *tvp = NULL;
	int ret;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };

	if (!min_events)
		tvp = &ts;

	ret = td_io_getevents(td, min_events, td->cur_depth, tvp);
	if (ret < 0) {
		td_verror(td, -ret, "td_io_getevents");
		return ret;
	} else if (!ret)
		return ret;

	init_icd(&icd, ret);
	ios_completed(td, &icd);
	if (!icd.error)
		return icd.bytes_done[0] + icd.bytes_done[1];

	td_verror(td, icd.error, "io_u_queued_complete");
	return -1;
}

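/*
 * Semantics of the helper above: min_events == 0 turns the reap into a
 * non-blocking poll (a zeroed timespec is passed to td_io_getevents()),
 * while min_events > 0 blocks until at least that many events are
 * available. E.g. io_u_queued_complete(td, 1) waits for one completion,
 * io_u_queued_complete(td, 0) just drains whatever is already done.
 */
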
/*
 * Call when io_u is really queued, to update the submission latency.
 */
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
	unsigned long slat_time;

	slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
	add_slat_sample(td, io_u->ddir, slat_time);
}

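/*
 * Timeline of the latency samples fio records per io_u: start_time is
 * stamped in get_io_u(), issue_time when the io is handed to the
 * engine, and completion time in init_icd(). So
 * slat = issue_time - start_time (above), and
 * clat = completion - issue_time (in io_completed()).
 */
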
#ifdef FIO_USE_TIMEOUT
void io_u_set_timeout(struct thread_data *td)
{
	assert(td->cur_depth);

	td->timer.it_interval.tv_sec = 0;
	td->timer.it_interval.tv_usec = 0;
	td->timer.it_value.tv_sec = IO_U_TIMEOUT + IO_U_TIMEOUT_INC;
	td->timer.it_value.tv_usec = 0;
	setitimer(ITIMER_REAL, &td->timer, NULL);
	fio_gettime(&td->timeout_end, NULL);
}

static void io_u_dump(struct io_u *io_u)
{
	unsigned long t_start = mtime_since_now(&io_u->start_time);
	unsigned long t_issue = mtime_since_now(&io_u->issue_time);

	log_err("io_u=%p, t_start=%lu, t_issue=%lu\n", io_u, t_start, t_issue);
	log_err(" buf=%p/%p, len=%lu/%lu, offset=%llu\n", io_u->buf, io_u->xfer_buf, io_u->buflen, io_u->xfer_buflen, io_u->offset);
	log_err(" ddir=%d, fname=%s\n", io_u->ddir, io_u->file->file_name);
}
#else
void io_u_set_timeout(struct thread_data fio_unused *td)
{
}
#endif

#ifdef FIO_USE_TIMEOUT
static void io_u_timeout_handler(int fio_unused sig)
{
	struct thread_data *td, *__td;
	pid_t pid = getpid();
	struct list_head *entry;
	struct io_u *io_u;
	int i;

	log_err("fio: io_u timeout\n");

	/*
	 * TLS would be nice...
	 */
	td = NULL;
	for_each_td(__td, i) {
		if (__td->pid == pid) {
			td = __td;
			break;
		}
	}

	if (!td) {
		log_err("fio: io_u timeout, can't find job\n");
		exit(1);
	}

	if (!td->cur_depth) {
		log_err("fio: timeout without pending work?\n");
		return;
	}

	log_err("fio: io_u timeout: job=%s, pid=%d\n", td->o.name, td->pid);

	list_for_each(entry, &td->io_u_busylist) {
		io_u = list_entry(entry, struct io_u, list);

		io_u_dump(io_u);
	}

	td_verror(td, ETIMEDOUT, "io_u timeout");
	exit(1);
}
#endif

void io_u_init_timeout(void)
{
#ifdef FIO_USE_TIMEOUT
	signal(SIGALRM, io_u_timeout_handler);
#endif
}