#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"
#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"

struct io_completion_data {
        int nr;                         /* input */

        int error;                      /* output */
        unsigned long bytes_done[2];    /* output */
        struct timeval time;            /* output */
};

/*
 * The ->file_map[] contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static int random_map_free(struct fio_file *f, const unsigned long long block)
{
        unsigned int idx = RAND_MAP_IDX(f, block);
        unsigned int bit = RAND_MAP_BIT(f, block);

        dprint(FD_RANDOM, "free: b=%llu, idx=%u, bit=%u\n", block, idx, bit);

        return (f->file_map[idx] & (1UL << bit)) == 0;
}

/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
        unsigned int min_bs = td->o.rw_min_bs;
        struct fio_file *f = io_u->file;
        unsigned long long block;
        unsigned int blocks, nr_blocks;
        int busy_check;

        block = (io_u->offset - f->file_offset) / (unsigned long long) min_bs;
        nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
        blocks = 0;
        busy_check = !(io_u->flags & IO_U_F_BUSY_OK);

        while (nr_blocks) {
                unsigned int idx, bit;
                unsigned long mask, this_blocks;

                /*
                 * If we have a mixed random workload, we may
                 * encounter blocks we already did IO to.
                 */
                if (!busy_check) {
                        blocks = nr_blocks;
                        break;
                }
                if ((td->o.ddir_seq_nr == 1) && !random_map_free(f, block))
                        break;

                idx = RAND_MAP_IDX(f, block);
                bit = RAND_MAP_BIT(f, block);

                fio_assert(td, idx < f->num_maps);

                this_blocks = nr_blocks;
                if (this_blocks + bit > BLOCKS_PER_MAP)
                        this_blocks = BLOCKS_PER_MAP - bit;

                do {
                        if (this_blocks == BLOCKS_PER_MAP)
                                mask = -1UL;
                        else
                                mask = ((1UL << this_blocks) - 1) << bit;

                        if (!(f->file_map[idx] & mask))
                                break;

                        this_blocks--;
                } while (this_blocks);

                if (!this_blocks)
                        break;

                f->file_map[idx] |= mask;
                nr_blocks -= this_blocks;
                blocks += this_blocks;
                block += this_blocks;
        }

        if ((blocks * min_bs) < io_u->buflen)
                io_u->buflen = blocks * min_bs;
}

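/*
 * Number of I/O blocks in this file for the given data direction, in units
 * of the configured block alignment (->ba[ddir]).
 */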
static unsigned long long last_block(struct thread_data *td, struct fio_file *f,
                                     enum fio_ddir ddir)
{
        unsigned long long max_blocks;
        unsigned long long max_size;

        assert(ddir_rw(ddir));

        /*
         * Hmm, should we make sure that ->io_size <= ->real_file_size?
         */
        max_size = f->io_size;
        if (max_size > f->real_file_size)
                max_size = f->real_file_size;

        max_blocks = max_size / (unsigned long long) td->o.ba[ddir];
        if (!max_blocks)
                return 0;

        return max_blocks;
}

/*
 * Return the next free block in the map.
 */
static int get_next_free_block(struct thread_data *td, struct fio_file *f,
                               enum fio_ddir ddir, unsigned long long *b)
{
        unsigned long long block, min_bs = td->o.rw_min_bs, lastb;
        int i;

        lastb = last_block(td, f, ddir);
        if (!lastb)
                return 1;

        i = f->last_free_lookup;
        block = i * BLOCKS_PER_MAP;
        while (block * min_bs < f->real_file_size &&
                block * min_bs < f->io_size) {
                if (f->file_map[i] != -1UL) {
                        block += ffz(f->file_map[i]);
                        if (block > lastb)
                                break;
                        f->last_free_lookup = i;
                        *b = block;
                        return 0;
                }

                block += BLOCKS_PER_MAP;
                i++;
        }

        dprint(FD_IO, "failed finding a free block\n");
        return 1;
}

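/*
 * Pick a random block. Retry a few times if the random map shows the block
 * as already done; after repeated failures, fall back to scanning the map
 * for a free block instead.
 */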
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
                                enum fio_ddir ddir, unsigned long long *b)
{
        unsigned long long r, lastb;
        int loops = 5;

        lastb = last_block(td, f, ddir);
        if (!lastb)
                return 1;

        if (f->failed_rands >= 200)
                goto ffz;

        do {
                if (td->o.use_os_rand) {
                        r = os_random_long(&td->random_state);
                        *b = (lastb - 1) * (r / ((unsigned long long) OS_RAND_MAX + 1.0));
                } else {
                        r = __rand(&td->__random_state);
                        *b = (lastb - 1) * (r / ((unsigned long long) FRAND_MAX + 1.0));
                }

                dprint(FD_RANDOM, "off rand %llu\n", r);

                /*
                 * if we are not maintaining a random map, we are done.
                 */
                if (!file_randommap(td, f))
                        goto ret_good;

                /*
                 * calculate map offset and check if it's free
                 */
                if (random_map_free(f, *b))
                        goto ret_good;

                dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
                                                                        *b);
        } while (--loops);

        if (!f->failed_rands++)
                f->last_free_lookup = 0;

        /*
         * We get here if we didn't succeed in looking up a block. Generate
         * a random start offset into the filemap, and find the first free
         * block from there.
         */
        loops = 10;
        do {
                f->last_free_lookup = (f->num_maps - 1) *
                                        (r / (OS_RAND_MAX + 1.0));
                if (!get_next_free_block(td, f, ddir, b))
                        goto ret;

                if (td->o.use_os_rand)
                        r = os_random_long(&td->random_state);
                else
                        r = __rand(&td->__random_state);
        } while (--loops);

        /*
         * That didn't work either, try an exhaustive search from the start.
         */
        f->last_free_lookup = 0;
ffz:
        if (!get_next_free_block(td, f, ddir, b))
                return 0;
        f->last_free_lookup = 0;
        return get_next_free_block(td, f, ddir, b);
ret_good:
        f->failed_rands = 0;
ret:
        return 0;
}

static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
                               enum fio_ddir ddir, unsigned long long *b)
{
        if (get_next_rand_offset(td, f, ddir, b)) {
                dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
                                f->file_name, f->last_pos, f->real_file_size);
                return 1;
        }

        return 0;
}

static int get_next_seq_block(struct thread_data *td, struct fio_file *f,
                              enum fio_ddir ddir, unsigned long long *b)
{
        assert(ddir_rw(ddir));

        if (f->last_pos < f->real_file_size) {
                *b = (f->last_pos - f->file_offset) / td->o.min_bs[ddir];
                return 0;
        }

        return 1;
}

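/*
 * Select the next block number. At the start of a new sequence (rw_seq set),
 * use random or sequential selection based on the job type; within a
 * sequence, follow the rw_seq option: sequential with a random fallback, or
 * the same offset as the previous I/O.
 */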
static int get_next_block(struct thread_data *td, struct io_u *io_u,
                          enum fio_ddir ddir, int rw_seq, unsigned long long *b)
{
        struct fio_file *f = io_u->file;
        int ret;

        assert(ddir_rw(ddir));

        if (rw_seq) {
                if (td_random(td))
                        ret = get_next_rand_block(td, f, ddir, b);
                else
                        ret = get_next_seq_block(td, f, ddir, b);
        } else {
                io_u->flags |= IO_U_F_BUSY_OK;

                if (td->o.rw_seq == RW_SEQ_SEQ) {
                        ret = get_next_seq_block(td, f, ddir, b);
                        if (ret)
                                ret = get_next_rand_block(td, f, ddir, b);
                } else if (td->o.rw_seq == RW_SEQ_IDENT) {
                        if (f->last_start != -1ULL)
                                *b = (f->last_start - f->file_offset)
                                        / td->o.min_bs[ddir];
                        else
                                *b = 0;
                        ret = 0;
                } else {
                        log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
                        ret = 1;
                }
        }

        return ret;
}

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;
        unsigned long long b;
        enum fio_ddir ddir = io_u->ddir;
        int rw_seq_hit = 0;

        assert(ddir_rw(ddir));

        if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
                rw_seq_hit = 1;
                td->ddir_seq_nr = td->o.ddir_seq_nr;
        }

        if (get_next_block(td, io_u, ddir, rw_seq_hit, &b))
                return 1;

        io_u->offset = b * td->o.ba[ddir];
        if (io_u->offset >= f->io_size) {
                dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
                                        io_u->offset, f->io_size);
                return 1;
        }

        io_u->offset += f->file_offset;
        if (io_u->offset >= f->real_file_size) {
                dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
                                        io_u->offset, f->real_file_size);
                return 1;
        }

        return 0;
}

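/*
 * Offset selection entry point: a loaded profile may override it through
 * prof_io_ops->fill_io_u_off.
 */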
static int get_next_offset(struct thread_data *td, struct io_u *io_u)
{
        struct prof_io_ops *ops = &td->prof_io_ops;

        if (ops->fill_io_u_off)
                return ops->fill_io_u_off(td, io_u);

        return __get_next_offset(td, io_u);
}

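/*
 * Pick the next buffer length: fixed when min_bs == max_bs, otherwise drawn
 * randomly in [min_bs, max_bs] or from the bssplit distribution. The result
 * is rounded to a min_bs multiple when min_bs is a power of two and
 * bs_unaligned is not set, and clamped to min_bs if the I/O would run past
 * the end of the file.
 */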
static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
        const int ddir = io_u->ddir;
        unsigned int uninitialized_var(buflen);
        unsigned int minbs, maxbs;
        long r;

        assert(ddir_rw(ddir));

        minbs = td->o.min_bs[ddir];
        maxbs = td->o.max_bs[ddir];

        if (minbs == maxbs)
                buflen = minbs;
        else {
                r = os_random_long(&td->bsrange_state);
                if (!td->o.bssplit_nr[ddir]) {
                        buflen = 1 + (unsigned int) ((double) maxbs *
                                        (r / (OS_RAND_MAX + 1.0)));
                        if (buflen < minbs)
                                buflen = minbs;
                } else {
                        long perc = 0;
                        unsigned int i;

                        for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
                                struct bssplit *bsp = &td->o.bssplit[ddir][i];

                                buflen = bsp->bs;
                                perc += bsp->perc;
                                if (r <= ((OS_RAND_MAX / 100L) * perc))
                                        break;
                        }
                }
                if (!td->o.bs_unaligned && is_power_of_2(minbs))
                        buflen = (buflen + minbs - 1) & ~(minbs - 1);
        }

        if (io_u->offset + buflen > io_u->file->real_file_size) {
                dprint(FD_IO, "lower buflen %u -> %u (ddir=%d)\n", buflen,
                                                minbs, ddir);
                buflen = minbs;
        }

        return buflen;
}

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
        struct prof_io_ops *ops = &td->prof_io_ops;

        if (ops->fill_io_u_size)
                return ops->fill_io_u_size(td, io_u);

        return __get_next_buflen(td, io_u);
}

static void set_rwmix_bytes(struct thread_data *td)
{
        unsigned int diff;

        /*
         * we do time or byte based switch. this is needed because
         * buffered writes may issue a lot quicker than they complete,
         * whereas reads do not.
         */
        diff = td->o.rwmix[td->rwmix_ddir ^ 1];
        td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
}

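/*
 * Draw a data direction according to the rwmix percentages. For example,
 * with rwmixread=80 the value v lands in [1, 100] and v <= 80 selects
 * DDIR_READ.
 */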
static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
        unsigned int v;
        long r;

        r = os_random_long(&td->rwmix_state);
        v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
        if (v <= td->o.rwmix[DDIR_READ])
                return DDIR_READ;

        return DDIR_WRITE;
}

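/*
 * Rate limiting: if this direction has built up pending sleep time, either
 * switch to the other direction (when it is not also backed up) or sleep
 * off the smaller of the two debts and credit the elapsed time back.
 */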
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
        enum fio_ddir odir = ddir ^ 1;
        struct timeval t;
        long usec;

        assert(ddir_rw(ddir));

        if (td->rate_pending_usleep[ddir] <= 0)
                return ddir;

        /*
         * We have too much pending sleep in this direction. See if we
         * should switch.
         */
        if (td_rw(td)) {
                /*
                 * Other direction does not have too much pending, switch
                 */
                if (td->rate_pending_usleep[odir] < 100000)
                        return odir;

                /*
                 * Both directions have pending sleep. Sleep the minimum time
                 * and deduct from both.
                 */
                if (td->rate_pending_usleep[ddir] <=
                        td->rate_pending_usleep[odir]) {
                        usec = td->rate_pending_usleep[ddir];
                } else {
                        usec = td->rate_pending_usleep[odir];
                        ddir = odir;
                }
        } else
                usec = td->rate_pending_usleep[ddir];

        fio_gettime(&t, NULL);
        usec_sleep(td, usec);
        usec = utime_since_now(&t);

        td->rate_pending_usleep[ddir] -= usec;

        odir = ddir ^ 1;
        if (td_rw(td) && __should_check_rate(td, odir))
                td->rate_pending_usleep[odir] -= usec;

        return ddir;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
        enum fio_ddir ddir;

        /*
         * see if it's time to fsync
         */
        if (td->o.fsync_blocks &&
           !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
             td->io_issues[DDIR_WRITE] && should_fsync(td))
                return DDIR_SYNC;

        /*
         * see if it's time to fdatasync
         */
        if (td->o.fdatasync_blocks &&
           !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
             td->io_issues[DDIR_WRITE] && should_fsync(td))
                return DDIR_DATASYNC;

        /*
         * see if it's time to sync_file_range
         */
        if (td->sync_file_range_nr &&
           !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
             td->io_issues[DDIR_WRITE] && should_fsync(td))
                return DDIR_SYNC_FILE_RANGE;

        if (td_rw(td)) {
                /*
                 * Check if it's time to seed a new data direction.
                 */
                if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
                        /*
                         * Put a top limit on how many bytes we do for
                         * one data direction, to avoid overflowing the
                         * ranges too much
                         */
                        ddir = get_rand_ddir(td);

                        if (ddir != td->rwmix_ddir)
                                set_rwmix_bytes(td);

                        td->rwmix_ddir = ddir;
                }
                ddir = td->rwmix_ddir;
        } else if (td_read(td))
                ddir = DDIR_READ;
        else
                ddir = DDIR_WRITE;

        td->rwmix_ddir = rate_ddir(td, ddir);
        return td->rwmix_ddir;
}

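/*
 * Set the data direction on this io_u, and flag it as a barrier write when
 * the engine supports barriers and barrier_blocks says one is due.
 */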
static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
        io_u->ddir = get_rw_ddir(td);

        if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
            td->o.barrier_blocks &&
           !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
             td->io_issues[DDIR_WRITE])
                io_u->flags |= IO_U_F_BARRIER;
}

void put_file_log(struct thread_data *td, struct fio_file *f)
{
        int ret = put_file(td, f);

        if (ret)
                td_verror(td, ret, "file close");
}

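/*
 * Return an io_u to the free list, dropping its file reference and waking
 * anyone waiting for a free io_u (e.g. async verify threads).
 */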
void put_io_u(struct thread_data *td, struct io_u *io_u)
{
        td_io_u_lock(td);

        io_u->flags |= IO_U_F_FREE;
        io_u->flags &= ~IO_U_F_FREE_DEF;

        if (io_u->file)
                put_file_log(td, io_u->file);

        io_u->file = NULL;
        if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
                td->cur_depth--;
        flist_del_init(&io_u->list);
        flist_add(&io_u->list, &td->io_u_freelist);
        td_io_u_unlock(td);
        td_io_u_free_notify(td);
}

void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
        io_u->flags &= ~IO_U_F_FLIGHT;
        put_io_u(td, io_u);
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
        struct io_u *__io_u = *io_u;

        dprint(FD_IO, "requeue %p\n", __io_u);

        td_io_u_lock(td);

        __io_u->flags |= IO_U_F_FREE;
        if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(__io_u->ddir))
                td->io_issues[__io_u->ddir]--;

        __io_u->flags &= ~IO_U_F_FLIGHT;
        if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
                td->cur_depth--;
        flist_del(&__io_u->list);
        flist_add_tail(&__io_u->list, &td->io_u_requeues);
        td_io_u_unlock(td);
        *io_u = NULL;
}

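/*
 * Fill in the basics of an io_u: data direction, offset and buffer length,
 * handling zone switching and the random map along the way. Returns 0 on
 * success, 1 if no valid I/O could be generated.
 */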
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
        if (td->io_ops->flags & FIO_NOIO)
                goto out;

        set_rw_ddir(td, io_u);

        /*
         * fsync() or fdatasync() or trim etc, we are done
         */
        if (!ddir_rw(io_u->ddir))
                goto out;

        /*
         * See if it's time to switch to a new zone
         */
        if (td->zone_bytes >= td->o.zone_size) {
                td->zone_bytes = 0;
                io_u->file->last_pos += td->o.zone_skip;
                td->io_skip_bytes += td->o.zone_skip;
        }

        /*
         * No log, let the seq/rand engine retrieve the next buflen and
         * position.
         */
        if (get_next_offset(td, io_u)) {
                dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
                return 1;
        }

        io_u->buflen = get_next_buflen(td, io_u);
        if (!io_u->buflen) {
                dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
                return 1;
        }

        if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
                dprint(FD_IO, "io_u %p, offset too large\n", io_u);
                dprint(FD_IO, "  off=%llu/%lu > %llu\n", io_u->offset,
                                io_u->buflen, io_u->file->real_file_size);
                return 1;
        }

        /*
         * mark entry before potentially trimming io_u
         */
        if (td_random(td) && file_randommap(td, io_u->file))
                mark_random_map(td, io_u);

        /*
         * If using a write iolog, store this entry.
         */
out:
        dprint_io_u(io_u, "fill_io_u");
        td->zone_bytes += io_u->buflen;
        log_io_u(td, io_u);
        return 0;
}

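/*
 * Bucket a submit/complete batch size into the histogram slots used for
 * the depth statistics (0, 1-4, 5-8, 9-16, 17-32, 33-64, >64).
 */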
static void __io_u_mark_map(unsigned int *map, unsigned int nr)
{
        int idx = 0;

        switch (nr) {
        default:
                idx = 6;
                break;
        case 33 ... 64:
                idx = 5;
                break;
        case 17 ... 32:
                idx = 4;
                break;
        case 9 ... 16:
                idx = 3;
                break;
        case 5 ... 8:
                idx = 2;
                break;
        case 1 ... 4:
                idx = 1;
        case 0:
                break;
        }

        map[idx]++;
}

void io_u_mark_submit(struct thread_data *td, unsigned int nr)
{
        __io_u_mark_map(td->ts.io_u_submit, nr);
        td->ts.total_submit++;
}

void io_u_mark_complete(struct thread_data *td, unsigned int nr)
{
        __io_u_mark_map(td->ts.io_u_complete, nr);
        td->ts.total_complete++;
}

void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
        int idx = 0;

        switch (td->cur_depth) {
        default:
                idx = 6;
                break;
        case 32 ... 63:
                idx = 5;
                break;
        case 16 ... 31:
                idx = 4;
                break;
        case 8 ... 15:
                idx = 3;
                break;
        case 4 ... 7:
                idx = 2;
                break;
        case 2 ... 3:
                idx = 1;
        case 1:
                break;
        }

        td->ts.io_u_map[idx] += nr;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
{
        int idx = 0;

        assert(usec < 1000);

        switch (usec) {
        case 750 ... 999:
                idx = 9;
                break;
        case 500 ... 749:
                idx = 8;
                break;
        case 250 ... 499:
                idx = 7;
                break;
        case 100 ... 249:
                idx = 6;
                break;
        case 50 ... 99:
                idx = 5;
                break;
        case 20 ... 49:
                idx = 4;
                break;
        case 10 ... 19:
                idx = 3;
                break;
        case 4 ... 9:
                idx = 2;
                break;
        case 2 ... 3:
                idx = 1;
        case 0 ... 1:
                break;
        }

        assert(idx < FIO_IO_U_LAT_U_NR);
        td->ts.io_u_lat_u[idx]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
{
        int idx = 0;

        switch (msec) {
        default:
                idx = 11;
                break;
        case 1000 ... 1999:
                idx = 10;
                break;
        case 750 ... 999:
                idx = 9;
                break;
        case 500 ... 749:
                idx = 8;
                break;
        case 250 ... 499:
                idx = 7;
                break;
        case 100 ... 249:
                idx = 6;
                break;
        case 50 ... 99:
                idx = 5;
                break;
        case 20 ... 49:
                idx = 4;
                break;
        case 10 ... 19:
                idx = 3;
                break;
        case 4 ... 9:
                idx = 2;
                break;
        case 2 ... 3:
                idx = 1;
        case 0 ... 1:
                break;
        }

        assert(idx < FIO_IO_U_LAT_M_NR);
        td->ts.io_u_lat_m[idx]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
{
        if (usec < 1000)
                io_u_mark_lat_usec(td, usec);
        else
                io_u_mark_lat_msec(td, usec / 1000);
}

/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td,
                                           enum fio_file_flags goodf,
                                           enum fio_file_flags badf)
{
        struct fio_file *f;
        int fno;

        do {
                long r = os_random_long(&td->next_file_state);
                int opened = 0;

                fno = (unsigned int) ((double) td->o.nr_files
                        * (r / (OS_RAND_MAX + 1.0)));
                f = td->files[fno];
                if (fio_file_done(f))
                        continue;

                if (!fio_file_open(f)) {
                        int err;

                        err = td_io_open_file(td, f);
                        if (err)
                                continue;
                        opened = 1;
                }

                if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
                        dprint(FD_FILE, "get_next_file_rand: %p\n", f);
                        return f;
                }
                if (opened)
                        td_io_close_file(td, f);
        } while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
                                         int badf)
{
        unsigned int old_next_file = td->next_file;
        struct fio_file *f;

        do {
                int opened = 0;

                f = td->files[td->next_file];

                td->next_file++;
                if (td->next_file >= td->o.nr_files)
                        td->next_file = 0;

                dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
                if (fio_file_done(f)) {
                        f = NULL;
                        continue;
                }

                if (!fio_file_open(f)) {
                        int err;

                        err = td_io_open_file(td, f);
                        if (err) {
                                dprint(FD_FILE, "error %d on open of %s\n",
                                        err, f->file_name);
                                f = NULL;
                                continue;
                        }
                        opened = 1;
                }

                dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
                                                                f->flags);
                if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
                        break;

                if (opened)
                        td_io_close_file(td, f);

                f = NULL;
        } while (td->next_file != old_next_file);

        dprint(FD_FILE, "get_next_file_rr: %p\n", f);
        return f;
}

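/*
 * Pick the next file to service. The current service file is reused for up
 * to file_service_nr I/Os before a new one is selected round robin or at
 * random.
 */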
static struct fio_file *__get_next_file(struct thread_data *td)
{
        struct fio_file *f;

        assert(td->o.nr_files <= td->files_index);

        if (td->nr_done_files >= td->o.nr_files) {
                dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
                                " nr_files=%d\n", td->nr_open_files,
                                                  td->nr_done_files,
                                                  td->o.nr_files);
                return NULL;
        }

        f = td->file_service_file;
        if (f && fio_file_open(f) && !fio_file_closing(f)) {
                if (td->o.file_service_type == FIO_FSERVICE_SEQ)
                        goto out;
                if (td->file_service_left--)
                        goto out;
        }

        if (td->o.file_service_type == FIO_FSERVICE_RR ||
            td->o.file_service_type == FIO_FSERVICE_SEQ)
                f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
        else
                f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);

        td->file_service_file = f;
        td->file_service_left = td->file_service_nr - 1;
out:
        dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
        return f;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
        struct prof_io_ops *ops = &td->prof_io_ops;

        if (ops->get_next_file)
                return ops->get_next_file(td);

        return __get_next_file(td);
}

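/*
 * Attach a file to the io_u and fill it in. Files that can no longer
 * produce I/O are marked done and the next one is tried.
 */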
static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f;

        do {
                f = get_next_file(td);
                if (!f)
                        return 1;

                io_u->file = f;
                get_file(f);

                if (!fill_io_u(td, io_u))
                        break;

                put_file_log(td, f);
                td_io_close_file(td, f);
                io_u->file = NULL;
                fio_file_set_done(f);
                td->nr_done_files++;
                dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
                                        td->nr_done_files, td->o.nr_files);
        } while (1);

        return 0;
}

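/*
 * Grab a free io_u, preferring requeued ones. With verify_async enabled
 * this may block until an async verify thread returns one.
 */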
struct io_u *__get_io_u(struct thread_data *td)
{
        struct io_u *io_u = NULL;

        td_io_u_lock(td);

again:
        if (!flist_empty(&td->io_u_requeues))
                io_u = flist_entry(td->io_u_requeues.next, struct io_u, list);
        else if (!queue_full(td)) {
                io_u = flist_entry(td->io_u_freelist.next, struct io_u, list);

                io_u->buflen = 0;
                io_u->resid = 0;
                io_u->file = NULL;
                io_u->end_io = NULL;
        }

        if (io_u) {
                assert(io_u->flags & IO_U_F_FREE);
                io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
                io_u->flags &= ~(IO_U_F_TRIMMED | IO_U_F_BARRIER);

                io_u->error = 0;
                flist_del(&io_u->list);
                flist_add(&io_u->list, &td->io_u_busylist);
                td->cur_depth++;
                io_u->flags |= IO_U_F_IN_CUR_DEPTH;
        } else if (td->o.verify_async) {
                /*
                 * We ran out, wait for async verify threads to finish and
                 * return one
                 */
                pthread_cond_wait(&td->free_cond, &td->io_u_lock);
                goto again;
        }

        td_io_u_unlock(td);
        return io_u;
}

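/*
 * If trim or verify backlog processing is due, turn this io_u into a trim
 * or verify request instead of a fresh I/O. Both helpers return 1 when
 * they did so.
 */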
static int check_get_trim(struct thread_data *td, struct io_u *io_u)
{
        if (td->o.trim_backlog && td->trim_entries) {
                int get_trim = 0;

                if (td->trim_batch) {
                        td->trim_batch--;
                        get_trim = 1;
                } else if (!(td->io_hist_len % td->o.trim_backlog) &&
                            td->last_ddir != DDIR_READ) {
                        td->trim_batch = td->o.trim_batch;
                        if (!td->trim_batch)
                                td->trim_batch = td->o.trim_backlog;
                        get_trim = 1;
                }

                if (get_trim && !get_next_trim(td, io_u))
                        return 1;
        }

        return 0;
}

static int check_get_verify(struct thread_data *td, struct io_u *io_u)
{
        if (td->o.verify_backlog && td->io_hist_len) {
                int get_verify = 0;

                if (td->verify_batch) {
                        td->verify_batch--;
                        get_verify = 1;
                } else if (!(td->io_hist_len % td->o.verify_backlog) &&
                            td->last_ddir != DDIR_READ) {
                        td->verify_batch = td->o.verify_batch;
                        if (!td->verify_batch)
                                td->verify_batch = td->o.verify_backlog;
                        get_verify = 1;
                }

                if (get_verify && !get_next_verify(td, io_u))
                        return 1;
        }

        return 0;
}

/*
 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
 * etc. The returned io_u is fully ready to be prepped and submitted.
 */
struct io_u *get_io_u(struct thread_data *td)
{
        struct fio_file *f;
        struct io_u *io_u;

        io_u = __get_io_u(td);
        if (!io_u) {
                dprint(FD_IO, "__get_io_u failed\n");
                return NULL;
        }

        if (check_get_verify(td, io_u))
                goto out;
        if (check_get_trim(td, io_u))
                goto out;

        /*
         * from a requeue, io_u already setup
         */
        if (io_u->file)
                goto out;

        /*
         * If using an iolog, grab next piece if any available.
         */
        if (td->o.read_iolog_file) {
                if (read_iolog_get(td, io_u))
                        goto err_put;
        } else if (set_io_u_file(td, io_u)) {
                dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
                goto err_put;
        }

        f = io_u->file;
        assert(fio_file_open(f));

        if (ddir_rw(io_u->ddir)) {
                if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
                        dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
                        goto err_put;
                }

                f->last_start = io_u->offset;
                f->last_pos = io_u->offset + io_u->buflen;

                if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_WRITE)
                        populate_verify_io_u(td, io_u);
                else if (td->o.refill_buffers && io_u->ddir == DDIR_WRITE)
                        io_u_fill_buffer(td, io_u, io_u->xfer_buflen);
                else if (io_u->ddir == DDIR_READ) {
                        /*
                         * Reset the buf_filled parameters so next time if the
                         * buffer is used for writes it is refilled.
                         */
                        io_u->buf_filled_len = 0;
                }
        }

        /*
         * Set io data pointers.
         */
        io_u->xfer_buf = io_u->buf;
        io_u->xfer_buflen = io_u->buflen;

out:
        assert(io_u->file);
        if (!td_io_prep(td, io_u)) {
                if (!td->o.disable_slat)
                        fio_gettime(&io_u->start_time, NULL);
                return io_u;
        }
err_put:
        dprint(FD_IO, "get_io_u failed\n");
        put_io_u(td, io_u);
        return NULL;
}

void io_u_log_error(struct thread_data *td, struct io_u *io_u)
{
        const char *msg[] = { "read", "write", "sync", "datasync",
                                "sync_file_range", "wait", "trim" };

        log_err("fio: io_u error");

        if (io_u->file)
                log_err(" on file %s", io_u->file->file_name);

        log_err(": %s\n", strerror(io_u->error));

        log_err("  %s offset=%llu, buflen=%lu\n", msg[io_u->ddir],
                                        io_u->offset, io_u->xfer_buflen);

        if (!td->error)
                td_verror(td, io_u->error, "io_u error");
}

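/*
 * Per-io_u completion handling: account bytes, collect latency and
 * bandwidth samples, update rate-limit bookkeeping, and log verify pieces
 * for completed writes.
 */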
static void io_completed(struct thread_data *td, struct io_u *io_u,
                         struct io_completion_data *icd)
{
        /*
         * Older gcc's are too dumb to realize that usec is always
         * initialized before use, silence that warning.
         */
        unsigned long uninitialized_var(usec);
        struct fio_file *f;

        dprint_io_u(io_u, "io complete");

        td_io_u_lock(td);
        assert(io_u->flags & IO_U_F_FLIGHT);
        io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
        td_io_u_unlock(td);

        if (ddir_sync(io_u->ddir)) {
                td->last_was_sync = 1;
                f = io_u->file;
                if (f) {
                        f->first_write = -1ULL;
                        f->last_write = -1ULL;
                }
                return;
        }

        td->last_was_sync = 0;
        td->last_ddir = io_u->ddir;

        if (!io_u->error && ddir_rw(io_u->ddir)) {
                unsigned int bytes = io_u->buflen - io_u->resid;
                const enum fio_ddir idx = io_u->ddir;
                const enum fio_ddir odx = io_u->ddir ^ 1;
                int ret;

                td->io_blocks[idx]++;
                td->io_bytes[idx] += bytes;
                td->this_io_bytes[idx] += bytes;

                if (idx == DDIR_WRITE) {
                        f = io_u->file;
                        if (f) {
                                if (f->first_write == -1ULL ||
                                    io_u->offset < f->first_write)
                                        f->first_write = io_u->offset;
                                if (f->last_write == -1ULL ||
                                    ((io_u->offset + bytes) > f->last_write))
                                        f->last_write = io_u->offset + bytes;
                        }
                }

                if (ramp_time_over(td)) {
                        unsigned long uninitialized_var(lusec);

                        if (!td->o.disable_clat || !td->o.disable_bw)
                                lusec = utime_since(&io_u->issue_time,
                                                        &icd->time);
                        if (!td->o.disable_lat) {
                                unsigned long tusec;

                                tusec = utime_since(&io_u->start_time,
                                                        &icd->time);
                                add_lat_sample(td, idx, tusec, bytes);
                        }
                        if (!td->o.disable_clat) {
                                add_clat_sample(td, idx, lusec, bytes);
                                io_u_mark_latency(td, lusec);
                        }
                        if (!td->o.disable_bw)
                                add_bw_sample(td, idx, bytes, &icd->time);
                        if (__should_check_rate(td, idx)) {
                                td->rate_pending_usleep[idx] =
                                        ((td->this_io_bytes[idx] *
                                          td->rate_nsec_cycle[idx]) / 1000 -
                                         utime_since_now(&td->start));
                        }
                        if (__should_check_rate(td, idx ^ 1))
                                td->rate_pending_usleep[odx] =
                                        ((td->this_io_bytes[odx] *
                                          td->rate_nsec_cycle[odx]) / 1000 -
                                         utime_since_now(&td->start));
                }

                if (td_write(td) && idx == DDIR_WRITE &&
                    td->o.do_verify &&
                    td->o.verify != VERIFY_NONE)
                        log_io_piece(td, io_u);

                icd->bytes_done[idx] += bytes;

                if (io_u->end_io) {
                        ret = io_u->end_io(td, io_u);
                        if (ret && !icd->error)
                                icd->error = ret;
                }
        } else if (io_u->error) {
                icd->error = io_u->error;
                io_u_log_error(td, io_u);
        }
        if (td->o.continue_on_error && icd->error &&
            td_non_fatal_error(icd->error)) {
                /*
                 * If there is a non_fatal error, then add to the error count
                 * and clear all the errors.
                 */
                update_error_count(td, icd->error);
                td_clear_error(td);
                icd->error = 0;
                io_u->error = 0;
        }
}

static void init_icd(struct thread_data *td, struct io_completion_data *icd,
                     int nr)
{
        if (!td->o.disable_clat || !td->o.disable_bw)
                fio_gettime(&icd->time, NULL);

        icd->nr = nr;

        icd->error = 0;
        icd->bytes_done[0] = icd->bytes_done[1] = 0;
}

static void ios_completed(struct thread_data *td,
                          struct io_completion_data *icd)
{
        struct io_u *io_u;
        int i;

        for (i = 0; i < icd->nr; i++) {
                io_u = td->io_ops->event(td, i);

                io_completed(td, io_u, icd);

                if (!(io_u->flags & IO_U_F_FREE_DEF))
                        put_io_u(td, io_u);
        }
}

/*
 * Complete a single io_u for the sync engines.
 */
int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
                       unsigned long *bytes)
{
        struct io_completion_data icd;

        init_icd(td, &icd, 1);
        io_completed(td, io_u, &icd);

        if (!(io_u->flags & IO_U_F_FREE_DEF))
                put_io_u(td, io_u);

        if (icd.error) {
                td_verror(td, icd.error, "io_u_sync_complete");
                return -1;
        }

        if (bytes) {
                bytes[0] += icd.bytes_done[0];
                bytes[1] += icd.bytes_done[1];
        }

        return 0;
}

/*
 * Called to complete min_events number of io for the async engines.
 */
int io_u_queued_complete(struct thread_data *td, int min_evts,
                         unsigned long *bytes)
{
        struct io_completion_data icd;
        struct timespec *tvp = NULL;
        int ret;
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };

        dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);

        if (!min_evts)
                tvp = &ts;

        ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
        if (ret < 0) {
                td_verror(td, -ret, "td_io_getevents");
                return ret;
        } else if (!ret)
                return ret;

        init_icd(td, &icd, ret);
        ios_completed(td, &icd);
        if (icd.error) {
                td_verror(td, icd.error, "io_u_queued_complete");
                return -1;
        }

        if (bytes) {
                bytes[0] += icd.bytes_done[0];
                bytes[1] += icd.bytes_done[1];
        }

        return 0;
}

/*
 * Call when io_u is really queued, to update the submission latency.
 */
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
        if (!td->o.disable_slat) {
                unsigned long slat_time;

                slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
                add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen);
        }
}

/*
 * "randomly" fill the buffer contents
 */
void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
                      unsigned int max_bs)
{
        io_u->buf_filled_len = 0;

        if (!td->o.zero_buffers)
                fill_random_buf(io_u->buf, max_bs);
        else
                memset(io_u->buf, 0, max_bs);
}