Commit | Line | Data |
---|---|---|
10ba535a | 1 | #include <unistd.h> |
10ba535a | 2 | #include <string.h> |
0c6e7517 | 3 | #include <assert.h> |
10ba535a JA |
4 | |
5 | #include "fio.h" | |
4f5af7b2 | 6 | #include "verify.h" |
0d29de83 | 7 | #include "trim.h" |
1fbbf72e | 8 | #include "lib/rand.h" |
7ebd796f | 9 | #include "lib/axmap.h" |
002fe734 | 10 | #include "err.h" |
0f38bbef | 11 | #include "lib/pow2.h" |
4b157ac6 | 12 | #include "minmax.h" |
bfbdd35b | 13 | #include "zbd.h" |
10ba535a | 14 | |
97601024 JA |
15 | struct io_completion_data { |
16 | int nr; /* input */ | |
97601024 JA |
17 | |
18 | int error; /* output */ | |
100f49f1 | 19 | uint64_t bytes_done[DDIR_RWDIR_CNT]; /* output */ |
8b6a404c | 20 | struct timespec time; /* output */ |
97601024 JA |
21 | }; |
22 | ||
10ba535a | 23 | /* |
7ebd796f | 24 | * The ->io_axmap contains a map of blocks we have or have not done io |
10ba535a JA |
25 | * to yet. Used to make sure we cover the entire range in a fair fashion. |
26 | */ | |
e39c0676 | 27 | static bool random_map_free(struct fio_file *f, const uint64_t block) |
10ba535a | 28 | { |
7ebd796f | 29 | return !axmap_isset(f->io_axmap, block); |
10ba535a JA |
30 | } |
31 | ||
df415585 JA |
32 | /* |
33 | * Mark a given offset as used in the map. | |
34 | */ | |
6cc1a3d1 BVA |
35 | static uint64_t mark_random_map(struct thread_data *td, struct io_u *io_u, |
36 | uint64_t offset, uint64_t buflen) | |
df415585 | 37 | { |
5fff9543 | 38 | unsigned long long min_bs = td->o.min_bs[io_u->ddir]; |
9bf2061e | 39 | struct fio_file *f = io_u->file; |
5fff9543 | 40 | unsigned long long nr_blocks; |
1ae83d45 | 41 | uint64_t block; |
df415585 | 42 | |
6cc1a3d1 BVA |
43 | block = (offset - f->file_offset) / (uint64_t) min_bs; |
44 | nr_blocks = (buflen + min_bs - 1) / min_bs; | |
bd6b959a | 45 | assert(nr_blocks > 0); |
df415585 | 46 | |
bd6b959a | 47 | if (!(io_u->flags & IO_U_F_BUSY_OK)) { |
7ebd796f | 48 | nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks); |
bd6b959a BVA |
49 | assert(nr_blocks > 0); |
50 | } | |
df415585 | 51 | |
6cc1a3d1 BVA |
52 | if ((nr_blocks * min_bs) < buflen) |
53 | buflen = nr_blocks * min_bs; | |
54 | ||
55 | return buflen; | |
df415585 JA |
56 | } |
57 | ||
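An aside for readers tracing mark_random_map() above: the block index and block count come from plain integer division plus a round-up. A minimal standalone sketch (not fio code; the offsets and block size are made-up values):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t file_offset = 0, min_bs = 4096;     /* assumed minimum block size */
	uint64_t offset = 12288, buflen = 10000;     /* made-up io_u offset/length */

	uint64_t block = (offset - file_offset) / min_bs;      /* first block: 3 */
	uint64_t nr_blocks = (buflen + min_bs - 1) / min_bs;   /* rounded up: 3 */

	/* if axmap_set_nr() could only mark fewer blocks, the io_u buflen
	 * would be trimmed to nr_blocks * min_bs, as in the real function */
	printf("block=%llu nr_blocks=%llu\n",
	       (unsigned long long) block, (unsigned long long) nr_blocks);
	return 0;
}
```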
74776733 JA |
58 | static uint64_t last_block(struct thread_data *td, struct fio_file *f, |
59 | enum fio_ddir ddir) | |
2ba1c290 | 60 | { |
74776733 JA |
61 | uint64_t max_blocks; |
62 | uint64_t max_size; | |
2ba1c290 | 63 | |
ff58fced JA |
64 | assert(ddir_rw(ddir)); |
65 | ||
d9dd70f7 JA |
66 | /* |
67 | * Hmm, should we make sure that ->io_size <= ->real_file_size? | |
79591fa9 | 68 | * -> not for now, since there is code assuming it could go either way. |
d9dd70f7 JA |
69 | */ |
70 | max_size = f->io_size; | |
71 | if (max_size > f->real_file_size) | |
72 | max_size = f->real_file_size; | |
73 | ||
7b865a2f | 74 | if (td->o.zone_mode == ZONE_MODE_STRIDED && td->o.zone_range) |
ed335855 SN |
75 | max_size = td->o.zone_range; |
76 | ||
0412d12e JE |
77 | if (td->o.min_bs[ddir] > td->o.ba[ddir]) |
78 | max_size -= td->o.min_bs[ddir] - td->o.ba[ddir]; | |
79 | ||
1ae83d45 | 80 | max_blocks = max_size / (uint64_t) td->o.ba[ddir]; |
2ba1c290 JA |
81 | if (!max_blocks) |
82 | return 0; | |
83 | ||
67778e88 | 84 | return max_blocks; |
2ba1c290 JA |
85 | } |
86 | ||
e25839d4 | 87 | static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f, |
e0a04ac1 JA |
88 | enum fio_ddir ddir, uint64_t *b, |
89 | uint64_t lastb) | |
ec4015da | 90 | { |
6f49f8bc | 91 | uint64_t r; |
5e0baa7f | 92 | |
c3546b53 JA |
93 | if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE || |
94 | td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64) { | |
6f49f8bc | 95 | |
d6b72507 | 96 | r = __rand(&td->random_state); |
8055e41d | 97 | |
4b91ee8f | 98 | dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r); |
8055e41d | 99 | |
e0a04ac1 | 100 | *b = lastb * (r / (rand_max(&td->random_state) + 1.0)); |
51ede0b1 | 101 | } else { |
8055e41d | 102 | uint64_t off = 0; |
43c63a78 | 103 | |
967d1b63 JA |
104 | assert(fio_file_lfsr(f)); |
105 | ||
6f49f8bc | 106 | if (lfsr_next(&f->lfsr, &off)) |
8055e41d | 107 | return 1; |
ec4015da | 108 | |
8055e41d JA |
109 | *b = off; |
110 | } | |
0ce8b119 | 111 | |
ec4015da | 112 | /* |
51ede0b1 | 113 | * if we are not maintaining a random map, we are done. |
ec4015da | 114 | */ |
51ede0b1 JA |
115 | if (!file_randommap(td, f)) |
116 | goto ret; | |
43c63a78 JA |
117 | |
118 | /* | |
51ede0b1 | 119 | * calculate map offset and check if it's free |
43c63a78 | 120 | */ |
51ede0b1 JA |
121 | if (random_map_free(f, *b)) |
122 | goto ret; | |
123 | ||
4b91ee8f JA |
124 | dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n", |
125 | (unsigned long long) *b); | |
51ede0b1 | 126 | |
7ebd796f | 127 | *b = axmap_next_free(f->io_axmap, *b); |
51ede0b1 JA |
128 | if (*b == (uint64_t) -1ULL) |
129 | return 1; | |
0ce8b119 JA |
130 | ret: |
131 | return 0; | |
ec4015da JA |
132 | } |
133 | ||
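For readers unfamiliar with the scaling in the Tausworthe branch of __get_next_rand_offset(): dividing the raw value by rand_max + 1.0 yields a fraction in [0, 1), which is then stretched across the addressable blocks. A minimal standalone sketch (the generator maximum and input value are illustrative assumptions, not fio internals):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t lastb = 1000;            /* number of addressable blocks */
	uint64_t rmax = 0xffffffffULL;    /* assumed generator maximum */
	uint64_t r = 0x80000000ULL;       /* pretend raw PRNG output */

	/* same scaling as *b = lastb * (r / (rand_max + 1.0)):
	 * r / (rmax + 1.0) lies in [0, 1), so b lies in [0, lastb) */
	uint64_t b = (uint64_t)(lastb * (r / (rmax + 1.0)));
	printf("b = %llu\n", (unsigned long long) b);   /* 500 here */
	return 0;
}
```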
925fee33 JA |
134 | static int __get_next_rand_offset_zipf(struct thread_data *td, |
135 | struct fio_file *f, enum fio_ddir ddir, | |
1ae83d45 | 136 | uint64_t *b) |
e25839d4 | 137 | { |
9c6f6316 | 138 | *b = zipf_next(&f->zipf); |
e25839d4 JA |
139 | return 0; |
140 | } | |
141 | ||
925fee33 JA |
142 | static int __get_next_rand_offset_pareto(struct thread_data *td, |
143 | struct fio_file *f, enum fio_ddir ddir, | |
1ae83d45 | 144 | uint64_t *b) |
925fee33 | 145 | { |
9c6f6316 | 146 | *b = pareto_next(&f->zipf); |
925fee33 JA |
147 | return 0; |
148 | } | |
149 | ||
56d9fa4b JA |
150 | static int __get_next_rand_offset_gauss(struct thread_data *td, |
151 | struct fio_file *f, enum fio_ddir ddir, | |
152 | uint64_t *b) | |
153 | { | |
154 | *b = gauss_next(&f->gauss); | |
155 | return 0; | |
156 | } | |
157 | ||
59466396 JA |
158 | static int __get_next_rand_offset_zoned_abs(struct thread_data *td, |
159 | struct fio_file *f, | |
160 | enum fio_ddir ddir, uint64_t *b) | |
161 | { | |
162 | struct zone_split_index *zsi; | |
e3c8c108 | 163 | uint64_t lastb, send, stotal; |
59466396 JA |
164 | unsigned int v; |
165 | ||
166 | lastb = last_block(td, f, ddir); | |
167 | if (!lastb) | |
168 | return 1; | |
169 | ||
170 | if (!td->o.zone_split_nr[ddir]) { | |
171 | bail: | |
172 | return __get_next_rand_offset(td, f, ddir, b, lastb); | |
173 | } | |
174 | ||
175 | /* | |
176 | * Generate a value, v, between 1 and 100, both inclusive | |
177 | */ | |
1bd5d213 | 178 | v = rand_between(&td->zone_state, 1, 100); |
59466396 | 179 | |
e3c8c108 JA |
180 | /* |
181 | * Find our generated table. 'send' is the end block of this zone, | |
182 | * 'stotal' is our start offset. | |
183 | */ | |
59466396 JA |
184 | zsi = &td->zone_state_index[ddir][v - 1]; |
185 | stotal = zsi->size_prev / td->o.ba[ddir]; | |
186 | send = zsi->size / td->o.ba[ddir]; | |
187 | ||
188 | /* | |
189 | * Should never happen | |
190 | */ | |
191 | if (send == -1U) { | |
264e3d30 | 192 | if (!fio_did_warn(FIO_WARN_ZONED_BUG)) |
59466396 | 193 | log_err("fio: bug in zoned generation\n"); |
59466396 JA |
194 | goto bail; |
195 | } else if (send > lastb) { | |
196 | /* | |
197 | * This happens if the user specifies ranges that exceed | |
198 | * the file/device size. We can't handle that gracefully, | |
199 | * so error and exit. | |
200 | */ | |
201 | log_err("fio: zoned_abs sizes exceed file size\n"); | |
202 | return 1; | |
203 | } | |
204 | ||
205 | /* | |
e3c8c108 | 206 | * Generate index from 0..send-stotal |
59466396 | 207 | */ |
e3c8c108 | 208 | if (__get_next_rand_offset(td, f, ddir, b, send - stotal) == 1) |
59466396 JA |
209 | return 1; |
210 | ||
e3c8c108 | 211 | *b += stotal; |
59466396 JA |
212 | return 0; |
213 | } | |
214 | ||
e0a04ac1 JA |
215 | static int __get_next_rand_offset_zoned(struct thread_data *td, |
216 | struct fio_file *f, enum fio_ddir ddir, | |
217 | uint64_t *b) | |
218 | { | |
219 | unsigned int v, send, stotal; | |
220 | uint64_t offset, lastb; | |
e0a04ac1 JA |
221 | struct zone_split_index *zsi; |
222 | ||
223 | lastb = last_block(td, f, ddir); | |
224 | if (!lastb) | |
225 | return 1; | |
226 | ||
227 | if (!td->o.zone_split_nr[ddir]) { | |
228 | bail: | |
229 | return __get_next_rand_offset(td, f, ddir, b, lastb); | |
230 | } | |
231 | ||
232 | /* | |
233 | * Generate a value, v, between 1 and 100, both inclusive | |
234 | */ | |
1bd5d213 | 235 | v = rand_between(&td->zone_state, 1, 100); |
e0a04ac1 JA |
236 | |
237 | zsi = &td->zone_state_index[ddir][v - 1]; | |
238 | stotal = zsi->size_perc_prev; | |
239 | send = zsi->size_perc; | |
240 | ||
241 | /* | |
242 | * Should never happen | |
243 | */ | |
244 | if (send == -1U) { | |
264e3d30 | 245 | if (!fio_did_warn(FIO_WARN_ZONED_BUG)) |
e0a04ac1 | 246 | log_err("fio: bug in zoned generation\n"); |
e0a04ac1 JA |
247 | goto bail; |
248 | } | |
249 | ||
250 | /* | |
251 | * 'send' is some percentage below or equal to 100 that | |
252 | * marks the end of the current IO range. 'stotal' marks | |
253 | * the start, in percent. | |
254 | */ | |
255 | if (stotal) | |
256 | offset = stotal * lastb / 100ULL; | |
257 | else | |
258 | offset = 0; | |
259 | ||
260 | lastb = lastb * (send - stotal) / 100ULL; | |
261 | ||
262 | /* | |
263 | * Generate an index from 0 up to the scaled lastb | |
264 | */ | |
265 | if (__get_next_rand_offset(td, f, ddir, b, lastb) == 1) | |
266 | return 1; | |
267 | ||
268 | /* | |
269 | * Add our start offset, if any | |
270 | */ | |
271 | if (offset) | |
272 | *b += offset; | |
273 | ||
274 | return 0; | |
275 | } | |
56d9fa4b | 276 | |
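To make the zoned-distribution percentage math above concrete, here is a minimal standalone sketch of how the 1..100 draw selects a table entry and how the block range is narrowed. The two-entry table is a made-up example roughly corresponding to a hypothetical random_distribution=zoned:60/10:40/90 job:

```c
#include <stdio.h>
#include <stdint.h>

struct zone { unsigned int start_perc, end_perc; };

int main(void)
{
	/* assumed table: 60% of accesses hit the first 10% of blocks,
	 * the remaining 40% hit the other 90% */
	struct zone index[100];
	for (int v = 1; v <= 100; v++) {
		if (v <= 60)
			index[v - 1] = (struct zone){ 0, 10 };
		else
			index[v - 1] = (struct zone){ 10, 100 };
	}

	uint64_t lastb = 100000;     /* total addressable blocks */
	unsigned int v = 42;         /* pretend rand_between() returned 42 */
	struct zone *z = &index[v - 1];

	uint64_t offset = (uint64_t) z->start_perc * lastb / 100;     /* zone start */
	uint64_t span = lastb * (z->end_perc - z->start_perc) / 100;  /* zone size */

	printf("pick a block in [%llu, %llu)\n",
	       (unsigned long long) offset,
	       (unsigned long long) (offset + span));
	return 0;
}
```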
f31feaa2 JA |
277 | static int get_next_rand_offset(struct thread_data *td, struct fio_file *f, |
278 | enum fio_ddir ddir, uint64_t *b) | |
e25839d4 | 279 | { |
e0a04ac1 JA |
280 | if (td->o.random_distribution == FIO_RAND_DIST_RANDOM) { |
281 | uint64_t lastb; | |
282 | ||
283 | lastb = last_block(td, f, ddir); | |
284 | if (!lastb) | |
285 | return 1; | |
286 | ||
287 | return __get_next_rand_offset(td, f, ddir, b, lastb); | |
288 | } else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF) | |
e25839d4 | 289 | return __get_next_rand_offset_zipf(td, f, ddir, b); |
925fee33 JA |
290 | else if (td->o.random_distribution == FIO_RAND_DIST_PARETO) |
291 | return __get_next_rand_offset_pareto(td, f, ddir, b); | |
56d9fa4b JA |
292 | else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS) |
293 | return __get_next_rand_offset_gauss(td, f, ddir, b); | |
e0a04ac1 JA |
294 | else if (td->o.random_distribution == FIO_RAND_DIST_ZONED) |
295 | return __get_next_rand_offset_zoned(td, f, ddir, b); | |
59466396 JA |
296 | else if (td->o.random_distribution == FIO_RAND_DIST_ZONED_ABS) |
297 | return __get_next_rand_offset_zoned_abs(td, f, ddir, b); | |
e25839d4 JA |
298 | |
299 | log_err("fio: unknown random distribution: %d\n", td->o.random_distribution); | |
300 | return 1; | |
301 | } | |
302 | ||
e39c0676 | 303 | static bool should_do_random(struct thread_data *td, enum fio_ddir ddir) |
211c9b89 JA |
304 | { |
305 | unsigned int v; | |
211c9b89 | 306 | |
d9472271 | 307 | if (td->o.perc_rand[ddir] == 100) |
e39c0676 | 308 | return true; |
211c9b89 | 309 | |
1bd5d213 | 310 | v = rand_between(&td->seq_rand_state[ddir], 1, 100); |
211c9b89 | 311 | |
d9472271 | 312 | return v <= td->o.perc_rand[ddir]; |
211c9b89 JA |
313 | } |
314 | ||
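should_do_random() is the hook behind the percentage_random option: each sequential/random decision is a fresh 1..100 draw compared against the configured percentage. A minimal standalone sketch of the long-run effect (rand() stands in for fio's rand_between(), and percentage_random=70 is an assumed setting):

```c
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int perc_rand = 70;   /* assumed percentage_random=70 */
	unsigned int random_ios = 0, total = 100000;

	srand(1);
	for (unsigned int i = 0; i < total; i++) {
		unsigned int v = 1 + rand() % 100;   /* stand-in for rand_between(1, 100) */
		if (perc_rand == 100 || v <= perc_rand)
			random_ios++;
	}
	printf("%u of %u IOs were random (~%u%%)\n",
	       random_ios, total, random_ios * 100 / total);
	return 0;
}
```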
0bcf41cd JA |
315 | static void loop_cache_invalidate(struct thread_data *td, struct fio_file *f) |
316 | { | |
317 | struct thread_options *o = &td->o; | |
318 | ||
319 | if (o->invalidate_cache && !o->odirect) { | |
320 | int fio_unused ret; | |
321 | ||
322 | ret = file_invalidate_cache(td, f); | |
323 | } | |
324 | } | |
325 | ||
38dad62d | 326 | static int get_next_rand_block(struct thread_data *td, struct fio_file *f, |
1ae83d45 | 327 | enum fio_ddir ddir, uint64_t *b) |
38dad62d | 328 | { |
c04e4661 DE |
329 | if (!get_next_rand_offset(td, f, ddir, b)) |
330 | return 0; | |
331 | ||
8c07860d JA |
332 | if (td->o.time_based || |
333 | (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)) { | |
33c48814 | 334 | fio_file_reset(td, f); |
80f02150 | 335 | loop_cache_invalidate(td, f); |
c04e4661 DE |
336 | if (!get_next_rand_offset(td, f, ddir, b)) |
337 | return 0; | |
38dad62d JA |
338 | } |
339 | ||
c04e4661 | 340 | dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n", |
f1dfb668 | 341 | f->file_name, (unsigned long long) f->last_pos[ddir], |
4b91ee8f | 342 | (unsigned long long) f->real_file_size); |
c04e4661 | 343 | return 1; |
38dad62d JA |
344 | } |
345 | ||
37cf9e3c | 346 | static int get_next_seq_offset(struct thread_data *td, struct fio_file *f, |
1ae83d45 | 347 | enum fio_ddir ddir, uint64_t *offset) |
38dad62d | 348 | { |
ac002339 JA |
349 | struct thread_options *o = &td->o; |
350 | ||
ff58fced JA |
351 | assert(ddir_rw(ddir)); |
352 | ||
17373ce2 JA |
353 | /* |
354 | * If we reach the end for a time based run, reset us back to 0 | |
355 | * and invalidate the cache, if we need to. | |
356 | */ | |
f1dfb668 | 357 | if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) && |
46ec8270 | 358 | o->time_based && o->nr_files == 1) { |
c89daa4a | 359 | f->last_pos[ddir] = f->file_offset; |
0bcf41cd | 360 | loop_cache_invalidate(td, f); |
19ddc35b | 361 | } |
c04e4661 | 362 | |
f1dfb668 | 363 | if (f->last_pos[ddir] < f->real_file_size) { |
1ae83d45 | 364 | uint64_t pos; |
059b0802 | 365 | |
69b98f11 JA |
366 | /* |
367 | * Only rewind if we already hit the end | |
368 | */ | |
369 | if (f->last_pos[ddir] == f->file_offset && | |
370 | f->file_offset && o->ddir_seq_add < 0) { | |
c22825bb JA |
371 | if (f->real_file_size > f->io_size) |
372 | f->last_pos[ddir] = f->io_size; | |
373 | else | |
374 | f->last_pos[ddir] = f->real_file_size; | |
375 | } | |
a66da7a2 | 376 | |
f1dfb668 | 377 | pos = f->last_pos[ddir] - f->file_offset; |
ac002339 JA |
378 | if (pos && o->ddir_seq_add) { |
379 | pos += o->ddir_seq_add; | |
380 | ||
381 | /* | |
382 | * If we reach beyond the end of the file | |
383 | * with holed IO, wrap around to the | |
b0a84f48 JA |
384 | * beginning again. If we're doing backwards IO, |
385 | * wrap to the end. | |
ac002339 | 386 | */ |
b0a84f48 JA |
387 | if (pos >= f->real_file_size) { |
388 | if (o->ddir_seq_add > 0) | |
389 | pos = f->file_offset; | |
c22825bb JA |
390 | else { |
391 | if (f->real_file_size > f->io_size) | |
392 | pos = f->io_size; | |
393 | else | |
394 | pos = f->real_file_size; | |
395 | ||
396 | pos += o->ddir_seq_add; | |
397 | } | |
b0a84f48 | 398 | } |
ac002339 | 399 | } |
059b0802 | 400 | |
37cf9e3c | 401 | *offset = pos; |
38dad62d JA |
402 | return 0; |
403 | } | |
404 | ||
405 | return 1; | |
406 | } | |
407 | ||
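A small aside on the ddir_seq_add handling in get_next_seq_offset() above: with a positive stride (holed sequential I/O), a position that runs past the end of the file wraps back to the file start. A minimal standalone sketch with assumed sizes (not fio code):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t real_file_size = 64 * 1024;   /* assumed 64k file */
	uint64_t file_offset = 0;
	int64_t seq_add = 12 * 1024;           /* assumed ddir_seq_add=12k */
	uint64_t pos = 60 * 1024;              /* last position within the file */

	pos += seq_add;
	/* a forward stride past EOF wraps to the start of the file */
	if (pos >= real_file_size && seq_add > 0)
		pos = file_offset;

	printf("next offset: %llu\n", (unsigned long long) pos);   /* 0 */
	return 0;
}
```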
408 | static int get_next_block(struct thread_data *td, struct io_u *io_u, | |
6aca9b3d | 409 | enum fio_ddir ddir, int rw_seq, |
ec370c22 | 410 | bool *is_random) |
38dad62d JA |
411 | { |
412 | struct fio_file *f = io_u->file; | |
1ae83d45 | 413 | uint64_t b, offset; |
38dad62d JA |
414 | int ret; |
415 | ||
ff58fced JA |
416 | assert(ddir_rw(ddir)); |
417 | ||
37cf9e3c JA |
418 | b = offset = -1ULL; |
419 | ||
f9f1e137 VF |
420 | if (td_randtrimwrite(td) && ddir == DDIR_WRITE) { |
421 | /* don't mark randommap for these writes */ | |
422 | io_u_set(td, io_u, IO_U_F_BUSY_OK); | |
423 | offset = f->last_start[DDIR_TRIM]; | |
424 | *is_random = true; | |
425 | ret = 0; | |
426 | } else if (rw_seq) { | |
211c9b89 | 427 | if (td_random(td)) { |
6aca9b3d | 428 | if (should_do_random(td, ddir)) { |
211c9b89 | 429 | ret = get_next_rand_block(td, f, ddir, &b); |
ec370c22 | 430 | *is_random = true; |
6aca9b3d | 431 | } else { |
ec370c22 | 432 | *is_random = false; |
1651e431 | 433 | io_u_set(td, io_u, IO_U_F_BUSY_OK); |
211c9b89 JA |
434 | ret = get_next_seq_offset(td, f, ddir, &offset); |
435 | if (ret) | |
436 | ret = get_next_rand_block(td, f, ddir, &b); | |
437 | } | |
6aca9b3d | 438 | } else { |
ec370c22 | 439 | *is_random = false; |
37cf9e3c | 440 | ret = get_next_seq_offset(td, f, ddir, &offset); |
6aca9b3d | 441 | } |
38dad62d | 442 | } else { |
1651e431 | 443 | io_u_set(td, io_u, IO_U_F_BUSY_OK); |
ec370c22 | 444 | *is_random = false; |
38dad62d JA |
445 | |
446 | if (td->o.rw_seq == RW_SEQ_SEQ) { | |
37cf9e3c | 447 | ret = get_next_seq_offset(td, f, ddir, &offset); |
6aca9b3d | 448 | if (ret) { |
37cf9e3c | 449 | ret = get_next_rand_block(td, f, ddir, &b); |
ec370c22 | 450 | *is_random = false; |
6aca9b3d | 451 | } |
38dad62d | 452 | } else if (td->o.rw_seq == RW_SEQ_IDENT) { |
f1dfb668 JA |
453 | if (f->last_start[ddir] != -1ULL) |
454 | offset = f->last_start[ddir] - f->file_offset; | |
38dad62d | 455 | else |
37cf9e3c | 456 | offset = 0; |
38dad62d JA |
457 | ret = 0; |
458 | } else { | |
459 | log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq); | |
460 | ret = 1; | |
461 | } | |
462 | } | |
6d68b997 | 463 | |
37cf9e3c JA |
464 | if (!ret) { |
465 | if (offset != -1ULL) | |
466 | io_u->offset = offset; | |
467 | else if (b != -1ULL) | |
468 | io_u->offset = b * td->o.ba[ddir]; | |
469 | else { | |
4e0a8fa2 | 470 | log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b); |
37cf9e3c JA |
471 | ret = 1; |
472 | } | |
4fff54cc | 473 | io_u->verify_offset = io_u->offset; |
37cf9e3c JA |
474 | } |
475 | ||
38dad62d JA |
476 | return ret; |
477 | } | |
478 | ||
10ba535a JA |
479 | /* |
480 | * For random io, generate a random new block and see if it's used. Repeat | |
481 | * until we find a free one. For sequential io, just return the end of | |
482 | * the last io issued. | |
483 | */ | |
e8fb335e | 484 | static int get_next_offset(struct thread_data *td, struct io_u *io_u, |
ec370c22 | 485 | bool *is_random) |
10ba535a | 486 | { |
9bf2061e | 487 | struct fio_file *f = io_u->file; |
4ba66134 | 488 | enum fio_ddir ddir = io_u->ddir; |
38dad62d | 489 | int rw_seq_hit = 0; |
10ba535a | 490 | |
ff58fced JA |
491 | assert(ddir_rw(ddir)); |
492 | ||
38dad62d JA |
493 | if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) { |
494 | rw_seq_hit = 1; | |
5736c10d | 495 | td->ddir_seq_nr = td->o.ddir_seq_nr; |
38dad62d | 496 | } |
211097b2 | 497 | |
6aca9b3d | 498 | if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random)) |
38dad62d | 499 | return 1; |
10ba535a | 500 | |
009bd847 JA |
501 | if (io_u->offset >= f->io_size) { |
502 | dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n", | |
4b91ee8f JA |
503 | (unsigned long long) io_u->offset, |
504 | (unsigned long long) f->io_size); | |
009bd847 JA |
505 | return 1; |
506 | } | |
507 | ||
508 | io_u->offset += f->file_offset; | |
2ba1c290 JA |
509 | if (io_u->offset >= f->real_file_size) { |
510 | dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n", | |
4b91ee8f JA |
511 | (unsigned long long) io_u->offset, |
512 | (unsigned long long) f->real_file_size); | |
10ba535a | 513 | return 1; |
2ba1c290 | 514 | } |
10ba535a | 515 | |
5e0863da VF |
516 | /* |
517 | * For randtrimwrite, we decide whether to issue a trim or a write | |
518 | * based on whether the offsets for the most recent trim and write | |
519 | * operations match. If they don't match that means we just issued a | |
520 | * new trim and the next operation should be a write. If they *do* | |
521 | * match that means we just completed a trim+write pair and the next | |
522 | * command should be a trim. | |
523 | * | |
524 | * This works fine for sequential workloads but for random workloads | |
525 | * it's possible to complete a trim+write pair and then have the next | |
526 | * randomly generated offset match the previous offset. If that happens | |
527 | * we need to alter the offset for the last write operation in order | |
528 | * to ensure that we issue a write operation the next time through. | |
529 | */ | |
530 | if (td_randtrimwrite(td) && ddir == DDIR_TRIM && | |
531 | f->last_start[DDIR_TRIM] == io_u->offset) | |
793b8686 | 532 | f->last_start[DDIR_WRITE]--; |
5e0863da | 533 | |
9bf46532 | 534 | io_u->verify_offset = io_u->offset; |
10ba535a JA |
535 | return 0; |
536 | } | |
537 | ||
e39c0676 | 538 | static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u, |
5fff9543 | 539 | unsigned long long buflen) |
79944128 JA |
540 | { |
541 | struct fio_file *f = io_u->file; | |
542 | ||
bedc9dc2 | 543 | return io_u->offset + buflen <= f->io_size + get_start_offset(td, f); |
79944128 JA |
544 | } |
545 | ||
5fff9543 | 546 | static unsigned long long get_next_buflen(struct thread_data *td, struct io_u *io_u, |
ec370c22 | 547 | bool is_random) |
10ba535a | 548 | { |
6aca9b3d | 549 | int ddir = io_u->ddir; |
5fff9543 JF |
550 | unsigned long long buflen = 0; |
551 | unsigned long long minbs, maxbs; | |
3dd29f7c | 552 | uint64_t frand_max, r; |
7c961359 | 553 | bool power_2; |
10ba535a | 554 | |
9ee1c647 | 555 | assert(ddir_rw(ddir)); |
6aca9b3d | 556 | |
f9f1e137 VF |
557 | if (td_randtrimwrite(td) && ddir == DDIR_WRITE) { |
558 | struct fio_file *f = io_u->file; | |
559 | ||
560 | return f->last_pos[DDIR_TRIM] - f->last_start[DDIR_TRIM]; | |
561 | } | |
562 | ||
6aca9b3d | 563 | if (td->o.bs_is_seq_rand) |
ec370c22 | 564 | ddir = is_random ? DDIR_WRITE : DDIR_READ; |
ff58fced | 565 | |
f3059de1 JA |
566 | minbs = td->o.min_bs[ddir]; |
567 | maxbs = td->o.max_bs[ddir]; | |
568 | ||
79944128 JA |
569 | if (minbs == maxbs) |
570 | return minbs; | |
571 | ||
52c58027 JA |
572 | /* |
573 | * If we can't satisfy the min block size from here, then fail | |
574 | */ | |
575 | if (!io_u_fits(td, io_u, minbs)) | |
576 | return 0; | |
577 | ||
2f282cec | 578 | frand_max = rand_max(&td->bsrange_state[ddir]); |
79944128 | 579 | do { |
2f282cec | 580 | r = __rand(&td->bsrange_state[ddir]); |
4c07ad86 | 581 | |
720e84ad | 582 | if (!td->o.bssplit_nr[ddir]) { |
5fff9543 | 583 | buflen = minbs + (unsigned long long) ((double) maxbs * |
c3546b53 | 584 | (r / (frand_max + 1.0))); |
5ec10eaa | 585 | } else { |
3dd29f7c | 586 | long long perc = 0; |
564ca972 JA |
587 | unsigned int i; |
588 | ||
720e84ad JA |
589 | for (i = 0; i < td->o.bssplit_nr[ddir]; i++) { |
590 | struct bssplit *bsp = &td->o.bssplit[ddir][i]; | |
564ca972 | 591 | |
cc5e4cb0 DLM |
592 | if (!bsp->perc) |
593 | continue; | |
564ca972 JA |
594 | buflen = bsp->bs; |
595 | perc += bsp->perc; | |
3dd29f7c | 596 | if ((r / perc <= frand_max / 100ULL) && |
79944128 | 597 | io_u_fits(td, io_u, buflen)) |
564ca972 JA |
598 | break; |
599 | } | |
600 | } | |
79944128 | 601 | |
17a6b702 PL |
602 | power_2 = is_power_of_2(minbs); |
603 | if (!td->o.bs_unaligned && power_2) | |
7c306bb1 | 604 | buflen &= ~(minbs - 1); |
8135fe4d JA |
605 | else if (!td->o.bs_unaligned && !power_2) |
606 | buflen -= buflen % minbs; | |
607 | if (buflen > maxbs) | |
608 | buflen = maxbs; | |
79944128 | 609 | } while (!io_u_fits(td, io_u, buflen)); |
6a5e6884 | 610 | |
10ba535a JA |
611 | return buflen; |
612 | } | |
613 | ||
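The bssplit branch of get_next_buflen() walks the split table, accumulating percentages until the random value falls inside the cumulative share. A minimal standalone sketch using an assumed bssplit=4k/60:64k/40 and a made-up PRNG value; the comparison mirrors the real loop's r / perc <= frand_max / 100 test:

```c
#include <stdio.h>
#include <stdint.h>

struct bssplit { unsigned long long bs; unsigned int perc; };

int main(void)
{
	/* assumed bssplit=4k/60:64k/40 */
	struct bssplit split[] = { { 4096, 60 }, { 65536, 40 } };
	uint64_t frand_max = 0xffffffffULL;          /* assumed generator maximum */
	uint64_t r = (uint64_t)(frand_max * 0.75);   /* pretend PRNG output */

	unsigned long long buflen = 0;
	unsigned long long perc = 0;
	for (size_t i = 0; i < sizeof(split) / sizeof(split[0]); i++) {
		buflen = split[i].bs;
		perc += split[i].perc;
		/* does r fall within the first perc% of the generator range? */
		if (r / perc <= frand_max / 100ULL)
			break;
	}
	printf("picked block size %llu\n", buflen);   /* 65536 here */
	return 0;
}
```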
afe24a5a JA |
614 | static void set_rwmix_bytes(struct thread_data *td) |
615 | { | |
afe24a5a JA |
616 | unsigned int diff; |
617 | ||
618 | /* | |
619 | * we do a time- or byte-based switch. This is needed because |
620 | * buffered writes may issue a lot quicker than they complete, | |
621 | * whereas reads do not. | |
622 | */ | |
e47f799f | 623 | diff = td->o.rwmix[td->rwmix_ddir ^ 1]; |
04c540d9 | 624 | td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100; |
e47f799f JA |
625 | } |
626 | ||
627 | static inline enum fio_ddir get_rand_ddir(struct thread_data *td) | |
628 | { | |
629 | unsigned int v; | |
e47f799f | 630 | |
1bd5d213 | 631 | v = rand_between(&td->rwmix_state, 1, 100); |
4c07ad86 | 632 | |
04c540d9 | 633 | if (v <= td->o.rwmix[DDIR_READ]) |
e47f799f JA |
634 | return DDIR_READ; |
635 | ||
636 | return DDIR_WRITE; | |
afe24a5a JA |
637 | } |
638 | ||
0d593542 | 639 | int io_u_quiesce(struct thread_data *td) |
002e7183 | 640 | { |
2dcfdeb9 | 641 | int ret = 0, completed = 0, err = 0; |
0d593542 | 642 | |
002e7183 JA |
643 | /* |
644 | * We are going to sleep, ensure that we flush anything pending as | |
645 | * not to skew our latency numbers. | |
646 | * | |
647 | * Changed to only monitor 'in flight' requests here instead of the | |
648 | * td->cur_depth, because td->cur_depth does not accurately represent |
649 | * IOs that have actually been submitted to an async engine, |
650 | * and cur_depth is meaningless for sync engines. | |
651 | */ | |
a80cb54b BVA |
652 | if (td->io_u_queued || td->cur_depth) |
653 | td_io_commit(td); | |
9cc80b6d | 654 | |
002e7183 | 655 | while (td->io_u_in_flight) { |
55312f9f | 656 | ret = io_u_queued_complete(td, 1); |
0d593542 JA |
657 | if (ret > 0) |
658 | completed += ret; | |
d28174f0 | 659 | else if (ret < 0) |
2dcfdeb9 | 660 | err = ret; |
002e7183 | 661 | } |
0d593542 | 662 | |
6be06c46 JA |
663 | if (td->flags & TD_F_REGROW_LOGS) |
664 | regrow_logs(td); | |
665 | ||
d28174f0 JA |
666 | if (completed) |
667 | return completed; | |
668 | ||
2dcfdeb9 | 669 | return err; |
002e7183 JA |
670 | } |
671 | ||
581e7141 JA |
672 | static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir) |
673 | { | |
674 | enum fio_ddir odir = ddir ^ 1; | |
b5407f8b | 675 | uint64_t usec; |
90eff1c9 | 676 | uint64_t now; |
581e7141 | 677 | |
ff58fced | 678 | assert(ddir_rw(ddir)); |
970d9000 | 679 | now = utime_since_now(&td->epoch); |
ff58fced | 680 | |
50a8ce86 D |
681 | /* |
682 | * if rate_next_io_time is in the past, need to catch up to rate | |
683 | */ | |
684 | if (td->rate_next_io_time[ddir] <= now) | |
581e7141 JA |
685 | return ddir; |
686 | ||
687 | /* | |
50a8ce86 | 688 | * We are ahead of rate in this direction. See if we |
581e7141 JA |
689 | * should switch. |
690 | */ | |
315fcfec | 691 | if (td_rw(td) && td->o.rwmix[odir]) { |
581e7141 | 692 | /* |
50a8ce86 | 693 | * Other direction is behind rate, switch |
581e7141 | 694 | */ |
50a8ce86 | 695 | if (td->rate_next_io_time[odir] <= now) |
581e7141 JA |
696 | return odir; |
697 | ||
698 | /* | |
395feabb JA |
699 | * Both directions are ahead of rate. sleep the min, |
700 | * switch if necessary | |
581e7141 | 701 | */ |
50a8ce86 | 702 | if (td->rate_next_io_time[ddir] <= |
395feabb | 703 | td->rate_next_io_time[odir]) { |
50a8ce86 | 704 | usec = td->rate_next_io_time[ddir] - now; |
581e7141 | 705 | } else { |
50a8ce86 | 706 | usec = td->rate_next_io_time[odir] - now; |
581e7141 JA |
707 | ddir = odir; |
708 | } | |
709 | } else | |
50a8ce86 | 710 | usec = td->rate_next_io_time[ddir] - now; |
581e7141 | 711 | |
a9da8ab2 JA |
712 | if (td->o.io_submit_mode == IO_MODE_INLINE) |
713 | io_u_quiesce(td); | |
78c1eda5 | 714 | |
df06a036 | 715 | if (td->o.timeout && ((usec + now) > td->o.timeout)) { |
716 | /* | |
717 | * check if the usec is capable of taking negative values | |
718 | */ | |
719 | if (now > td->o.timeout) { | |
720 | ddir = DDIR_INVAL; | |
721 | return ddir; | |
722 | } | |
723 | usec = td->o.timeout - now; | |
724 | } | |
1a9bf814 | 725 | usec_sleep(td, usec); |
df06a036 | 726 | |
727 | now = utime_since_now(&td->epoch); | |
728 | if ((td->o.timeout && (now > td->o.timeout)) || td->terminate) | |
729 | ddir = DDIR_INVAL; | |
730 | ||
581e7141 JA |
731 | return ddir; |
732 | } | |
733 | ||
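rate_ddir() implements rate pacing: if the next allowed issue time for the wanted direction is still in the future, fio either switches to the other direction (when that one is due sooner) or sleeps the difference. A minimal standalone sketch of that decision with made-up microsecond timestamps (not fio code):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed state: both directions are ahead of their target rate */
	uint64_t now = 1000;                        /* usec since job start */
	uint64_t next_io_time[2] = { 1400, 1250 };  /* [read, write] */
	int ddir = 0;                               /* currently want a read */
	int odir = ddir ^ 1;

	uint64_t usec;
	if (next_io_time[ddir] <= next_io_time[odir]) {
		usec = next_io_time[ddir] - now;        /* stick with this direction */
	} else {
		usec = next_io_time[odir] - now;        /* other direction is due sooner */
		ddir = odir;
	}
	printf("sleep %llu usec, then issue %s\n",
	       (unsigned long long) usec, ddir ? "write" : "read");
	return 0;
}
```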
10ba535a JA |
734 | /* |
735 | * Return the data direction for the next io_u. If the job is a | |
736 | * mixed read/write workload, check the rwmix cycle and switch if | |
737 | * necessary. | |
738 | */ | |
1e97cce9 | 739 | static enum fio_ddir get_rw_ddir(struct thread_data *td) |
10ba535a | 740 | { |
581e7141 JA |
741 | enum fio_ddir ddir; |
742 | ||
5f9099ea | 743 | /* |
f6860972 TK |
744 | * See if it's time to fsync/fdatasync/sync_file_range first, |
745 | * and if not then move on to check regular I/Os. | |
5f9099ea | 746 | */ |
f6860972 TK |
747 | if (should_fsync(td)) { |
748 | if (td->o.fsync_blocks && td->io_issues[DDIR_WRITE] && | |
749 | !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks)) | |
750 | return DDIR_SYNC; | |
751 | ||
752 | if (td->o.fdatasync_blocks && td->io_issues[DDIR_WRITE] && | |
753 | !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks)) | |
754 | return DDIR_DATASYNC; | |
755 | ||
756 | if (td->sync_file_range_nr && td->io_issues[DDIR_WRITE] && | |
757 | !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr)) | |
758 | return DDIR_SYNC_FILE_RANGE; | |
759 | } | |
44f29692 | 760 | |
10ba535a | 761 | if (td_rw(td)) { |
10ba535a JA |
762 | /* |
763 | * Check if it's time to seed a new data direction. | |
764 | */ | |
e4928662 | 765 | if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) { |
e47f799f JA |
766 | /* |
767 | * Put a top limit on how many bytes we do for | |
768 | * one data direction, to avoid overflowing the | |
769 | * ranges too much | |
770 | */ | |
771 | ddir = get_rand_ddir(td); | |
e47f799f JA |
772 | |
773 | if (ddir != td->rwmix_ddir) | |
774 | set_rwmix_bytes(td); | |
775 | ||
776 | td->rwmix_ddir = ddir; | |
10ba535a | 777 | } |
581e7141 | 778 | ddir = td->rwmix_ddir; |
10ba535a | 779 | } else if (td_read(td)) |
581e7141 | 780 | ddir = DDIR_READ; |
6eaf09d6 | 781 | else if (td_write(td)) |
581e7141 | 782 | ddir = DDIR_WRITE; |
5c8e84ca | 783 | else if (td_trim(td)) |
6eaf09d6 | 784 | ddir = DDIR_TRIM; |
5c8e84ca TK |
785 | else |
786 | ddir = DDIR_INVAL; | |
581e7141 JA |
787 | |
788 | td->rwmix_ddir = rate_ddir(td, ddir); | |
789 | return td->rwmix_ddir; | |
10ba535a JA |
790 | } |
791 | ||
1ef2b6be JA |
792 | static void set_rw_ddir(struct thread_data *td, struct io_u *io_u) |
793 | { | |
0e4dd95c DE |
794 | enum fio_ddir ddir = get_rw_ddir(td); |
795 | ||
731461cc DLM |
796 | if (td->o.zone_mode == ZONE_MODE_ZBD) |
797 | ddir = zbd_adjust_ddir(td, io_u, ddir); | |
c65057f9 | 798 | |
82a90686 | 799 | if (td_trimwrite(td)) { |
0e4dd95c | 800 | struct fio_file *f = io_u->file; |
793b8686 | 801 | if (f->last_start[DDIR_WRITE] == f->last_start[DDIR_TRIM]) |
0e4dd95c DE |
802 | ddir = DDIR_TRIM; |
803 | else | |
804 | ddir = DDIR_WRITE; | |
805 | } | |
806 | ||
807 | io_u->ddir = io_u->acct_ddir = ddir; | |
1ef2b6be | 808 | |
9b87f09b | 809 | if (io_u->ddir == DDIR_WRITE && td_ioengine_flagged(td, FIO_BARRIER) && |
1ef2b6be JA |
810 | td->o.barrier_blocks && |
811 | !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) && | |
812 | td->io_issues[DDIR_WRITE]) | |
1651e431 | 813 | io_u_set(td, io_u, IO_U_F_BARRIER); |
1ef2b6be JA |
814 | } |
815 | ||
e8462bd8 | 816 | void put_file_log(struct thread_data *td, struct fio_file *f) |
60f2c658 | 817 | { |
71b84caa | 818 | unsigned int ret = put_file(td, f); |
60f2c658 JA |
819 | |
820 | if (ret) | |
821 | td_verror(td, ret, "file close"); | |
822 | } | |
823 | ||
10ba535a JA |
824 | void put_io_u(struct thread_data *td, struct io_u *io_u) |
825 | { | |
26b3a188 JA |
826 | const bool needs_lock = td_async_processing(td); |
827 | ||
b2da58c4 | 828 | zbd_put_io_u(td, io_u); |
99952ca7 | 829 | |
a9da8ab2 JA |
830 | if (td->parent) |
831 | td = td->parent; | |
832 | ||
26b3a188 JA |
833 | if (needs_lock) |
834 | __td_io_u_lock(td); | |
e8462bd8 | 835 | |
f8b0bd10 | 836 | if (io_u->file && !(io_u->flags & IO_U_F_NO_FILE_PUT)) |
60f2c658 | 837 | put_file_log(td, io_u->file); |
f8b0bd10 | 838 | |
10ba535a | 839 | io_u->file = NULL; |
1651e431 | 840 | io_u_set(td, io_u, IO_U_F_FREE); |
d7ee2a7d | 841 | |
a9da8ab2 | 842 | if (io_u->flags & IO_U_F_IN_CUR_DEPTH) { |
0c41214f | 843 | td->cur_depth--; |
a9da8ab2 JA |
844 | assert(!(td->flags & TD_F_CHILD)); |
845 | } | |
2ae0b204 | 846 | io_u_qpush(&td->io_u_freelist, io_u); |
e8462bd8 | 847 | td_io_u_free_notify(td); |
26b3a188 JA |
848 | |
849 | if (needs_lock) | |
850 | __td_io_u_unlock(td); | |
10ba535a JA |
851 | } |
852 | ||
f2bba182 RR |
853 | void clear_io_u(struct thread_data *td, struct io_u *io_u) |
854 | { | |
1651e431 | 855 | io_u_clear(td, io_u, IO_U_F_FLIGHT); |
f2bba182 RR |
856 | put_io_u(td, io_u); |
857 | } | |
858 | ||
755200a3 JA |
859 | void requeue_io_u(struct thread_data *td, struct io_u **io_u) |
860 | { | |
26b3a188 | 861 | const bool needs_lock = td_async_processing(td); |
755200a3 | 862 | struct io_u *__io_u = *io_u; |
bcd5abfa | 863 | enum fio_ddir ddir = acct_ddir(__io_u); |
755200a3 | 864 | |
465221b0 JA |
865 | dprint(FD_IO, "requeue %p\n", __io_u); |
866 | ||
a9da8ab2 JA |
867 | if (td->parent) |
868 | td = td->parent; | |
869 | ||
26b3a188 JA |
870 | if (needs_lock) |
871 | __td_io_u_lock(td); | |
e8462bd8 | 872 | |
1651e431 | 873 | io_u_set(td, __io_u, IO_U_F_FREE); |
bcd5abfa JA |
874 | if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir)) |
875 | td->io_issues[ddir]--; | |
5ec10eaa | 876 | |
1651e431 | 877 | io_u_clear(td, __io_u, IO_U_F_FLIGHT); |
a9da8ab2 | 878 | if (__io_u->flags & IO_U_F_IN_CUR_DEPTH) { |
0c41214f | 879 | td->cur_depth--; |
a9da8ab2 JA |
880 | assert(!(td->flags & TD_F_CHILD)); |
881 | } | |
2ae0b204 JA |
882 | |
883 | io_u_rpush(&td->io_u_requeues, __io_u); | |
a9da8ab2 | 884 | td_io_u_free_notify(td); |
26b3a188 JA |
885 | |
886 | if (needs_lock) | |
887 | __td_io_u_unlock(td); | |
888 | ||
755200a3 JA |
889 | *io_u = NULL; |
890 | } | |
891 | ||
7b865a2f | 892 | static void setup_strided_zone_mode(struct thread_data *td, struct io_u *io_u) |
224b3093 | 893 | { |
894 | struct fio_file *f = io_u->file; | |
895 | ||
7b865a2f BVA |
896 | assert(td->o.zone_mode == ZONE_MODE_STRIDED); |
897 | assert(td->o.zone_size); | |
898 | assert(td->o.zone_range); | |
899 | ||
224b3093 | 900 | /* |
901 | * See if it's time to switch to a new zone | |
902 | */ | |
fa9fd914 | 903 | if (td->zone_bytes >= td->o.zone_size) { |
224b3093 | 904 | td->zone_bytes = 0; |
905 | f->file_offset += td->o.zone_range + td->o.zone_skip; | |
906 | ||
907 | /* | |
908 | * Wrap from the beginning, if we exceed the file size | |
909 | */ | |
910 | if (f->file_offset >= f->real_file_size) | |
04bc85a1 JA |
911 | f->file_offset = get_start_offset(td, f); |
912 | ||
224b3093 | 913 | f->last_pos[io_u->ddir] = f->file_offset; |
914 | td->io_skip_bytes += td->o.zone_skip; | |
915 | } | |
916 | ||
917 | /* | |
04bc85a1 JA |
918 | * If zone_size > zone_range, then maintain the same zone until |
919 | * zone_bytes >= zone_size. | |
920 | */ | |
224b3093 | 921 | if (f->last_pos[io_u->ddir] >= (f->file_offset + td->o.zone_range)) { |
922 | dprint(FD_IO, "io_u maintain zone offset=%" PRIu64 "/last_pos=%" PRIu64 "\n", | |
923 | f->file_offset, f->last_pos[io_u->ddir]); | |
924 | f->last_pos[io_u->ddir] = f->file_offset; | |
925 | } | |
926 | ||
927 | /* | |
928 | * For random: if 'norandommap' is not set and zone_size > zone_range, | |
929 | * map needs to be reset as it's done with zone_range every time. |
930 | */ | |
04bc85a1 | 931 | if ((td->zone_bytes % td->o.zone_range) == 0) |
224b3093 | 932 | fio_file_reset(td, f); |
224b3093 | 933 | } |
934 | ||
9bf2061e | 935 | static int fill_io_u(struct thread_data *td, struct io_u *io_u) |
10ba535a | 936 | { |
ec370c22 | 937 | bool is_random; |
bfbdd35b BVA |
938 | uint64_t offset; |
939 | enum io_u_action ret; | |
6aca9b3d | 940 | |
9b87f09b | 941 | if (td_ioengine_flagged(td, FIO_NOIO)) |
b4c5e1ac JA |
942 | goto out; |
943 | ||
1ef2b6be | 944 | set_rw_ddir(td, io_u); |
5f9099ea | 945 | |
df06a036 | 946 | if (io_u->ddir == DDIR_INVAL) { |
947 | dprint(FD_IO, "invalid direction received ddir = %d", io_u->ddir); | |
948 | return 1; | |
949 | } | |
87dc1ab1 | 950 | /* |
ff58fced | 951 | * fsync() or fdatasync() or trim etc, we are done |
87dc1ab1 | 952 | */ |
ff58fced | 953 | if (!ddir_rw(io_u->ddir)) |
c38e9468 | 954 | goto out; |
a00735e6 | 955 | |
7b865a2f BVA |
956 | if (td->o.zone_mode == ZONE_MODE_STRIDED) |
957 | setup_strided_zone_mode(td, io_u); | |
4d37720a DLM |
958 | else if (td->o.zone_mode == ZONE_MODE_ZBD) |
959 | setup_zbd_zone_mode(td, io_u); | |
48f5abd3 | 960 | |
10ba535a | 961 | /* |
c685b5b2 JA |
962 | * No log, let the seq/rand engine retrieve the next buflen and |
963 | * position. | |
10ba535a | 964 | */ |
6aca9b3d | 965 | if (get_next_offset(td, io_u, &is_random)) { |
2ba1c290 | 966 | dprint(FD_IO, "io_u %p, failed getting offset\n", io_u); |
bca4ed4d | 967 | return 1; |
2ba1c290 | 968 | } |
10ba535a | 969 | |
6aca9b3d | 970 | io_u->buflen = get_next_buflen(td, io_u, is_random); |
2ba1c290 JA |
971 | if (!io_u->buflen) { |
972 | dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u); | |
bca4ed4d | 973 | return 1; |
2ba1c290 | 974 | } |
bca4ed4d | 975 | |
bfbdd35b BVA |
976 | offset = io_u->offset; |
977 | if (td->o.zone_mode == ZONE_MODE_ZBD) { | |
978 | ret = zbd_adjust_block(td, io_u); | |
979 | if (ret == io_u_eof) | |
980 | return 1; | |
981 | } | |
982 | ||
2ba1c290 | 983 | if (io_u->offset + io_u->buflen > io_u->file->real_file_size) { |
5fff9543 | 984 | dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%llx exceeds file size=0x%llx\n", |
e5f9a813 | 985 | io_u, |
4b91ee8f JA |
986 | (unsigned long long) io_u->offset, io_u->buflen, |
987 | (unsigned long long) io_u->file->real_file_size); | |
6a5e6884 | 988 | return 1; |
2ba1c290 | 989 | } |
6a5e6884 | 990 | |
bca4ed4d JA |
991 | /* |
992 | * mark entry before potentially trimming io_u | |
993 | */ | |
303032ae | 994 | if (td_random(td) && file_randommap(td, io_u->file)) |
bfbdd35b | 995 | io_u->buflen = mark_random_map(td, io_u, offset, io_u->buflen); |
bca4ed4d | 996 | |
c38e9468 | 997 | out: |
e5f9a813 | 998 | dprint_io_u(io_u, "fill"); |
cb7d7abb | 999 | io_u->verify_offset = io_u->offset; |
d9d91e39 | 1000 | td->zone_bytes += io_u->buflen; |
bca4ed4d | 1001 | return 0; |
10ba535a JA |
1002 | } |
1003 | ||
6cc0e5aa | 1004 | static void __io_u_mark_map(uint64_t *map, unsigned int nr) |
838bc709 | 1005 | { |
2b13e716 | 1006 | int idx = 0; |
838bc709 JA |
1007 | |
1008 | switch (nr) { | |
1009 | default: | |
2b13e716 | 1010 | idx = 6; |
838bc709 JA |
1011 | break; |
1012 | case 33 ... 64: | |
2b13e716 | 1013 | idx = 5; |
838bc709 JA |
1014 | break; |
1015 | case 17 ... 32: | |
2b13e716 | 1016 | idx = 4; |
838bc709 JA |
1017 | break; |
1018 | case 9 ... 16: | |
2b13e716 | 1019 | idx = 3; |
838bc709 JA |
1020 | break; |
1021 | case 5 ... 8: | |
2b13e716 | 1022 | idx = 2; |
838bc709 JA |
1023 | break; |
1024 | case 1 ... 4: | |
2b13e716 | 1025 | idx = 1; |
87933e32 | 1026 | fio_fallthrough; |
838bc709 JA |
1027 | case 0: |
1028 | break; | |
1029 | } | |
1030 | ||
2b13e716 | 1031 | map[idx]++; |
838bc709 JA |
1032 | } |
1033 | ||
1034 | void io_u_mark_submit(struct thread_data *td, unsigned int nr) | |
1035 | { | |
1036 | __io_u_mark_map(td->ts.io_u_submit, nr); | |
1037 | td->ts.total_submit++; | |
1038 | } | |
1039 | ||
1040 | void io_u_mark_complete(struct thread_data *td, unsigned int nr) | |
1041 | { | |
1042 | __io_u_mark_map(td->ts.io_u_complete, nr); | |
1043 | td->ts.total_complete++; | |
1044 | } | |
1045 | ||
d8005759 | 1046 | void io_u_mark_depth(struct thread_data *td, unsigned int nr) |
71619dc2 | 1047 | { |
2b13e716 | 1048 | int idx = 0; |
71619dc2 JA |
1049 | |
1050 | switch (td->cur_depth) { | |
1051 | default: | |
2b13e716 | 1052 | idx = 6; |
a783e61a | 1053 | break; |
71619dc2 | 1054 | case 32 ... 63: |
2b13e716 | 1055 | idx = 5; |
a783e61a | 1056 | break; |
71619dc2 | 1057 | case 16 ... 31: |
2b13e716 | 1058 | idx = 4; |
a783e61a | 1059 | break; |
71619dc2 | 1060 | case 8 ... 15: |
2b13e716 | 1061 | idx = 3; |
a783e61a | 1062 | break; |
71619dc2 | 1063 | case 4 ... 7: |
2b13e716 | 1064 | idx = 2; |
a783e61a | 1065 | break; |
71619dc2 | 1066 | case 2 ... 3: |
2b13e716 | 1067 | idx = 1; |
87933e32 | 1068 | fio_fallthrough; |
71619dc2 JA |
1069 | case 1: |
1070 | break; | |
1071 | } | |
1072 | ||
2b13e716 | 1073 | td->ts.io_u_map[idx] += nr; |
71619dc2 JA |
1074 | } |
1075 | ||
d6bb626e | 1076 | static void io_u_mark_lat_nsec(struct thread_data *td, unsigned long long nsec) |
04a0feae | 1077 | { |
2b13e716 | 1078 | int idx = 0; |
04a0feae | 1079 | |
d6bb626e VF |
1080 | assert(nsec < 1000); |
1081 | ||
1082 | switch (nsec) { | |
1083 | case 750 ... 999: | |
1084 | idx = 9; | |
1085 | break; | |
1086 | case 500 ... 749: | |
1087 | idx = 8; | |
1088 | break; | |
1089 | case 250 ... 499: | |
1090 | idx = 7; | |
1091 | break; | |
1092 | case 100 ... 249: | |
1093 | idx = 6; | |
1094 | break; | |
1095 | case 50 ... 99: | |
1096 | idx = 5; | |
1097 | break; | |
1098 | case 20 ... 49: | |
1099 | idx = 4; | |
1100 | break; | |
1101 | case 10 ... 19: | |
1102 | idx = 3; | |
1103 | break; | |
1104 | case 4 ... 9: | |
1105 | idx = 2; | |
1106 | break; | |
1107 | case 2 ... 3: | |
1108 | idx = 1; | |
87933e32 | 1109 | fio_fallthrough; |
d6bb626e VF |
1110 | case 0 ... 1: |
1111 | break; | |
1112 | } | |
1113 | ||
1114 | assert(idx < FIO_IO_U_LAT_N_NR); | |
1115 | td->ts.io_u_lat_n[idx]++; | |
1116 | } | |
1117 | ||
1118 | static void io_u_mark_lat_usec(struct thread_data *td, unsigned long long usec) | |
1119 | { | |
1120 | int idx = 0; | |
1121 | ||
1122 | assert(usec < 1000 && usec >= 1); | |
04a0feae JA |
1123 | |
1124 | switch (usec) { | |
1125 | case 750 ... 999: | |
2b13e716 | 1126 | idx = 9; |
04a0feae JA |
1127 | break; |
1128 | case 500 ... 749: | |
2b13e716 | 1129 | idx = 8; |
04a0feae JA |
1130 | break; |
1131 | case 250 ... 499: | |
2b13e716 | 1132 | idx = 7; |
04a0feae JA |
1133 | break; |
1134 | case 100 ... 249: | |
2b13e716 | 1135 | idx = 6; |
04a0feae JA |
1136 | break; |
1137 | case 50 ... 99: | |
2b13e716 | 1138 | idx = 5; |
04a0feae JA |
1139 | break; |
1140 | case 20 ... 49: | |
2b13e716 | 1141 | idx = 4; |
04a0feae JA |
1142 | break; |
1143 | case 10 ... 19: | |
2b13e716 | 1144 | idx = 3; |
04a0feae JA |
1145 | break; |
1146 | case 4 ... 9: | |
2b13e716 | 1147 | idx = 2; |
04a0feae JA |
1148 | break; |
1149 | case 2 ... 3: | |
2b13e716 | 1150 | idx = 1; |
87933e32 | 1151 | fio_fallthrough; |
04a0feae JA |
1152 | case 0 ... 1: |
1153 | break; | |
1154 | } | |
1155 | ||
2b13e716 JA |
1156 | assert(idx < FIO_IO_U_LAT_U_NR); |
1157 | td->ts.io_u_lat_u[idx]++; | |
04a0feae JA |
1158 | } |
1159 | ||
d6bb626e | 1160 | static void io_u_mark_lat_msec(struct thread_data *td, unsigned long long msec) |
ec118304 | 1161 | { |
2b13e716 | 1162 | int idx = 0; |
ec118304 | 1163 | |
d6bb626e VF |
1164 | assert(msec >= 1); |
1165 | ||
ec118304 JA |
1166 | switch (msec) { |
1167 | default: | |
2b13e716 | 1168 | idx = 11; |
04a0feae | 1169 | break; |
8abdce66 | 1170 | case 1000 ... 1999: |
2b13e716 | 1171 | idx = 10; |
04a0feae | 1172 | break; |
8abdce66 | 1173 | case 750 ... 999: |
2b13e716 | 1174 | idx = 9; |
04a0feae | 1175 | break; |
8abdce66 | 1176 | case 500 ... 749: |
2b13e716 | 1177 | idx = 8; |
04a0feae | 1178 | break; |
8abdce66 | 1179 | case 250 ... 499: |
2b13e716 | 1180 | idx = 7; |
04a0feae | 1181 | break; |
8abdce66 | 1182 | case 100 ... 249: |
2b13e716 | 1183 | idx = 6; |
04a0feae | 1184 | break; |
8abdce66 | 1185 | case 50 ... 99: |
2b13e716 | 1186 | idx = 5; |
04a0feae | 1187 | break; |
8abdce66 | 1188 | case 20 ... 49: |
2b13e716 | 1189 | idx = 4; |
04a0feae | 1190 | break; |
8abdce66 | 1191 | case 10 ... 19: |
2b13e716 | 1192 | idx = 3; |
04a0feae | 1193 | break; |
8abdce66 | 1194 | case 4 ... 9: |
2b13e716 | 1195 | idx = 2; |
04a0feae | 1196 | break; |
ec118304 | 1197 | case 2 ... 3: |
2b13e716 | 1198 | idx = 1; |
87933e32 | 1199 | fio_fallthrough; |
ec118304 JA |
1200 | case 0 ... 1: |
1201 | break; | |
1202 | } | |
1203 | ||
2b13e716 JA |
1204 | assert(idx < FIO_IO_U_LAT_M_NR); |
1205 | td->ts.io_u_lat_m[idx]++; | |
04a0feae JA |
1206 | } |
1207 | ||
d6bb626e | 1208 | static void io_u_mark_latency(struct thread_data *td, unsigned long long nsec) |
04a0feae | 1209 | { |
d6bb626e VF |
1210 | if (nsec < 1000) |
1211 | io_u_mark_lat_nsec(td, nsec); | |
1212 | else if (nsec < 1000000) | |
1213 | io_u_mark_lat_usec(td, nsec / 1000); | |
04a0feae | 1214 | else |
d6bb626e | 1215 | io_u_mark_lat_msec(td, nsec / 1000000); |
ec118304 JA |
1216 | } |
1217 | ||
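io_u_mark_latency() dispatches each completion latency to one of three histogram families (nanosecond, microsecond or millisecond buckets) based on magnitude. A minimal standalone sketch of the dispatch with a few sample latencies; the labels are illustrative, not the real bucket names:

```c
#include <stdio.h>

/* coarse classification mirroring io_u_mark_latency(): nsec, usec or msec family */
static const char *lat_class(unsigned long long nsec)
{
	if (nsec < 1000)
		return "nsec bucket";
	else if (nsec < 1000000)
		return "usec bucket";
	return "msec bucket";
}

int main(void)
{
	unsigned long long samples[] = { 800ULL, 45000ULL, 7200000ULL };
	for (int i = 0; i < 3; i++)
		printf("%llu ns -> %s\n", samples[i], lat_class(samples[i]));
	return 0;
}
```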
8c07860d JA |
1218 | static unsigned int __get_next_fileno_rand(struct thread_data *td) |
1219 | { | |
1220 | unsigned long fileno; | |
1221 | ||
1222 | if (td->o.file_service_type == FIO_FSERVICE_RANDOM) { | |
1223 | uint64_t frand_max = rand_max(&td->next_file_state); | |
1224 | unsigned long r; | |
1225 | ||
1226 | r = __rand(&td->next_file_state); | |
1227 | return (unsigned int) ((double) td->o.nr_files | |
1228 | * (r / (frand_max + 1.0))); | |
1229 | } | |
1230 | ||
1231 | if (td->o.file_service_type == FIO_FSERVICE_ZIPF) | |
1232 | fileno = zipf_next(&td->next_file_zipf); | |
1233 | else if (td->o.file_service_type == FIO_FSERVICE_PARETO) | |
1234 | fileno = pareto_next(&td->next_file_zipf); | |
1235 | else if (td->o.file_service_type == FIO_FSERVICE_GAUSS) | |
1236 | fileno = gauss_next(&td->next_file_gauss); | |
1237 | else { | |
1238 | log_err("fio: bad file service type: %d\n", td->o.file_service_type); | |
1239 | assert(0); | |
1240 | return 0; | |
1241 | } | |
1242 | ||
1243 | return fileno >> FIO_FSERVICE_SHIFT; | |
1244 | } | |
1245 | ||
0aabe160 JA |
1246 | /* |
1247 | * Get next file to service by choosing one at random | |
1248 | */ | |
2cc52930 JA |
1249 | static struct fio_file *get_next_file_rand(struct thread_data *td, |
1250 | enum fio_file_flags goodf, | |
d6aed795 | 1251 | enum fio_file_flags badf) |
0aabe160 | 1252 | { |
0aabe160 | 1253 | struct fio_file *f; |
1c178180 | 1254 | int fno; |
0aabe160 JA |
1255 | |
1256 | do { | |
87b10676 | 1257 | int opened = 0; |
4c07ad86 | 1258 | |
8c07860d | 1259 | fno = __get_next_fileno_rand(td); |
7c83c089 | 1260 | |
126d65c6 | 1261 | f = td->files[fno]; |
d6aed795 | 1262 | if (fio_file_done(f)) |
059e63c0 | 1263 | continue; |
1c178180 | 1264 | |
d6aed795 | 1265 | if (!fio_file_open(f)) { |
87b10676 JA |
1266 | int err; |
1267 | ||
002fe734 JA |
1268 | if (td->nr_open_files >= td->o.open_files) |
1269 | return ERR_PTR(-EBUSY); | |
1270 | ||
87b10676 JA |
1271 | err = td_io_open_file(td, f); |
1272 | if (err) | |
1273 | continue; | |
1274 | opened = 1; | |
1275 | } | |
1276 | ||
2ba1c290 JA |
1277 | if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) { |
1278 | dprint(FD_FILE, "get_next_file_rand: %p\n", f); | |
0aabe160 | 1279 | return f; |
2ba1c290 | 1280 | } |
87b10676 JA |
1281 | if (opened) |
1282 | td_io_close_file(td, f); | |
0aabe160 JA |
1283 | } while (1); |
1284 | } | |
1285 | ||
1286 | /* | |
1287 | * Get next file to service by doing round robin between all available ones | |
1288 | */ | |
1c178180 JA |
1289 | static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf, |
1290 | int badf) | |
3d7c391d JA |
1291 | { |
1292 | unsigned int old_next_file = td->next_file; | |
1293 | struct fio_file *f; | |
1294 | ||
1295 | do { | |
87b10676 JA |
1296 | int opened = 0; |
1297 | ||
126d65c6 | 1298 | f = td->files[td->next_file]; |
3d7c391d JA |
1299 | |
1300 | td->next_file++; | |
2dc1bbeb | 1301 | if (td->next_file >= td->o.nr_files) |
3d7c391d JA |
1302 | td->next_file = 0; |
1303 | ||
87b10676 | 1304 | dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags); |
d6aed795 | 1305 | if (fio_file_done(f)) { |
d5ed68ea | 1306 | f = NULL; |
059e63c0 | 1307 | continue; |
d5ed68ea | 1308 | } |
059e63c0 | 1309 | |
d6aed795 | 1310 | if (!fio_file_open(f)) { |
87b10676 JA |
1311 | int err; |
1312 | ||
002fe734 JA |
1313 | if (td->nr_open_files >= td->o.open_files) |
1314 | return ERR_PTR(-EBUSY); | |
1315 | ||
87b10676 | 1316 | err = td_io_open_file(td, f); |
b5696bfc JA |
1317 | if (err) { |
1318 | dprint(FD_FILE, "error %d on open of %s\n", | |
1319 | err, f->file_name); | |
87c27b45 | 1320 | f = NULL; |
87b10676 | 1321 | continue; |
b5696bfc | 1322 | } |
87b10676 JA |
1323 | opened = 1; |
1324 | } | |
1325 | ||
0b9d69ec JA |
1326 | dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf, |
1327 | f->flags); | |
1c178180 | 1328 | if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) |
3d7c391d JA |
1329 | break; |
1330 | ||
87b10676 JA |
1331 | if (opened) |
1332 | td_io_close_file(td, f); | |
1333 | ||
3d7c391d JA |
1334 | f = NULL; |
1335 | } while (td->next_file != old_next_file); | |
1336 | ||
2ba1c290 | 1337 | dprint(FD_FILE, "get_next_file_rr: %p\n", f); |
3d7c391d JA |
1338 | return f; |
1339 | } | |
1340 | ||
7eb36574 | 1341 | static struct fio_file *__get_next_file(struct thread_data *td) |
bdb4e2e9 | 1342 | { |
1907dbc6 JA |
1343 | struct fio_file *f; |
1344 | ||
2dc1bbeb | 1345 | assert(td->o.nr_files <= td->files_index); |
1c178180 | 1346 | |
b5696bfc | 1347 | if (td->nr_done_files >= td->o.nr_files) { |
5ec10eaa JA |
1348 | dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d," |
1349 | " nr_files=%d\n", td->nr_open_files, | |
1350 | td->nr_done_files, | |
1351 | td->o.nr_files); | |
bdb4e2e9 | 1352 | return NULL; |
2ba1c290 | 1353 | } |
bdb4e2e9 | 1354 | |
1907dbc6 | 1355 | f = td->file_service_file; |
d6aed795 | 1356 | if (f && fio_file_open(f) && !fio_file_closing(f)) { |
a086c257 JA |
1357 | if (td->o.file_service_type == FIO_FSERVICE_SEQ) |
1358 | goto out; | |
4ef1562a AK |
1359 | if (td->file_service_left) { |
1360 | td->file_service_left--; | |
1361 | goto out; | |
1362 | } | |
a086c257 | 1363 | } |
1907dbc6 | 1364 | |
a086c257 JA |
1365 | if (td->o.file_service_type == FIO_FSERVICE_RR || |
1366 | td->o.file_service_type == FIO_FSERVICE_SEQ) | |
d6aed795 | 1367 | f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing); |
bdb4e2e9 | 1368 | else |
d6aed795 | 1369 | f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing); |
1907dbc6 | 1370 | |
002fe734 JA |
1371 | if (IS_ERR(f)) |
1372 | return f; | |
1373 | ||
1907dbc6 JA |
1374 | td->file_service_file = f; |
1375 | td->file_service_left = td->file_service_nr - 1; | |
2ba1c290 | 1376 | out: |
0dac421f JA |
1377 | if (f) |
1378 | dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name); | |
1379 | else | |
1380 | dprint(FD_FILE, "get_next_file: NULL\n"); | |
1907dbc6 | 1381 | return f; |
bdb4e2e9 JA |
1382 | } |
1383 | ||
7eb36574 JA |
1384 | static struct fio_file *get_next_file(struct thread_data *td) |
1385 | { | |
7eb36574 JA |
1386 | return __get_next_file(td); |
1387 | } | |
1388 | ||
002fe734 | 1389 | static long set_io_u_file(struct thread_data *td, struct io_u *io_u) |
429f6675 JA |
1390 | { |
1391 | struct fio_file *f; | |
1392 | ||
1393 | do { | |
1394 | f = get_next_file(td); | |
002fe734 JA |
1395 | if (IS_ERR_OR_NULL(f)) |
1396 | return PTR_ERR(f); | |
429f6675 | 1397 | |
429f6675 JA |
1398 | io_u->file = f; |
1399 | get_file(f); | |
1400 | ||
1401 | if (!fill_io_u(td, io_u)) | |
1402 | break; | |
1403 | ||
b2da58c4 | 1404 | zbd_put_io_u(td, io_u); |
99952ca7 | 1405 | |
b5696bfc | 1406 | put_file_log(td, f); |
429f6675 | 1407 | td_io_close_file(td, f); |
b5696bfc | 1408 | io_u->file = NULL; |
8c07860d JA |
1409 | if (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM) |
1410 | fio_file_reset(td, f); | |
1411 | else { | |
1412 | fio_file_set_done(f); | |
1413 | td->nr_done_files++; | |
1414 | dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name, | |
0b9d69ec | 1415 | td->nr_done_files, td->o.nr_files); |
8c07860d | 1416 | } |
429f6675 JA |
1417 | } while (1); |
1418 | ||
1419 | return 0; | |
1420 | } | |
1421 | ||
f7cf63bf | 1422 | static void lat_fatal(struct thread_data *td, struct io_u *io_u, struct io_completion_data *icd, |
c3a32714 | 1423 | unsigned long long tnsec, unsigned long long max_nsec) |
3e260a46 | 1424 | { |
f7cf63bf VR |
1425 | if (!td->error) { |
1426 | log_err("fio: latency of %llu nsec exceeds specified max (%llu nsec): %s %s %llu %llu\n", | |
1427 | tnsec, max_nsec, | |
1428 | io_u->file->file_name, | |
1429 | io_ddir_name(io_u->ddir), | |
1430 | io_u->offset, io_u->buflen); | |
1431 | } | |
3e260a46 JA |
1432 | td_verror(td, ETIMEDOUT, "max latency exceeded"); |
1433 | icd->error = ETIMEDOUT; | |
1434 | } | |
1435 | ||
1436 | static void lat_new_cycle(struct thread_data *td) | |
1437 | { | |
1438 | fio_gettime(&td->latency_ts, NULL); | |
1439 | td->latency_ios = ddir_rw_sum(td->io_blocks); | |
1440 | td->latency_failed = 0; | |
1441 | } | |
1442 | ||
1443 | /* | |
1444 | * We had an IO outside the latency target. Reduce the queue depth. If we | |
1445 | * are at QD=1, then it's time to give up. | |
1446 | */ | |
e39c0676 | 1447 | static bool __lat_target_failed(struct thread_data *td) |
3e260a46 JA |
1448 | { |
1449 | if (td->latency_qd == 1) | |
e39c0676 | 1450 | return true; |
3e260a46 JA |
1451 | |
1452 | td->latency_qd_high = td->latency_qd; | |
6bb58215 JA |
1453 | |
1454 | if (td->latency_qd == td->latency_qd_low) | |
1455 | td->latency_qd_low--; | |
1456 | ||
3e260a46 | 1457 | td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2; |
e1bcd541 | 1458 | td->latency_stable_count = 0; |
3e260a46 JA |
1459 | |
1460 | dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high); | |
1461 | ||
1462 | /* | |
1463 | * When we ramp QD down, quiesce existing IO to prevent | |
1464 | * a storm of ramp downs due to pending higher depth. | |
1465 | */ | |
1466 | io_u_quiesce(td); | |
1467 | lat_new_cycle(td); | |
e39c0676 | 1468 | return false; |
3e260a46 JA |
1469 | } |
1470 | ||
e39c0676 | 1471 | static bool lat_target_failed(struct thread_data *td) |
3e260a46 JA |
1472 | { |
1473 | if (td->o.latency_percentile.u.f == 100.0) | |
1474 | return __lat_target_failed(td); | |
1475 | ||
1476 | td->latency_failed++; | |
e39c0676 | 1477 | return false; |
3e260a46 JA |
1478 | } |
1479 | ||
1480 | void lat_target_init(struct thread_data *td) | |
1481 | { | |
6bb58215 JA |
1482 | td->latency_end_run = 0; |
1483 | ||
3e260a46 JA |
1484 | if (td->o.latency_target) { |
1485 | dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target); | |
1486 | fio_gettime(&td->latency_ts, NULL); | |
1487 | td->latency_qd = 1; | |
1488 | td->latency_qd_high = td->o.iodepth; | |
1489 | td->latency_qd_low = 1; | |
1490 | td->latency_ios = ddir_rw_sum(td->io_blocks); | |
1491 | } else | |
1492 | td->latency_qd = td->o.iodepth; | |
1493 | } | |
1494 | ||
6bb58215 JA |
1495 | void lat_target_reset(struct thread_data *td) |
1496 | { | |
1497 | if (!td->latency_end_run) | |
1498 | lat_target_init(td); | |
1499 | } | |
1500 | ||
3e260a46 JA |
1501 | static void lat_target_success(struct thread_data *td) |
1502 | { | |
1503 | const unsigned int qd = td->latency_qd; | |
6bb58215 | 1504 | struct thread_options *o = &td->o; |
3e260a46 JA |
1505 | |
1506 | td->latency_qd_low = td->latency_qd; | |
1507 | ||
e1bcd541 SL |
1508 | if (td->latency_qd + 1 == td->latency_qd_high) { |
1509 | /* | |
1510 | * latency_qd will not increase on lat_target_success(), so it is | |
1511 | * called stable. If we stick with this queue depth, the | |
1512 | * final latency is likely lower than latency_target. Fix | |
1513 | * this by increasing latency_qd_high slowly. Use a naive | |
1514 | * heuristic here. If we get lat_target_success() 3 times | |
1515 | * in a row, increase latency_qd_high by 1. | |
1516 | */ | |
1517 | if (++td->latency_stable_count >= 3) { | |
1518 | td->latency_qd_high++; | |
1519 | td->latency_stable_count = 0; | |
1520 | } | |
1521 | } | |
1522 | ||
3e260a46 JA |
1523 | /* |
1524 | * If we haven't failed yet, we double up to a failing value instead | |
1525 | * of bisecting from highest possible queue depth. If we have set | |
1526 | * a limit other than td->o.iodepth, bisect between that. | |
1527 | */ | |
6bb58215 | 1528 | if (td->latency_qd_high != o->iodepth) |
3e260a46 JA |
1529 | td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2; |
1530 | else | |
1531 | td->latency_qd *= 2; | |
1532 | ||
6bb58215 JA |
1533 | if (td->latency_qd > o->iodepth) |
1534 | td->latency_qd = o->iodepth; | |
3e260a46 JA |
1535 | |
1536 | dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high); | |
6bb58215 | 1537 | |
3e260a46 | 1538 | /* |
6bb58215 JA |
1539 | * Same as last one, we are done. Let it run a latency cycle, so |
1540 | * we get only the results from the targeted depth. | |
3e260a46 | 1541 | */ |
e1bcd541 | 1542 | if (!o->latency_run && td->latency_qd == qd) { |
6bb58215 JA |
1543 | if (td->latency_end_run) { |
1544 | dprint(FD_RATE, "We are done\n"); | |
1545 | td->done = 1; | |
1546 | } else { | |
1547 | dprint(FD_RATE, "Quiesce and final run\n"); | |
1548 | io_u_quiesce(td); | |
1549 | td->latency_end_run = 1; | |
1550 | reset_all_stats(td); | |
1551 | reset_io_stats(td); | |
1552 | } | |
1553 | } | |
3e260a46 JA |
1554 | |
1555 | lat_new_cycle(td); | |
1556 | } | |
1557 | ||
1558 | /* | |
1559 | * Check if we can bump the queue depth | |
1560 | */ | |
1561 | void lat_target_check(struct thread_data *td) | |
1562 | { | |
1563 | uint64_t usec_window; | |
1564 | uint64_t ios; | |
1565 | double success_ios; | |
1566 | ||
1567 | usec_window = utime_since_now(&td->latency_ts); | |
1568 | if (usec_window < td->o.latency_window) | |
1569 | return; | |
1570 | ||
1571 | ios = ddir_rw_sum(td->io_blocks) - td->latency_ios; | |
1572 | success_ios = (double) (ios - td->latency_failed) / (double) ios; | |
1573 | success_ios *= 100.0; | |
1574 | ||
1575 | dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f); | |
1576 | ||
1577 | if (success_ios >= td->o.latency_percentile.u.f) | |
1578 | lat_target_success(td); | |
1579 | else | |
1580 | __lat_target_failed(td); | |
1581 | } | |
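/*
 * Minimal sketch with assumed numbers (not fio code): the success-rate
 * check in lat_target_check() is plain percentage arithmetic over the
 * IOs completed in the current latency window.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ios_in_window = 1000;	/* completions since the window began */
	uint64_t failed = 12;		/* completions above latency_target */
	double success = 100.0 * (double)(ios_in_window - failed) /
			 (double)ios_in_window;

	/* 98.80%: with latency_percentile=99.0 this window counts as a failure */
	printf("success rate: %.2f%%\n", success);
	return 0;
}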
1582 | ||
1583 | /* | |
1584 | * If latency target is enabled, we might be ramping up or down and not | |
1585 | * using the full queue depth available. | |
1586 | */ | |
e39c0676 | 1587 | bool queue_full(const struct thread_data *td) |
3e260a46 JA |
1588 | { |
1589 | const int qempty = io_u_qempty(&td->io_u_freelist); | |
1590 | ||
1591 | if (qempty) | |
e39c0676 | 1592 | return true; |
3e260a46 | 1593 | if (!td->o.latency_target) |
e39c0676 | 1594 | return false; |
3e260a46 JA |
1595 | |
1596 | return td->cur_depth >= td->latency_qd; | |
1597 | } | |
429f6675 | 1598 | |
10ba535a JA |
1599 | struct io_u *__get_io_u(struct thread_data *td) |
1600 | { | |
26b3a188 | 1601 | const bool needs_lock = td_async_processing(td); |
0cae66f6 | 1602 | struct io_u *io_u = NULL; |
93b45bb2 | 1603 | int ret; |
10ba535a | 1604 | |
ca09be4b JA |
1605 | if (td->stop_io) |
1606 | return NULL; | |
1607 | ||
26b3a188 JA |
1608 | if (needs_lock) |
1609 | __td_io_u_lock(td); | |
e8462bd8 JA |
1610 | |
1611 | again: | |
bba6b14f | 1612 | if (!io_u_rempty(&td->io_u_requeues)) { |
2ae0b204 | 1613 | io_u = io_u_rpop(&td->io_u_requeues); |
bba6b14f JA |
1614 | io_u->resid = 0; |
1615 | } else if (!queue_full(td)) { | |
2ae0b204 | 1616 | io_u = io_u_qpop(&td->io_u_freelist); |
10ba535a | 1617 | |
225ba9e3 | 1618 | io_u->file = NULL; |
6040dabc | 1619 | io_u->buflen = 0; |
10ba535a | 1620 | io_u->resid = 0; |
d7762cf8 | 1621 | io_u->end_io = NULL; |
755200a3 JA |
1622 | } |
1623 | ||
1624 | if (io_u) { | |
0c6e7517 | 1625 | assert(io_u->flags & IO_U_F_FREE); |
1651e431 | 1626 | io_u_clear(td, io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT | |
f8b0bd10 | 1627 | IO_U_F_TRIMMED | IO_U_F_BARRIER | |
692dec0c | 1628 | IO_U_F_VER_LIST); |
0c6e7517 | 1629 | |
755200a3 | 1630 | io_u->error = 0; |
bcd5abfa | 1631 | io_u->acct_ddir = -1; |
10ba535a | 1632 | td->cur_depth++; |
a9da8ab2 | 1633 | assert(!(td->flags & TD_F_CHILD)); |
1651e431 | 1634 | io_u_set(td, io_u, IO_U_F_IN_CUR_DEPTH); |
f9401285 | 1635 | io_u->ipo = NULL; |
a9da8ab2 | 1636 | } else if (td_async_processing(td)) { |
1dec3e07 JA |
1637 | /* |
1638 | * We ran out, wait for async verify threads to finish and | |
1639 | * return one | |
1640 | */ | |
a9da8ab2 | 1641 | assert(!(td->flags & TD_F_CHILD)); |
93b45bb2 BVA |
1642 | ret = pthread_cond_wait(&td->free_cond, &td->io_u_lock); |
1643 | assert(ret == 0); | |
ee5e08ad JA |
1644 | if (!td->error) |
1645 | goto again; | |
10ba535a JA |
1646 | } |
1647 | ||
26b3a188 JA |
1648 | if (needs_lock) |
1649 | __td_io_u_unlock(td); | |
1650 | ||
10ba535a JA |
1651 | return io_u; |
1652 | } | |
1653 | ||
e39c0676 | 1654 | static bool check_get_trim(struct thread_data *td, struct io_u *io_u) |
10ba535a | 1655 | { |
d72be545 | 1656 | if (!(td->flags & TD_F_TRIM_BACKLOG)) |
e39c0676 | 1657 | return false; |
c9a73054 JA |
1658 | if (!td->trim_entries) |
1659 | return false; | |
d72be545 | 1660 | |
c9a73054 JA |
1661 | if (td->trim_batch) { |
1662 | td->trim_batch--; | |
1663 | if (get_next_trim(td, io_u)) | |
1664 | return true; | |
1665 | } else if (!(td->io_hist_len % td->o.trim_backlog) && | |
1666 | td->last_ddir != DDIR_READ) { | |
1667 | td->trim_batch = td->o.trim_batch; | |
1668 | if (!td->trim_batch) | |
1669 | td->trim_batch = td->o.trim_backlog; | |
1670 | if (get_next_trim(td, io_u)) | |
e39c0676 | 1671 | return true; |
2ba1c290 | 1672 | } |
10ba535a | 1673 | |
e39c0676 | 1674 | return false; |
0d29de83 JA |
1675 | } |
1676 | ||
e39c0676 | 1677 | static bool check_get_verify(struct thread_data *td, struct io_u *io_u) |
0d29de83 | 1678 | { |
d72be545 | 1679 | if (!(td->flags & TD_F_VER_BACKLOG)) |
e39c0676 | 1680 | return false; |
d72be545 JA |
1681 | |
1682 | if (td->io_hist_len) { | |
9e144189 JA |
1683 | int get_verify = 0; |
1684 | ||
d1ece0c7 | 1685 | if (td->verify_batch) |
9e144189 | 1686 | get_verify = 1; |
d1ece0c7 | 1687 | else if (!(td->io_hist_len % td->o.verify_backlog) && |
9e144189 JA |
1688 | td->last_ddir != DDIR_READ) { |
1689 | td->verify_batch = td->o.verify_batch; | |
f8a75c99 JA |
1690 | if (!td->verify_batch) |
1691 | td->verify_batch = td->o.verify_backlog; | |
9e144189 JA |
1692 | get_verify = 1; |
1693 | } | |
1694 | ||
d1ece0c7 JA |
1695 | if (get_verify && !get_next_verify(td, io_u)) { |
1696 | td->verify_batch--; | |
e39c0676 | 1697 | return true; |
d1ece0c7 | 1698 | } |
9e144189 JA |
1699 | } |
1700 | ||
e39c0676 | 1701 | return false; |
0d29de83 JA |
1702 | } |
1703 | ||
de789769 JA |
1704 | /* |
1705 | * Fill the offset and start time into the buffer content, so the data is | |
23f394d5 JA |
1706 | * not trivially compressible or easily deduplicated. Do this for every | |
1707 | * 512b block in the range, since that should be the smallest block size | |
1708 | * we can expect from a device. | |
de789769 JA |
1709 | */ |
1710 | static void small_content_scramble(struct io_u *io_u) | |
1711 | { | |
5fff9543 | 1712 | unsigned long long i, nr_blocks = io_u->buflen >> 9; |
23f394d5 | 1713 | unsigned int offset; |
319b073f JA |
1714 | uint64_t boffset, *iptr; |
1715 | char *p; | |
de789769 | 1716 | |
23f394d5 JA |
1717 | if (!nr_blocks) |
1718 | return; | |
1719 | ||
1720 | p = io_u->xfer_buf; | |
fba76ee8 | 1721 | boffset = io_u->offset; |
319b073f JA |
1722 | |
1723 | if (io_u->buf_filled_len) | |
1724 | io_u->buf_filled_len = 0; | |
1725 | ||
1726 | /* | |
1727 | * Generate a random index in the range 0..7. We do chunks of 512b; if | |
1728 | * we assume a cacheline is 64 bytes, then we have 8 of those. | |
1729 | * Scramble content within the blocks in the same cacheline to | |
1730 | * speed things up. | |
1731 | */ | |
1732 | offset = (io_u->start_time.tv_nsec ^ boffset) & 7; | |
fad82f76 | 1733 | |
23f394d5 JA |
1734 | for (i = 0; i < nr_blocks; i++) { |
1735 | /* | |
319b073f JA |
1736 | * Fill offset into start of cacheline, time into end |
1737 | * of cacheline | |
23f394d5 | 1738 | */ |
319b073f JA |
1739 | iptr = (void *) p + (offset << 6); |
1740 | *iptr = boffset; | |
1741 | ||
1742 | iptr = (void *) p + 64 - 2 * sizeof(uint64_t); | |
1743 | iptr[0] = io_u->start_time.tv_sec; | |
1744 | iptr[1] = io_u->start_time.tv_nsec; | |
23f394d5 | 1745 | |
23f394d5 | 1746 | p += 512; |
fad82f76 | 1747 | boffset += 512; |
23f394d5 | 1748 | } |
de789769 JA |
1749 | } |
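/*
 * Standalone sketch (assumed values, not fio code) of the per-512b-block
 * layout produced by small_content_scramble(): the block offset is written
 * at the start of one randomly chosen 64-byte cacheline, and the start time
 * is written at the end of the block's first cacheline.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void scramble_block(char *p, uint64_t boffset, uint64_t sec,
			   uint64_t nsec, unsigned int slot /* 0..7 */)
{
	uint64_t *iptr;

	iptr = (uint64_t *)(p + (slot << 6));	/* start of the chosen cacheline */
	*iptr = boffset;

	iptr = (uint64_t *)(p + 64 - 2 * sizeof(uint64_t));
	iptr[0] = sec;				/* end of the first cacheline */
	iptr[1] = nsec;
}

int main(void)
{
	_Alignas(64) char block[512];

	memset(block, 0xAA, sizeof(block));
	scramble_block(block, 4096, 1700000000ULL, 123456789ULL, 3);
	printf("offset stored at byte %d of the block\n", 3 << 6);
	return 0;
}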
1750 | ||
0d29de83 JA |
1751 | /* |
1752 | * Return an io_u to be processed. Gets a buflen and offset, sets direction, | |
5c5c33c1 | 1753 | * etc. The returned io_u is fully ready to be prepped, populated and submitted. |
0d29de83 JA |
1754 | */ |
1755 | struct io_u *get_io_u(struct thread_data *td) | |
1756 | { | |
1757 | struct fio_file *f; | |
1758 | struct io_u *io_u; | |
de789769 | 1759 | int do_scramble = 0; |
002fe734 | 1760 | long ret = 0; |
0d29de83 JA |
1761 | |
1762 | io_u = __get_io_u(td); | |
1763 | if (!io_u) { | |
1764 | dprint(FD_IO, "__get_io_u failed\n"); | |
1765 | return NULL; | |
1766 | } | |
1767 | ||
1768 | if (check_get_verify(td, io_u)) | |
1769 | goto out; | |
1770 | if (check_get_trim(td, io_u)) | |
1771 | goto out; | |
1772 | ||
755200a3 JA |
1773 | /* |
1774 | * from a requeue, io_u already setup | |
1775 | */ | |
1776 | if (io_u->file) | |
77f392bf | 1777 | goto out; |
755200a3 | 1778 | |
429f6675 JA |
1779 | /* |
1780 | * If using an iolog, grab next piece if any available. | |
1781 | */ | |
d72be545 | 1782 | if (td->flags & TD_F_READ_IOLOG) { |
429f6675 JA |
1783 | if (read_iolog_get(td, io_u)) |
1784 | goto err_put; | |
2ba1c290 | 1785 | } else if (set_io_u_file(td, io_u)) { |
002fe734 | 1786 | ret = -EBUSY; |
2ba1c290 | 1787 | dprint(FD_IO, "io_u %p, setting file failed\n", io_u); |
429f6675 | 1788 | goto err_put; |
2ba1c290 | 1789 | } |
5ec10eaa | 1790 | |
429f6675 | 1791 | f = io_u->file; |
002fe734 JA |
1792 | if (!f) { |
1793 | dprint(FD_IO, "io_u %p, setting file failed\n", io_u); | |
1794 | goto err_put; | |
1795 | } | |
1796 | ||
d6aed795 | 1797 | assert(fio_file_open(f)); |
97af62ce | 1798 | |
ff58fced | 1799 | if (ddir_rw(io_u->ddir)) { |
9b87f09b | 1800 | if (!io_u->buflen && !td_ioengine_flagged(td, FIO_NOIO)) { |
2ba1c290 | 1801 | dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u); |
429f6675 | 1802 | goto err_put; |
2ba1c290 | 1803 | } |
10ba535a | 1804 | |
f1dfb668 JA |
1805 | f->last_start[io_u->ddir] = io_u->offset; |
1806 | f->last_pos[io_u->ddir] = io_u->offset + io_u->buflen; | |
10ba535a | 1807 | |
fd68418e | 1808 | if (io_u->ddir == DDIR_WRITE) { |
d72be545 | 1809 | if (td->flags & TD_F_REFILL_BUFFERS) { |
9c42684e | 1810 | io_u_fill_buffer(td, io_u, |
1066358a | 1811 | td->o.min_bs[DDIR_WRITE], |
9e129577 | 1812 | io_u->buflen); |
ff441ae8 | 1813 | } else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) && |
5c5c33c1 BVA |
1814 | !(td->flags & TD_F_COMPRESS) && |
1815 | !(td->flags & TD_F_DO_VERIFY)) | |
fd68418e JA |
1816 | do_scramble = 1; |
1817 | } else if (io_u->ddir == DDIR_READ) { | |
cbe8d756 RR |
1818 | /* |
1819 | * Reset the buf_filled parameters so next time if the | |
1820 | * buffer is used for writes it is refilled. | |
1821 | */ | |
cbe8d756 RR |
1822 | io_u->buf_filled_len = 0; |
1823 | } | |
87dc1ab1 | 1824 | } |
10ba535a | 1825 | |
165faf16 JA |
1826 | /* |
1827 | * Set io data pointers. | |
1828 | */ | |
cec6b55d JA |
1829 | io_u->xfer_buf = io_u->buf; |
1830 | io_u->xfer_buflen = io_u->buflen; | |
5973cafb | 1831 | |
03ec570f DLM |
1832 | /* |
1833 | * Remember the issuing context priority. The IO engine may change this. | |
1834 | */ | |
1835 | io_u->ioprio = td->ioprio; | |
f0547200 | 1836 | io_u->clat_prio_index = 0; |
6ac7a331 | 1837 | out: |
0d29de83 | 1838 | assert(io_u->file); |
429f6675 | 1839 | if (!td_io_prep(td, io_u)) { |
3ecc8c67 | 1840 | if (!td->o.disable_lat) |
993bf48b | 1841 | fio_gettime(&io_u->start_time, NULL); |
03553853 | 1842 | |
de789769 JA |
1843 | if (do_scramble) |
1844 | small_content_scramble(io_u); | |
03553853 | 1845 | |
429f6675 | 1846 | return io_u; |
36167d82 | 1847 | } |
429f6675 | 1848 | err_put: |
2ba1c290 | 1849 | dprint(FD_IO, "get_io_u failed\n"); |
429f6675 | 1850 | put_io_u(td, io_u); |
002fe734 | 1851 | return ERR_PTR(ret); |
10ba535a JA |
1852 | } |
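/*
 * Hypothetical caller sketch: get_io_u() encodes errors in the returned
 * pointer (ERR_PTR(ret) above). The ERR_PTR/IS_ERR/PTR_ERR helpers are
 * redefined locally in the usual err.h style so this snippet stands alone;
 * the definitions fio actually uses live in its err.h.
 */
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(ptr))

int main(void)
{
	void *io_u = ERR_PTR(-16);	/* e.g. -EBUSY from a failed file setup */

	if (IS_ERR(io_u))
		printf("get_io_u failed: %ld\n", PTR_ERR(io_u));
	return 0;
}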
1853 | ||
a9da8ab2 | 1854 | static void __io_u_log_error(struct thread_data *td, struct io_u *io_u) |
5451792e | 1855 | { |
8b28bd41 | 1856 | enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error); |
825f818e | 1857 | |
8b28bd41 DM |
1858 | if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump) |
1859 | return; | |
5451792e | 1860 | |
5fff9543 | 1861 | log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%llu\n", |
709c8313 RE |
1862 | io_u->file ? " on file " : "", |
1863 | io_u->file ? io_u->file->file_name : "", | |
1864 | strerror(io_u->error), | |
1865 | io_ddir_name(io_u->ddir), | |
1866 | io_u->offset, io_u->xfer_buflen); | |
5451792e | 1867 | |
5ad7be56 KD |
1868 | if (td->io_ops->errdetails) { |
1869 | char *err = td->io_ops->errdetails(io_u); | |
1870 | ||
1871 | log_err("fio: %s\n", err); | |
1872 | free(err); | |
1873 | } | |
1874 | ||
5451792e JA |
1875 | if (!td->error) |
1876 | td_verror(td, io_u->error, "io_u error"); | |
1877 | } | |
1878 | ||
a9da8ab2 JA |
1879 | void io_u_log_error(struct thread_data *td, struct io_u *io_u) |
1880 | { | |
1881 | __io_u_log_error(td, io_u); | |
1882 | if (td->parent) | |
094e66cb | 1883 | __io_u_log_error(td->parent, io_u); |
a9da8ab2 JA |
1884 | } |
1885 | ||
e39c0676 | 1886 | static inline bool gtod_reduce(struct thread_data *td) |
aba6c951 | 1887 | { |
3ecc8c67 JA |
1888 | return (td->o.disable_clat && td->o.disable_slat && td->o.disable_bw) |
1889 | || td->o.gtod_reduce; | |
aba6c951 JA |
1890 | } |
1891 | ||
d7e92306 JA |
1892 | static void trim_block_info(struct thread_data *td, struct io_u *io_u) |
1893 | { | |
1894 | uint32_t *info = io_u_block_info(td, io_u); | |
1895 | ||
1896 | if (BLOCK_INFO_STATE(*info) >= BLOCK_STATE_TRIM_FAILURE) | |
1897 | return; | |
1898 | ||
1899 | *info = BLOCK_INFO(BLOCK_STATE_TRIMMED, BLOCK_INFO_TRIMS(*info) + 1); | |
1900 | } | |
1901 | ||
c8eeb9df JA |
1902 | static void account_io_completion(struct thread_data *td, struct io_u *io_u, |
1903 | struct io_completion_data *icd, | |
1904 | const enum fio_ddir idx, unsigned int bytes) | |
1905 | { | |
a9da8ab2 | 1906 | const int no_reduce = !gtod_reduce(td); |
d6bb626e | 1907 | unsigned long long llnsec = 0; |
c8eeb9df | 1908 | |
75dc383e JA |
1909 | if (td->parent) |
1910 | td = td->parent; | |
1911 | ||
132b1ee4 | 1912 | if (!td->o.stats || td_ioengine_flagged(td, FIO_NOSTATS)) |
8243be59 JA |
1913 | return; |
1914 | ||
a9da8ab2 | 1915 | if (no_reduce) |
d6bb626e | 1916 | llnsec = ntime_since(&io_u->issue_time, &icd->time); |
c8eeb9df JA |
1917 | |
1918 | if (!td->o.disable_lat) { | |
c3a32714 | 1919 | unsigned long long tnsec; |
c8eeb9df | 1920 | |
d6bb626e | 1921 | tnsec = ntime_since(&io_u->start_time, &icd->time); |
03ec570f | 1922 | add_lat_sample(td, idx, tnsec, bytes, io_u->offset, |
692dec0c | 1923 | io_u->ioprio, io_u->clat_prio_index); |
15501535 | 1924 | |
d4afedfd JA |
1925 | if (td->flags & TD_F_PROFILE_OPS) { |
1926 | struct prof_io_ops *ops = &td->prof_io_ops; | |
1927 | ||
1928 | if (ops->io_u_lat) | |
c3a32714 | 1929 | icd->error = ops->io_u_lat(td, tnsec); |
d4afedfd JA |
1930 | } |
1931 | ||
f7cf63bf VR |
1932 | if (ddir_rw(idx)) { |
1933 | if (td->o.max_latency[idx] && tnsec > td->o.max_latency[idx]) | |
1934 | lat_fatal(td, io_u, icd, tnsec, td->o.max_latency[idx]); | |
1935 | if (td->o.latency_target && tnsec > td->o.latency_target) { | |
1936 | if (lat_target_failed(td)) | |
1937 | lat_fatal(td, io_u, icd, tnsec, td->o.latency_target); | |
1938 | } | |
15501535 | 1939 | } |
c8eeb9df JA |
1940 | } |
1941 | ||
a47591e4 JA |
1942 | if (ddir_rw(idx)) { |
1943 | if (!td->o.disable_clat) { | |
03ec570f | 1944 | add_clat_sample(td, idx, llnsec, bytes, io_u->offset, |
692dec0c | 1945 | io_u->ioprio, io_u->clat_prio_index); |
d6bb626e | 1946 | io_u_mark_latency(td, llnsec); |
a47591e4 | 1947 | } |
c8eeb9df | 1948 | |
a47591e4 | 1949 | if (!td->o.disable_bw && per_unit_log(td->bw_log)) |
d6bb626e | 1950 | add_bw_sample(td, io_u, bytes, llnsec); |
c8eeb9df | 1951 | |
a47591e4 JA |
1952 | if (no_reduce && per_unit_log(td->iops_log)) |
1953 | add_iops_sample(td, io_u, bytes); | |
b2b3eefe JA |
1954 | } else if (ddir_sync(idx) && !td->o.disable_clat) |
1955 | add_sync_clat_sample(&td->ts, llnsec); | |
66347cfa | 1956 | |
d7e92306 JA |
1957 | if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM) |
1958 | trim_block_info(td, io_u); | |
c8eeb9df JA |
1959 | } |
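/*
 * Sketch (not fio code): the completion latency fed to add_clat_sample()
 * above is just the nanosecond delta between issue time and completion
 * time. CLOCK_MONOTONIC stands in for fio's internal clock here.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t ntime_delta(const struct timespec *start,
			    const struct timespec *end)
{
	return (uint64_t)(end->tv_sec - start->tv_sec) * 1000000000ULL +
	       end->tv_nsec - start->tv_nsec;
}

int main(void)
{
	struct timespec issue, comp;

	clock_gettime(CLOCK_MONOTONIC, &issue);
	/* ... the I/O would be submitted and reaped here ... */
	clock_gettime(CLOCK_MONOTONIC, &comp);

	printf("clat: %llu nsec\n", (unsigned long long) ntime_delta(&issue, &comp));
	return 0;
}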
1960 | ||
94a6e1bb JA |
1961 | static void file_log_write_comp(const struct thread_data *td, struct fio_file *f, |
1962 | uint64_t offset, unsigned int bytes) | |
1963 | { | |
1964 | int idx; | |
1965 | ||
639ad1ea JA |
1966 | if (!f) |
1967 | return; | |
1968 | ||
94a6e1bb JA |
1969 | if (f->first_write == -1ULL || offset < f->first_write) |
1970 | f->first_write = offset; | |
1971 | if (f->last_write == -1ULL || ((offset + bytes) > f->last_write)) | |
1972 | f->last_write = offset + bytes; | |
1973 | ||
1974 | if (!f->last_write_comp) | |
1975 | return; | |
1976 | ||
1977 | idx = f->last_write_idx++; | |
1978 | f->last_write_comp[idx] = offset; | |
1979 | if (f->last_write_idx == td->o.iodepth) | |
1980 | f->last_write_idx = 0; | |
1981 | } | |
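/*
 * Simplified sketch (assumed sizes, not fio code): last_write_comp above is
 * a fixed-size ring of completed write offsets. Once iodepth entries have
 * been recorded, the index wraps and the oldest offset is overwritten.
 */
#include <stdint.h>
#include <stdio.h>

#define DEPTH 4

int main(void)
{
	uint64_t ring[DEPTH] = { 0 };
	unsigned int idx = 0;

	/* Record six 4k write completions into a depth-4 ring */
	for (uint64_t off = 0; off < 6 * 4096; off += 4096) {
		ring[idx++] = off;
		if (idx == DEPTH)
			idx = 0;
	}

	/* The ring now holds the four most recent completion offsets */
	for (int i = 0; i < DEPTH; i++)
		printf("ring[%d] = %llu\n", i, (unsigned long long) ring[i]);
	return 0;
}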
1982 | ||
b2b3eefe JA |
1983 | static bool should_account(struct thread_data *td) |
1984 | { | |
1985 | return ramp_time_over(td) && (td->runstate == TD_RUNNING || | |
1986 | td->runstate == TD_VERIFYING); | |
1987 | } | |
1988 | ||
f8b0bd10 | 1989 | static void io_completed(struct thread_data *td, struct io_u **io_u_ptr, |
97601024 | 1990 | struct io_completion_data *icd) |
10ba535a | 1991 | { |
f8b0bd10 JA |
1992 | struct io_u *io_u = *io_u_ptr; |
1993 | enum fio_ddir ddir = io_u->ddir; | |
1994 | struct fio_file *f = io_u->file; | |
10ba535a | 1995 | |
e5f9a813 | 1996 | dprint_io_u(io_u, "complete"); |
2ba1c290 | 1997 | |
0c6e7517 | 1998 | assert(io_u->flags & IO_U_F_FLIGHT); |
1651e431 | 1999 | io_u_clear(td, io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK); |
f9401285 JA |
2000 | |
2001 | /* | |
2002 | * Mark IO ok to verify | |
2003 | */ | |
2004 | if (io_u->ipo) { | |
890b6656 JA |
2005 | /* |
2006 | * Remove errored entry from the verification list | |
2007 | */ | |
2008 | if (io_u->error) | |
2009 | unlog_io_piece(td, io_u); | |
2010 | else { | |
0337173e BVA |
2011 | atomic_store_release(&io_u->ipo->flags, |
2012 | io_u->ipo->flags & ~IP_F_IN_FLIGHT); | |
890b6656 | 2013 | } |
f9401285 JA |
2014 | } |
2015 | ||
f8b0bd10 | 2016 | if (ddir_sync(ddir)) { |
2ea93f98 | 2017 | td->last_was_sync = true; |
44f29692 JA |
2018 | if (f) { |
2019 | f->first_write = -1ULL; | |
2020 | f->last_write = -1ULL; | |
2021 | } | |
b2b3eefe JA |
2022 | if (should_account(td)) |
2023 | account_io_completion(td, io_u, icd, ddir, io_u->buflen); | |
87dc1ab1 JA |
2024 | return; |
2025 | } | |
2026 | ||
2ea93f98 | 2027 | td->last_was_sync = false; |
f8b0bd10 | 2028 | td->last_ddir = ddir; |
87dc1ab1 | 2029 | |
f8b0bd10 | 2030 | if (!io_u->error && ddir_rw(ddir)) { |
84abeef7 | 2031 | unsigned long long bytes = io_u->xfer_buflen - io_u->resid; |
b29ee5b3 | 2032 | int ret; |
10ba535a | 2033 | |
7d33649c JA |
2034 | /* |
2035 | * Make sure we notice short IOs from here, and requeue them |
2036 | * appropriately! | |
2037 | */ | |
c4cb947e | 2038 | if (bytes && io_u->resid) { |
7d33649c JA |
2039 | io_u->xfer_buflen = io_u->resid; |
2040 | io_u->xfer_buf += bytes; | |
2041 | io_u->offset += bytes; | |
2042 | td->ts.short_io_u[io_u->ddir]++; | |
2043 | if (io_u->offset < io_u->file->real_file_size) { | |
2044 | requeue_io_u(td, io_u_ptr); | |
2045 | return; | |
2046 | } | |
2047 | } | |
2048 | ||
f8b0bd10 | 2049 | td->io_blocks[ddir]++; |
f8b0bd10 | 2050 | td->io_bytes[ddir] += bytes; |
ae2fafc8 | 2051 | |
e1c325d2 JA |
2052 | if (!(io_u->flags & IO_U_F_VER_LIST)) { |
2053 | td->this_io_blocks[ddir]++; | |
f8b0bd10 | 2054 | td->this_io_bytes[ddir] += bytes; |
e1c325d2 | 2055 | } |
f8b0bd10 | 2056 | |
639ad1ea | 2057 | if (ddir == DDIR_WRITE) |
94a6e1bb | 2058 | file_log_write_comp(td, f, io_u->offset, bytes); |
44f29692 | 2059 | |
b2b3eefe | 2060 | if (should_account(td)) |
f8b0bd10 | 2061 | account_io_completion(td, io_u, icd, ddir, bytes); |
40e1a6f0 | 2062 | |
f8b0bd10 | 2063 | icd->bytes_done[ddir] += bytes; |
3af6ef39 | 2064 | |
d7762cf8 | 2065 | if (io_u->end_io) { |
f8b0bd10 JA |
2066 | ret = io_u->end_io(td, io_u_ptr); |
2067 | io_u = *io_u_ptr; | |
3af6ef39 JA |
2068 | if (ret && !icd->error) |
2069 | icd->error = ret; | |
2070 | } | |
ff58fced | 2071 | } else if (io_u->error) { |
10ba535a | 2072 | icd->error = io_u->error; |
5451792e JA |
2073 | io_u_log_error(td, io_u); |
2074 | } | |
8b28bd41 | 2075 | if (icd->error) { |
f8b0bd10 JA |
2076 | enum error_type_bit eb = td_error_type(ddir, icd->error); |
2077 | ||
8b28bd41 DM |
2078 | if (!td_non_fatal_error(td, eb, icd->error)) |
2079 | return; | |
f8b0bd10 | 2080 | |
f2bba182 RR |
2081 | /* |
2082 | * If there is a non_fatal error, then add to the error count | |
2083 | * and clear all the errors. | |
2084 | */ | |
2085 | update_error_count(td, icd->error); | |
2086 | td_clear_error(td); | |
2087 | icd->error = 0; | |
f8b0bd10 JA |
2088 | if (io_u) |
2089 | io_u->error = 0; | |
f2bba182 | 2090 | } |
10ba535a JA |
2091 | } |
2092 | ||
9520ebb9 JA |
2093 | static void init_icd(struct thread_data *td, struct io_completion_data *icd, |
2094 | int nr) | |
10ba535a | 2095 | { |
6eaf09d6 | 2096 | int ddir; |
aba6c951 JA |
2097 | |
2098 | if (!gtod_reduce(td)) | |
9520ebb9 | 2099 | fio_gettime(&icd->time, NULL); |
02bcaa8c | 2100 | |
3af6ef39 JA |
2101 | icd->nr = nr; |
2102 | ||
10ba535a | 2103 | icd->error = 0; |
c1f50f76 | 2104 | for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) |
6eaf09d6 | 2105 | icd->bytes_done[ddir] = 0; |
36167d82 JA |
2106 | } |
2107 | ||
97601024 JA |
2108 | static void ios_completed(struct thread_data *td, |
2109 | struct io_completion_data *icd) | |
36167d82 JA |
2110 | { |
2111 | struct io_u *io_u; | |
2112 | int i; | |
2113 | ||
10ba535a JA |
2114 | for (i = 0; i < icd->nr; i++) { |
2115 | io_u = td->io_ops->event(td, i); | |
2116 | ||
f8b0bd10 | 2117 | io_completed(td, &io_u, icd); |
e8462bd8 | 2118 | |
f8b0bd10 | 2119 | if (io_u) |
e8462bd8 | 2120 | put_io_u(td, io_u); |
10ba535a JA |
2121 | } |
2122 | } | |
97601024 | 2123 | |
191d6634 SK |
2124 | static void io_u_update_bytes_done(struct thread_data *td, |
2125 | struct io_completion_data *icd) | |
2126 | { | |
2127 | int ddir; | |
2128 | ||
2129 | if (td->runstate == TD_VERIFYING) { | |
2130 | td->bytes_verified += icd->bytes_done[DDIR_READ]; | |
2131 | return; | |
2132 | } | |
2133 | ||
2134 | for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) | |
2135 | td->bytes_done[ddir] += icd->bytes_done[ddir]; | |
2136 | } | |
2137 | ||
e7e6cfb4 JA |
2138 | /* |
2139 | * Complete a single io_u for the sync engines. | |
2140 | */ | |
55312f9f | 2141 | int io_u_sync_complete(struct thread_data *td, struct io_u *io_u) |
97601024 JA |
2142 | { |
2143 | struct io_completion_data icd; | |
2144 | ||
9520ebb9 | 2145 | init_icd(td, &icd, 1); |
f8b0bd10 | 2146 | io_completed(td, &io_u, &icd); |
e8462bd8 | 2147 | |
f8b0bd10 | 2148 | if (io_u) |
e8462bd8 | 2149 | put_io_u(td, io_u); |
97601024 | 2150 | |
581e7141 JA |
2151 | if (icd.error) { |
2152 | td_verror(td, icd.error, "io_u_sync_complete"); | |
2153 | return -1; | |
2154 | } | |
97601024 | 2155 | |
191d6634 | 2156 | io_u_update_bytes_done(td, &icd); |
581e7141 JA |
2157 | |
2158 | return 0; | |
97601024 JA |
2159 | } |
2160 | ||
e7e6cfb4 JA |
2161 | /* |
2162 | * Called to complete min_events number of io for the async engines. | |
2163 | */ | |
55312f9f | 2164 | int io_u_queued_complete(struct thread_data *td, int min_evts) |
97601024 | 2165 | { |
97601024 | 2166 | struct io_completion_data icd; |
00de55ef | 2167 | struct timespec *tvp = NULL; |
191d6634 | 2168 | int ret; |
4d06a338 | 2169 | struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, }; |
97601024 | 2170 | |
12bb8569 | 2171 | dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts); |
b271fe62 | 2172 | |
4950421a | 2173 | if (!min_evts) |
00de55ef | 2174 | tvp = &ts; |
5fb4b366 RE |
2175 | else if (min_evts > td->cur_depth) |
2176 | min_evts = td->cur_depth; | |
97601024 | 2177 | |
82407585 RP |
2178 | /* No worries, td_io_getevents fixes min and max if they are |
2179 | * set incorrectly */ | |
2180 | ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete_max, tvp); | |
97601024 | 2181 | if (ret < 0) { |
e1161c32 | 2182 | td_verror(td, -ret, "td_io_getevents"); |
97601024 JA |
2183 | return ret; |
2184 | } else if (!ret) | |
2185 | return ret; | |
2186 | ||
9520ebb9 | 2187 | init_icd(td, &icd, ret); |
97601024 | 2188 | ios_completed(td, &icd); |
581e7141 JA |
2189 | if (icd.error) { |
2190 | td_verror(td, icd.error, "io_u_queued_complete"); | |
2191 | return -1; | |
2192 | } | |
97601024 | 2193 | |
191d6634 | 2194 | io_u_update_bytes_done(td, &icd); |
581e7141 | 2195 | |
0d593542 | 2196 | return ret; |
97601024 | 2197 | } |
7e77dd02 JA |
2198 | |
2199 | /* | |
2200 | * Call when io_u is really queued, to update the submission latency. | |
2201 | */ | |
2202 | void io_u_queued(struct thread_data *td, struct io_u *io_u) | |
2203 | { | |
8243be59 | 2204 | if (!td->o.disable_slat && ramp_time_over(td) && td->o.stats) { |
9520ebb9 | 2205 | unsigned long slat_time; |
7e77dd02 | 2206 | |
d6bb626e | 2207 | slat_time = ntime_since(&io_u->start_time, &io_u->issue_time); |
75dc383e JA |
2208 | |
2209 | if (td->parent) | |
2210 | td = td->parent; | |
2211 | ||
ae588852 | 2212 | add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen, |
03ec570f | 2213 | io_u->offset, io_u->ioprio); |
9520ebb9 | 2214 | } |
7e77dd02 | 2215 | } |
433afcb4 | 2216 | |
5c94b008 JA |
2217 | /* |
2218 | * See if we should reuse the last seed, if dedupe is enabled | |
2219 | */ | |
9451b93e | 2220 | static struct frand_state *get_buf_state(struct thread_data *td) |
5c94b008 JA |
2221 | { |
2222 | unsigned int v; | |
0d71aa98 | 2223 | unsigned long long i; |
5c94b008 JA |
2224 | |
2225 | if (!td->o.dedupe_percentage) | |
2226 | return &td->buf_state; | |
732eedd0 | 2227 | else if (td->o.dedupe_percentage == 100) { |
9451b93e JA |
2228 | frand_copy(&td->buf_state_prev, &td->buf_state); |
2229 | return &td->buf_state; | |
732eedd0 | 2230 | } |
5c94b008 | 2231 | |
1bd5d213 | 2232 | v = rand_between(&td->dedupe_state, 1, 100); |
5c94b008 | 2233 | |
0d71aa98 BD |
2234 | if (v <= td->o.dedupe_percentage) |
2235 | switch (td->o.dedupe_mode) { | |
2236 | case DEDUPE_MODE_REPEAT: | |
2237 | /* | |
2238 | * The caller advances the returned frand_state. | |
2239 | * A copy of prev is returned instead, since otherwise a | |
2240 | * subsequent request for a deduped buffer might end up | |
2241 | * generating a unique one. | |
2242 | */ | |
2243 | frand_copy(&td->buf_state_ret, &td->buf_state_prev); | |
2244 | return &td->buf_state_ret; | |
2245 | case DEDUPE_MODE_WORKING_SET: | |
2246 | i = rand_between(&td->dedupe_working_set_index_state, 0, td->num_unique_pages - 1); | |
2247 | frand_copy(&td->buf_state_ret, &td->dedupe_working_set_states[i]); | |
2248 | return &td->buf_state_ret; | |
2249 | default: | |
2250 | log_err("unexpected dedupe mode %u\n", td->o.dedupe_mode); | |
2251 | assert(0); | |
2252 | } | |
5c94b008 JA |
2253 | |
2254 | return &td->buf_state; | |
2255 | } | |
2256 | ||
9451b93e | 2257 | static void save_buf_state(struct thread_data *td, struct frand_state *rs) |
5c94b008 | 2258 | { |
9451b93e JA |
2259 | if (td->o.dedupe_percentage == 100) |
2260 | frand_copy(rs, &td->buf_state_prev); | |
2261 | else if (rs == &td->buf_state) | |
5c94b008 JA |
2262 | frand_copy(&td->buf_state_prev, rs); |
2263 | } | |
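/*
 * Illustrative sketch (not fio code): reusing a saved PRNG state reproduces
 * the previous buffer contents, which is how dedupe_percentage generates
 * duplicate blocks. A trivial 64-bit LCG stands in for fio's frand_state.
 */
#include <stdint.h>
#include <stdio.h>

struct prng { uint64_t s; };

static uint64_t prng_next(struct prng *p)
{
	p->s = p->s * 6364136223846793005ULL + 1442695040888963407ULL;
	return p->s;
}

static void fill(struct prng *p, uint64_t *buf, int n)
{
	for (int i = 0; i < n; i++)
		buf[i] = prng_next(p);
}

int main(void)
{
	struct prng state = { .s = 88172645463325252ULL }, saved;
	uint64_t a[8], b[8];

	saved = state;		/* like frand_copy(&buf_state_prev, &buf_state) */
	fill(&state, a, 8);	/* a "unique" buffer */

	state = saved;		/* reuse the previous seed ... */
	fill(&state, b, 8);	/* ... and get an identical buffer back */

	printf("identical: %d\n", a[0] == b[0] && a[7] == b[7]);
	return 0;
}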
2264 | ||
5fff9543 JF |
2265 | void fill_io_buffer(struct thread_data *td, void *buf, unsigned long long min_write, |
2266 | unsigned long long max_bs) | |
5973cafb | 2267 | { |
d1af2894 JA |
2268 | struct thread_options *o = &td->o; |
2269 | ||
15600335 JA |
2270 | if (o->mem_type == MEM_CUDA_MALLOC) |
2271 | return; | |
03553853 | 2272 | |
4eff3e57 | 2273 | if (o->compress_percentage || o->dedupe_percentage) { |
9c42684e | 2274 | unsigned int perc = td->o.compress_percentage; |
eb57e710 | 2275 | struct frand_state *rs = NULL; |
5fff9543 JF |
2276 | unsigned long long left = max_bs; |
2277 | unsigned long long this_write; | |
5c94b008 | 2278 | |
1066358a | 2279 | do { |
eb57e710 BD |
2280 | /* |
2281 | * Buffers are either entirely dedupe-able or not. | |
2282 | * If we choose to dedup, the buffer should undergo | |
2283 | * the same manipulation as the original write, which |
2284 | * means we should retrace the steps we took for compression |
2285 | * as well. |
2286 | */ | |
2287 | if (!rs) | |
2288 | rs = get_buf_state(td); | |
9c42684e | 2289 | |
1066358a | 2290 | min_write = min(min_write, left); |
f97a43a1 | 2291 | |
eb57e710 BD |
2292 | this_write = min_not_zero(min_write, |
2293 | (unsigned long long) td->o.compress_chunk); | |
1e7f82e2 | 2294 | |
eb57e710 BD |
2295 | fill_random_buf_percentage(rs, buf, perc, |
2296 | this_write, this_write, | |
2297 | o->buffer_pattern, | |
2298 | o->buffer_pattern_bytes); | |
1066358a | 2299 | |
1e7f82e2 JA |
2300 | buf += this_write; |
2301 | left -= this_write; | |
9451b93e | 2302 | save_buf_state(td, rs); |
1066358a | 2303 | } while (left); |
d1af2894 JA |
2304 | } else if (o->buffer_pattern_bytes) |
2305 | fill_buffer_pattern(td, buf, max_bs); | |
999d245e | 2306 | else if (o->zero_buffers) |
cc86c395 | 2307 | memset(buf, 0, max_bs); |
999d245e | 2308 | else |
9451b93e | 2309 | fill_random_buf(get_buf_state(td), buf, max_bs); |
cc86c395 JA |
2310 | } |
2311 | ||
2312 | /* | |
2313 | * "randomly" fill the buffer contents | |
2314 | */ | |
2315 | void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u, | |
5fff9543 | 2316 | unsigned long long min_write, unsigned long long max_bs) |
cc86c395 JA |
2317 | { |
2318 | io_u->buf_filled_len = 0; | |
2319 | fill_io_buffer(td, io_u->buf, min_write, max_bs); | |
5973cafb | 2320 | } |
e2c75fc4 TK |
2321 | |
2322 | static int do_sync_file_range(const struct thread_data *td, | |
2323 | struct fio_file *f) | |
2324 | { | |
7e2a317e | 2325 | uint64_t offset, nbytes; |
e2c75fc4 TK |
2326 | |
2327 | offset = f->first_write; | |
2328 | nbytes = f->last_write - f->first_write; | |
2329 | ||
2330 | if (!nbytes) | |
2331 | return 0; | |
2332 | ||
2333 | return sync_file_range(f->fd, offset, nbytes, td->o.sync_file_range); | |
2334 | } | |
2335 | ||
2336 | int do_io_u_sync(const struct thread_data *td, struct io_u *io_u) | |
2337 | { | |
2338 | int ret; | |
2339 | ||
2340 | if (io_u->ddir == DDIR_SYNC) { | |
a04e0665 | 2341 | #ifdef CONFIG_FCNTL_SYNC |
c99c81ad | 2342 | ret = fcntl(io_u->file->fd, F_FULLFSYNC); |
a04e0665 | 2343 | #else |
e2c75fc4 | 2344 | ret = fsync(io_u->file->fd); |
a04e0665 | 2345 | #endif |
e2c75fc4 TK |
2346 | } else if (io_u->ddir == DDIR_DATASYNC) { |
2347 | #ifdef CONFIG_FDATASYNC | |
2348 | ret = fdatasync(io_u->file->fd); | |
2349 | #else | |
2350 | ret = io_u->xfer_buflen; | |
2351 | io_u->error = EINVAL; | |
2352 | #endif | |
2353 | } else if (io_u->ddir == DDIR_SYNC_FILE_RANGE) | |
2354 | ret = do_sync_file_range(td, io_u->file); | |
2355 | else { | |
2356 | ret = io_u->xfer_buflen; | |
2357 | io_u->error = EINVAL; | |
2358 | } | |
2359 | ||
2360 | if (ret < 0) | |
2361 | io_u->error = errno; | |
2362 | ||
2363 | return ret; | |
2364 | } | |
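/*
 * Minimal standalone sketch (hypothetical path, not fio code): the same
 * "fdatasync if available, else fsync" fallback as do_io_u_sync(), applied
 * to an ordinary file descriptor.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/sync_demo.bin", O_CREAT | O_WRONLY | O_TRUNC, 0644);
	int ret;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "data", 4) != 4)
		perror("write");

#if defined(_POSIX_SYNCHRONIZED_IO) && _POSIX_SYNCHRONIZED_IO > 0
	ret = fdatasync(fd);	/* flush file data, may skip some metadata */
#else
	ret = fsync(fd);	/* portable fallback: flush data and metadata */
#endif
	if (ret < 0)
		perror("sync");

	close(fd);
	return 0;
}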
2365 | ||
2366 | int do_io_u_trim(const struct thread_data *td, struct io_u *io_u) | |
2367 | { | |
2368 | #ifndef FIO_HAVE_TRIM | |
2369 | io_u->error = EINVAL; | |
2370 | return 0; | |
2371 | #else | |
2372 | struct fio_file *f = io_u->file; | |
2373 | int ret; | |
2374 | ||
e3be810b SK |
2375 | if (td->o.zone_mode == ZONE_MODE_ZBD) { |
2376 | ret = zbd_do_io_u_trim(td, io_u); | |
2377 | if (ret == io_u_completed) | |
2378 | return io_u->xfer_buflen; | |
2379 | if (ret) | |
2380 | goto err; | |
2381 | } | |
2382 | ||
496b1f9e | 2383 | ret = os_trim(f, io_u->offset, io_u->xfer_buflen); |
e2c75fc4 TK |
2384 | if (!ret) |
2385 | return io_u->xfer_buflen; | |
2386 | ||
e3be810b | 2387 | err: |
e2c75fc4 TK |
2388 | io_u->error = ret; |
2389 | return 0; | |
2390 | #endif | |
2391 | } |